IVGCVSW-5963 'Move unit tests to new framework'

* Replaced Boost.Test with doctest in the ArmNN unit tests (see the macro mapping sketch below)

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Ia9cf5fc72775878885c5f864abf2c56b3a935f1a
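
As a quick reference, the Boost.Test to doctest mapping applied throughout this patch is sketched below. It is a minimal, self-contained example, not ArmNN code: it assumes the doctest single header vendored under third-party/ and defines DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN only so the sketch links on its own.

// Boost.Test -> doctest equivalents used by this change:
//   BOOST_AUTO_TEST_SUITE(Name) ... BOOST_AUTO_TEST_SUITE_END()  ->  TEST_SUITE("Name") { ... }
//   BOOST_AUTO_TEST_CASE(Name)                                   ->  TEST_CASE("Name")
//   BOOST_TEST(cond) / BOOST_CHECK(cond)                         ->  CHECK(cond)
//   BOOST_TEST(cond, msg)                                        ->  CHECK_MESSAGE(cond, msg)
//   BOOST_CHECK_EQUAL(a, b)                                      ->  CHECK_EQ(a, b)
//   BOOST_TEST_MESSAGE(msg)                                      ->  MESSAGE(msg)
//   BOOST_CHECK_THROW / BOOST_CHECK_NO_THROW                     ->  CHECK_THROWS_AS / CHECK_NOTHROW
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>

#include <stdexcept>

TEST_SUITE("ExampleSuite")
{
TEST_CASE("ExampleCase")
{
    const int value = 2 + 2;
    CHECK(value == 4);
    CHECK_EQ(value, 4);
    CHECK_MESSAGE(value == 4, "2 + 2 should equal 4");
    MESSAGE("informational note");
    CHECK_THROWS_AS(throw std::runtime_error("boom"), std::runtime_error);
    CHECK_NOTHROW([](){}());
}
}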
diff --git a/src/backends/aclCommon/test/ArmComputeTensorUtilsTests.cpp b/src/backends/aclCommon/test/ArmComputeTensorUtilsTests.cpp
index 4ab7488..fa933a0 100644
--- a/src/backends/aclCommon/test/ArmComputeTensorUtilsTests.cpp
+++ b/src/backends/aclCommon/test/ArmComputeTensorUtilsTests.cpp
@@ -5,13 +5,13 @@
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 using namespace armnn::armcomputetensorutils;
 
-BOOST_AUTO_TEST_SUITE(ArmComputeTensorUtils)
-
-BOOST_AUTO_TEST_CASE(BuildArmComputeTensorInfoTest)
+TEST_SUITE("ArmComputeTensorUtils")
+{
+TEST_CASE("BuildArmComputeTensorInfoTest")
 {
 
     const armnn::TensorShape tensorShape = { 1, 2, 3, 4 };
@@ -26,21 +26,21 @@
     const arm_compute::TensorInfo aclTensorInfo0 = BuildArmComputeTensorInfo(tensorInfo0);
 
     const arm_compute::TensorShape& aclTensorShape = aclTensorInfo0.tensor_shape();
-    BOOST_CHECK(aclTensorShape.num_dimensions() == tensorShape.GetNumDimensions());
+    CHECK(aclTensorShape.num_dimensions() == tensorShape.GetNumDimensions());
     for(unsigned int i = 0u; i < tensorShape.GetNumDimensions(); ++i)
     {
         // NOTE: arm_compute tensor dimensions are stored in the opposite order
-        BOOST_CHECK(aclTensorShape[i] == tensorShape[tensorShape.GetNumDimensions() - i - 1]);
+        CHECK(aclTensorShape[i] == tensorShape[tensorShape.GetNumDimensions() - i - 1]);
     }
 
-    BOOST_CHECK(aclTensorInfo0.data_type() == arm_compute::DataType::QASYMM8);
-    BOOST_CHECK(aclTensorInfo0.quantization_info().scale()[0] == quantScale);
+    CHECK(aclTensorInfo0.data_type() == arm_compute::DataType::QASYMM8);
+    CHECK(aclTensorInfo0.quantization_info().scale()[0] == quantScale);
 
     // Tensor info with per-axis quantization
     const armnn::TensorInfo tensorInfo1(tensorShape, dataType, quantScales, 0);
     const arm_compute::TensorInfo aclTensorInfo1 = BuildArmComputeTensorInfo(tensorInfo1);
 
-    BOOST_CHECK(aclTensorInfo1.quantization_info().scale() == quantScales);
+    CHECK(aclTensorInfo1.quantization_info().scale() == quantScales);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp b/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp
index 0a30907..bdae998 100644
--- a/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp
+++ b/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp
@@ -19,6 +19,8 @@
 #include <neon/NeonTensorHandle.hpp>
 #endif
 
+#include <doctest/doctest.h>
+
 using namespace armnn;
 
 namespace
@@ -92,23 +94,23 @@
     auto workload2 = MakeAndCheckWorkload<CopyMemGenericWorkload>(*layer2, refFactory);
 
     MemCopyQueueDescriptor queueDescriptor1 = workload1->GetData();
-    BOOST_TEST(queueDescriptor1.m_Inputs.size() == 1);
-    BOOST_TEST(queueDescriptor1.m_Outputs.size() == 1);
+    CHECK(queueDescriptor1.m_Inputs.size() == 1);
+    CHECK(queueDescriptor1.m_Outputs.size() == 1);
     auto inputHandle1  = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor1.m_Inputs[0]);
     auto outputHandle1 = PolymorphicDowncast<IComputeTensorHandle*>(queueDescriptor1.m_Outputs[0]);
-    BOOST_TEST((inputHandle1->GetTensorInfo() == TensorInfo({2, 3}, DataType::Float32)));
+    CHECK((inputHandle1->GetTensorInfo() == TensorInfo({2, 3}, DataType::Float32)));
     auto result = CompareTensorHandleShape<IComputeTensorHandle>(outputHandle1, {2, 3});
-    BOOST_TEST(result.m_Result, result.m_Message.str());
+    CHECK_MESSAGE(result.m_Result, result.m_Message.str());
 
 
     MemCopyQueueDescriptor queueDescriptor2 = workload2->GetData();
-    BOOST_TEST(queueDescriptor2.m_Inputs.size() == 1);
-    BOOST_TEST(queueDescriptor2.m_Outputs.size() == 1);
+    CHECK(queueDescriptor2.m_Inputs.size() == 1);
+    CHECK(queueDescriptor2.m_Outputs.size() == 1);
     auto inputHandle2  = PolymorphicDowncast<IComputeTensorHandle*>(queueDescriptor2.m_Inputs[0]);
     auto outputHandle2 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor2.m_Outputs[0]);
     result = CompareTensorHandleShape<IComputeTensorHandle>(inputHandle2, {2, 3});
-    BOOST_TEST(result.m_Result, result.m_Message.str());
-    BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({2, 3}, DataType::Float32)));
+    CHECK_MESSAGE(result.m_Result, result.m_Message.str());
+    CHECK((outputHandle2->GetTensorInfo() == TensorInfo({2, 3}, DataType::Float32)));
 }
 
 } //namespace
diff --git a/src/backends/aclCommon/test/MemCopyTests.cpp b/src/backends/aclCommon/test/MemCopyTests.cpp
index 7612cbf..1325503 100644
--- a/src/backends/aclCommon/test/MemCopyTests.cpp
+++ b/src/backends/aclCommon/test/MemCopyTests.cpp
@@ -15,71 +15,67 @@
 #include <neon/test/NeonWorkloadFactoryHelper.hpp>
 #endif
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(MemCopyCommon)
-
-BOOST_AUTO_TEST_CASE(AclTypeConversions)
+TEST_SUITE("MemCopyCommon")
+{
+TEST_CASE("AclTypeConversions")
 {
     arm_compute::Strides strides(1, 2, 3, 4);
     armnn::TensorShape convertedStrides = armnn::armcomputetensorutils::GetStrides(strides);
 
-    BOOST_TEST(convertedStrides[0] == 4);
-    BOOST_TEST(convertedStrides[1] == 3);
-    BOOST_TEST(convertedStrides[2] == 2);
-    BOOST_TEST(convertedStrides[3] == 1);
+    CHECK(convertedStrides[0] == 4);
+    CHECK(convertedStrides[1] == 3);
+    CHECK(convertedStrides[2] == 2);
+    CHECK(convertedStrides[3] == 1);
 
     arm_compute::TensorShape shape(5, 6, 7, 8);
     armnn::TensorShape convertedshape = armnn::armcomputetensorutils::GetShape(shape);
 
-    BOOST_TEST(convertedshape[0] == 8);
-    BOOST_TEST(convertedshape[1] == 7);
-    BOOST_TEST(convertedshape[2] == 6);
-    BOOST_TEST(convertedshape[3] == 5);
+    CHECK(convertedshape[0] == 8);
+    CHECK(convertedshape[1] == 7);
+    CHECK(convertedshape[2] == 6);
+    CHECK(convertedshape[3] == 5);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
 
 #if defined(ARMCOMPUTECL_ENABLED) && defined(ARMCOMPUTENEON_ENABLED)
 
-BOOST_FIXTURE_TEST_SUITE(MemCopyClNeon, ClContextControlFixture)
-
-BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndGpu)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CopyBetweenNeonAndGpu")
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::NeonWorkloadFactory, armnn::ClWorkloadFactory, armnn::DataType::Float32>(false);
     auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
                                      result.m_ActualShape, result.m_ExpectedShape);
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndNeon)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CopyBetweenGpuAndNeon")
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::ClWorkloadFactory, armnn::NeonWorkloadFactory, armnn::DataType::Float32>(false);
     auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
                                      result.m_ActualShape, result.m_ExpectedShape);
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndGpuWithSubtensors)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CopyBetweenNeonAndGpuWithSubtensors")
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::NeonWorkloadFactory, armnn::ClWorkloadFactory, armnn::DataType::Float32>(true);
     auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
                                      result.m_ActualShape, result.m_ExpectedShape);
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndNeonWithSubtensors)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CopyBetweenGpuAndNeonWithSubtensors")
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::ClWorkloadFactory, armnn::NeonWorkloadFactory, armnn::DataType::Float32>(true);
     auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
                                      result.m_ActualShape, result.m_ExpectedShape);
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_SUITE_END()
-
 #endif
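
A note on the fixture-based suites above: BOOST_FIXTURE_TEST_SUITE applied ClContextControlFixture to every case in the suite, whereas doctest has no fixture-suite macro, so each case names the fixture itself via TEST_CASE_FIXTURE. A minimal sketch of that behaviour, using a hypothetical stand-in fixture rather than the real ClContextControlFixture:

#include <doctest/doctest.h>

// Hypothetical fixture: a fresh instance is constructed before each
// TEST_CASE_FIXTURE that names it and destroyed when the case ends.
struct ExampleFixture
{
    ExampleFixture() : resource(42) {}    // per-case setup
    ~ExampleFixture() { resource = 0; }   // per-case teardown
    int resource;
};

TEST_CASE_FIXTURE(ExampleFixture, "UsesFixture")
{
    // The case body behaves like a member function of the fixture,
    // so fixture members are accessible directly.
    CHECK(resource == 42);
}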
diff --git a/src/backends/backendsCommon/test/BackendIdTests.cpp b/src/backends/backendsCommon/test/BackendIdTests.cpp
index e11c13e..de6bef4 100644
--- a/src/backends/backendsCommon/test/BackendIdTests.cpp
+++ b/src/backends/backendsCommon/test/BackendIdTests.cpp
@@ -6,23 +6,23 @@
 #include <armnn/BackendId.hpp>
 #include <armnn/Types.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(BackendIdTests)
-
-BOOST_AUTO_TEST_CASE(CreateBackendIdFromCompute)
+TEST_SUITE("BackendIdTests")
+{
+TEST_CASE("CreateBackendIdFromCompute")
 {
     BackendId fromCompute{Compute::GpuAcc};
-    BOOST_TEST(fromCompute.Get() == GetComputeDeviceAsCString(Compute::GpuAcc));
+    CHECK(fromCompute.Get() == GetComputeDeviceAsCString(Compute::GpuAcc));
 }
 
-BOOST_AUTO_TEST_CASE(CreateBackendIdVectorFromCompute)
+TEST_CASE("CreateBackendIdVectorFromCompute")
 {
     std::vector<BackendId> fromComputes = {Compute::GpuAcc, Compute::CpuRef};
-    BOOST_TEST(fromComputes[0].Get() == GetComputeDeviceAsCString(Compute::GpuAcc));
-    BOOST_TEST(fromComputes[1].Get() == GetComputeDeviceAsCString(Compute::CpuRef));
+    CHECK(fromComputes[0].Get() == GetComputeDeviceAsCString(Compute::GpuAcc));
+    CHECK(fromComputes[1].Get() == GetComputeDeviceAsCString(Compute::CpuRef));
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/backendsCommon/test/BackendProfilingTests.cpp b/src/backends/backendsCommon/test/BackendProfilingTests.cpp
index 91399b4..62c06fe 100644
--- a/src/backends/backendsCommon/test/BackendProfilingTests.cpp
+++ b/src/backends/backendsCommon/test/BackendProfilingTests.cpp
@@ -21,7 +21,7 @@
 #include <armnn/Logging.hpp>
 #include <armnn/profiling/ISendTimelinePacket.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 #include <vector>
 
 #include <cstdint>
@@ -111,9 +111,9 @@
     return {packetId, dataLength, uniqueData};
 }
 
-BOOST_AUTO_TEST_SUITE(BackendProfilingTestSuite)
-
-BOOST_AUTO_TEST_CASE(BackendProfilingCounterRegisterMockBackendTest)
+TEST_SUITE("BackendProfilingTestSuite")
+{
+TEST_CASE("BackendProfilingCounterRegisterMockBackendTest")
 {
     // Reset the profiling service to the uninitialized state
     armnn::IRuntime::CreationOptions options;
@@ -133,17 +133,17 @@
     // Check if the MockBackends 3 dummy counters {0, 1, 2-5 (four cores)} are registered
     armnn::BackendId mockId = armnn::MockBackendId();
     const armnn::profiling::ICounterMappings& counterMap = GetProfilingService(&runtime).GetCounterMappings();
-    BOOST_CHECK(counterMap.GetGlobalId(0, mockId) == 5 + shiftedId);
-    BOOST_CHECK(counterMap.GetGlobalId(1, mockId) == 6 + shiftedId);
-    BOOST_CHECK(counterMap.GetGlobalId(2, mockId) == 7 + shiftedId);
-    BOOST_CHECK(counterMap.GetGlobalId(3, mockId) == 8 + shiftedId);
-    BOOST_CHECK(counterMap.GetGlobalId(4, mockId) == 9 + shiftedId);
-    BOOST_CHECK(counterMap.GetGlobalId(5, mockId) == 10 + shiftedId);
+    CHECK(counterMap.GetGlobalId(0, mockId) == 5 + shiftedId);
+    CHECK(counterMap.GetGlobalId(1, mockId) == 6 + shiftedId);
+    CHECK(counterMap.GetGlobalId(2, mockId) == 7 + shiftedId);
+    CHECK(counterMap.GetGlobalId(3, mockId) == 8 + shiftedId);
+    CHECK(counterMap.GetGlobalId(4, mockId) == 9 + shiftedId);
+    CHECK(counterMap.GetGlobalId(5, mockId) == 10 + shiftedId);
     options.m_ProfilingOptions.m_EnableProfiling = false;
     GetProfilingService(&runtime).ResetExternalProfilingOptions(options.m_ProfilingOptions, true);
 }
 
-BOOST_AUTO_TEST_CASE(TestBackendCounters)
+TEST_CASE("TestBackendCounters")
 {
     Holder holder;
     arm::pipe::PacketVersionResolver packetVersionResolver;
@@ -220,18 +220,18 @@
     periodicCounterCapture.Stop();
 
     std::set<armnn::BackendId> activeIds = holder.GetCaptureData().GetActiveBackends();
-    BOOST_CHECK(activeIds.size() == 1);
-    BOOST_CHECK(activeIds.find(gpuAccId) != activeIds.end());
+    CHECK(activeIds.size() == 1);
+    CHECK((activeIds.find(gpuAccId) != activeIds.end()));
 
     std::vector<Timestamp> recievedTimestamp = sendCounterPacket.GetTimestamps();
 
-    BOOST_CHECK(recievedTimestamp[0].timestamp == period);
-    BOOST_CHECK(recievedTimestamp.size() == 1);
-    BOOST_CHECK(recievedTimestamp[0].counterValues.size() == gpuCounters.size());
+    CHECK(recievedTimestamp[0].timestamp == period);
+    CHECK(recievedTimestamp.size() == 1);
+    CHECK(recievedTimestamp[0].counterValues.size() == gpuCounters.size());
     for (unsigned long i=0; i< gpuCounters.size(); ++i)
     {
-        BOOST_CHECK(recievedTimestamp[0].counterValues[i].counterId == gpuCounters[i]);
-        BOOST_CHECK(recievedTimestamp[0].counterValues[i].counterValue == i + 1u);
+        CHECK(recievedTimestamp[0].counterValues[i].counterId == gpuCounters[i]);
+        CHECK(recievedTimestamp[0].counterValues[i].counterValue == i + 1u);
     }
     sendCounterPacket.ClearTimestamps();
 
@@ -240,18 +240,18 @@
     periodicCounterCapture.Stop();
 
     activeIds = holder.GetCaptureData().GetActiveBackends();
-    BOOST_CHECK(activeIds.size() == 1);
-    BOOST_CHECK(activeIds.find(cpuAccId) != activeIds.end());
+    CHECK(activeIds.size() == 1);
+    CHECK((activeIds.find(cpuAccId) != activeIds.end()));
 
     recievedTimestamp = sendCounterPacket.GetTimestamps();
 
-    BOOST_CHECK(recievedTimestamp[0].timestamp == period);
-    BOOST_CHECK(recievedTimestamp.size() == 1);
-    BOOST_CHECK(recievedTimestamp[0].counterValues.size() == cpuCounters.size());
+    CHECK(recievedTimestamp[0].timestamp == period);
+    CHECK(recievedTimestamp.size() == 1);
+    CHECK(recievedTimestamp[0].counterValues.size() == cpuCounters.size());
     for (unsigned long i=0; i< cpuCounters.size(); ++i)
     {
-        BOOST_CHECK(recievedTimestamp[0].counterValues[i].counterId == cpuCounters[i]);
-        BOOST_CHECK(recievedTimestamp[0].counterValues[i].counterValue == i + 1u);
+        CHECK(recievedTimestamp[0].counterValues[i].counterId == cpuCounters[i]);
+        CHECK(recievedTimestamp[0].counterValues[i].counterValue == i + 1u);
     }
     sendCounterPacket.ClearTimestamps();
 
@@ -262,28 +262,28 @@
     periodicCounterCapture.Stop();
 
     activeIds = holder.GetCaptureData().GetActiveBackends();
-    BOOST_CHECK(activeIds.size() == 2);
-    BOOST_CHECK(activeIds.find(cpuAccId) != activeIds.end());
-    BOOST_CHECK(activeIds.find(gpuAccId) != activeIds.end());
+    CHECK(activeIds.size() == 2);
+    CHECK((activeIds.find(cpuAccId) != activeIds.end()));
+    CHECK((activeIds.find(gpuAccId) != activeIds.end()));
 
     recievedTimestamp = sendCounterPacket.GetTimestamps();
+//
+    CHECK(recievedTimestamp[0].timestamp == period);
+    CHECK(recievedTimestamp[1].timestamp == period);
 
-    BOOST_CHECK(recievedTimestamp[0].timestamp == period);
-    BOOST_CHECK(recievedTimestamp[1].timestamp == period);
+    CHECK(recievedTimestamp.size() == 2);
+    CHECK(recievedTimestamp[0].counterValues.size() == 2);
+    CHECK(recievedTimestamp[1].counterValues.size() == gpuCounters.size());
 
-    BOOST_CHECK(recievedTimestamp.size() == 2);
-    BOOST_CHECK(recievedTimestamp[0].counterValues.size() == 2);
-    BOOST_CHECK(recievedTimestamp[1].counterValues.size() == gpuCounters.size());
-
-    BOOST_CHECK(recievedTimestamp[0].counterValues[0].counterId == cpuCounters[0]);
-    BOOST_CHECK(recievedTimestamp[0].counterValues[0].counterValue == 1u);
-    BOOST_CHECK(recievedTimestamp[0].counterValues[1].counterId == cpuCounters[1]);
-    BOOST_CHECK(recievedTimestamp[0].counterValues[1].counterValue == 2u);
+    CHECK(recievedTimestamp[0].counterValues[0].counterId == cpuCounters[0]);
+    CHECK(recievedTimestamp[0].counterValues[0].counterValue == 1u);
+    CHECK(recievedTimestamp[0].counterValues[1].counterId == cpuCounters[1]);
+    CHECK(recievedTimestamp[0].counterValues[1].counterValue == 2u);
 
     for (unsigned long i=0; i< gpuCounters.size(); ++i)
     {
-        BOOST_CHECK(recievedTimestamp[1].counterValues[i].counterId == gpuCounters[i]);
-        BOOST_CHECK(recievedTimestamp[1].counterValues[i].counterValue == i + 1u);
+        CHECK(recievedTimestamp[1].counterValues[i].counterId == gpuCounters[i]);
+        CHECK(recievedTimestamp[1].counterValues[i].counterValue == i + 1u);
     }
 
     sendCounterPacket.ClearTimestamps();
@@ -297,24 +297,24 @@
     periodicCounterCapture.Stop();
 
     activeIds = holder.GetCaptureData().GetActiveBackends();
-    BOOST_CHECK(activeIds.size() == 2);
-    BOOST_CHECK(activeIds.find(cpuAccId) != activeIds.end());
-    BOOST_CHECK(activeIds.find(gpuAccId) != activeIds.end());
+    CHECK(activeIds.size() == 2);
+    CHECK((activeIds.find(cpuAccId) != activeIds.end()));
+    CHECK((activeIds.find(gpuAccId) != activeIds.end()));
 
     recievedTimestamp = sendCounterPacket.GetTimestamps();
 
-    BOOST_CHECK(recievedTimestamp[0].counterValues.size() == cpuCounters.size());
+    CHECK(recievedTimestamp[0].counterValues.size() == cpuCounters.size());
     for (unsigned long i=0; i< cpuCounters.size(); ++i)
     {
-        BOOST_CHECK(recievedTimestamp[0].counterValues[i].counterId == cpuCounters[i]);
-        BOOST_CHECK(recievedTimestamp[0].counterValues[i].counterValue == i + 1u);
+        CHECK(recievedTimestamp[0].counterValues[i].counterId == cpuCounters[i]);
+        CHECK(recievedTimestamp[0].counterValues[i].counterValue == i + 1u);
     }
 
-    BOOST_CHECK(recievedTimestamp[1].counterValues.size() == gpuCounters.size());
+    CHECK(recievedTimestamp[1].counterValues.size() == gpuCounters.size());
     for (unsigned long i=0; i< gpuCounters.size(); ++i)
     {
-        BOOST_CHECK(recievedTimestamp[1].counterValues[i].counterId == gpuCounters[i]);
-        BOOST_CHECK(recievedTimestamp[1].counterValues[i].counterValue == i + 1u);
+        CHECK(recievedTimestamp[1].counterValues[i].counterId == gpuCounters[i]);
+        CHECK(recievedTimestamp[1].counterValues[i].counterValue == i + 1u);
     }
     sendCounterPacket.ClearTimestamps();
 
@@ -326,27 +326,27 @@
     periodicCounterCapture.Stop();
 
     activeIds = holder.GetCaptureData().GetActiveBackends();
-    BOOST_CHECK(activeIds.size() == 2);
-    BOOST_CHECK(activeIds.find(cpuAccId) != activeIds.end());
-    BOOST_CHECK(activeIds.find(gpuAccId) != activeIds.end());
+    CHECK(activeIds.size() == 2);
+    CHECK((activeIds.find(cpuAccId) != activeIds.end()));
+    CHECK((activeIds.find(gpuAccId) != activeIds.end()));
 
     recievedTimestamp = sendCounterPacket.GetTimestamps();
 
-    BOOST_CHECK(recievedTimestamp.size() == 2);
+    CHECK(recievedTimestamp.size() == 2);
 
-    BOOST_CHECK(recievedTimestamp[0].counterValues.size() == 2);
+    CHECK(recievedTimestamp[0].counterValues.size() == 2);
 
-    BOOST_CHECK(recievedTimestamp[0].counterValues[0].counterId == cpuCounters[0]);
-    BOOST_CHECK(recievedTimestamp[0].counterValues[0].counterValue == 1u);
-    BOOST_CHECK(recievedTimestamp[0].counterValues[1].counterId == cpuCounters[2]);
-    BOOST_CHECK(recievedTimestamp[0].counterValues[1].counterValue == 3u);
+    CHECK(recievedTimestamp[0].counterValues[0].counterId == cpuCounters[0]);
+    CHECK(recievedTimestamp[0].counterValues[0].counterValue == 1u);
+    CHECK(recievedTimestamp[0].counterValues[1].counterId == cpuCounters[2]);
+    CHECK(recievedTimestamp[0].counterValues[1].counterValue == 3u);
 
-    BOOST_CHECK(recievedTimestamp[1].counterValues.size() == 2);
+    CHECK(recievedTimestamp[1].counterValues.size() == 2);
 
-    BOOST_CHECK(recievedTimestamp[1].counterValues[0].counterId == gpuCounters[0]);
-    BOOST_CHECK(recievedTimestamp[1].counterValues[0].counterValue == 1u);
-    BOOST_CHECK(recievedTimestamp[1].counterValues[1].counterId == gpuCounters[1]);
-    BOOST_CHECK(recievedTimestamp[1].counterValues[1].counterValue == 2u);
+    CHECK(recievedTimestamp[1].counterValues[0].counterId == gpuCounters[0]);
+    CHECK(recievedTimestamp[1].counterValues[0].counterValue == 1u);
+    CHECK(recievedTimestamp[1].counterValues[1].counterId == gpuCounters[1]);
+    CHECK(recievedTimestamp[1].counterValues[1].counterValue == 2u);
 
     sendCounterPacket.ClearTimestamps();
 
@@ -355,10 +355,10 @@
     periodicCounterCapture.Stop();
 
     activeIds = holder.GetCaptureData().GetActiveBackends();
-    BOOST_CHECK(activeIds.size() == 0);
+    CHECK(activeIds.size() == 0);
 
     recievedTimestamp = sendCounterPacket.GetTimestamps();
-    BOOST_CHECK(recievedTimestamp.size() == 0);
+    CHECK(recievedTimestamp.size() == 0);
 
     sendCounterPacket.ClearTimestamps();
 
@@ -367,13 +367,13 @@
     periodicCounterCapture.Stop();
 
     activeIds = holder.GetCaptureData().GetActiveBackends();
-    BOOST_CHECK(activeIds.size() == 0);
+    CHECK(activeIds.size() == 0);
 
     recievedTimestamp = sendCounterPacket.GetTimestamps();
-    BOOST_CHECK(recievedTimestamp.size() == 0);
+    CHECK(recievedTimestamp.size() == 0);
 }
 
-BOOST_AUTO_TEST_CASE(TestBackendCounterLogging)
+TEST_CASE("TestBackendCounterLogging")
 {
     std::stringstream ss;
 
@@ -450,10 +450,10 @@
     periodicCounterCapture.Stop();
     SetLogFilter(armnn::LogSeverity::Fatal);
 
-    BOOST_CHECK(ss.str().find("ActivateCounters example test error") != std::string::npos);
+    CHECK(ss.str().find("ActivateCounters example test error") != std::string::npos);
 }
 
-BOOST_AUTO_TEST_CASE(BackendProfilingContextGetSendTimelinePacket)
+TEST_CASE("BackendProfilingContextGetSendTimelinePacket")
 {
     // Reset the profiling service to the uninitialized state
     armnn::IRuntime::CreationOptions options;
@@ -467,10 +467,10 @@
     armnn::MockBackendProfilingService mockProfilingService = armnn::MockBackendProfilingService::Instance();
     armnn::MockBackendProfilingContext *mockBackEndProfilingContext = mockProfilingService.GetContext();
     // Check that there is a valid context set.
-    BOOST_CHECK(mockBackEndProfilingContext);
+    CHECK(mockBackEndProfilingContext);
     armnn::IBackendInternal::IBackendProfilingPtr& backendProfilingIface =
         mockBackEndProfilingContext->GetBackendProfiling();
-    BOOST_CHECK(backendProfilingIface);
+    CHECK(backendProfilingIface);
 
     // Now for the meat of the test. We're just going to send a random packet and make sure there
     // are no exceptions or errors. The sending of packets is already tested in SendTimelinePacketTests.
@@ -486,7 +486,7 @@
     profilingService.ResetExternalProfilingOptions(options.m_ProfilingOptions, true);
 }
 
-BOOST_AUTO_TEST_CASE(GetProfilingGuidGenerator)
+TEST_CASE("GetProfilingGuidGenerator")
 {
     // Reset the profiling service to the uninitialized state
     armnn::IRuntime::CreationOptions options;
@@ -498,19 +498,19 @@
     armnn::MockBackendProfilingService mockProfilingService = armnn::MockBackendProfilingService::Instance();
     armnn::MockBackendProfilingContext *mockBackEndProfilingContext = mockProfilingService.GetContext();
     // Check that there is a valid context set.
-    BOOST_CHECK(mockBackEndProfilingContext);
+    CHECK(mockBackEndProfilingContext);
     armnn::IBackendInternal::IBackendProfilingPtr& backendProfilingIface =
         mockBackEndProfilingContext->GetBackendProfiling();
-    BOOST_CHECK(backendProfilingIface);
+    CHECK(backendProfilingIface);
 
     // Get the Guid generator and check the getting two Guid's results in the second being greater than the first.
     armnn::profiling::IProfilingGuidGenerator& guidGenerator = backendProfilingIface->GetProfilingGuidGenerator();
     const armnn::profiling::ProfilingDynamicGuid& firstGuid = guidGenerator.NextGuid();
     const armnn::profiling::ProfilingDynamicGuid& secondGuid = guidGenerator.NextGuid();
-    BOOST_CHECK(secondGuid > firstGuid);
+    CHECK(secondGuid > firstGuid);
 
     // Reset the profiling service after the test.
     options.m_ProfilingOptions.m_EnableProfiling = false;
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/backendsCommon/test/BackendRegistryTests.cpp b/src/backends/backendsCommon/test/BackendRegistryTests.cpp
index ce8acbb..d9c19d6 100644
--- a/src/backends/backendsCommon/test/BackendRegistryTests.cpp
+++ b/src/backends/backendsCommon/test/BackendRegistryTests.cpp
@@ -9,7 +9,7 @@
 #include <armnn/backends/IBackendInternal.hpp>
 #include <reference/RefBackend.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 namespace
 {
@@ -33,20 +33,20 @@
 
 }
 
-BOOST_AUTO_TEST_SUITE(BackendRegistryTests)
-
-BOOST_AUTO_TEST_CASE(SwapRegistry)
+TEST_SUITE("BackendRegistryTests")
+{
+TEST_CASE("SwapRegistry")
 {
     using namespace armnn;
     auto nFactories = BackendRegistryInstance().Size();
     {
         SwapRegistryStorage helper;
-        BOOST_TEST(BackendRegistryInstance().Size() == 0);
+        CHECK(BackendRegistryInstance().Size() == 0);
     }
-    BOOST_TEST(BackendRegistryInstance().Size() == nFactories);
+    CHECK(BackendRegistryInstance().Size() == nFactories);
 }
 
-BOOST_AUTO_TEST_CASE(TestRegistryHelper)
+TEST_CASE("TestRegistryHelper")
 {
     using namespace armnn;
     SwapRegistryStorage helper;
@@ -64,19 +64,19 @@
     );
 
     // sanity check: the factory has not been called yet
-    BOOST_TEST(called == false);
+    CHECK(called == false);
 
     auto factoryFunction = BackendRegistryInstance().GetFactory("HelloWorld");
 
     // sanity check: the factory still not called
-    BOOST_TEST(called == false);
+    CHECK(called == false);
 
     factoryFunction();
-    BOOST_TEST(called == true);
+    CHECK(called == true);
     BackendRegistryInstance().Deregister("HelloWorld");
 }
 
-BOOST_AUTO_TEST_CASE(TestDirectCallToRegistry)
+TEST_CASE("TestDirectCallToRegistry")
 {
     using namespace armnn;
     SwapRegistryStorage helper;
@@ -92,15 +92,15 @@
     );
 
     // sanity check: the factory has not been called yet
-    BOOST_TEST(called == false);
+    CHECK(called == false);
 
     auto factoryFunction = BackendRegistryInstance().GetFactory("HelloWorld");
 
     // sanity check: the factory still not called
-    BOOST_TEST(called == false);
+    CHECK(called == false);
 
     factoryFunction();
-    BOOST_TEST(called == true);
+    CHECK(called == true);
     BackendRegistryInstance().Deregister("HelloWorld");
 }
 
@@ -111,7 +111,7 @@
 // of Neon not being detected the exception is raised and so the backend is not added to the supportedBackends
 // list
 
-BOOST_AUTO_TEST_CASE(ThrowBackendUnavailableException)
+TEST_CASE("ThrowBackendUnavailableException")
 {
     using namespace armnn;
 
@@ -142,9 +142,9 @@
     catch (const BackendUnavailableException& e)
     {
         // Caught
-        BOOST_CHECK_EQUAL(e.what(), exceptionMessage);
-        BOOST_TEST_MESSAGE("ThrowBackendUnavailableExceptionImpl: BackendUnavailableException caught.");
+        CHECK_EQ(e.what(), exceptionMessage);
+        MESSAGE("ThrowBackendUnavailableExceptionImpl: BackendUnavailableException caught.");
     }
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp
index d1be409..254b3c2 100644
--- a/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp
@@ -10,7 +10,7 @@
 
 #include <backendsCommon/test/CommonTestUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <vector>
 
@@ -62,7 +62,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateBatchToSpaceNdNetwork<ArmnnType>(inputShape, outputShape, blockShape, crops, dataLayout);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     // Creates structures for input & output.
     std::vector<T> inputData{ 1, 2, 3, 4 };
@@ -93,7 +93,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateBatchToSpaceNdNetwork<ArmnnType>(inputShape, outputShape, blockShape, crops, dataLayout);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     // Creates structures for input & output.
     std::vector<T> inputData{
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 6313bd5..d0c95c5 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -189,6 +189,7 @@
 target_include_directories(armnnBackendsCommonUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/backends)
 target_include_directories(armnnBackendsCommonUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/profiling)
 target_include_directories(armnnBackendsCommonUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/profiling/common/include)
+target_include_directories(armnnBackendsCommonUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/third-party)
 
 # Dummy shared object for testing.
 # This is a simple library used to test the utility functions that will be used to handle the shared objects.
diff --git a/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp
index c705f87..40e3fd6 100644
--- a/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp
@@ -12,7 +12,7 @@
 
 #include <armnn/utility/NumericCast.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <vector>
 
@@ -61,7 +61,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateComparisonNetwork<ArmnnInType>(inputShapes, outputShape, operation);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     const std::vector<TInput> input0({ 1, 1, 1, 1,  5, 5, 5, 5,
                                        3, 3, 3, 3,  4, 4, 4, 4 });
@@ -89,8 +89,6 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateComparisonNetwork<ArmnnInType>(inputShapes, outputShape, operation);
 
-    BOOST_TEST_CHECKPOINT("create a network");
-
     const std::vector<TInput> input0({ 1, 2, 3, 1, 0, 6,
                                        7, 8, 9, 10, 11, 12 });
 
diff --git a/src/backends/backendsCommon/test/CompatibilityTests.cpp b/src/backends/backendsCommon/test/CompatibilityTests.cpp
index ed00088..4abab27 100644
--- a/src/backends/backendsCommon/test/CompatibilityTests.cpp
+++ b/src/backends/backendsCommon/test/CompatibilityTests.cpp
@@ -3,8 +3,6 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
-
 #include <cl/ClBackend.hpp>
 #include <neon/NeonBackend.hpp>
 #include <reference/RefBackend.hpp>
@@ -12,192 +10,195 @@
 
 #include <Network.hpp>
 
+#include <doctest/doctest.h>
+
 #include <vector>
 #include <string>
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(BackendsCompatibility, * boost::unit_test::disabled())
-
 #if defined(ARMCOMPUTENEON_ENABLED)
-BOOST_AUTO_TEST_CASE(Neon_Cl_DirectCompatibility_Test)
-{
-    auto neonBackend = std::make_unique<NeonBackend>();
-    auto clBackend = std::make_unique<ClBackend>();
-
-    TensorHandleFactoryRegistry registry;
-    neonBackend->RegisterTensorHandleFactories(registry);
-    clBackend->RegisterTensorHandleFactories(registry);
-
-    const BackendId& neonBackendId = neonBackend->GetId();
-    const BackendId& clBackendId = clBackend->GetId();
-
-    BackendsMap backends;
-    backends[neonBackendId] = std::move(neonBackend);
-    backends[clBackendId] = std::move(clBackend);
-
-    armnn::Graph graph;
-
-    armnn::InputLayer* const inputLayer = graph.AddLayer<armnn::InputLayer>(0, "input");
-
-    inputLayer->SetBackendId(neonBackendId);
-
-    armnn::SoftmaxDescriptor smDesc;
-    armnn::SoftmaxLayer* const softmaxLayer1 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax1");
-    softmaxLayer1->SetBackendId(clBackendId);
-
-    armnn::SoftmaxLayer* const softmaxLayer2 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax2");
-    softmaxLayer2->SetBackendId(neonBackendId);
-
-    armnn::SoftmaxLayer* const softmaxLayer3 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax3");
-    softmaxLayer3->SetBackendId(clBackendId);
-
-    armnn::SoftmaxLayer* const softmaxLayer4 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax4");
-    softmaxLayer4->SetBackendId(neonBackendId);
-
-    armnn::OutputLayer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "output");
-    outputLayer->SetBackendId(clBackendId);
-
-    inputLayer->GetOutputSlot(0).Connect(softmaxLayer1->GetInputSlot(0));
-    softmaxLayer1->GetOutputSlot(0).Connect(softmaxLayer2->GetInputSlot(0));
-    softmaxLayer2->GetOutputSlot(0).Connect(softmaxLayer3->GetInputSlot(0));
-    softmaxLayer3->GetOutputSlot(0).Connect(softmaxLayer4->GetInputSlot(0));
-    softmaxLayer4->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-
-    graph.TopologicalSort();
-
-    std::vector<std::string> errors;
-    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, errors);
-
-    BOOST_TEST(result.m_Error == false);
-    BOOST_TEST(result.m_Warning == false);
-
-    OutputSlot& inputLayerOut = inputLayer->GetOutputSlot(0);
-    OutputSlot& softmaxLayer1Out = softmaxLayer1->GetOutputSlot(0);
-    OutputSlot& softmaxLayer2Out = softmaxLayer2->GetOutputSlot(0);
-    OutputSlot& softmaxLayer3Out = softmaxLayer3->GetOutputSlot(0);
-    OutputSlot& softmaxLayer4Out = softmaxLayer4->GetOutputSlot(0);
-
-    // Check that the correct factory was selected
-    BOOST_TEST(inputLayerOut.GetTensorHandleFactoryId()    == "Arm/Cl/TensorHandleFactory");
-    BOOST_TEST(softmaxLayer1Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
-    BOOST_TEST(softmaxLayer2Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
-    BOOST_TEST(softmaxLayer3Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
-    BOOST_TEST(softmaxLayer4Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
-
-    // Check that the correct strategy was selected
-    BOOST_TEST((inputLayerOut.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
-    BOOST_TEST((softmaxLayer1Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
-    BOOST_TEST((softmaxLayer2Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
-    BOOST_TEST((softmaxLayer3Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
-    BOOST_TEST((softmaxLayer4Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
-
-    graph.AddCompatibilityLayers(backends, registry);
-
-    // Test for copy layers
-    int copyCount= 0;
-    graph.ForEachLayer([&copyCount](Layer* layer)
-    {
-        if (layer->GetType() == LayerType::MemCopy)
-        {
-            copyCount++;
-        }
-    });
-    BOOST_TEST(copyCount == 0);
-
-    // Test for import layers
-    int importCount= 0;
-    graph.ForEachLayer([&importCount](Layer *layer)
-    {
-        if (layer->GetType() == LayerType::MemImport)
-        {
-            importCount++;
-        }
-    });
-    BOOST_TEST(importCount == 0);
-}
+// Disabled Test Suite
+//TEST_SUITE("BackendsCompatibility")
+//TEST_CASE("Neon_Cl_DirectCompatibility_Test")
+//{
+//    auto neonBackend = std::make_unique<NeonBackend>();
+//    auto clBackend = std::make_unique<ClBackend>();
+//
+//    TensorHandleFactoryRegistry registry;
+//    neonBackend->RegisterTensorHandleFactories(registry);
+//    clBackend->RegisterTensorHandleFactories(registry);
+//
+//    const BackendId& neonBackendId = neonBackend->GetId();
+//    const BackendId& clBackendId = clBackend->GetId();
+//
+//    BackendsMap backends;
+//    backends[neonBackendId] = std::move(neonBackend);
+//    backends[clBackendId] = std::move(clBackend);
+//
+//    armnn::Graph graph;
+//
+//    armnn::InputLayer* const inputLayer = graph.AddLayer<armnn::InputLayer>(0, "input");
+//
+//    inputLayer->SetBackendId(neonBackendId);
+//
+//    armnn::SoftmaxDescriptor smDesc;
+//    armnn::SoftmaxLayer* const softmaxLayer1 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax1");
+//    softmaxLayer1->SetBackendId(clBackendId);
+//
+//    armnn::SoftmaxLayer* const softmaxLayer2 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax2");
+//    softmaxLayer2->SetBackendId(neonBackendId);
+//
+//    armnn::SoftmaxLayer* const softmaxLayer3 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax3");
+//    softmaxLayer3->SetBackendId(clBackendId);
+//
+//    armnn::SoftmaxLayer* const softmaxLayer4 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax4");
+//    softmaxLayer4->SetBackendId(neonBackendId);
+//
+//    armnn::OutputLayer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "output");
+//    outputLayer->SetBackendId(clBackendId);
+//
+//    inputLayer->GetOutputSlot(0).Connect(softmaxLayer1->GetInputSlot(0));
+//    softmaxLayer1->GetOutputSlot(0).Connect(softmaxLayer2->GetInputSlot(0));
+//    softmaxLayer2->GetOutputSlot(0).Connect(softmaxLayer3->GetInputSlot(0));
+//    softmaxLayer3->GetOutputSlot(0).Connect(softmaxLayer4->GetInputSlot(0));
+//    softmaxLayer4->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+//
+//    graph.TopologicalSort();
+//
+//    std::vector<std::string> errors;
+//    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, errors);
+//
+//    CHECK(result.m_Error == false);
+//    CHECK(result.m_Warning == false);
+//
+//    OutputSlot& inputLayerOut = inputLayer->GetOutputSlot(0);
+//    OutputSlot& softmaxLayer1Out = softmaxLayer1->GetOutputSlot(0);
+//    OutputSlot& softmaxLayer2Out = softmaxLayer2->GetOutputSlot(0);
+//    OutputSlot& softmaxLayer3Out = softmaxLayer3->GetOutputSlot(0);
+//    OutputSlot& softmaxLayer4Out = softmaxLayer4->GetOutputSlot(0);
+//
+//    // Check that the correct factory was selected
+//    CHECK(inputLayerOut.GetTensorHandleFactoryId()    == "Arm/Cl/TensorHandleFactory");
+//    CHECK(softmaxLayer1Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
+//    CHECK(softmaxLayer2Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
+//    CHECK(softmaxLayer3Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
+//    CHECK(softmaxLayer4Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
+//
+//    // Check that the correct strategy was selected
+//    CHECK((inputLayerOut.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
+//    CHECK((softmaxLayer1Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
+//    CHECK((softmaxLayer2Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
+//    CHECK((softmaxLayer3Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
+//    CHECK((softmaxLayer4Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
+//
+//    graph.AddCompatibilityLayers(backends, registry);
+//
+//    // Test for copy layers
+//    int copyCount= 0;
+//    graph.ForEachLayer([&copyCount](Layer* layer)
+//    {
+//        if (layer->GetType() == LayerType::MemCopy)
+//        {
+//            copyCount++;
+//        }
+//    });
+//    CHECK(copyCount == 0);
+//
+//    // Test for import layers
+//    int importCount= 0;
+//    graph.ForEachLayer([&importCount](Layer *layer)
+//    {
+//        if (layer->GetType() == LayerType::MemImport)
+//        {
+//            importCount++;
+//        }
+//    });
+//    CHECK(importCount == 0);
+//}
+//
+//}
 #endif
-BOOST_AUTO_TEST_SUITE_END()
 
-BOOST_AUTO_TEST_SUITE(BackendCapability)
-
+TEST_SUITE("BackendCapability")
+{
 #if defined(ARMNNREF_ENABLED)
 
-BOOST_AUTO_TEST_CASE(Ref_Backends_Capability_Test)
+TEST_CASE("Ref_Backends_Capability_Test")
 {
     auto refBackend  = std::make_unique<RefBackend>();
     auto refCapabilities = refBackend->GetCapabilities();
 
-    BOOST_CHECK(armnn::HasCapability("NonConstWeights", refCapabilities));
-    BOOST_CHECK(armnn::HasCapability("AsyncExecution", refCapabilities));
+    CHECK(armnn::HasCapability("NonConstWeights", refCapabilities));
+    CHECK(armnn::HasCapability("AsyncExecution", refCapabilities));
 
     armnn::BackendOptions::BackendOption nonConstWeights{"NonConstWeights", true};
     armnn::BackendOptions::BackendOption AsyncExecution{"AsyncExecution", true};
 
-    BOOST_CHECK(armnn::HasCapability(nonConstWeights, refCapabilities));
-    BOOST_CHECK(armnn::HasCapability(AsyncExecution, refCapabilities));
+    CHECK(armnn::HasCapability(nonConstWeights, refCapabilities));
+    CHECK(armnn::HasCapability(AsyncExecution, refCapabilities));
 }
 
-BOOST_AUTO_TEST_CASE(Ref_Backends_Unkown_Capability_Test)
+TEST_CASE("Ref_Backends_Unkown_Capability_Test")
 {
     auto refBackend  = std::make_unique<RefBackend>();
     auto refCapabilities = refBackend->GetCapabilities();
 
     armnn::BackendOptions::BackendOption AsyncExecutionFalse{"AsyncExecution", false};
-    BOOST_CHECK(!armnn::HasCapability(AsyncExecutionFalse, refCapabilities));
+    CHECK(!armnn::HasCapability(AsyncExecutionFalse, refCapabilities));
 
     armnn::BackendOptions::BackendOption AsyncExecutionInt{"AsyncExecution", 50};
-    BOOST_CHECK(!armnn::HasCapability(AsyncExecutionFalse, refCapabilities));
+    CHECK(!armnn::HasCapability(AsyncExecutionFalse, refCapabilities));
 
     armnn::BackendOptions::BackendOption AsyncExecutionFloat{"AsyncExecution", 0.0f};
-    BOOST_CHECK(!armnn::HasCapability(AsyncExecutionFloat, refCapabilities));
+    CHECK(!armnn::HasCapability(AsyncExecutionFloat, refCapabilities));
 
     armnn::BackendOptions::BackendOption AsyncExecutionString{"AsyncExecution", "true"};
-    BOOST_CHECK(!armnn::HasCapability(AsyncExecutionString, refCapabilities));
+    CHECK(!armnn::HasCapability(AsyncExecutionString, refCapabilities));
 
-    BOOST_CHECK(!armnn::HasCapability("Telekinesis", refCapabilities));
+    CHECK(!armnn::HasCapability("Telekinesis", refCapabilities));
     armnn::BackendOptions::BackendOption unkownCapability{"Telekinesis", true};
-    BOOST_CHECK(!armnn::HasCapability(unkownCapability, refCapabilities));
+    CHECK(!armnn::HasCapability(unkownCapability, refCapabilities));
 }
 
 #endif
 
 #if defined(ARMCOMPUTENEON_ENABLED)
 
-BOOST_AUTO_TEST_CASE(Neon_Backends_Capability_Test)
+TEST_CASE("Neon_Backends_Capability_Test")
 {
     auto neonBackend = std::make_unique<NeonBackend>();
     auto neonCapabilities = neonBackend->GetCapabilities();
 
-    BOOST_CHECK(armnn::HasCapability("NonConstWeights", neonCapabilities));
-    BOOST_CHECK(armnn::HasCapability("AsyncExecution", neonCapabilities));
+    CHECK(armnn::HasCapability("NonConstWeights", neonCapabilities));
+    CHECK(armnn::HasCapability("AsyncExecution", neonCapabilities));
 
     armnn::BackendOptions::BackendOption nonConstWeights{"NonConstWeights", false};
     armnn::BackendOptions::BackendOption AsyncExecution{"AsyncExecution", false};
 
-    BOOST_CHECK(armnn::HasCapability(nonConstWeights, neonCapabilities));
-    BOOST_CHECK(armnn::HasCapability(AsyncExecution, neonCapabilities));
+    CHECK(armnn::HasCapability(nonConstWeights, neonCapabilities));
+    CHECK(armnn::HasCapability(AsyncExecution, neonCapabilities));
 }
 
 #endif
 
 #if defined(ARMCOMPUTECL_ENABLED)
 
-BOOST_AUTO_TEST_CASE(Cl_Backends_Capability_Test)
+TEST_CASE("Cl_Backends_Capability_Test")
 {
     auto clBackend = std::make_unique<ClBackend>();
     auto clCapabilities = clBackend->GetCapabilities();
 
-    BOOST_CHECK(armnn::HasCapability("NonConstWeights", clCapabilities));
-    BOOST_CHECK(armnn::HasCapability("AsyncExecution", clCapabilities));
+    CHECK(armnn::HasCapability("NonConstWeights", clCapabilities));
+    CHECK(armnn::HasCapability("AsyncExecution", clCapabilities));
 
     armnn::BackendOptions::BackendOption nonConstWeights{"NonConstWeights", false};
     armnn::BackendOptions::BackendOption AsyncExecution{"AsyncExecution", false};
 
-    BOOST_CHECK(armnn::HasCapability(nonConstWeights, clCapabilities));
-    BOOST_CHECK(armnn::HasCapability(AsyncExecution, clCapabilities));
+    CHECK(armnn::HasCapability(nonConstWeights, clCapabilities));
+    CHECK(armnn::HasCapability(AsyncExecution, clCapabilities));
 }
 
 #endif
 
-BOOST_AUTO_TEST_SUITE_END()
+}
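
The BackendsCompatibility suite, previously registered as disabled via * boost::unit_test::disabled(), is carried over above as commented-out code. Purely as an alternative (not what this patch does), doctest decorators can keep a disabled test registered but skipped; a sketch, assuming the vendored doctest supports decorators:

#include <doctest/doctest.h>

// skip() keeps the case registered (it shows up in test listings) but it is
// not executed on a normal run unless overridden on the command line.
TEST_CASE("Neon_Cl_DirectCompatibility_Test" * doctest::skip())
{
    CHECK(true); // the original body could live here instead of being commented out
}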
diff --git a/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp
index 58a1f39..5b2f33f 100644
--- a/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp
@@ -12,7 +12,7 @@
 
 #include <armnn/utility/NumericCast.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <vector>
 
@@ -64,7 +64,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateConcatNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     // Creates structures for input & output.
     std::vector<T> inputData{
@@ -128,8 +128,6 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateConcatNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
 
-    BOOST_TEST_CHECKPOINT("create a network");
-
     // Creates structures for input & output.
     std::vector<T> inputData{
         1, 2,
@@ -192,8 +190,6 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateConcatNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
 
-    BOOST_TEST_CHECKPOINT("create a network");
-
     // Creates structures for input & output.
     std::vector<T> inputData{
         1, 2,
@@ -255,8 +251,6 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateConcatNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
 
-    BOOST_TEST_CHECKPOINT("create a network");
-
     // Creates structures for input & output.
     std::vector<T> inputData{
         1, 2,
diff --git a/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp b/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp
index 2dd5298..ea99729 100644
--- a/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp
+++ b/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp
@@ -8,19 +8,21 @@
 #include <backendsCommon/TensorHandle.hpp>
 #include <backendsCommon/Workload.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(WorkloadAsyncExecuteTests)
 
 namespace
 {
 
+TEST_SUITE("WorkloadAsyncExecuteTests")
+{
+
 struct Workload0 : BaseWorkload<ElementwiseUnaryQueueDescriptor>
 {
     Workload0(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info)
-            : BaseWorkload(descriptor, info)
+        : BaseWorkload(descriptor, info)
     {
     }
 
@@ -33,7 +35,9 @@
         int* inVals = static_cast<int*>(m_Data.m_Inputs[0][0].Map());
         int* outVals = static_cast<int*>(m_Data.m_Outputs[0][0].Map());
 
-        for (unsigned int i = 0; i < m_Data.m_Inputs[0][0].GetShape().GetNumElements(); ++i)
+        for (unsigned int i = 0;
+             i < m_Data.m_Inputs[0][0].GetShape().GetNumElements();
+             ++i)
         {
             outVals[i] = inVals[i] * outVals[i];
             inVals[i] = outVals[i];
@@ -45,7 +49,9 @@
         int* inVals = static_cast<int*>(desc.m_Inputs[0][0].Map());
         int* outVals = static_cast<int*>(desc.m_Outputs[0][0].Map());
 
-        for (unsigned int i = 0; i < desc.m_Inputs[0][0].GetShape().GetNumElements(); ++i)
+        for (unsigned int i = 0;
+             i < desc.m_Inputs[0][0].GetShape().GetNumElements();
+             ++i)
         {
             outVals[i] = inVals[i] + outVals[i];
             inVals[i] = outVals[i];
@@ -61,7 +67,7 @@
 struct Workload1 : BaseWorkload<ElementwiseUnaryQueueDescriptor>
 {
     Workload1(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info)
-            : BaseWorkload(descriptor, info)
+        : BaseWorkload(descriptor, info)
     {
     }
 
@@ -70,7 +76,9 @@
         int* inVals = static_cast<int*>(m_Data.m_Inputs[0][0].Map());
         int* outVals = static_cast<int*>(m_Data.m_Outputs[0][0].Map());
 
-        for (unsigned int i = 0; i < m_Data.m_Inputs[0][0].GetShape().GetNumElements(); ++i)
+        for (unsigned int i = 0;
+             i < m_Data.m_Inputs[0][0].GetShape().GetNumElements();
+             ++i)
         {
             outVals[i] = inVals[i] * outVals[i];
             inVals[i] = outVals[i];
@@ -83,7 +91,9 @@
     int* actualOutput = static_cast<int*>(tensorHandle->Map());
 
     bool allValuesCorrect = true;
-    for (unsigned int i = 0; i < tensorHandle->GetShape().GetNumElements(); ++i)
+    for (unsigned int i = 0;
+         i < tensorHandle->GetShape().GetNumElements();
+         ++i)
     {
         if (actualOutput[i] != expectedValue)
         {
@@ -91,7 +101,7 @@
         }
     }
 
-    BOOST_CHECK(allValuesCorrect);
+    CHECK(allValuesCorrect);
 }
 
 template<typename Workload>
@@ -108,7 +118,7 @@
     return std::make_unique<Workload>(elementwiseUnaryQueueDescriptor, workloadInfo);
 }
 
-BOOST_AUTO_TEST_CASE(TestAsyncExecute)
+TEST_CASE("TestAsyncExecute")
 {
     TensorInfo info({5}, DataType::Signed32);
 
@@ -145,7 +155,7 @@
     ValidateTensor(&workload0.get()->GetQueueDescriptor()->m_Inputs[0][0], expectedExecuteval);
 }
 
-BOOST_AUTO_TEST_CASE(TestDefaultAsyncExecute)
+TEST_CASE("TestDefaultAsyncExecute")
 {
     TensorInfo info({5}, DataType::Signed32);
 
@@ -179,7 +189,7 @@
     ValidateTensor(workingMemDescriptor.m_Inputs[0], expectedExecuteval);
 }
 
-BOOST_AUTO_TEST_CASE(TestDefaultAsyncExeuteWithThreads)
+TEST_CASE("TestDefaultAsyncExeuteWithThreads")
 {
     // Use a large vector so the threads have a chance to interact
     unsigned int vecSize = 1000;
@@ -243,6 +253,6 @@
     ValidateTensor(workingMemDescriptor2.m_Inputs[0], expectedExecuteval2);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
 
 }
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp
index e624159..a5e2fac 100644
--- a/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp
@@ -10,6 +10,8 @@
 #include <armnn/INetwork.hpp>
 #include <ResolveType.hpp>
 
+#include <doctest/doctest.h>
+
 namespace
 {
 
@@ -45,7 +47,7 @@
     // Builds up the structure of the network
     armnn::INetworkPtr net = CreateDequantizeNetwork<T>(inputInfo, outputInfo);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     std::map<int, std::vector<T>> inputTensorData = { { 0, input } };
     std::map<int, std::vector<float>> expectedOutputData = { { 0, expectedOutput } };
diff --git a/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp
index 5a42550..a566964 100644
--- a/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp
@@ -10,6 +10,8 @@
 #include <armnn/INetwork.hpp>
 #include <ResolveType.hpp>
 
+#include <doctest/doctest.h>
+
 namespace{
 
 template<typename T>
@@ -89,7 +91,7 @@
     armnn::INetworkPtr net = CreateDetectionPostProcessNetwork<T>(boxEncodingsInfo, scoresInfo,
                                                                   anchorsInfo, anchors, useRegularNms);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     std::map<int, std::vector<T>> inputTensorData = {{ 0, boxEncodings }, { 1, scores }};
     std::map<int, std::vector<float>> expectedOutputData = {{ 0, expectedDetectionBoxes },
diff --git a/src/backends/backendsCommon/test/DynamicBackendTests.cpp b/src/backends/backendsCommon/test/DynamicBackendTests.cpp
index b1c8234..669ce60 100644
--- a/src/backends/backendsCommon/test/DynamicBackendTests.cpp
+++ b/src/backends/backendsCommon/test/DynamicBackendTests.cpp
@@ -7,13 +7,14 @@
 
 #include <test/UnitTests.hpp>
 
-BOOST_AUTO_TEST_SUITE(DynamicBackendTests)
+#include <doctest/doctest.h>
 
+TEST_SUITE("DynamicBackendTests")
+{
 ARMNN_SIMPLE_TEST_CASE(OpenCloseHandle, OpenCloseHandleTestImpl);
 ARMNN_SIMPLE_TEST_CASE(CloseInvalidHandle, CloseInvalidHandleTestImpl);
 ARMNN_SIMPLE_TEST_CASE(OpenEmptyFileName, OpenEmptyFileNameTestImpl);
 ARMNN_SIMPLE_TEST_CASE(OpenNotExistingFile, OpenNotExistingFileTestImpl);
-ARMNN_SIMPLE_TEST_CASE(OpenNotSharedObjectFile, OpenNotSharedObjectTestImpl);
 ARMNN_SIMPLE_TEST_CASE(GetValidEntryPoint, GetValidEntryPointTestImpl);
 ARMNN_SIMPLE_TEST_CASE(GetNameMangledEntryPoint, GetNameMangledEntryPointTestImpl);
 ARMNN_SIMPLE_TEST_CASE(GetNoExternEntryPoint, GetNoExternEntryPointTestImpl);
@@ -21,7 +22,6 @@
 
 ARMNN_SIMPLE_TEST_CASE(BackendVersioning, BackendVersioningTestImpl);
 
-
 ARMNN_SIMPLE_TEST_CASE(CreateDynamicBackendObjectInvalidHandle,
                        CreateDynamicBackendObjectInvalidHandleTestImpl);
 ARMNN_SIMPLE_TEST_CASE(CreateDynamicBackendObjectInvalidInterface1,
@@ -39,11 +39,12 @@
 ARMNN_SIMPLE_TEST_CASE(CreateDynamicBackendObjectInvalidInterface7,
                        CreateDynamicBackendObjectInvalidInterface7TestImpl);
 
+ARMNN_SIMPLE_TEST_CASE(OpenNotSharedObjectFile, OpenNotSharedObjectTestImpl);
+ARMNN_SIMPLE_TEST_CASE(GetSharedObjects, GetSharedObjectsTestImpl);
+
 ARMNN_SIMPLE_TEST_CASE(GetBackendPaths, GetBackendPathsTestImpl)
 ARMNN_SIMPLE_TEST_CASE(GetBackendPathsOverride, GetBackendPathsOverrideTestImpl)
 
-ARMNN_SIMPLE_TEST_CASE(GetSharedObjects, GetSharedObjectsTestImpl);
-
 ARMNN_SIMPLE_TEST_CASE(CreateDynamicBackends, CreateDynamicBackendsTestImpl);
 ARMNN_SIMPLE_TEST_CASE(CreateDynamicBackendsNoPaths, CreateDynamicBackendsNoPathsTestImpl);
 ARMNN_SIMPLE_TEST_CASE(CreateDynamicBackendsAllInvalid, CreateDynamicBackendsAllInvalidTestImpl);
@@ -76,4 +77,4 @@
 ARMNN_SIMPLE_TEST_CASE(SampleDynamicBackendEndToEnd, SampleDynamicBackendEndToEndTestImpl);
 #endif
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/backendsCommon/test/DynamicBackendTests.hpp b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
index a4f1613..53ff5ca 100644
--- a/src/backends/backendsCommon/test/DynamicBackendTests.hpp
+++ b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
@@ -18,7 +18,11 @@
 #include <string>
 #include <memory>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
+
+#if defined(_MSC_VER)
+#include <Windows.h>
+#endif
 
 #if !defined(DYNAMIC_BACKEND_BUILD_DIR)
 #define DYNAMIC_BACKEND_BUILD_DIR fs::path("./")
@@ -106,50 +110,43 @@
     FactoryStorage m_TempStorage;
 };
 
+#if defined(_MSC_VER)
+std::string GetUnitTestExecutablePath()
+{
+    char buffer[MAX_PATH] = "";
+    GetModuleFileNameA(NULL, buffer, MAX_PATH);
+    fs::path executablePath(buffer);
+    return executablePath.parent_path();
+}
+
+#else
+std::string GetUnitTestExecutablePath()
+{
+    char buffer[PATH_MAX] = "";
+    if (readlink("/proc/self/exe", buffer, PATH_MAX) != -1)
+    {
+        fs::path executablePath(buffer);
+        return executablePath.parent_path();
+    }
+    return "";
+}
+#endif
+
 std::string GetBasePath(const std::string& basePath)
 {
     using namespace fs;
     // What we're looking for here is the location of the UnitTests executable.
-    // In the normal build environment there are a series of files and
-    // directories created by cmake. If the executable has been relocated they
-    // may not be there. The search hierarchy is:
-    // * User specified --dynamic-backend-build-dir
-    // * Compile time value of DYNAMIC_BACKEND_BUILD_DIR.
-    // * Arg0 location.
-    // * Fall back value of current directory.
-    path programLocation = DYNAMIC_BACKEND_BUILD_DIR;
-    // Look for the specific argument --dynamic-backend-build-dir?
-    if (boost::unit_test::framework::master_test_suite().argc == 3)
+    // If that cannot be determined, fall back to DYNAMIC_BACKEND_BUILD_DIR (by default the current directory).
+    path programLocation = GetUnitTestExecutablePath();
+    if (!exists(programLocation))
     {
-        // Boost custom arguments begin after a '--' on the command line.
-        if (g_TestDirCLI.compare(boost::unit_test::framework::master_test_suite().argv[1]) == 0)
-        {
-            // Then the next argument is the path.
-            programLocation = boost::unit_test::framework::master_test_suite().argv[2];
-        }
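+        // The executable's directory could not be found, so fall back to the
+        // compile-time DYNAMIC_BACKEND_BUILD_DIR value.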
+        programLocation = DYNAMIC_BACKEND_BUILD_DIR;
     }
-    else
-    {
-        // Start by checking if DYNAMIC_BACKEND_BUILD_DIR value exist.
-        if (!exists(programLocation))
-        {
-            // That doesn't exist try looking at arg[0].
-            path arg0Path(boost::unit_test::framework::master_test_suite().argv[0]);
-            arg0Path.remove_filename();
-            path arg0SharedObjectPath(arg0Path);
-            arg0SharedObjectPath.append(basePath);
-            if (exists(arg0SharedObjectPath))
-            {
-                // Yeah arg0 worked.
-                programLocation = arg0Path;
-            }
-        }
-    }
+
     // This is the base path from the build where the test libraries were built.
     path sharedObjectPath = programLocation.append(basePath);
-    BOOST_REQUIRE_MESSAGE(exists(sharedObjectPath), "Base path for shared objects does not exist: " +
-                          sharedObjectPath.string() + "\nTo specify the root of this base path on the " +
-                          "command line add: \'-- --dynamic-backend-build-dir <path>\'");
+    REQUIRE_MESSAGE(exists(sharedObjectPath),
+                    "Base path for shared objects does not exist: " + sharedObjectPath.string());
     return sharedObjectPath.string();
 }
 
@@ -192,7 +189,7 @@
 
     path directoryPath(directory);
     path fileNamePath = directoryPath.append(fileName);
-    BOOST_CHECK(exists(fileNamePath));
+    CHECK(exists(fileNamePath));
 
     return fileNamePath.string();
 }
@@ -205,8 +202,8 @@
     std::string sharedObjectFilePath = GetTestFilePath(testSubDirectory, g_TestSharedObjectFileName);
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_NO_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
-    BOOST_TEST((sharedObjectHandle != nullptr));
+    CHECK_NOTHROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
+    CHECK((sharedObjectHandle != nullptr));
 
     DynamicBackendUtils::CloseHandle(sharedObjectHandle);
 }
@@ -224,8 +221,8 @@
     using namespace armnn;
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(""), RuntimeException);
-    BOOST_TEST((sharedObjectHandle == nullptr));
+    CHECK_THROWS_AS(sharedObjectHandle = DynamicBackendUtils::OpenHandle(""), RuntimeException);
+    CHECK((sharedObjectHandle == nullptr));
 }
 
 void OpenNotExistingFileTestImpl()
@@ -233,8 +230,8 @@
     using namespace armnn;
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle("NotExistingFileName"), RuntimeException);
-    BOOST_TEST((sharedObjectHandle == nullptr));
+    CHECK_THROWS_AS(sharedObjectHandle = DynamicBackendUtils::OpenHandle("NotExistingFileName"), RuntimeException);
+    CHECK((sharedObjectHandle == nullptr));
 }
 
 void OpenNotSharedObjectTestImpl()
@@ -245,8 +242,8 @@
     std::string notSharedObjectFilePath = GetTestFilePath(testSubDirectory, g_TestNoSharedObjectFileName);
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(notSharedObjectFilePath), RuntimeException);
-    BOOST_TEST((sharedObjectHandle == nullptr));
+    CHECK_THROWS_AS(sharedObjectHandle = DynamicBackendUtils::OpenHandle(notSharedObjectFilePath), RuntimeException);
+    CHECK((sharedObjectHandle == nullptr));
 }
 
 void GetValidEntryPointTestImpl()
@@ -257,15 +254,15 @@
     std::string sharedObjectFilePath = GetTestFilePath(testSubDirectory, g_TestSharedObjectFileName);
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_NO_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
-    BOOST_TEST((sharedObjectHandle != nullptr));
+    CHECK_NOTHROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
+    CHECK((sharedObjectHandle != nullptr));
 
     using TestFunctionType = int(*)(int);
     TestFunctionType testFunctionPointer = nullptr;
-    BOOST_CHECK_NO_THROW(testFunctionPointer = DynamicBackendUtils::GetEntryPoint<TestFunctionType>(sharedObjectHandle,
+    CHECK_NOTHROW(testFunctionPointer = DynamicBackendUtils::GetEntryPoint<TestFunctionType>(sharedObjectHandle,
                                                                                                     "TestFunction1"));
-    BOOST_TEST((testFunctionPointer != nullptr));
-    BOOST_TEST(testFunctionPointer(7) == 7);
+    CHECK((testFunctionPointer != nullptr));
+    CHECK(testFunctionPointer(7) == 7);
 
     DynamicBackendUtils::CloseHandle(sharedObjectHandle);
 }
@@ -278,15 +275,15 @@
     std::string sharedObjectFilePath = GetTestFilePath(testSubDirectory, g_TestSharedObjectFileName);
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_NO_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
-    BOOST_TEST((sharedObjectHandle != nullptr));
+    CHECK_NOTHROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
+    CHECK((sharedObjectHandle != nullptr));
 
     using TestFunctionType = int(*)(int);
     TestFunctionType testFunctionPointer = nullptr;
-    BOOST_CHECK_THROW(testFunctionPointer = DynamicBackendUtils::GetEntryPoint<TestFunctionType>(sharedObjectHandle,
+    CHECK_THROWS_AS(testFunctionPointer = DynamicBackendUtils::GetEntryPoint<TestFunctionType>(sharedObjectHandle,
                                                                                                  "TestFunction2"),
                       RuntimeException);
-    BOOST_TEST((testFunctionPointer == nullptr));
+    CHECK((testFunctionPointer == nullptr));
 
     DynamicBackendUtils::CloseHandle(sharedObjectHandle);
 }
@@ -299,15 +296,15 @@
     std::string sharedObjectFilePath = GetTestFilePath(testSubDirectory, g_TestSharedObjectFileName);
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_NO_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
-    BOOST_TEST((sharedObjectHandle != nullptr));
+    CHECK_NOTHROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
+    CHECK((sharedObjectHandle != nullptr));
 
     using TestFunctionType = int(*)(int);
     TestFunctionType testFunctionPointer = nullptr;
-    BOOST_CHECK_THROW(testFunctionPointer = DynamicBackendUtils::GetEntryPoint<TestFunctionType>(sharedObjectHandle,
+    CHECK_THROWS_AS(testFunctionPointer = DynamicBackendUtils::GetEntryPoint<TestFunctionType>(sharedObjectHandle,
                                                                                                  "TestFunction3"),
                       RuntimeException);
-    BOOST_TEST((testFunctionPointer == nullptr));
+    CHECK((testFunctionPointer == nullptr));
 
     DynamicBackendUtils::CloseHandle(sharedObjectHandle);
 }
@@ -320,15 +317,15 @@
     std::string sharedObjectFilePath = GetTestFilePath(testSubDirectory, g_TestSharedObjectFileName);
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_NO_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
-    BOOST_TEST((sharedObjectHandle != nullptr));
+    CHECK_NOTHROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
+    CHECK((sharedObjectHandle != nullptr));
 
     using TestFunctionType = int(*)(int);
     TestFunctionType testFunctionPointer = nullptr;
-    BOOST_CHECK_THROW(testFunctionPointer = DynamicBackendUtils::GetEntryPoint<TestFunctionType>(sharedObjectHandle,
+    CHECK_THROWS_AS(testFunctionPointer = DynamicBackendUtils::GetEntryPoint<TestFunctionType>(sharedObjectHandle,
                                                                                                  "TestFunction4"),
                       RuntimeException);
-    BOOST_TEST((testFunctionPointer == nullptr));
+    CHECK((testFunctionPointer == nullptr));
 
     DynamicBackendUtils::CloseHandle(sharedObjectHandle);
 }
@@ -342,36 +339,36 @@
 
     // Same backend and backend API versions are compatible with the backend API
     BackendVersion sameBackendVersion{ 2, 4 };
-    BOOST_TEST(sameBackendVersion == backendApiVersion);
-    BOOST_TEST(sameBackendVersion <= backendApiVersion);
-    BOOST_TEST(TestDynamicBackendUtils::IsBackendCompatibleTest(backendApiVersion, sameBackendVersion) == true);
+    CHECK(sameBackendVersion == backendApiVersion);
+    CHECK(sameBackendVersion <= backendApiVersion);
+    CHECK(TestDynamicBackendUtils::IsBackendCompatibleTest(backendApiVersion, sameBackendVersion) == true);
 
     // Backend versions that differ from the backend API version by major revision are not compatible
     // with the backend API
     BackendVersion laterMajorBackendVersion{ 3, 4 };
-    BOOST_TEST(!(laterMajorBackendVersion == backendApiVersion));
-    BOOST_TEST(!(laterMajorBackendVersion <= backendApiVersion));
-    BOOST_TEST(TestDynamicBackendUtils::IsBackendCompatibleTest(backendApiVersion, laterMajorBackendVersion) == false);
+    CHECK(!(laterMajorBackendVersion == backendApiVersion));
+    CHECK(!(laterMajorBackendVersion <= backendApiVersion));
+    CHECK(TestDynamicBackendUtils::IsBackendCompatibleTest(backendApiVersion, laterMajorBackendVersion) == false);
 
     BackendVersion earlierMajorBackendVersion{ 1, 4 };
-    BOOST_TEST(!(earlierMajorBackendVersion == backendApiVersion));
-    BOOST_TEST(earlierMajorBackendVersion <= backendApiVersion);
-    BOOST_TEST(TestDynamicBackendUtils::IsBackendCompatibleTest(backendApiVersion,
+    CHECK(!(earlierMajorBackendVersion == backendApiVersion));
+    CHECK(earlierMajorBackendVersion <= backendApiVersion);
+    CHECK(TestDynamicBackendUtils::IsBackendCompatibleTest(backendApiVersion,
                                                                 earlierMajorBackendVersion) == false);
 
     // Backend versions with the same major revision but later minor revision than
     // the backend API version are not compatible with the backend API
     BackendVersion laterMinorBackendVersion{ 2, 5 };
-    BOOST_TEST(!(laterMinorBackendVersion == backendApiVersion));
-    BOOST_TEST(!(laterMinorBackendVersion <= backendApiVersion));
-    BOOST_TEST(TestDynamicBackendUtils::IsBackendCompatibleTest(backendApiVersion, laterMinorBackendVersion) == false);
+    CHECK(!(laterMinorBackendVersion == backendApiVersion));
+    CHECK(!(laterMinorBackendVersion <= backendApiVersion));
+    CHECK(TestDynamicBackendUtils::IsBackendCompatibleTest(backendApiVersion, laterMinorBackendVersion) == false);
 
     // Backend versions with the same major revision but earlier minor revision than
     // the backend API version are compatible with the backend API
     BackendVersion earlierMinorBackendVersion{ 2, 3 };
-    BOOST_TEST(!(earlierMinorBackendVersion == backendApiVersion));
-    BOOST_TEST(earlierMinorBackendVersion <= backendApiVersion);
-    BOOST_TEST(TestDynamicBackendUtils::IsBackendCompatibleTest(backendApiVersion, earlierMinorBackendVersion) == true);
+    CHECK(!(earlierMinorBackendVersion == backendApiVersion));
+    CHECK(earlierMinorBackendVersion <= backendApiVersion);
+    CHECK(TestDynamicBackendUtils::IsBackendCompatibleTest(backendApiVersion, earlierMinorBackendVersion) == true);
 }
 
 #if defined(ARMNNREF_ENABLED)
@@ -387,41 +384,41 @@
     std::string testSubDirectory = GetTestSubDirectory(g_TestDynamicBackendSubDir);
 
     // We expect this path to exist so we can load a valid dynamic backend.
-    BOOST_CHECK_MESSAGE(fs::exists(testSubDirectory),
+    CHECK_MESSAGE(fs::exists(testSubDirectory),
                        "Base path for shared objects does not exist: " + testSubDirectory);
 
     std::string sharedObjectFilePath = GetTestFilePath(testSubDirectory, g_TestValidTestDynamicBackendFileName);
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_NO_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
-    BOOST_TEST((sharedObjectHandle != nullptr));
+    CHECK_NOTHROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
+    CHECK((sharedObjectHandle != nullptr));
 
     DynamicBackendPtr dynamicBackend;
-    BOOST_CHECK_NO_THROW(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)));
-    BOOST_TEST((dynamicBackend != nullptr));
+    CHECK_NOTHROW(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)));
+    CHECK((dynamicBackend != nullptr));
 
     BackendId dynamicBackendId;
-    BOOST_CHECK_NO_THROW(dynamicBackendId = dynamicBackend->GetBackendId());
-    BOOST_TEST((dynamicBackendId == "ValidTestDynamicBackend"));
+    CHECK_NOTHROW(dynamicBackendId = dynamicBackend->GetBackendId());
+    CHECK((dynamicBackendId == "ValidTestDynamicBackend"));
 
     BackendVersion dynamicBackendVersion;
-    BOOST_CHECK_NO_THROW(dynamicBackendVersion = dynamicBackend->GetBackendVersion());
-    BOOST_TEST((dynamicBackendVersion == IBackendInternal::GetApiVersion()));
+    CHECK_NOTHROW(dynamicBackendVersion = dynamicBackend->GetBackendVersion());
+    CHECK((dynamicBackendVersion == IBackendInternal::GetApiVersion()));
 
     IBackendInternalUniquePtr dynamicBackendInstance1;
-    BOOST_CHECK_NO_THROW(dynamicBackendInstance1 = dynamicBackend->GetBackend());
-    BOOST_TEST((dynamicBackendInstance1 != nullptr));
+    CHECK_NOTHROW(dynamicBackendInstance1 = dynamicBackend->GetBackend());
+    CHECK((dynamicBackendInstance1 != nullptr));
 
     BackendRegistry::FactoryFunction dynamicBackendFactoryFunction = nullptr;
-    BOOST_CHECK_NO_THROW(dynamicBackendFactoryFunction = dynamicBackend->GetFactoryFunction());
-    BOOST_TEST((dynamicBackendFactoryFunction != nullptr));
+    CHECK_NOTHROW(dynamicBackendFactoryFunction = dynamicBackend->GetFactoryFunction());
+    CHECK((dynamicBackendFactoryFunction != nullptr));
 
     IBackendInternalUniquePtr dynamicBackendInstance2;
-    BOOST_CHECK_NO_THROW(dynamicBackendInstance2 = dynamicBackendFactoryFunction());
-    BOOST_TEST((dynamicBackendInstance2 != nullptr));
+    CHECK_NOTHROW(dynamicBackendInstance2 = dynamicBackendFactoryFunction());
+    CHECK((dynamicBackendInstance2 != nullptr));
 
-    BOOST_TEST((dynamicBackendInstance1->GetId() == "ValidTestDynamicBackend"));
-    BOOST_TEST((dynamicBackendInstance2->GetId() == "ValidTestDynamicBackend"));
+    CHECK((dynamicBackendInstance1->GetId() == "ValidTestDynamicBackend"));
+    CHECK((dynamicBackendInstance2->GetId() == "ValidTestDynamicBackend"));
 }
 #endif
 
@@ -433,8 +430,8 @@
 
     void* sharedObjectHandle = nullptr;
     DynamicBackendPtr dynamicBackend;
-    BOOST_CHECK_THROW(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)), InvalidArgumentException);
-    BOOST_TEST((dynamicBackend == nullptr));
+    CHECK_THROWS_AS(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)), InvalidArgumentException);
+    CHECK((dynamicBackend == nullptr));
 }
 
 void CreateDynamicBackendObjectInvalidInterface1TestImpl()
@@ -448,12 +445,12 @@
     std::string sharedObjectFilePath = GetTestFilePath(testSubDirectory, g_TestInvalidTestDynamicBackend1FileName);
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_NO_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
-    BOOST_TEST((sharedObjectHandle != nullptr));
+    CHECK_NOTHROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
+    CHECK((sharedObjectHandle != nullptr));
 
     DynamicBackendPtr dynamicBackend;
-    BOOST_CHECK_THROW(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)), RuntimeException);
-    BOOST_TEST((dynamicBackend == nullptr));
+    CHECK_THROWS_AS(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)), RuntimeException);
+    CHECK((dynamicBackend == nullptr));
 }
 
 void CreateDynamicBackendObjectInvalidInterface2TestImpl()
@@ -468,12 +465,12 @@
     std::string sharedObjectFilePath = GetTestFilePath(testSubDirectory, g_TestInvalidTestDynamicBackend2FileName);
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_NO_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
-    BOOST_TEST((sharedObjectHandle != nullptr));
+    CHECK_NOTHROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
+    CHECK((sharedObjectHandle != nullptr));
 
     DynamicBackendPtr dynamicBackend;
-    BOOST_CHECK_THROW(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)), RuntimeException);
-    BOOST_TEST((dynamicBackend == nullptr));
+    CHECK_THROWS_AS(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)), RuntimeException);
+    CHECK((dynamicBackend == nullptr));
 }
 
 void CreateDynamicBackendObjectInvalidInterface3TestImpl()
@@ -488,12 +485,12 @@
     std::string sharedObjectFilePath = GetTestFilePath(testSubDirectory, g_TestInvalidTestDynamicBackend3FileName);
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_NO_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
-    BOOST_TEST((sharedObjectHandle != nullptr));
+    CHECK_NOTHROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
+    CHECK((sharedObjectHandle != nullptr));
 
     DynamicBackendPtr dynamicBackend;
-    BOOST_CHECK_THROW(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)), RuntimeException);
-    BOOST_TEST((dynamicBackend == nullptr));
+    CHECK_THROWS_AS(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)), RuntimeException);
+    CHECK((dynamicBackend == nullptr));
 }
 
 void CreateDynamicBackendObjectInvalidInterface4TestImpl()
@@ -508,12 +505,12 @@
     std::string sharedObjectFilePath = GetTestFilePath(testSubDirectory, g_TestInvalidTestDynamicBackend4FileName);
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_NO_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
-    BOOST_TEST((sharedObjectHandle != nullptr));
+    CHECK_NOTHROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
+    CHECK((sharedObjectHandle != nullptr));
 
     DynamicBackendPtr dynamicBackend;
-    BOOST_CHECK_THROW(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)), RuntimeException);
-    BOOST_TEST((dynamicBackend == nullptr));
+    CHECK_THROWS_AS(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)), RuntimeException);
+    CHECK((dynamicBackend == nullptr));
 }
 
 void CreateDynamicBackendObjectInvalidInterface5TestImpl()
@@ -529,12 +526,12 @@
     std::string sharedObjectFilePath = GetTestFilePath(testSubDirectory, g_TestInvalidTestDynamicBackend5FileName);
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_NO_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
-    BOOST_TEST((sharedObjectHandle != nullptr));
+    CHECK_NOTHROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
+    CHECK((sharedObjectHandle != nullptr));
 
     DynamicBackendPtr dynamicBackend;
-    BOOST_CHECK_THROW(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)), RuntimeException);
-    BOOST_TEST((dynamicBackend == nullptr));
+    CHECK_THROWS_AS(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)), RuntimeException);
+    CHECK((dynamicBackend == nullptr));
 }
 
 void CreateDynamicBackendObjectInvalidInterface6TestImpl()
@@ -550,32 +547,32 @@
     std::string sharedObjectFilePath = GetTestFilePath(testSubDirectory, g_TestInvalidTestDynamicBackend6FileName);
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_NO_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
-    BOOST_TEST((sharedObjectHandle != nullptr));
+    CHECK_NOTHROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
+    CHECK((sharedObjectHandle != nullptr));
 
     DynamicBackendPtr dynamicBackend;
-    BOOST_CHECK_NO_THROW(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)));
-    BOOST_TEST((dynamicBackend != nullptr));
+    CHECK_NOTHROW(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)));
+    CHECK((dynamicBackend != nullptr));
 
     BackendId dynamicBackendId;
-    BOOST_CHECK_NO_THROW(dynamicBackendId = dynamicBackend->GetBackendId());
-    BOOST_TEST((dynamicBackendId == "InvalidTestDynamicBackend"));
+    CHECK_NOTHROW(dynamicBackendId = dynamicBackend->GetBackendId());
+    CHECK((dynamicBackendId == "InvalidTestDynamicBackend"));
 
     BackendVersion dynamicBackendVersion;
-    BOOST_CHECK_NO_THROW(dynamicBackendVersion = dynamicBackend->GetBackendVersion());
-    BOOST_TEST((dynamicBackendVersion == BackendVersion({ 1, 0 })));
+    CHECK_NOTHROW(dynamicBackendVersion = dynamicBackend->GetBackendVersion());
+    CHECK((dynamicBackendVersion == BackendVersion({ 1, 0 })));
 
     IBackendInternalUniquePtr dynamicBackendInstance1;
-    BOOST_CHECK_THROW(dynamicBackendInstance1 = dynamicBackend->GetBackend(), RuntimeException);
-    BOOST_TEST((dynamicBackendInstance1 == nullptr));
+    CHECK_THROWS_AS(dynamicBackendInstance1 = dynamicBackend->GetBackend(), RuntimeException);
+    CHECK((dynamicBackendInstance1 == nullptr));
 
     BackendRegistry::FactoryFunction dynamicBackendFactoryFunction = nullptr;
-    BOOST_CHECK_NO_THROW(dynamicBackendFactoryFunction = dynamicBackend->GetFactoryFunction());
-    BOOST_TEST((dynamicBackendFactoryFunction != nullptr));
+    CHECK_NOTHROW(dynamicBackendFactoryFunction = dynamicBackend->GetFactoryFunction());
+    CHECK((dynamicBackendFactoryFunction != nullptr));
 
     IBackendInternalUniquePtr dynamicBackendInstance2;
-    BOOST_CHECK_THROW(dynamicBackendInstance2 = dynamicBackendFactoryFunction(), RuntimeException);
-    BOOST_TEST((dynamicBackendInstance2 == nullptr));
+    CHECK_THROWS_AS(dynamicBackendInstance2 = dynamicBackendFactoryFunction(), RuntimeException);
+    CHECK((dynamicBackendInstance2 == nullptr));
 }
 
 void CreateDynamicBackendObjectInvalidInterface7TestImpl()
@@ -591,12 +588,12 @@
     std::string sharedObjectFilePath = GetTestFilePath(testSubDirectory, g_TestInvalidTestDynamicBackend7FileName);
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_NO_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
-    BOOST_TEST((sharedObjectHandle != nullptr));
+    CHECK_NOTHROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
+    CHECK((sharedObjectHandle != nullptr));
 
     DynamicBackendPtr dynamicBackend;
-    BOOST_CHECK_THROW(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)), RuntimeException);
-    BOOST_TEST((dynamicBackend == nullptr));
+    CHECK_THROWS_AS(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)), RuntimeException);
+    CHECK((dynamicBackend == nullptr));
 }
 
 void GetBackendPathsTestImpl()
@@ -616,67 +613,67 @@
     std::string subDir3 = GetTestSubDirectory(g_TestDynamicBackendsSubDir3);
     std::string subDir4 = GetTestSubDirectory(g_TestDynamicBackendsSubDir4);
 
-    BOOST_CHECK(exists(subDir1));
-    BOOST_CHECK(exists(subDir2));
-    BOOST_CHECK(exists(subDir3));
-    BOOST_CHECK(!exists(subDir4));
+    CHECK(exists(subDir1));
+    CHECK(exists(subDir2));
+    CHECK(exists(subDir3));
+    CHECK(!exists(subDir4));
 
     // No path
-    BOOST_TEST(TestDynamicBackendUtils::GetBackendPathsImplTest("").empty());
+    CHECK(TestDynamicBackendUtils::GetBackendPathsImplTest("").empty());
 
     // Malformed path
     std::string malformedDir(subDir1 + "/" + subDir1);
-    BOOST_TEST(TestDynamicBackendUtils::GetBackendPathsImplTest(malformedDir).size()==0);
+    CHECK(TestDynamicBackendUtils::GetBackendPathsImplTest(malformedDir).size()==0);
 
     // Single valid path
     std::vector<std::string> DynamicBackendPaths2 = TestDynamicBackendUtils::GetBackendPathsImplTest(subDir1);
-    BOOST_TEST(DynamicBackendPaths2.size() == 1);
-    BOOST_TEST(DynamicBackendPaths2[0] == subDir1);
+    CHECK(DynamicBackendPaths2.size() == 1);
+    CHECK(DynamicBackendPaths2[0] == subDir1);
 
     // Multiple equal and valid paths
     std::string multipleEqualDirs(subDir1 + ":" + subDir1);
     std::vector<std::string> DynamicBackendPaths3 = TestDynamicBackendUtils::GetBackendPathsImplTest(multipleEqualDirs);
-    BOOST_TEST(DynamicBackendPaths3.size() == 1);
-    BOOST_TEST(DynamicBackendPaths3[0] == subDir1);
+    CHECK(DynamicBackendPaths3.size() == 1);
+    CHECK(DynamicBackendPaths3[0] == subDir1);
 
     // Multiple empty paths
-    BOOST_TEST(TestDynamicBackendUtils::GetBackendPathsImplTest(":::").empty());
+    CHECK(TestDynamicBackendUtils::GetBackendPathsImplTest(":::").empty());
 
     // Multiple valid paths
     std::string multipleValidPaths(subDir1 + ":" + subDir2 + ":" + subDir3);
     std::vector<std::string> DynamicBackendPaths5 =
         TestDynamicBackendUtils::GetBackendPathsImplTest(multipleValidPaths);
-    BOOST_TEST(DynamicBackendPaths5.size() == 3);
-    BOOST_TEST(DynamicBackendPaths5[0] == subDir1);
-    BOOST_TEST(DynamicBackendPaths5[1] == subDir2);
-    BOOST_TEST(DynamicBackendPaths5[2] == subDir3);
+    CHECK(DynamicBackendPaths5.size() == 3);
+    CHECK(DynamicBackendPaths5[0] == subDir1);
+    CHECK(DynamicBackendPaths5[1] == subDir2);
+    CHECK(DynamicBackendPaths5[2] == subDir3);
 
     // Valid among empty paths
     std::string validAmongEmptyDirs("::" + subDir1 + ":");
     std::vector<std::string> DynamicBackendPaths6 =
         TestDynamicBackendUtils::GetBackendPathsImplTest(validAmongEmptyDirs);
-    BOOST_TEST(DynamicBackendPaths6.size() == 1);
-    BOOST_TEST(DynamicBackendPaths6[0] == subDir1);
+    CHECK(DynamicBackendPaths6.size() == 1);
+    CHECK(DynamicBackendPaths6[0] == subDir1);
 
     // Invalid among empty paths
     std::string invalidAmongEmptyDirs(":" + subDir4 + "::");
-    BOOST_TEST(TestDynamicBackendUtils::GetBackendPathsImplTest(invalidAmongEmptyDirs).empty());
+    CHECK(TestDynamicBackendUtils::GetBackendPathsImplTest(invalidAmongEmptyDirs).empty());
 
     // Valid, invalid and empty paths
     std::string validInvalidEmptyDirs(subDir1 + ":" + subDir4 + ":");
     std::vector<std::string> DynamicBackendPaths8 =
         TestDynamicBackendUtils::GetBackendPathsImplTest(validInvalidEmptyDirs);
-    BOOST_TEST(DynamicBackendPaths8.size() == 1);
-    BOOST_TEST(DynamicBackendPaths8[0] == subDir1);
+    CHECK(DynamicBackendPaths8.size() == 1);
+    CHECK(DynamicBackendPaths8[0] == subDir1);
 
     // Mix of duplicates of valid, invalid and empty paths
     std::string duplicateValidInvalidEmptyDirs(validInvalidEmptyDirs + ":" + validInvalidEmptyDirs + ":" +
                                                subDir2 + ":" + subDir2);
     std::vector<std::string> DynamicBackendPaths9 =
         TestDynamicBackendUtils::GetBackendPathsImplTest(duplicateValidInvalidEmptyDirs);
-    BOOST_TEST(DynamicBackendPaths9.size() == 2);
-    BOOST_TEST(DynamicBackendPaths9[0] == subDir1);
-    BOOST_TEST(DynamicBackendPaths9[1] == subDir2);
+    CHECK(DynamicBackendPaths9.size() == 2);
+    CHECK(DynamicBackendPaths9[0] == subDir1);
+    CHECK(DynamicBackendPaths9[1] == subDir2);
 }
 
 void GetBackendPathsOverrideTestImpl()
@@ -687,17 +684,17 @@
     std::string subDir1 = GetTestSubDirectory(g_TestDynamicBackendsSubDir1);
     std::string subDir4 = GetTestSubDirectory(g_TestDynamicBackendsSubDir4);
 
-    BOOST_CHECK(exists(subDir1));
-    BOOST_CHECK(!exists(subDir4));
+    CHECK(exists(subDir1));
+    CHECK(!exists(subDir4));
 
     // Override with valid path
     std::vector<std::string> validResult = DynamicBackendUtils::GetBackendPaths(subDir1);
-    BOOST_TEST(validResult.size() == 1);
-    BOOST_TEST(validResult[0] == subDir1);
+    CHECK(validResult.size() == 1);
+    CHECK(validResult[0] == subDir1);
 
     // Override with invalid path
     std::vector<std::string> invalidResult = DynamicBackendUtils::GetBackendPaths(subDir4);
-    BOOST_TEST(invalidResult.empty());
+    CHECK(invalidResult.empty());
 }
 
 void GetSharedObjectsTestImpl()
@@ -753,10 +750,10 @@
     std::string testDynamicBackendsSubDir2 = GetTestSubDirectory(g_TestDynamicBackendsSubDir2);
     std::string testDynamicBackendsSubDir3 = GetTestSubDirectory(g_TestDynamicBackendsSubDir3);
     std::string testDynamicBackendsSubDir4 = GetTestSubDirectory(g_TestDynamicBackendsSubDir4);
-    BOOST_CHECK(exists(testDynamicBackendsSubDir1));
-    BOOST_CHECK(exists(testDynamicBackendsSubDir2));
-    BOOST_CHECK(exists(testDynamicBackendsSubDir3));
-    BOOST_CHECK(!exists(testDynamicBackendsSubDir4));
+    CHECK(exists(testDynamicBackendsSubDir1));
+    CHECK(exists(testDynamicBackendsSubDir2));
+    CHECK(exists(testDynamicBackendsSubDir3));
+    CHECK(!exists(testDynamicBackendsSubDir4));
 
     std::vector<std::string> backendPaths
     {
@@ -779,16 +776,16 @@
         path(testDynamicBackendsSubDir2 + "Arm_GpuAcc_backend.so")          // Duplicates on different paths are allowed
     };
 
-    BOOST_TEST(sharedObjects.size() == expectedSharedObjects.size());
-    BOOST_TEST(fs::equivalent(path(sharedObjects[0]), expectedSharedObjects[0]));
-    BOOST_TEST(fs::equivalent(path(sharedObjects[1]), expectedSharedObjects[1]));
-    BOOST_TEST(fs::equivalent(path(sharedObjects[2]), expectedSharedObjects[2]));
-    BOOST_TEST(fs::equivalent(path(sharedObjects[3]), expectedSharedObjects[3]));
-    BOOST_TEST(fs::equivalent(path(sharedObjects[4]), expectedSharedObjects[4]));
-    BOOST_TEST(fs::equivalent(path(sharedObjects[5]), expectedSharedObjects[5]));
-    BOOST_TEST(fs::equivalent(path(sharedObjects[6]), expectedSharedObjects[6]));
-    BOOST_TEST(fs::equivalent(path(sharedObjects[7]), expectedSharedObjects[7]));
-    BOOST_TEST(fs::equivalent(path(sharedObjects[8]), expectedSharedObjects[8]));
+    CHECK(sharedObjects.size() == expectedSharedObjects.size());
+    CHECK(fs::equivalent(path(sharedObjects[0]), expectedSharedObjects[0]));
+    CHECK(fs::equivalent(path(sharedObjects[1]), expectedSharedObjects[1]));
+    CHECK(fs::equivalent(path(sharedObjects[2]), expectedSharedObjects[2]));
+    CHECK(fs::equivalent(path(sharedObjects[3]), expectedSharedObjects[3]));
+    CHECK(fs::equivalent(path(sharedObjects[4]), expectedSharedObjects[4]));
+    CHECK(fs::equivalent(path(sharedObjects[5]), expectedSharedObjects[5]));
+    CHECK(fs::equivalent(path(sharedObjects[6]), expectedSharedObjects[6]));
+    CHECK(fs::equivalent(path(sharedObjects[7]), expectedSharedObjects[7]));
+    CHECK(fs::equivalent(path(sharedObjects[8]), expectedSharedObjects[8]));
 }
 
 void CreateDynamicBackendsTestImpl()
@@ -824,10 +821,10 @@
     std::string testDynamicBackendsSubDir6 = GetTestSubDirectory(g_TestDynamicBackendsSubDir6);
     std::string testDynamicBackendsSubDir7 = GetTestSubDirectory(g_TestDynamicBackendsSubDir7);
     std::string testDynamicBackendsSubDir8 = GetTestSubDirectory(g_TestDynamicBackendsSubDir8);
-    BOOST_CHECK(exists(testDynamicBackendsSubDir5));
-    BOOST_CHECK(exists(testDynamicBackendsSubDir6));
-    BOOST_CHECK(exists(testDynamicBackendsSubDir7));
-    BOOST_CHECK(!exists(testDynamicBackendsSubDir8));
+    CHECK(exists(testDynamicBackendsSubDir5));
+    CHECK(exists(testDynamicBackendsSubDir6));
+    CHECK(exists(testDynamicBackendsSubDir7));
+    CHECK(!exists(testDynamicBackendsSubDir8));
 
     std::vector<std::string> backendPaths
     {
@@ -839,19 +836,19 @@
     std::vector<std::string> sharedObjects = DynamicBackendUtils::GetSharedObjects(backendPaths);
     std::vector<DynamicBackendPtr> dynamicBackends = DynamicBackendUtils::CreateDynamicBackends(sharedObjects);
 
-    BOOST_TEST(dynamicBackends.size() == 5);
-    BOOST_TEST((dynamicBackends[0] != nullptr));
-    BOOST_TEST((dynamicBackends[1] != nullptr));
-    BOOST_TEST((dynamicBackends[2] != nullptr));
-    BOOST_TEST((dynamicBackends[3] != nullptr));
-    BOOST_TEST((dynamicBackends[4] != nullptr));
+    CHECK(dynamicBackends.size() == 5);
+    CHECK((dynamicBackends[0] != nullptr));
+    CHECK((dynamicBackends[1] != nullptr));
+    CHECK((dynamicBackends[2] != nullptr));
+    CHECK((dynamicBackends[3] != nullptr));
+    CHECK((dynamicBackends[4] != nullptr));
 
     // Duplicates are allowed here, they will be skipped later during the backend registration
-    BOOST_TEST((dynamicBackends[0]->GetBackendId() == "TestValid2"));
-    BOOST_TEST((dynamicBackends[1]->GetBackendId() == "TestValid3"));
-    BOOST_TEST((dynamicBackends[2]->GetBackendId() == "TestValid2")); // From duplicate Arm_TestValid2_backend.so
-    BOOST_TEST((dynamicBackends[3]->GetBackendId() == "TestValid2")); // From Arm_TestValid4_backend.so
-    BOOST_TEST((dynamicBackends[4]->GetBackendId() == "TestValid5"));
+    CHECK((dynamicBackends[0]->GetBackendId() == "TestValid2"));
+    CHECK((dynamicBackends[1]->GetBackendId() == "TestValid3"));
+    CHECK((dynamicBackends[2]->GetBackendId() == "TestValid2")); // From duplicate Arm_TestValid2_backend.so
+    CHECK((dynamicBackends[3]->GetBackendId() == "TestValid2")); // From Arm_TestValid4_backend.so
+    CHECK((dynamicBackends[4]->GetBackendId() == "TestValid5"));
 }
 
 void CreateDynamicBackendsNoPathsTestImpl()
@@ -860,7 +857,7 @@
 
     std::vector<DynamicBackendPtr> dynamicBackends = DynamicBackendUtils::CreateDynamicBackends({});
 
-    BOOST_TEST(dynamicBackends.empty());
+    CHECK(dynamicBackends.empty());
 }
 
 void CreateDynamicBackendsAllInvalidTestImpl()
@@ -875,7 +872,7 @@
     };
     std::vector<DynamicBackendPtr> dynamicBackends = DynamicBackendUtils::CreateDynamicBackends(sharedObjects);
 
-    BOOST_TEST(dynamicBackends.empty());
+    CHECK(dynamicBackends.empty());
 }
 
 void CreateDynamicBackendsMixedTypesTestImpl()
@@ -885,8 +882,8 @@
 
     std::string testDynamicBackendsSubDir5 = GetTestSubDirectory(g_TestDynamicBackendsSubDir5);
     std::string testDynamicBackendsSubDir6 = GetTestSubDirectory(g_TestDynamicBackendsSubDir6);
-    BOOST_CHECK(exists(testDynamicBackendsSubDir5));
-    BOOST_CHECK(exists(testDynamicBackendsSubDir6));
+    CHECK(exists(testDynamicBackendsSubDir5));
+    CHECK(exists(testDynamicBackendsSubDir6));
 
     std::string testValidBackend2FilePath = GetTestFilePath(testDynamicBackendsSubDir5,
                                                             g_TestValidBackend2FileName);
@@ -894,9 +891,9 @@
                                                               g_TestInvalidBackend8FileName);
     std::string testInvalidBackend9FilePath = GetTestFilePath(testDynamicBackendsSubDir6,
                                                               g_TestInvalidBackend9FileName);
-    BOOST_CHECK(exists(testValidBackend2FilePath));
-    BOOST_CHECK(exists(testInvalidBackend8FilePath));
-    BOOST_CHECK(exists(testInvalidBackend9FilePath));
+    CHECK(exists(testValidBackend2FilePath));
+    CHECK(exists(testInvalidBackend8FilePath));
+    CHECK(exists(testInvalidBackend9FilePath));
 
     std::vector<std::string> sharedObjects
     {
@@ -907,9 +904,9 @@
     };
     std::vector<DynamicBackendPtr> dynamicBackends = DynamicBackendUtils::CreateDynamicBackends(sharedObjects);
 
-    BOOST_TEST(dynamicBackends.size() == 1);
-    BOOST_TEST((dynamicBackends[0] != nullptr));
-    BOOST_TEST((dynamicBackends[0]->GetBackendId() == "TestValid2"));
+    CHECK(dynamicBackends.size() == 1);
+    CHECK((dynamicBackends[0] != nullptr));
+    CHECK((dynamicBackends[0]->GetBackendId() == "TestValid2"));
 }
 
 #if defined(ARMNNREF_ENABLED)
@@ -922,42 +919,42 @@
 
     // Dummy registry used for testing
     BackendRegistry backendRegistry;
-    BOOST_TEST(backendRegistry.Size() == 0);
+    CHECK(backendRegistry.Size() == 0);
 
     std::string testDynamicBackendsSubDir5 = GetTestSubDirectory(g_TestDynamicBackendsSubDir5);
-    BOOST_CHECK(exists(testDynamicBackendsSubDir5));
+    CHECK(exists(testDynamicBackendsSubDir5));
 
     std::string testValidBackend2FilePath = GetTestFilePath(testDynamicBackendsSubDir5, g_TestValidBackend2FileName);
-    BOOST_CHECK(exists(testValidBackend2FilePath));
+    CHECK(exists(testValidBackend2FilePath));
 
     std::vector<std::string> sharedObjects{ testValidBackend2FilePath };
     std::vector<DynamicBackendPtr> dynamicBackends = TestDynamicBackendUtils::CreateDynamicBackends(sharedObjects);
 
-    BOOST_TEST(dynamicBackends.size() == 1);
-    BOOST_TEST((dynamicBackends[0] != nullptr));
+    CHECK(dynamicBackends.size() == 1);
+    CHECK((dynamicBackends[0] != nullptr));
 
     BackendId dynamicBackendId = dynamicBackends[0]->GetBackendId();
-    BOOST_TEST((dynamicBackendId == "TestValid2"));
+    CHECK((dynamicBackendId == "TestValid2"));
 
     BackendVersion dynamicBackendVersion = dynamicBackends[0]->GetBackendVersion();
-    BOOST_TEST(TestDynamicBackendUtils::IsBackendCompatible(dynamicBackendVersion));
+    CHECK(TestDynamicBackendUtils::IsBackendCompatible(dynamicBackendVersion));
 
     BackendIdSet registeredBackendIds = TestDynamicBackendUtils::RegisterDynamicBackendsImplTest(backendRegistry,
                                                                                                  dynamicBackends);
-    BOOST_TEST(backendRegistry.Size() == 1);
-    BOOST_TEST(registeredBackendIds.size() == 1);
+    CHECK(backendRegistry.Size() == 1);
+    CHECK(registeredBackendIds.size() == 1);
 
     BackendIdSet backendIds = backendRegistry.GetBackendIds();
-    BOOST_TEST(backendIds.size() == 1);
-    BOOST_TEST((backendIds.find(dynamicBackendId) != backendIds.end()));
-    BOOST_TEST((registeredBackendIds.find(dynamicBackendId) != registeredBackendIds.end()));
+    CHECK(backendIds.size() == 1);
+    CHECK((backendIds.find(dynamicBackendId) != backendIds.end()));
+    CHECK((registeredBackendIds.find(dynamicBackendId) != registeredBackendIds.end()));
 
     auto dynamicBackendFactoryFunction = backendRegistry.GetFactory(dynamicBackendId);
-    BOOST_TEST((dynamicBackendFactoryFunction != nullptr));
+    CHECK((dynamicBackendFactoryFunction != nullptr));
 
     IBackendInternalUniquePtr dynamicBackend = dynamicBackendFactoryFunction();
-    BOOST_TEST((dynamicBackend != nullptr));
-    BOOST_TEST((dynamicBackend->GetId() == dynamicBackendId));
+    CHECK((dynamicBackend != nullptr));
+    CHECK((dynamicBackend->GetId() == dynamicBackendId));
 }
 
 void RegisterMultipleDynamicBackendsTestImpl()
@@ -969,15 +966,15 @@
 
     std::string testDynamicBackendsSubDir5 = GetTestSubDirectory(g_TestDynamicBackendsSubDir5);
     std::string testDynamicBackendsSubDir6 = GetTestSubDirectory(g_TestDynamicBackendsSubDir6);
-    BOOST_CHECK(exists(testDynamicBackendsSubDir5));
-    BOOST_CHECK(exists(testDynamicBackendsSubDir6));
+    CHECK(exists(testDynamicBackendsSubDir5));
+    CHECK(exists(testDynamicBackendsSubDir6));
 
     std::string testValidBackend2FilePath = GetTestFilePath(testDynamicBackendsSubDir5, g_TestValidBackend2FileName);
     std::string testValidBackend3FilePath = GetTestFilePath(testDynamicBackendsSubDir5, g_TestValidBackend3FileName);
     std::string testValidBackend5FilePath = GetTestFilePath(testDynamicBackendsSubDir6, g_TestValidBackend5FileName);
-    BOOST_CHECK(exists(testValidBackend2FilePath));
-    BOOST_CHECK(exists(testValidBackend3FilePath));
-    BOOST_CHECK(exists(testValidBackend5FilePath));
+    CHECK(exists(testValidBackend2FilePath));
+    CHECK(exists(testValidBackend3FilePath));
+    CHECK(exists(testValidBackend5FilePath));
 
     std::vector<std::string> sharedObjects
     {
@@ -987,52 +984,52 @@
     };
     std::vector<DynamicBackendPtr> dynamicBackends = TestDynamicBackendUtils::CreateDynamicBackends(sharedObjects);
 
-    BOOST_TEST(dynamicBackends.size() == 3);
-    BOOST_TEST((dynamicBackends[0] != nullptr));
-    BOOST_TEST((dynamicBackends[1] != nullptr));
-    BOOST_TEST((dynamicBackends[2] != nullptr));
+    CHECK(dynamicBackends.size() == 3);
+    CHECK((dynamicBackends[0] != nullptr));
+    CHECK((dynamicBackends[1] != nullptr));
+    CHECK((dynamicBackends[2] != nullptr));
 
     BackendId dynamicBackendId1 = dynamicBackends[0]->GetBackendId();
     BackendId dynamicBackendId2 = dynamicBackends[1]->GetBackendId();
     BackendId dynamicBackendId3 = dynamicBackends[2]->GetBackendId();
-    BOOST_TEST((dynamicBackendId1 == "TestValid2"));
-    BOOST_TEST((dynamicBackendId2 == "TestValid3"));
-    BOOST_TEST((dynamicBackendId3 == "TestValid5"));
+    CHECK((dynamicBackendId1 == "TestValid2"));
+    CHECK((dynamicBackendId2 == "TestValid3"));
+    CHECK((dynamicBackendId3 == "TestValid5"));
 
     for (size_t i = 0; i < dynamicBackends.size(); i++)
     {
         BackendVersion dynamicBackendVersion = dynamicBackends[i]->GetBackendVersion();
-        BOOST_TEST(TestDynamicBackendUtils::IsBackendCompatible(dynamicBackendVersion));
+        CHECK(TestDynamicBackendUtils::IsBackendCompatible(dynamicBackendVersion));
     }
 
     // Dummy registry used for testing
     BackendRegistry backendRegistry;
-    BOOST_TEST(backendRegistry.Size() == 0);
+    CHECK(backendRegistry.Size() == 0);
 
     BackendIdSet registeredBackendIds = TestDynamicBackendUtils::RegisterDynamicBackendsImplTest(backendRegistry,
                                                                                                  dynamicBackends);
-    BOOST_TEST(backendRegistry.Size() == 3);
-    BOOST_TEST(registeredBackendIds.size() == 3);
+    CHECK(backendRegistry.Size() == 3);
+    CHECK(registeredBackendIds.size() == 3);
 
     BackendIdSet backendIds = backendRegistry.GetBackendIds();
-    BOOST_TEST(backendIds.size() == 3);
-    BOOST_TEST((backendIds.find(dynamicBackendId1) != backendIds.end()));
-    BOOST_TEST((backendIds.find(dynamicBackendId2) != backendIds.end()));
-    BOOST_TEST((backendIds.find(dynamicBackendId3) != backendIds.end()));
-    BOOST_TEST((registeredBackendIds.find(dynamicBackendId1) != registeredBackendIds.end()));
-    BOOST_TEST((registeredBackendIds.find(dynamicBackendId2) != registeredBackendIds.end()));
-    BOOST_TEST((registeredBackendIds.find(dynamicBackendId3) != registeredBackendIds.end()));
+    CHECK(backendIds.size() == 3);
+    CHECK((backendIds.find(dynamicBackendId1) != backendIds.end()));
+    CHECK((backendIds.find(dynamicBackendId2) != backendIds.end()));
+    CHECK((backendIds.find(dynamicBackendId3) != backendIds.end()));
+    CHECK((registeredBackendIds.find(dynamicBackendId1) != registeredBackendIds.end()));
+    CHECK((registeredBackendIds.find(dynamicBackendId2) != registeredBackendIds.end()));
+    CHECK((registeredBackendIds.find(dynamicBackendId3) != registeredBackendIds.end()));
 
     for (size_t i = 0; i < dynamicBackends.size(); i++)
     {
         BackendId dynamicBackendId = dynamicBackends[i]->GetBackendId();
 
         auto dynamicBackendFactoryFunction = backendRegistry.GetFactory(dynamicBackendId);
-        BOOST_TEST((dynamicBackendFactoryFunction != nullptr));
+        CHECK((dynamicBackendFactoryFunction != nullptr));
 
         IBackendInternalUniquePtr dynamicBackend = dynamicBackendFactoryFunction();
-        BOOST_TEST((dynamicBackend != nullptr));
-        BOOST_TEST((dynamicBackend->GetId() == dynamicBackendId));
+        CHECK((dynamicBackend != nullptr));
+        CHECK((dynamicBackend->GetId() == dynamicBackendId));
     }
 }
 
@@ -1076,11 +1073,11 @@
     std::string testDynamicBackendsSubDir7 = GetTestSubDirectory(g_TestDynamicBackendsSubDir7);
     std::string testDynamicBackendsSubDir8 = GetTestSubDirectory(g_TestDynamicBackendsSubDir8);
     std::string testDynamicBackendsSubDir9 = GetTestSubDirectory(g_TestDynamicBackendsSubDir9);
-    BOOST_CHECK(exists(testDynamicBackendsSubDir5));
-    BOOST_CHECK(exists(testDynamicBackendsSubDir6));
-    BOOST_CHECK(exists(testDynamicBackendsSubDir7));
-    BOOST_CHECK(!exists(testDynamicBackendsSubDir8));
-    BOOST_CHECK(exists(testDynamicBackendsSubDir9));
+    CHECK(exists(testDynamicBackendsSubDir5));
+    CHECK(exists(testDynamicBackendsSubDir6));
+    CHECK(exists(testDynamicBackendsSubDir7));
+    CHECK(!exists(testDynamicBackendsSubDir8));
+    CHECK(exists(testDynamicBackendsSubDir9));
 
     std::string testValidBackend2FilePath    = GetTestFilePath(testDynamicBackendsSubDir5, g_TestValidBackend2FileName);
     std::string testValidBackend3FilePath    = GetTestFilePath(testDynamicBackendsSubDir5, g_TestValidBackend3FileName);
@@ -1095,15 +1092,15 @@
                                                                g_TestInvalidBackend10FileName);
     std::string testInvalidBackend11FilePath = GetTestFilePath(testDynamicBackendsSubDir9,
                                                                g_TestInvalidBackend11FileName);
-    BOOST_CHECK(exists(testValidBackend2FilePath));
-    BOOST_CHECK(exists(testValidBackend3FilePath));
-    BOOST_CHECK(exists(testValidBackend2DupFilePath));
-    BOOST_CHECK(exists(testValidBackend4FilePath));
-    BOOST_CHECK(exists(testValidBackend5FilePath));
-    BOOST_CHECK(exists(testInvalidBackend8FilePath));
-    BOOST_CHECK(exists(testInvalidBackend9FilePath));
-    BOOST_CHECK(exists(testInvalidBackend10FilePath));
-    BOOST_CHECK(exists(testInvalidBackend11FilePath));
+    CHECK(exists(testValidBackend2FilePath));
+    CHECK(exists(testValidBackend3FilePath));
+    CHECK(exists(testValidBackend2DupFilePath));
+    CHECK(exists(testValidBackend4FilePath));
+    CHECK(exists(testValidBackend5FilePath));
+    CHECK(exists(testInvalidBackend8FilePath));
+    CHECK(exists(testInvalidBackend9FilePath));
+    CHECK(exists(testInvalidBackend10FilePath));
+    CHECK(exists(testInvalidBackend11FilePath));
 
     std::vector<std::string> sharedObjects
     {
@@ -1120,14 +1117,14 @@
     };
     std::vector<DynamicBackendPtr> dynamicBackends = TestDynamicBackendUtils::CreateDynamicBackends(sharedObjects);
 
-    BOOST_TEST(dynamicBackends.size() == 7);
-    BOOST_TEST((dynamicBackends[0] != nullptr));
-    BOOST_TEST((dynamicBackends[1] != nullptr));
-    BOOST_TEST((dynamicBackends[2] != nullptr));
-    BOOST_TEST((dynamicBackends[3] != nullptr));
-    BOOST_TEST((dynamicBackends[4] != nullptr));
-    BOOST_TEST((dynamicBackends[5] != nullptr));
-    BOOST_TEST((dynamicBackends[6] != nullptr));
+    CHECK(dynamicBackends.size() == 7);
+    CHECK((dynamicBackends[0] != nullptr));
+    CHECK((dynamicBackends[1] != nullptr));
+    CHECK((dynamicBackends[2] != nullptr));
+    CHECK((dynamicBackends[3] != nullptr));
+    CHECK((dynamicBackends[4] != nullptr));
+    CHECK((dynamicBackends[5] != nullptr));
+    CHECK((dynamicBackends[6] != nullptr));
 
     BackendId dynamicBackendId1 = dynamicBackends[0]->GetBackendId();
     BackendId dynamicBackendId2 = dynamicBackends[1]->GetBackendId();
@@ -1136,23 +1133,23 @@
     BackendId dynamicBackendId5 = dynamicBackends[4]->GetBackendId();
     BackendId dynamicBackendId6 = dynamicBackends[5]->GetBackendId();
     BackendId dynamicBackendId7 = dynamicBackends[6]->GetBackendId();
-    BOOST_TEST((dynamicBackendId1 == "TestValid2"));
-    BOOST_TEST((dynamicBackendId2 == "TestValid3"));
-    BOOST_TEST((dynamicBackendId3 == "TestValid2")); // From duplicate Arm_TestValid2_backend.so
-    BOOST_TEST((dynamicBackendId4 == "TestValid2")); // From Arm_TestValid4_backend.so
-    BOOST_TEST((dynamicBackendId5 == "TestValid5"));
-    BOOST_TEST((dynamicBackendId6 == ""));
-    BOOST_TEST((dynamicBackendId7 == "Unknown"));
+    CHECK((dynamicBackendId1 == "TestValid2"));
+    CHECK((dynamicBackendId2 == "TestValid3"));
+    CHECK((dynamicBackendId3 == "TestValid2")); // From duplicate Arm_TestValid2_backend.so
+    CHECK((dynamicBackendId4 == "TestValid2")); // From Arm_TestValid4_backend.so
+    CHECK((dynamicBackendId5 == "TestValid5"));
+    CHECK((dynamicBackendId6 == ""));
+    CHECK((dynamicBackendId7 == "Unknown"));
 
     for (size_t i = 0; i < dynamicBackends.size(); i++)
     {
         BackendVersion dynamicBackendVersion = dynamicBackends[i]->GetBackendVersion();
-        BOOST_TEST(TestDynamicBackendUtils::IsBackendCompatible(dynamicBackendVersion));
+        CHECK(TestDynamicBackendUtils::IsBackendCompatible(dynamicBackendVersion));
     }
 
     // Dummy registry used for testing
     BackendRegistry backendRegistry;
-    BOOST_TEST(backendRegistry.Size() == 0);
+    CHECK(backendRegistry.Size() == 0);
 
     std::vector<BackendId> expectedRegisteredbackendIds
     {
@@ -1163,22 +1160,22 @@
 
     BackendIdSet registeredBackendIds = TestDynamicBackendUtils::RegisterDynamicBackendsImplTest(backendRegistry,
                                                                                                  dynamicBackends);
-    BOOST_TEST(backendRegistry.Size() == expectedRegisteredbackendIds.size());
-    BOOST_TEST(registeredBackendIds.size() == expectedRegisteredbackendIds.size());
+    CHECK(backendRegistry.Size() == expectedRegisteredbackendIds.size());
+    CHECK(registeredBackendIds.size() == expectedRegisteredbackendIds.size());
 
     BackendIdSet backendIds = backendRegistry.GetBackendIds();
-    BOOST_TEST(backendIds.size() == expectedRegisteredbackendIds.size());
+    CHECK(backendIds.size() == expectedRegisteredbackendIds.size());
     for (const BackendId& expectedRegisteredbackendId : expectedRegisteredbackendIds)
     {
-        BOOST_TEST((backendIds.find(expectedRegisteredbackendId) != backendIds.end()));
-        BOOST_TEST((registeredBackendIds.find(expectedRegisteredbackendId) != registeredBackendIds.end()));
+        CHECK((backendIds.find(expectedRegisteredbackendId) != backendIds.end()));
+        CHECK((registeredBackendIds.find(expectedRegisteredbackendId) != registeredBackendIds.end()));
 
         auto dynamicBackendFactoryFunction = backendRegistry.GetFactory(expectedRegisteredbackendId);
-        BOOST_TEST((dynamicBackendFactoryFunction != nullptr));
+        CHECK((dynamicBackendFactoryFunction != nullptr));
 
         IBackendInternalUniquePtr dynamicBackend = dynamicBackendFactoryFunction();
-        BOOST_TEST((dynamicBackend != nullptr));
-        BOOST_TEST((dynamicBackend->GetId() == expectedRegisteredbackendId));
+        CHECK((dynamicBackend != nullptr));
+        CHECK((dynamicBackend->GetId() == expectedRegisteredbackendId));
     }
 }
 #endif
@@ -1200,14 +1197,14 @@
     // Arm_TestInvalid11_backend.so -> not valid (invalid backend id)
 
     std::string testDynamicBackendsSubDir9 = GetTestSubDirectory(g_TestDynamicBackendsSubDir9);
-    BOOST_CHECK(exists(testDynamicBackendsSubDir9));
+    CHECK(exists(testDynamicBackendsSubDir9));
 
     std::string testInvalidBackend10FilePath = GetTestFilePath(testDynamicBackendsSubDir9,
                                                                g_TestInvalidBackend10FileName);
     std::string testInvalidBackend11FilePath = GetTestFilePath(testDynamicBackendsSubDir9,
                                                                g_TestInvalidBackend11FileName);
-    BOOST_CHECK(exists(testInvalidBackend10FilePath));
-    BOOST_CHECK(exists(testInvalidBackend11FilePath));
+    CHECK(exists(testInvalidBackend10FilePath));
+    CHECK(exists(testInvalidBackend11FilePath));
 
     std::vector<std::string> sharedObjects
     {
@@ -1217,30 +1214,30 @@
     };
     std::vector<DynamicBackendPtr> dynamicBackends = TestDynamicBackendUtils::CreateDynamicBackends(sharedObjects);
 
-    BOOST_TEST(dynamicBackends.size() == 2);
-    BOOST_TEST((dynamicBackends[0] != nullptr));
-    BOOST_TEST((dynamicBackends[1] != nullptr));
+    CHECK(dynamicBackends.size() == 2);
+    CHECK((dynamicBackends[0] != nullptr));
+    CHECK((dynamicBackends[1] != nullptr));
 
     BackendId dynamicBackendId1 = dynamicBackends[0]->GetBackendId();
     BackendId dynamicBackendId2 = dynamicBackends[1]->GetBackendId();
-    BOOST_TEST((dynamicBackendId1 == ""));
-    BOOST_TEST((dynamicBackendId2 == "Unknown"));
+    CHECK((dynamicBackendId1 == ""));
+    CHECK((dynamicBackendId2 == "Unknown"));
 
     for (size_t i = 0; i < dynamicBackends.size(); i++)
     {
         BackendVersion dynamicBackendVersion = dynamicBackends[i]->GetBackendVersion();
-        BOOST_TEST(TestDynamicBackendUtils::IsBackendCompatible(dynamicBackendVersion));
+        CHECK(TestDynamicBackendUtils::IsBackendCompatible(dynamicBackendVersion));
     }
 
     // Dummy registry used for testing
     BackendRegistry backendRegistry;
-    BOOST_TEST(backendRegistry.Size() == 0);
+    CHECK(backendRegistry.Size() == 0);
 
     // Check that no dynamic backend got registered
     BackendIdSet registeredBackendIds = TestDynamicBackendUtils::RegisterDynamicBackendsImplTest(backendRegistry,
                                                                                                  dynamicBackends);
-    BOOST_TEST(backendRegistry.Size() == 0);
-    BOOST_TEST(registeredBackendIds.empty());
+    CHECK(backendRegistry.Size() == 0);
+    CHECK(registeredBackendIds.empty());
 }
 
 #if !defined(ARMNN_DYNAMIC_BACKEND_ENABLED)
@@ -1253,16 +1250,16 @@
     TestBackendRegistry testBackendRegistry;
 
     const BackendRegistry& backendRegistry = BackendRegistryInstance();
-    BOOST_TEST(backendRegistry.Size() == 0);
+    CHECK(backendRegistry.Size() == 0);
 
     IRuntime::CreationOptions creationOptions;
     IRuntimePtr runtime = IRuntime::Create(creationOptions);
 
     const DeviceSpec& deviceSpec = *PolymorphicDowncast<const DeviceSpec*>(&runtime->GetDeviceSpec());
     BackendIdSet supportedBackendIds = deviceSpec.GetSupportedBackends();
-    BOOST_TEST(supportedBackendIds.empty());
+    CHECK(supportedBackendIds.empty());
 
-    BOOST_TEST(backendRegistry.Size() == 0);
+    CHECK(backendRegistry.Size() == 0);
 }
 
 #endif
@@ -1277,7 +1274,7 @@
 
     // This directory contains valid and invalid backends
     std::string testDynamicBackendsSubDir5 = GetTestSubDirectory(g_TestDynamicBackendsSubDir5);
-    BOOST_CHECK(exists(testDynamicBackendsSubDir5));
+    CHECK(exists(testDynamicBackendsSubDir5));
 
     // Using the path override in CreationOptions to load some test dynamic backends
     IRuntime::CreationOptions creationOptions;
@@ -1291,20 +1288,20 @@
     };
 
     const BackendRegistry& backendRegistry = BackendRegistryInstance();
-    BOOST_TEST(backendRegistry.Size() == expectedRegisteredbackendIds.size());
+    CHECK(backendRegistry.Size() == expectedRegisteredbackendIds.size());
 
     BackendIdSet backendIds = backendRegistry.GetBackendIds();
     for (const BackendId& expectedRegisteredbackendId : expectedRegisteredbackendIds)
     {
-        BOOST_TEST((backendIds.find(expectedRegisteredbackendId) != backendIds.end()));
+        CHECK((backendIds.find(expectedRegisteredbackendId) != backendIds.end()));
     }
 
     const DeviceSpec& deviceSpec = *PolymorphicDowncast<const DeviceSpec*>(&runtime->GetDeviceSpec());
     BackendIdSet supportedBackendIds = deviceSpec.GetSupportedBackends();
-    BOOST_TEST(supportedBackendIds.size() == expectedRegisteredbackendIds.size());
+    CHECK(supportedBackendIds.size() == expectedRegisteredbackendIds.size());
     for (const BackendId& expectedRegisteredbackendId : expectedRegisteredbackendIds)
     {
-        BOOST_TEST((supportedBackendIds.find(expectedRegisteredbackendId) != supportedBackendIds.end()));
+        CHECK((supportedBackendIds.find(expectedRegisteredbackendId) != supportedBackendIds.end()));
     }
 }
 
@@ -1318,7 +1315,7 @@
 
     // This directory contains valid, invalid and duplicate backends
     std::string testDynamicBackendsSubDir6 = GetTestSubDirectory(g_TestDynamicBackendsSubDir6);
-    BOOST_CHECK(exists(testDynamicBackendsSubDir6));
+    CHECK(exists(testDynamicBackendsSubDir6));
 
     // Using the path override in CreationOptions to load some test dynamic backends
     IRuntime::CreationOptions creationOptions;
@@ -1332,20 +1329,20 @@
     };
 
     const BackendRegistry& backendRegistry = BackendRegistryInstance();
-    BOOST_TEST(backendRegistry.Size() == expectedRegisteredbackendIds.size());
+    CHECK(backendRegistry.Size() == expectedRegisteredbackendIds.size());
 
     BackendIdSet backendIds = backendRegistry.GetBackendIds();
     for (const BackendId& expectedRegisteredbackendId : expectedRegisteredbackendIds)
     {
-        BOOST_TEST((backendIds.find(expectedRegisteredbackendId) != backendIds.end()));
+        CHECK((backendIds.find(expectedRegisteredbackendId) != backendIds.end()));
     }
 
     const DeviceSpec& deviceSpec = *PolymorphicDowncast<const DeviceSpec*>(&runtime->GetDeviceSpec());
     BackendIdSet supportedBackendIds = deviceSpec.GetSupportedBackends();
-    BOOST_TEST(supportedBackendIds.size() == expectedRegisteredbackendIds.size());
+    CHECK(supportedBackendIds.size() == expectedRegisteredbackendIds.size());
     for (const BackendId& expectedRegisteredbackendId : expectedRegisteredbackendIds)
     {
-        BOOST_TEST((supportedBackendIds.find(expectedRegisteredbackendId) != supportedBackendIds.end()));
+        CHECK((supportedBackendIds.find(expectedRegisteredbackendId) != supportedBackendIds.end()));
     }
 }
 
@@ -1359,7 +1356,7 @@
 
     // This directory contains only invalid backends
     std::string testDynamicBackendsSubDir9 = GetTestSubDirectory(g_TestDynamicBackendsSubDir9);
-    BOOST_CHECK(exists(testDynamicBackendsSubDir9));
+    CHECK(exists(testDynamicBackendsSubDir9));
 
     // Using the path override in CreationOptions to load some test dynamic backends
     IRuntime::CreationOptions creationOptions;
@@ -1367,11 +1364,11 @@
     IRuntimePtr runtime = IRuntime::Create(creationOptions);
 
     const BackendRegistry& backendRegistry = BackendRegistryInstance();
-    BOOST_TEST(backendRegistry.Size() == 0);
+    CHECK(backendRegistry.Size() == 0);
 
     const DeviceSpec& deviceSpec = *PolymorphicDowncast<const DeviceSpec*>(&runtime->GetDeviceSpec());
     BackendIdSet supportedBackendIds = deviceSpec.GetSupportedBackends();
-    BOOST_TEST(supportedBackendIds.empty());
+    CHECK(supportedBackendIds.empty());
 }
 
 void RuntimeInvalidOverridePathTestImpl()
@@ -1387,11 +1384,11 @@
     IRuntimePtr runtime = IRuntime::Create(creationOptions);
 
     const BackendRegistry& backendRegistry = BackendRegistryInstance();
-    BOOST_TEST(backendRegistry.Size() == 0);
+    CHECK(backendRegistry.Size() == 0);
 
     const DeviceSpec& deviceSpec = *PolymorphicDowncast<const DeviceSpec*>(&runtime->GetDeviceSpec());
     BackendIdSet supportedBackendIds = deviceSpec.GetSupportedBackends();
-    BOOST_TEST(supportedBackendIds.empty());
+    CHECK(supportedBackendIds.empty());
 }
 
 #if defined(ARMNNREF_ENABLED)
@@ -1410,12 +1407,12 @@
     std::string dynamicBackendsBaseDir = GetDynamicBackendsBasePath();
     std::string referenceDynamicBackendSubDir = GetTestSubDirectory(dynamicBackendsBaseDir,
                                                                     g_ReferenceDynamicBackendSubDir);
-    BOOST_CHECK(exists(referenceDynamicBackendSubDir));
+    CHECK(exists(referenceDynamicBackendSubDir));
 
     // Check that the reference dynamic backend file exists
     std::string referenceBackendFilePath = GetTestFilePath(referenceDynamicBackendSubDir,
                                                            g_ReferenceBackendFileName);
-    BOOST_CHECK(exists(referenceBackendFilePath));
+    CHECK(exists(referenceBackendFilePath));
 
     // Using the path override in CreationOptions to load the reference dynamic backend
     IRuntime::CreationOptions creationOptions;
@@ -1423,28 +1420,28 @@
     IRuntimePtr runtime = IRuntime::Create(creationOptions);
 
     const BackendRegistry& backendRegistry = BackendRegistryInstance();
-    BOOST_TEST(backendRegistry.Size() == 1);
+    CHECK(backendRegistry.Size() == 1);
 
     BackendIdSet backendIds = backendRegistry.GetBackendIds();
-    BOOST_TEST((backendIds.find("CpuRef") != backendIds.end()));
+    CHECK((backendIds.find("CpuRef") != backendIds.end()));
 
     const DeviceSpec& deviceSpec = *PolymorphicDowncast<const DeviceSpec*>(&runtime->GetDeviceSpec());
     BackendIdSet supportedBackendIds = deviceSpec.GetSupportedBackends();
-    BOOST_TEST(supportedBackendIds.size() == 1);
-    BOOST_TEST((supportedBackendIds.find("CpuRef") != supportedBackendIds.end()));
+    CHECK(supportedBackendIds.size() == 1);
+    CHECK((supportedBackendIds.find("CpuRef") != supportedBackendIds.end()));
 
     // Get the factory function
     auto referenceDynamicBackendFactoryFunction = backendRegistry.GetFactory("CpuRef");
-    BOOST_TEST((referenceDynamicBackendFactoryFunction != nullptr));
+    CHECK((referenceDynamicBackendFactoryFunction != nullptr));
 
     // Use the factory function to create an instance of the reference backend
     IBackendInternalUniquePtr referenceDynamicBackend = referenceDynamicBackendFactoryFunction();
-    BOOST_TEST((referenceDynamicBackend != nullptr));
-    BOOST_TEST((referenceDynamicBackend->GetId() == "CpuRef"));
+    CHECK((referenceDynamicBackend != nullptr));
+    CHECK((referenceDynamicBackend->GetId() == "CpuRef"));
 
     // Test the backend instance by querying the layer support
     IBackendInternal::ILayerSupportSharedPtr referenceLayerSupport = referenceDynamicBackend->GetLayerSupport();
-    BOOST_TEST((referenceLayerSupport != nullptr));
+    CHECK((referenceLayerSupport != nullptr));
 
     TensorShape inputShape {  1, 16, 16, 16 };
     TensorShape outputShape{  1, 16, 16, 16 };
@@ -1459,11 +1456,11 @@
                                                             convolution2dDescriptor,
                                                             weightInfo,
                                                             EmptyOptional());
-    BOOST_TEST(referenceConvolution2dSupported);
+    CHECK(referenceConvolution2dSupported);
 
     // Test the backend instance by creating a workload
     IBackendInternal::IWorkloadFactoryPtr referenceWorkloadFactory = referenceDynamicBackend->CreateWorkloadFactory();
-    BOOST_TEST((referenceWorkloadFactory != nullptr));
+    CHECK((referenceWorkloadFactory != nullptr));
 
     // Create dummy settings for the workload
     Convolution2dQueueDescriptor convolution2dQueueDescriptor;
@@ -1478,8 +1475,8 @@
 
     // Create a convolution workload with the dummy settings
     auto workload = referenceWorkloadFactory->CreateConvolution2d(convolution2dQueueDescriptor, workloadInfo);
-    BOOST_TEST((workload != nullptr));
-    BOOST_TEST(workload.get() == PolymorphicDowncast<RefConvolution2dWorkload*>(workload.get()));
+    CHECK((workload != nullptr));
+    CHECK(workload.get() == PolymorphicDowncast<RefConvolution2dWorkload*>(workload.get()));
 }
 
 #endif
@@ -1498,7 +1495,7 @@
                               "Ensure a DYNAMIC_BACKEND_PATHS was set at compile time to the location of "
                               "libArm_SampleDynamic_backend.so. "
                               "To disable this test recompile with: -DSAMPLE_DYNAMIC_BACKEND_ENABLED=0";
-        BOOST_FAIL(message);
+        FAIL(message);
     }
 }
 
@@ -1509,25 +1506,25 @@
     IRuntime::CreationOptions creationOptions;
     IRuntimePtr runtime = IRuntime::Create(creationOptions);
     const BackendRegistry& backendRegistry = BackendRegistryInstance();
-    BOOST_TEST(backendRegistry.Size() >= 1);
+    CHECK(backendRegistry.Size() >= 1);
     CheckSampleDynamicBackendLoaded();
     const DeviceSpec& deviceSpec = *PolymorphicDowncast<const DeviceSpec*>(&runtime->GetDeviceSpec());
     BackendIdSet supportedBackendIds = deviceSpec.GetSupportedBackends();
-    BOOST_TEST(supportedBackendIds.size()>= 1);
-    BOOST_TEST((supportedBackendIds.find("SampleDynamic") != supportedBackendIds.end()));
+    CHECK(supportedBackendIds.size() >= 1);
+    CHECK((supportedBackendIds.find("SampleDynamic") != supportedBackendIds.end()));
 
     // Get the factory function
     auto sampleDynamicBackendFactoryFunction = backendRegistry.GetFactory("SampleDynamic");
-    BOOST_TEST((sampleDynamicBackendFactoryFunction != nullptr));
+    CHECK((sampleDynamicBackendFactoryFunction != nullptr));
 
     // Use the factory function to create an instance of the dynamic backend
     IBackendInternalUniquePtr sampleDynamicBackend = sampleDynamicBackendFactoryFunction();
-    BOOST_TEST((sampleDynamicBackend != nullptr));
-    BOOST_TEST((sampleDynamicBackend->GetId() == "SampleDynamic"));
+    CHECK((sampleDynamicBackend != nullptr));
+    CHECK((sampleDynamicBackend->GetId() == "SampleDynamic"));
 
     // Test the backend instance by querying the layer support
     IBackendInternal::ILayerSupportSharedPtr sampleLayerSupport = sampleDynamicBackend->GetLayerSupport();
-    BOOST_TEST((sampleLayerSupport != nullptr));
+    CHECK((sampleLayerSupport != nullptr));
 
     TensorShape inputShape {  1, 16, 16, 16 };
     TensorShape outputShape{  1, 16, 16, 16 };
@@ -1542,11 +1539,11 @@
                                                          convolution2dDescriptor,
                                                          weightInfo,
                                                          EmptyOptional());
-    BOOST_TEST(!sampleConvolution2dSupported);
+    CHECK(!sampleConvolution2dSupported);
 
     // Test the backend instance by creating a workload
     IBackendInternal::IWorkloadFactoryPtr sampleWorkloadFactory = sampleDynamicBackend->CreateWorkloadFactory();
-    BOOST_TEST((sampleWorkloadFactory != nullptr));
+    CHECK((sampleWorkloadFactory != nullptr));
 
     // Create dummy settings for the workload
     AdditionQueueDescriptor additionQueueDescriptor;
@@ -1558,7 +1555,7 @@
 
     // Create a addition workload
     auto workload = sampleWorkloadFactory->CreateAddition(additionQueueDescriptor, workloadInfo);
-    BOOST_TEST((workload != nullptr));
+    CHECK((workload != nullptr));
 }
 
 void SampleDynamicBackendEndToEndTestImpl()
@@ -1611,6 +1608,6 @@
     runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
 
     // Checks the results.
-    BOOST_TEST(outputData == expectedOutputData);
+    CHECK(outputData == expectedOutputData);
 }
 #endif
diff --git a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
index 5fedaa2..f958613 100644
--- a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
@@ -12,7 +12,7 @@
 
 #include <armnn/utility/NumericCast.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <vector>
 
@@ -61,7 +61,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateElementwiseUnaryNetwork<ArmnnInType>(inputShape, outputShape, operation, qScale, qOffset);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     const std::vector<float> input({ 1, -1, 1, 1,  5, -5, 5, 5,
                                        -3, 3, 3, 3,  4, 4, -4, 4 });
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index a5fe8c6..2d268f8 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -14,7 +14,8 @@
 #include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
+#include <algorithm>
 #include <vector>
 
@@ -169,7 +169,7 @@
         std::vector<TOutput> out = outputStorage.at(it.first);
         for (unsigned int i = 0; i < out.size(); ++i)
         {
-            BOOST_CHECK_MESSAGE(Compare<ArmnnOType>(it.second[i], out[i], tolerance) == true,
+            CHECK_MESSAGE(Compare<ArmnnOType>(it.second[i], out[i], tolerance) == true,
                     "Actual output: " << out[i] << ". Expected output:" << it.second[i]);
 
         }
@@ -203,7 +203,7 @@
 
     // Optimize the network
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
-    BOOST_CHECK(optNet);
+    CHECK(optNet);
 
     // Loads it into the runtime.
     NetworkId netId;
@@ -238,7 +238,7 @@
     runtime->GetProfiler(netId)->EnableProfiling(true);
 
     // Do the inference and expect it to fail with a ImportMemoryException
-    BOOST_CHECK_THROW(runtime->EnqueueWorkload(netId, inputTensors, outputTensors), MemoryImportException);
+    CHECK_THROWS_AS(runtime->EnqueueWorkload(netId, inputTensors, outputTensors), MemoryImportException);
 }
 
 inline void ExportNonAlignedOutputPointerTest(std::vector<BackendId> backends)
@@ -268,7 +268,7 @@
 
     // Optimize the network
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
-    BOOST_CHECK(optNet);
+    CHECK(optNet);
 
     // Loads it into the runtime.
     NetworkId netId;
@@ -304,11 +304,11 @@
     if (backends[0] == Compute::CpuAcc)
     {
         // For CpuAcc the NeonTensorHandle will throw its own exception on misaligned memory
-        BOOST_CHECK_THROW(runtime->EnqueueWorkload(netId, inputTensors, outputTensors), MemoryImportException);
+        CHECK_THROWS_AS(runtime->EnqueueWorkload(netId, inputTensors, outputTensors), MemoryImportException);
     }
     else
     {
-        BOOST_CHECK_THROW(runtime->EnqueueWorkload(netId, inputTensors, outputTensors), MemoryExportException);
+        CHECK_THROWS_AS(runtime->EnqueueWorkload(netId, inputTensors, outputTensors), MemoryExportException);
     }
 }
 
@@ -339,7 +339,7 @@
 
     // Optimize the network
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
-    BOOST_CHECK(optNet);
+    CHECK(optNet);
 
     // Loads it into the runtime.
     NetworkId netId;
@@ -383,18 +383,18 @@
 
     // Contains ActivationWorkload
     std::size_t found = dump.find("ActivationWorkload");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contains SyncMemGeneric
     found = dump.find("SyncMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Does not contain CopyMemGeneric
     found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found == std::string::npos);
+    CHECK(found == std::string::npos);
 
     // Check output is as expected
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
 }
 
 inline void ImportOnlyWorkload(std::vector<BackendId> backends)
@@ -424,17 +424,17 @@
     // optimize the network
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
 
-    BOOST_TEST_CHECKPOINT("Load Network");
+    INFO("Load Network");
     // Load it into the runtime. It should pass.
     NetworkId netId;
     std::string ignoredErrorMessage;
 
     INetworkProperties networkProperties(false, MemorySource::Malloc, MemorySource::Undefined);
 
-    BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet),ignoredErrorMessage, networkProperties)
+    CHECK(runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties)
                == Status::Success);
 
-    BOOST_TEST_CHECKPOINT("Generate Data");
+    INFO("Generate Data");
     // Creates structures for input & output
     std::vector<float> inputData
     {
@@ -448,7 +448,7 @@
          1.0f, 4.0f, 9.0f, 16.0f
     };
 
-    BOOST_TEST_CHECKPOINT("Create Network");
+    INFO("Create Network");
     InputTensors inputTensors
     {
         {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())},
@@ -458,15 +458,14 @@
         {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
     };
 
-    BOOST_TEST_CHECKPOINT("Get Profiler");
-
+    INFO("Get Profiler");
     runtime->GetProfiler(netId)->EnableProfiling(true);
 
-    BOOST_TEST_CHECKPOINT("Run Inference");
+    INFO("Run Inference");
     // Do the inference
     runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
 
-    BOOST_TEST_CHECKPOINT("Print Profiler");
+    INFO("Print Profiler");
     // Retrieve the Profiler.Print() output to get the workload execution
     ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
     std::stringstream ss;
@@ -474,17 +473,17 @@
     std::string dump = ss.str();
 
     // Check there are no SyncMemGeneric workloads as we didn't export
-    BOOST_TEST_CHECKPOINT("Find SyncMemGeneric");
+    INFO("Find SyncMemGeneric");
     int count = SubStringCounter(dump, "SyncMemGeneric");
-    BOOST_TEST(count == 0);
+    CHECK(count == 0);
 
     // Should only be 1 CopyMemGeneric for the output as we imported
-    BOOST_TEST_CHECKPOINT("Find CopyMemGeneric");
+    INFO("Find CopyMemGeneric");
     count = SubStringCounter(dump, "CopyMemGeneric");
-    BOOST_TEST(count == 1);
+    CHECK(count == 1);
 
     // Check the output is correct
-    BOOST_CHECK_EQUAL_COLLECTIONS(outputData.begin(), outputData.end(), expectedOutput.begin(), expectedOutput.end());
+    CHECK(std::equal(outputData.begin(), outputData.end(), expectedOutput.begin(), expectedOutput.end()));
 }
 
 inline void ExportOnlyWorkload(std::vector<BackendId> backends)
@@ -514,15 +513,15 @@
     // optimize the network
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
 
-    BOOST_TEST_CHECKPOINT("Load Network");
+    INFO("Load Network");
     // Load it into the runtime. It should pass.
     NetworkId netId;
     std::string ignoredErrorMessage;
     INetworkProperties networkProperties(false, MemorySource::Undefined, MemorySource::Malloc);
-    BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet),ignoredErrorMessage, networkProperties)
+    CHECK(runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties)
                == Status::Success);
 
-    BOOST_TEST_CHECKPOINT("Generate Data");
+    INFO("Generate Data");
     // Creates structures for input & output
     std::vector<float> inputData
     {
@@ -536,7 +535,7 @@
          1.0f, 4.0f, 9.0f, 16.0f
     };
 
-    BOOST_TEST_CHECKPOINT("Create Network");
+    INFO("Create Network");
     InputTensors inputTensors
     {
         {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())},
@@ -546,15 +545,14 @@
         {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
     };
 
-    BOOST_TEST_CHECKPOINT("Get Profiler");
-
+    INFO("Get Profiler");
     runtime->GetProfiler(netId)->EnableProfiling(true);
 
-    BOOST_TEST_CHECKPOINT("Run Inference");
+    INFO("Run Inference");
     // Do the inference
     runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
 
-    BOOST_TEST_CHECKPOINT("Print Profiler");
+    INFO("Print Profiler");
     // Retrieve the Profiler.Print() output to get the workload execution
     ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
     std::stringstream ss;
@@ -562,17 +560,17 @@
     std::string dump = ss.str();
 
     // Check there is a SyncMemGeneric workload as we exported
-    BOOST_TEST_CHECKPOINT("Find SyncMemGeneric");
+    INFO("Find SyncMemGeneric");
     int count = SubStringCounter(dump, "SyncMemGeneric");
-    BOOST_TEST(count == 1);
+    CHECK(count == 1);
 
     // Should be 1 CopyMemGeneric for the output as we did not import
-    BOOST_TEST_CHECKPOINT("Find CopyMemGeneric");
+    INFO("Find CopyMemGeneric");
     count = SubStringCounter(dump, "CopyMemGeneric");
-    BOOST_TEST(count == 1);
+    CHECK(count == 1);
 
     // Check the output is correct
-    BOOST_CHECK_EQUAL_COLLECTIONS(outputData.begin(), outputData.end(), expectedOutput.begin(), expectedOutput.end());
+    CHECK(std::equal(outputData.begin(), outputData.end(), expectedOutput.begin(), expectedOutput.end()));
 }
 
 inline void ImportAndExportWorkload(std::vector<BackendId> backends)
@@ -601,17 +599,17 @@
 
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
 
-    BOOST_TEST_CHECKPOINT("Load Network");
+    INFO("Load Network");
     // Load it into the runtime. It should pass.
     NetworkId netId;
     std::string ignoredErrorMessage;
 
     INetworkProperties networkProperties(false, MemorySource::Malloc, MemorySource::Malloc);
 
-    BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet),ignoredErrorMessage, networkProperties)
+    CHECK(runtime->LoadNetwork(netId, std::move(optNet), ignoredErrorMessage, networkProperties)
                == Status::Success);
 
-    BOOST_TEST_CHECKPOINT("Generate Data");
+    INFO("Generate Data");
     // Creates structures for input & output
     std::vector<float> inputData
     {
@@ -625,7 +623,7 @@
          1.0f, 4.0f, 9.0f, 16.0f
     };
 
-    BOOST_TEST_CHECKPOINT("Create Network");
+    INFO("Create Network");
     InputTensors inputTensors
     {
         {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())},
@@ -635,15 +633,14 @@
         {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
     };
 
-    BOOST_TEST_CHECKPOINT("Get Profiler");
-
+    INFO("Get Profiler");
     runtime->GetProfiler(netId)->EnableProfiling(true);
 
-    BOOST_TEST_CHECKPOINT("Run Inference");
+    INFO("Run Inference");
     // Do the inference
     runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
 
-    BOOST_TEST_CHECKPOINT("Print Profiler");
+    INFO("Print Profiler");
     // Retrieve the Profiler.Print() output to get the workload execution
     ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
     std::stringstream ss;
@@ -651,17 +648,17 @@
     std::string dump = ss.str();
 
     // Check there is a SyncMemGeneric workload as we exported
-    BOOST_TEST_CHECKPOINT("Find SyncMemGeneric");
+    INFO("Find SyncMemGeneric");
     int count = SubStringCounter(dump, "SyncMemGeneric");
-    BOOST_TEST(count == 1);
+    CHECK(count == 1);
 
     // Shouldn't be any CopyMemGeneric workloads
-    BOOST_TEST_CHECKPOINT("Find CopyMemGeneric");
+    INFO("Find CopyMemGeneric");
     count = SubStringCounter(dump, "CopyMemGeneric");
-    BOOST_TEST(count == 0);
+    CHECK(count == 0);
 
     // Check the output is correct
-    BOOST_CHECK_EQUAL_COLLECTIONS(outputData.begin(), outputData.end(), expectedOutput.begin(), expectedOutput.end());
+    CHECK(std::equal(outputData.begin(), outputData.end(), expectedOutput.begin(), expectedOutput.end()));
 }
 
 inline void ExportOutputWithSeveralOutputSlotConnectionsTest(std::vector<BackendId> backends)
@@ -753,19 +750,19 @@
         found = dump.find("ClActivationWorkload");
     }
 
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
     // No contains SyncMemGeneric
     found = dump.find("SyncMemGeneric");
-    BOOST_TEST(found == std::string::npos);
+    CHECK(found == std::string::npos);
     // Contains CopyMemGeneric
     found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Check that the outputs are correct
-    BOOST_CHECK_EQUAL_COLLECTIONS(outputData0.begin(), outputData0.end(),
-                                  expectedOutput.begin(), expectedOutput.end());
-    BOOST_CHECK_EQUAL_COLLECTIONS(outputData1.begin(), outputData1.end(),
-                                  expectedOutput.begin(), expectedOutput.end());
+    CHECK(std::equal(outputData0.begin(), outputData0.end(),
+                     expectedOutput.begin(), expectedOutput.end()));
+    CHECK(std::equal(outputData1.begin(), outputData1.end(),
+                     expectedOutput.begin(), expectedOutput.end()));
 }
 
 inline void StridedSliceInvalidSliceEndToEndTest(std::vector<BackendId> backends)
@@ -801,7 +798,7 @@
     stridedSlice->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 3 }, DataType::Float32));
 
     // Attempt to optimize the network and check that the correct exception is thrown
-    BOOST_CHECK_THROW(Optimize(*net, backends, runtime->GetDeviceSpec()), armnn::LayerValidationException);
+    CHECK_THROWS_AS(Optimize(*net, backends, runtime->GetDeviceSpec()), armnn::LayerValidationException);
 }
 
 } // anonymous namespace
diff --git a/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp
index 5d7601b..2a4ccb6 100644
--- a/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp
@@ -12,6 +12,8 @@
 
 #include <ResolveType.hpp>
 
+#include <doctest/doctest.h>
+
 namespace
 {
 
@@ -55,7 +57,7 @@
 
     armnn::INetworkPtr network = CreateFillNetwork(inputInfo, outputInfo, descriptor);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(network);
 
     std::map<int, std::vector<int32_t>> inputTensorData    = {{ 0, inputData }};
     std::map<int, std::vector<T>> expectedOutputTensorData = {{ 0, expectedOutputData }};
diff --git a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
index 5a618c3..923d6f3 100644
--- a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
@@ -12,7 +12,7 @@
 
 #include <armnn/utility/NumericCast.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <vector>
 
@@ -83,7 +83,7 @@
                                                                             weightsTensorInfo,
                                                                             descriptor);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(network);
 
     std::map<int, std::vector<T>> inputTensorData    = {{ 0, inputData }, {1, weightsData}};
     std::map<int, std::vector<T>> expectedOutputTensorData = {{ 0, expectedOutputData }};
diff --git a/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp
index 82f9451..431ef31 100644
--- a/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp
@@ -10,6 +10,8 @@
 #include <armnn/INetwork.hpp>
 #include <ResolveType.hpp>
 
+#include <doctest/doctest.h>
+
 namespace{
 
 armnn::INetworkPtr CreateGatherNetwork(const armnn::TensorInfo& paramsInfo,
@@ -59,7 +61,7 @@
     // Builds up the structure of the network
     armnn::INetworkPtr net = CreateGatherNetwork(paramsInfo, indicesInfo, outputInfo, indicesData);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     std::map<int, std::vector<T>> inputTensorData = {{ 0, paramsData }};
     std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput }};
@@ -115,8 +117,6 @@
     // Builds up the structure of the network
     armnn::INetworkPtr net = CreateGatherNetwork(paramsInfo, indicesInfo, outputInfo, indicesData);
 
-    BOOST_TEST_CHECKPOINT("create a network");
-
     std::map<int, std::vector<T>> inputTensorData = {{ 0, paramsData }};
     std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput }};
 
diff --git a/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp
index a3dd88c..d758137 100644
--- a/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp
@@ -16,7 +16,7 @@
 
 #include <test/TestUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 namespace
 {
@@ -82,7 +82,7 @@
                                                                             beta,
                                                                             eps);
 
-    BOOST_TEST_CHECKPOINT("Create a network");
+    CHECK(net);
 
     std::map<int, std::vector<float>> inputTensorData = { { 0, inputData } };
     std::map<int, std::vector<float>> expectedOutputTensorData = { { 0, expectedOutputData } };
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 4240bb1..5a05ee1 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -12,6 +12,8 @@
 
 #include <armnn/utility/IgnoreUnused.hpp>
 
+#include <doctest/doctest.h>
+
 namespace
 {
 armnn::Graph dummyGraph;
@@ -756,7 +758,7 @@
         try
         {
             bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() != nullptr;
-            BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
+            CHECK_MESSAGE(retVal, layerName << errorMsg);
             return retVal;
         }
         catch(const armnn::InvalidArgumentException& e)
@@ -768,13 +770,13 @@
         catch(const std::exception& e)
         {
             errorMsg = e.what();
-            BOOST_TEST_ERROR(layerName << ": " << errorMsg);
+            FAIL(layerName << ": " << errorMsg);
             return false;
         }
         catch(...)
         {
             errorMsg = "Unexpected error while testing support for ";
-            BOOST_TEST_ERROR(errorMsg << layerName);
+            FAIL(errorMsg << layerName);
             return false;
         }
     }
@@ -784,7 +786,7 @@
         try
         {
             bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() == nullptr;
-            BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
+            CHECK_MESSAGE(retVal, layerName << errorMsg);
             return retVal;
         }
         // These two exceptions are ok: For workloads that are partially supported, attempting to instantiate them
@@ -803,13 +805,13 @@
         catch(const std::exception& e)
         {
             errorMsg = e.what();
-            BOOST_TEST_ERROR(layerName << ": " << errorMsg);
+            FAIL(layerName << ": " << errorMsg);
             return false;
         }
         catch(...)
         {
             errorMsg = "Unexpected error while testing support for ";
-            BOOST_TEST_ERROR(errorMsg << layerName);
+            FAIL(errorMsg << layerName);
             return false;
         }
     }
@@ -871,7 +873,7 @@
     std::stringstream ss;
     ss << LayerPolicy::NameStr << " layer type mismatches expected layer type value.";
     bool v = Type == layer.m_Layer->GetType();
-    BOOST_CHECK_MESSAGE(v, ss.str());
+    CHECK_MESSAGE(v, ss.str());
     return v;
 }
 
diff --git a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
index a2206f7..92c8e14 100644
--- a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
+++ b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
@@ -12,11 +12,12 @@
 #include <armnn/IRuntime.hpp>
 #include <armnn/INetwork.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <sstream>
 #include <stack>
 #include <string>
+#include <algorithm>
 
 inline bool AreMatchingPair(const char opening, const char closing)
 {
@@ -67,7 +68,7 @@
             }
             catch (std::invalid_argument const&)
             {
-                BOOST_FAIL("Could not convert measurements to double: " + numberString);
+                FAIL("Could not convert measurements to double: " + numberString);
             }
 
             numberString.clear();
@@ -82,7 +83,7 @@
             }
             catch (std::invalid_argument const&)
             {
-                BOOST_FAIL("Could not convert measurements to double: " + numberString);
+                FAIL("Could not convert measurements to double: " + numberString);
             }
             numberString.clear();
         }
@@ -120,7 +121,7 @@
 {
     using namespace armnn;
 
-    BOOST_CHECK(!backends.empty());
+    CHECK(!backends.empty());
 
     ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
 
@@ -160,12 +161,12 @@
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
     if(!optNet)
     {
-        BOOST_FAIL("Error occurred during Optimization, Optimize() returned nullptr.");
+        FAIL("Error occurred during Optimization, Optimize() returned nullptr.");
     }
     // load it into the runtime
     NetworkId netId;
     auto error = runtime->LoadNetwork(netId, std::move(optNet));
-    BOOST_TEST(error == Status::Success);
+    CHECK(error == Status::Success);
 
     // create structures for input & output
     std::vector<uint8_t> inputData
@@ -202,7 +203,7 @@
 {
     // ensure all measurements are greater than zero
     std::vector<double> measurementsVector = ExtractMeasurements(result);
-    BOOST_CHECK(!measurementsVector.empty());
+    CHECK(!measurementsVector.empty());
 
     // check sections contain raw and unit tags
     // first ensure Parenthesis are balanced
@@ -219,12 +220,12 @@
                 sectionVector.erase(sectionVector.begin() + static_cast<int>(i));
             }
         }
-        BOOST_CHECK(!sectionVector.empty());
+        CHECK(!sectionVector.empty());
 
-        BOOST_CHECK(std::all_of(sectionVector.begin(), sectionVector.end(),
+        CHECK(std::all_of(sectionVector.begin(), sectionVector.end(),
                                 [](std::string i) { return (i.find("\"raw\":") != std::string::npos); }));
 
-        BOOST_CHECK(std::all_of(sectionVector.begin(), sectionVector.end(),
+        CHECK(std::all_of(sectionVector.begin(), sectionVector.end(),
                                 [](std::string i) { return (i.find("\"unit\":") != std::string::npos); }));
     }
 
@@ -235,11 +236,11 @@
     result.erase(std::remove_if (result.begin(),result.end(),
                                  [](char c) { return c == '\t'; }), result.end());
 
-    BOOST_CHECK(result.find("ArmNN") != std::string::npos);
-    BOOST_CHECK(result.find("inference_measurements") != std::string::npos);
+    CHECK(result.find("ArmNN") != std::string::npos);
+    CHECK(result.find("inference_measurements") != std::string::npos);
 
     // ensure no spare parenthesis present in print output
-    BOOST_CHECK(AreParenthesesMatching(result));
+    CHECK(AreParenthesesMatching(result));
 }
 
 void RunSoftmaxProfilerJsonPrinterTest(const std::vector<armnn::BackendId>& backends)
@@ -253,11 +254,11 @@
     const armnn::BackendId& firstBackend = backends.at(0);
     if (firstBackend == armnn::Compute::GpuAcc)
     {
-        BOOST_CHECK(result.find("OpenClKernelTimer/: softmax_layer_max_shift_exp_sum_quantized_serial GWS[,,]")
+        CHECK(result.find("OpenClKernelTimer/: softmax_layer_max_shift_exp_sum_quantized_serial GWS[,,]")
                     != std::string::npos);
     }
     else if (firstBackend == armnn::Compute::CpuAcc)
     {
-        BOOST_CHECK(result.find("NeonKernelTimer/: CpuLogitsDLogSoftmaxKernel_#") != std::string::npos);
+        CHECK(result.find("NeonKernelTimer/: CpuLogitsDLogSoftmaxKernel_#") != std::string::npos);
     }
 }
diff --git a/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp b/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
index 0ca4b0a..579be51 100644
--- a/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
+++ b/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
@@ -10,7 +10,7 @@
 #include <backendsCommon/TensorHandle.hpp>
 #include <backendsCommon/WorkloadData.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <utility>
 
@@ -23,9 +23,9 @@
 // Checks weights and biases before the method called and after.
 /////////////////////////////////////////////////////////////////////////////////////////////
 
-BOOST_AUTO_TEST_SUITE(LayerReleaseConstantDataTest)
-
-BOOST_AUTO_TEST_CASE(ReleaseBatchNormalizationLayerConstantDataTest)
+TEST_SUITE("LayerReleaseConstantDataTest")
+{
+TEST_CASE("ReleaseBatchNormalizationLayerConstantDataTest")
 {
     Graph graph;
 
@@ -54,24 +54,24 @@
     Connect(layer, output, tensorInfo);
 
     // check the constants that they are not NULL
-    BOOST_CHECK(layer->m_Mean != nullptr);
-    BOOST_CHECK(layer->m_Variance != nullptr);
-    BOOST_CHECK(layer->m_Beta != nullptr);
-    BOOST_CHECK(layer->m_Gamma != nullptr);
+    CHECK(layer->m_Mean != nullptr);
+    CHECK(layer->m_Variance != nullptr);
+    CHECK(layer->m_Beta != nullptr);
+    CHECK(layer->m_Gamma != nullptr);
 
     // free up the constants..
     layer->ReleaseConstantData();
 
     // check the constants that they are NULL now
-    BOOST_CHECK(layer->m_Mean == nullptr);
-    BOOST_CHECK(layer->m_Variance == nullptr);
-    BOOST_CHECK(layer->m_Beta == nullptr);
-    BOOST_CHECK(layer->m_Gamma == nullptr);
+    CHECK(layer->m_Mean == nullptr);
+    CHECK(layer->m_Variance == nullptr);
+    CHECK(layer->m_Beta == nullptr);
+    CHECK(layer->m_Gamma == nullptr);
 
  }
 
 
- BOOST_AUTO_TEST_CASE(ReleaseConvolution2dLayerConstantDataTest)
+ TEST_CASE("ReleaseConvolution2dLayerConstantDataTest")
  {
      Graph graph;
 
@@ -104,18 +104,18 @@
      Connect(layer, output, TensorInfo({2, 2, 2, 10}, armnn::DataType::Float32));
 
      // check the constants that they are not NULL
-     BOOST_CHECK(layer->m_Weight != nullptr);
-     BOOST_CHECK(layer->m_Bias != nullptr);
+     CHECK(layer->m_Weight != nullptr);
+     CHECK(layer->m_Bias != nullptr);
 
      // free up the constants..
      layer->ReleaseConstantData();
 
      // check the constants that they are NULL now
-     BOOST_CHECK(layer->m_Weight == nullptr);
-     BOOST_CHECK(layer->m_Bias == nullptr);
+     CHECK(layer->m_Weight == nullptr);
+     CHECK(layer->m_Bias == nullptr);
 }
 
-BOOST_AUTO_TEST_CASE(ReleaseDepthwiseConvolution2dLayerConstantDataTest)
+TEST_CASE("ReleaseDepthwiseConvolution2dLayerConstantDataTest")
 {
     Graph graph;
 
@@ -145,18 +145,18 @@
     Connect(layer, output, TensorInfo({2, 9, 2, 10}, armnn::DataType::Float32));
 
     // check the constants that they are not NULL
-    BOOST_CHECK(layer->m_Weight != nullptr);
-    BOOST_CHECK(layer->m_Bias != nullptr);
+    CHECK(layer->m_Weight != nullptr);
+    CHECK(layer->m_Bias != nullptr);
 
     // free up the constants..
     layer->ReleaseConstantData();
 
     // check the constants that they are NULL now
-    BOOST_CHECK(layer->m_Weight == nullptr);
-    BOOST_CHECK(layer->m_Bias == nullptr);
+    CHECK(layer->m_Weight == nullptr);
+    CHECK(layer->m_Bias == nullptr);
 }
 
-BOOST_AUTO_TEST_CASE(ReleaseFullyConnectedLayerConstantDataTest)
+TEST_CASE("ReleaseFullyConnectedLayerConstantDataTest")
 {
     Graph graph;
 
@@ -186,16 +186,16 @@
     Connect(layer, output, TensorInfo({3, 7}, DataType::QAsymmU8, outputQScale));
 
     // check the constants that they are not NULL
-    BOOST_CHECK(layer->m_Weight != nullptr);
-    BOOST_CHECK(layer->m_Bias != nullptr);
+    CHECK(layer->m_Weight != nullptr);
+    CHECK(layer->m_Bias != nullptr);
 
     // free up the constants..
     layer->ReleaseConstantData();
 
     // check the constants that they are NULL now
-    BOOST_CHECK(layer->m_Weight == nullptr);
-    BOOST_CHECK(layer->m_Bias == nullptr);
+    CHECK(layer->m_Weight == nullptr);
+    CHECK(layer->m_Bias == nullptr);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
 
diff --git a/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp
index f1e6242..1f7f578 100644
--- a/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp
@@ -10,7 +10,7 @@
 
 #include <test/TestUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 namespace {
 
@@ -60,7 +60,7 @@
                                                                  beta,
                                                                  axis);
 
-    BOOST_TEST_CHECKPOINT("Create a network");
+    CHECK(net);
 
     std::map<int, std::vector<float>> inputTensorData = { {0, inputData} };
     std::map<int, std::vector<float>> expectedOutputTensorData = { {0, expectedOutputData} };
diff --git a/src/backends/backendsCommon/test/OptimizationViewsTests.cpp b/src/backends/backendsCommon/test/OptimizationViewsTests.cpp
index b472a03..246cb50 100644
--- a/src/backends/backendsCommon/test/OptimizationViewsTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizationViewsTests.cpp
@@ -14,8 +14,7 @@
 #include <SubgraphView.hpp>
 #include <SubgraphViewSelector.hpp>
 
-#include <boost/test/unit_test.hpp>
-
+#include <doctest/doctest.h>
 
 using namespace armnn;
 
@@ -28,31 +27,31 @@
         {
             case LayerType::Input:
                 ++m_inputLayerCount;
-                BOOST_TEST((layer->GetName() == std::string("inLayer0") ||
+                CHECK((layer->GetName() == std::string("inLayer0") ||
                             layer->GetName() == std::string("inLayer1")));
                 break;
             // The Addition layer should become a PreCompiled Layer after Optimisation
             case LayerType::PreCompiled:
                 ++m_addLayerCount;
-                BOOST_TEST(layer->GetName() == "pre-compiled");
+                CHECK(std::string(layer->GetName()) == "pre-compiled");
                 break;
             case LayerType::Output:
                 ++m_outputLayerCount;
-                BOOST_TEST(layer->GetName() == "outLayer");
+                CHECK(std::string(layer->GetName()) == "outLayer");
                 break;
             default:
                 //Fail for anything else
-                BOOST_TEST(false);
+                CHECK(false);
         }
     }
-    BOOST_TEST(m_inputLayerCount == 2);
-    BOOST_TEST(m_outputLayerCount == 1);
-    BOOST_TEST(m_addLayerCount == 1);
+    CHECK(m_inputLayerCount == 2);
+    CHECK(m_outputLayerCount == 1);
+    CHECK(m_addLayerCount == 1);
 }
 
-BOOST_AUTO_TEST_SUITE(OptimizationViewsTestSuite)
-
-BOOST_AUTO_TEST_CASE(OptimizedViewsSubgraphLayerCount)
+TEST_SUITE("OptimizationViewsTestSuite")
+{
+TEST_CASE("OptimizedViewsSubgraphLayerCount")
 {
     OptimizationViews view;
     // Construct a graph with 3 layers
@@ -117,10 +116,10 @@
             CreateOutputsFrom({convLayer2}),
             {convLayer1, convLayer2, substitutionpreCompiledLayer});
 
-    BOOST_CHECK(view.Validate(*originalSubgraph));
+    CHECK(view.Validate(*originalSubgraph));
 }
 
-BOOST_AUTO_TEST_CASE(OptimizedViewsSubgraphLayerCountFailValidate)
+TEST_CASE("OptimizedViewsSubgraphLayerCountFailValidate")
 {
     OptimizationViews view;
     // Construct a graph with 3 layers
@@ -180,10 +179,10 @@
                                    {convLayer1, convLayer2, substitutionpreCompiledLayer});
 
     // Validate should fail as convLayer1 is not counted
-    BOOST_CHECK(!view.Validate(*originalSubgraph));
+    CHECK(!view.Validate(*originalSubgraph));
 }
 
-BOOST_AUTO_TEST_CASE(OptimizeViewsValidateDeviceMockBackend)
+TEST_CASE("OptimizeViewsValidateDeviceMockBackend")
 {
     // build up the structure of the network
     armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -209,11 +208,11 @@
 
     std::vector<armnn::BackendId> backends = { MockBackend().GetIdStatic() };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
-    BOOST_CHECK(optNet);
+    CHECK(optNet);
 
     // Check the optimised graph
     armnn::Graph& graph = GetGraphForTesting(optNet.get());
     CheckLayers(graph);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
index f7ebf1a..6c76da6 100644
--- a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
@@ -12,8 +12,7 @@
 
 #include <armnn/BackendRegistry.hpp>
 
-#include <boost/test/unit_test.hpp>
-
+#include <doctest/doctest.h>
 #include <unordered_map>
 
 using namespace armnn;
@@ -64,7 +63,7 @@
                      LayerBindingId inputId = 0)
 {
     Layer* const inputLayer = graph.AddLayer<InputLayer>(inputId, layerName.c_str());
-    BOOST_TEST(inputLayer);
+    CHECK(inputLayer);
     inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
     return inputLayer;
 }
@@ -74,7 +73,7 @@
                       const std::string& layerName)
 {
     Layer* const outputLayer = graph.AddLayer<OutputLayer>(0, layerName.c_str());
-    BOOST_TEST(outputLayer);
+    CHECK(outputLayer);
     return outputLayer;
 }
 
@@ -88,7 +87,7 @@
                                         const TensorInfo& outputInfo)
 {
     Convolution2dLayer* const convLayer = graph.AddLayer<Convolution2dLayer>(convolutionDescriptor, layerName.c_str());
-    BOOST_TEST(convLayer);
+    CHECK(convLayer);
     SetWeightAndBias(convLayer, weightInfo, biasInfo);
     convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
     layersInGraph.insert(std::make_pair(convLayer->GetName(), convLayer));
@@ -103,7 +102,7 @@
                                 const TensorInfo& outputInfo)
 {
     Pooling2dLayer* const poolingLayer = graph.AddLayer<Pooling2dLayer>(poolingDescriptor, layerName.c_str());
-    BOOST_TEST(poolingLayer);
+    CHECK(poolingLayer);
     poolingLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
     layersInGraph.insert(std::make_pair(poolingLayer->GetName(), poolingLayer));
     return poolingLayer;
@@ -116,7 +115,7 @@
                                  const TensorInfo& outputInfo)
 {
     AdditionLayer* const additionLayer = graph.AddLayer<AdditionLayer>(layerName.c_str());
-    BOOST_TEST(additionLayer);
+    CHECK(additionLayer);
     additionLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
     layersInGraph.insert(std::make_pair(additionLayer->GetName(), additionLayer));
     return additionLayer;
@@ -140,23 +139,23 @@
     const SubgraphView::OutputSlots& replacementSubgraphOutputSlots = replacementSubgraph.GetOutputSlots();
     const SubgraphView::Layers&      replacementSubgraphLayers      = replacementSubgraph.GetLayers();
 
-    BOOST_TEST(substitutableSubgraphInputSlots.size()  == expectedSubstitutableSubgraphSize.m_NumInputSlots);
-    BOOST_TEST(substitutableSubgraphOutputSlots.size() == expectedSubstitutableSubgraphSize.m_NumOutputSlots);
-    BOOST_TEST(substitutableSubgraphLayers.size()      == expectedSubstitutableSubgraphSize.m_NumLayers);
+    CHECK(substitutableSubgraphInputSlots.size()  == expectedSubstitutableSubgraphSize.m_NumInputSlots);
+    CHECK(substitutableSubgraphOutputSlots.size() == expectedSubstitutableSubgraphSize.m_NumOutputSlots);
+    CHECK(substitutableSubgraphLayers.size()      == expectedSubstitutableSubgraphSize.m_NumLayers);
 
-    BOOST_TEST(AreEqual(substitutableSubgraphInputSlots,  expectedSubstitutableInputSlots));
-    BOOST_TEST(AreEqual(substitutableSubgraphOutputSlots, expectedSubstitutableOutputSlots));
-    BOOST_TEST(AreEqual(substitutableSubgraphLayers,      expectedSubstitutableLayers));
+    CHECK(AreEqual(substitutableSubgraphInputSlots,  expectedSubstitutableInputSlots));
+    CHECK(AreEqual(substitutableSubgraphOutputSlots, expectedSubstitutableOutputSlots));
+    CHECK(AreEqual(substitutableSubgraphLayers,      expectedSubstitutableLayers));
 
-    BOOST_TEST(replacementSubgraphInputSlots.size()  == expectedReplacementSubgraphSize.m_NumInputSlots);
-    BOOST_TEST(replacementSubgraphOutputSlots.size() == expectedReplacementSubgraphSize.m_NumOutputSlots);
-    BOOST_TEST(replacementSubgraphLayers.size()      == expectedReplacementSubgraphSize.m_NumLayers);
+    CHECK(replacementSubgraphInputSlots.size()  == expectedReplacementSubgraphSize.m_NumInputSlots);
+    CHECK(replacementSubgraphOutputSlots.size() == expectedReplacementSubgraphSize.m_NumOutputSlots);
+    CHECK(replacementSubgraphLayers.size()      == expectedReplacementSubgraphSize.m_NumLayers);
 
-    BOOST_TEST(!AreEqual(replacementSubgraphInputSlots,  expectedSubstitutableInputSlots));
-    BOOST_TEST(!AreEqual(replacementSubgraphOutputSlots, expectedSubstitutableOutputSlots));
-    BOOST_TEST(!AreEqual(replacementSubgraphLayers,      expectedSubstitutableLayers));
+    CHECK(!AreEqual(replacementSubgraphInputSlots,  expectedSubstitutableInputSlots));
+    CHECK(!AreEqual(replacementSubgraphOutputSlots, expectedSubstitutableOutputSlots));
+    CHECK(!AreEqual(replacementSubgraphLayers,      expectedSubstitutableLayers));
 
-    BOOST_TEST(std::all_of(replacementSubgraphLayers.begin(),
+    CHECK(std::all_of(replacementSubgraphLayers.begin(),
                            replacementSubgraphLayers.end(),
                            [](const Layer* layer)
     {
@@ -175,13 +174,13 @@
     const SubgraphView::OutputSlots& failedSubgraphOutputSlots = failedSubgraph.GetOutputSlots();
     const SubgraphView::Layers&      failedSubgraphLayers      = failedSubgraph.GetLayers();
 
-    BOOST_TEST(failedSubgraphInputSlots.size()  == expectedFailedSubgraphSize.m_NumInputSlots);
-    BOOST_TEST(failedSubgraphOutputSlots.size() == expectedFailedSubgraphSize.m_NumOutputSlots);
-    BOOST_TEST(failedSubgraphLayers.size()      == expectedFailedSubgraphSize.m_NumLayers);
+    CHECK(failedSubgraphInputSlots.size()  == expectedFailedSubgraphSize.m_NumInputSlots);
+    CHECK(failedSubgraphOutputSlots.size() == expectedFailedSubgraphSize.m_NumOutputSlots);
+    CHECK(failedSubgraphLayers.size()      == expectedFailedSubgraphSize.m_NumLayers);
 
-    BOOST_TEST(AreEqual(failedSubgraphInputSlots,  expectedFailedInputSlots));
-    BOOST_TEST(AreEqual(failedSubgraphOutputSlots, expectedFailedOutputSlots));
-    BOOST_TEST(AreEqual(failedSubgraphLayers,      expectedFailedLayers));
+    CHECK(AreEqual(failedSubgraphInputSlots,  expectedFailedInputSlots));
+    CHECK(AreEqual(failedSubgraphOutputSlots, expectedFailedOutputSlots));
+    CHECK(AreEqual(failedSubgraphLayers,      expectedFailedLayers));
 }
 
 // Convenience function to check that the given untouched subgraph matches the specified expected values
@@ -195,13 +194,13 @@
     const SubgraphView::OutputSlots& untouchedSubgraphOutputSlots = untouchedSubgraph.GetOutputSlots();
     const SubgraphView::Layers&      untouchedSubgraphLayers      = untouchedSubgraph.GetLayers();
 
-    BOOST_TEST(untouchedSubgraphInputSlots.size()  == expectedUntouchedSubgraphSize.m_NumInputSlots);
-    BOOST_TEST(untouchedSubgraphOutputSlots.size() == expectedUntouchedSubgraphSize.m_NumOutputSlots);
-    BOOST_TEST(untouchedSubgraphLayers.size()      == expectedUntouchedSubgraphSize.m_NumLayers);
+    CHECK(untouchedSubgraphInputSlots.size()  == expectedUntouchedSubgraphSize.m_NumInputSlots);
+    CHECK(untouchedSubgraphOutputSlots.size() == expectedUntouchedSubgraphSize.m_NumOutputSlots);
+    CHECK(untouchedSubgraphLayers.size()      == expectedUntouchedSubgraphSize.m_NumLayers);
 
-    BOOST_TEST(AreEqual(untouchedSubgraphInputSlots,  expectedUntouchedInputSlots));
-    BOOST_TEST(AreEqual(untouchedSubgraphOutputSlots, expectedUntouchedOutputSlots));
-    BOOST_TEST(AreEqual(untouchedSubgraphLayers,      expectedUntouchedLayers));
+    CHECK(AreEqual(untouchedSubgraphInputSlots,  expectedUntouchedInputSlots));
+    CHECK(AreEqual(untouchedSubgraphOutputSlots, expectedUntouchedOutputSlots));
+    CHECK(AreEqual(untouchedSubgraphLayers,      expectedUntouchedLayers));
 }
 
 // Creates a subgraph containing only a single unsupported layer (only convolutions are unsupported by the mock backend)
@@ -551,28 +550,28 @@
 
     // Create an unsupported subgraph
     SubgraphView::SubgraphViewPtr subgraphPtr = BuildFullyUnsupportedSubgraph1(graph, layersInGraph);
-    BOOST_TEST((subgraphPtr != nullptr));
+    CHECK((subgraphPtr != nullptr));
 
     const SubgraphView::InputSlots&  subgraphInputSlots  = subgraphPtr->GetInputSlots();
     const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots();
     const SubgraphView::Layers&      subgraphLayers      = subgraphPtr->GetLayers();
 
-    BOOST_TEST(subgraphInputSlots.size()  == 1);
-    BOOST_TEST(subgraphOutputSlots.size() == 1);
-    BOOST_TEST(subgraphLayers.size()      == 1);
+    CHECK(subgraphInputSlots.size()  == 1);
+    CHECK(subgraphOutputSlots.size() == 1);
+    CHECK(subgraphLayers.size()      == 1);
 
-    BOOST_TEST(Contains(layersInGraph, "pooling layer"));
+    CHECK(Contains(layersInGraph, "pooling layer"));
 
     // Create a mock backend object
     MockBackendInitialiser initialiser; // Register the Mock Backend
     auto backendObjPtr = CreateBackendObject(MockBackendId());
-    BOOST_TEST((backendObjPtr != nullptr));
+    CHECK((backendObjPtr != nullptr));
 
     // Optimize the subgraph
     OptimizationViews optimizationViews;
 
     // Check that the optimization is carried out correctly, but no optimization is performed
-    BOOST_CHECK_NO_THROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
+    CHECK_NOTHROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
 
     // =======================================================================
     // The expected results are:
@@ -585,14 +584,14 @@
     // Check the substitutions
     // -----------------------
 
-    BOOST_TEST(optimizationViews.GetSubstitutions().empty());
+    CHECK(optimizationViews.GetSubstitutions().empty());
 
     // --------------------------
     // Check the failed subgraphs
     // --------------------------
 
     const OptimizationViews::Subgraphs& failedSubgraphs = optimizationViews.GetFailedSubgraphs();
-    BOOST_TEST(failedSubgraphs.size() == 1);
+    CHECK(failedSubgraphs.size() == 1);
 
     CheckFailedSubgraph(failedSubgraphs.at(0),
                         { subgraphInputSlots.size(), subgraphOutputSlots.size(), subgraphLayers.size() },
@@ -604,7 +603,7 @@
     // Check the untouched subgraphs
     // -----------------------------
 
-    BOOST_TEST(optimizationViews.GetUntouchedSubgraphs().empty());
+    CHECK(optimizationViews.GetUntouchedSubgraphs().empty());
 }
 
 // The input subgraph contains only unsupported layers (only convolutions are unsupported by the mock backend)
@@ -615,30 +614,30 @@
 
     // Create an unsupported subgraph
     SubgraphView::SubgraphViewPtr subgraphPtr = BuildFullyUnsupportedSubgraph2(graph, layersInGraph);
-    BOOST_TEST((subgraphPtr != nullptr));
+    CHECK((subgraphPtr != nullptr));
 
     const SubgraphView::InputSlots&  subgraphInputSlots  = subgraphPtr->GetInputSlots();
     const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots();
     const SubgraphView::Layers&      subgraphLayers      = subgraphPtr->GetLayers();
 
-    BOOST_TEST(subgraphInputSlots.size()  == 1);
-    BOOST_TEST(subgraphOutputSlots.size() == 1);
-    BOOST_TEST(subgraphLayers.size()      == 3);
+    CHECK(subgraphInputSlots.size()  == 1);
+    CHECK(subgraphOutputSlots.size() == 1);
+    CHECK(subgraphLayers.size()      == 3);
 
-    BOOST_TEST(Contains(layersInGraph, "pooling1 layer"));
-    BOOST_TEST(Contains(layersInGraph, "pooling2 layer"));
-    BOOST_TEST(Contains(layersInGraph, "pooling3 layer"));
+    CHECK(Contains(layersInGraph, "pooling1 layer"));
+    CHECK(Contains(layersInGraph, "pooling2 layer"));
+    CHECK(Contains(layersInGraph, "pooling3 layer"));
 
     // Create a mock backend object
     MockBackendInitialiser initialiser; // Register the Mock Backend
     auto backendObjPtr = CreateBackendObject(MockBackendId());
-    BOOST_TEST((backendObjPtr != nullptr));
+    CHECK((backendObjPtr != nullptr));
 
     // Optimize the subgraph
     OptimizationViews optimizationViews;
 
     // Check that the optimization is carried out correctly, but no optimization is performed
-    BOOST_CHECK_NO_THROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
+    CHECK_NOTHROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
 
     // =======================================================================
     // The expected results are:
@@ -651,18 +650,18 @@
     // Check the substitutions
     // -----------------------
 
-    BOOST_TEST(optimizationViews.GetSubstitutions().empty());
+    CHECK(optimizationViews.GetSubstitutions().empty());
 
     // --------------------------
     // Check the failed subgraphs
     // --------------------------
 
     const OptimizationViews::Subgraphs& failedSubgraphs = optimizationViews.GetFailedSubgraphs();
-    BOOST_TEST(failedSubgraphs.size() == 1);
+    CHECK(failedSubgraphs.size() == 1);
 
-    std::vector<Layer*> expectedFailedLayers{ layersInGraph.at("pooling1 layer"),
-                                              layersInGraph.at("pooling2 layer"),
-                                              layersInGraph.at("pooling3 layer") };
+    std::list<Layer*> expectedFailedLayers{ layersInGraph.at("pooling1 layer"),
+                                            layersInGraph.at("pooling2 layer"),
+                                            layersInGraph.at("pooling3 layer") };
 
     const SubgraphView& failedSubgraph = failedSubgraphs.at(0);
 
@@ -674,15 +673,15 @@
 
     const SubgraphView::Layers& failedSubgraphLayers = failedSubgraph.GetLayers();
 
-    BOOST_TEST(failedSubgraphLayers.front() + 0, expectedFailedLayers.at(0));
-    BOOST_TEST(failedSubgraphLayers.front() + 1, expectedFailedLayers.at(1));
-    BOOST_TEST(failedSubgraphLayers.front() + 2, expectedFailedLayers.at(2));
+    CHECK_EQ(*std::next(failedSubgraphLayers.begin(), 0), *std::next(expectedFailedLayers.begin(), 0));
+    CHECK_EQ(*std::next(failedSubgraphLayers.begin(), 1), *std::next(expectedFailedLayers.begin(), 1));
+    CHECK_EQ(*std::next(failedSubgraphLayers.begin(), 2), *std::next(expectedFailedLayers.begin(), 2));
 
     // -----------------------------
     // Check the untouched subgraphs
     // -----------------------------
 
-    BOOST_TEST(optimizationViews.GetUntouchedSubgraphs().empty());
+    CHECK(optimizationViews.GetUntouchedSubgraphs().empty());
 }
 
 // A simple case with only one layer (convolution) to optimize, supported by the mock backend
@@ -693,28 +692,28 @@
 
     // Create a fully optimizable subgraph
     SubgraphViewSelector::SubgraphViewPtr subgraphPtr = BuildFullyOptimizableSubgraph1(graph, layersInGraph);
-    BOOST_TEST((subgraphPtr != nullptr));
+    CHECK((subgraphPtr != nullptr));
 
     const SubgraphView::InputSlots&  subgraphInputSlots  = subgraphPtr->GetInputSlots();
     const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots();
     const SubgraphView::Layers&      subgraphLayers      = subgraphPtr->GetLayers();
 
-    BOOST_TEST(subgraphInputSlots.size()  == 1);
-    BOOST_TEST(subgraphOutputSlots.size() == 1);
-    BOOST_TEST(subgraphLayers.size()      == 1);
+    CHECK(subgraphInputSlots.size()  == 1);
+    CHECK(subgraphOutputSlots.size() == 1);
+    CHECK(subgraphLayers.size()      == 1);
 
-    BOOST_TEST(Contains(layersInGraph, "conv layer"));
+    CHECK(Contains(layersInGraph, "conv layer"));
 
     // Create a mock backend object
     MockBackendInitialiser initialiser; // Register the Mock Backend
     auto backendObjPtr = CreateBackendObject(MockBackendId());
-    BOOST_TEST((backendObjPtr != nullptr));
+    CHECK((backendObjPtr != nullptr));
 
     // Optimize the subgraph
     OptimizationViews optimizationViews;
 
     // Check that the optimization is carried out correctly
-    BOOST_CHECK_NO_THROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
+    CHECK_NOTHROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
 
     // ===========================================================================================
     // The expected results are:
@@ -728,7 +727,7 @@
     // -----------------------
 
     const OptimizationViews::Substitutions& substitutions = optimizationViews.GetSubstitutions();
-    BOOST_TEST(substitutions.size() == 1);
+    CHECK(substitutions.size() == 1);
 
     CheckSubstitution(substitutions.at(0),
                       { subgraphInputSlots.size(), subgraphOutputSlots.size(), subgraphLayers.size() },
@@ -741,13 +740,13 @@
     // Check the failed subgraphs
     // --------------------------
 
-    BOOST_TEST(optimizationViews.GetFailedSubgraphs().empty());
+    CHECK(optimizationViews.GetFailedSubgraphs().empty());
 
     // -----------------------------
     // Check the untouched subgraphs
     // -----------------------------
 
-    BOOST_TEST(optimizationViews.GetUntouchedSubgraphs().empty());
+    CHECK(optimizationViews.GetUntouchedSubgraphs().empty());
 }
 
 // A case with five layers (all convolutions) to optimize, all supported by the mock backend
@@ -758,32 +757,32 @@
 
     // Create a fully optimizable subgraph
     SubgraphViewSelector::SubgraphViewPtr subgraphPtr = BuildFullyOptimizableSubgraph2(graph, layersInGraph);
-    BOOST_TEST((subgraphPtr != nullptr));
+    CHECK((subgraphPtr != nullptr));
 
     const SubgraphView::InputSlots&  subgraphInputSlots  = subgraphPtr->GetInputSlots();
     const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots();
     const SubgraphView::Layers&      subgraphLayers      = subgraphPtr->GetLayers();
 
-    BOOST_TEST(subgraphPtr->GetInputSlots().size()  == 1);
-    BOOST_TEST(subgraphPtr->GetOutputSlots().size() == 1);
-    BOOST_TEST(subgraphPtr->GetLayers().size()      == 5);
+    CHECK(subgraphPtr->GetInputSlots().size()  == 1);
+    CHECK(subgraphPtr->GetOutputSlots().size() == 1);
+    CHECK(subgraphPtr->GetLayers().size()      == 5);
 
-    BOOST_TEST(Contains(layersInGraph, "conv1 layer"));
-    BOOST_TEST(Contains(layersInGraph, "conv2 layer"));
-    BOOST_TEST(Contains(layersInGraph, "conv3 layer"));
-    BOOST_TEST(Contains(layersInGraph, "conv4 layer"));
-    BOOST_TEST(Contains(layersInGraph, "conv5 layer"));
+    CHECK(Contains(layersInGraph, "conv1 layer"));
+    CHECK(Contains(layersInGraph, "conv2 layer"));
+    CHECK(Contains(layersInGraph, "conv3 layer"));
+    CHECK(Contains(layersInGraph, "conv4 layer"));
+    CHECK(Contains(layersInGraph, "conv5 layer"));
 
     // Create a mock backend object
     MockBackendInitialiser initialiser; // Register the Mock Backend
     auto backendObjPtr = CreateBackendObject(MockBackendId());
-    BOOST_TEST((backendObjPtr != nullptr));
+    CHECK((backendObjPtr != nullptr));
 
     // Optimize the subgraph
     OptimizationViews optimizationViews;
 
     // Check that the optimization is carried out correctly
-    BOOST_CHECK_NO_THROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
+    CHECK_NOTHROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
 
     // ===========================================================================================
     // The expected results are:
@@ -797,7 +796,7 @@
     // -----------------------
 
     const OptimizationViews::Substitutions& substitutions = optimizationViews.GetSubstitutions();
-    BOOST_TEST(substitutions.size() == 1);
+    CHECK(substitutions.size() == 1);
 
     std::list<Layer*> expectedSubstitutableLayers{ layersInGraph.at("conv1 layer"),
                                                    layersInGraph.at("conv2 layer"),
@@ -816,23 +815,23 @@
 
     const SubgraphView::Layers& substitutableSubgraphLayers = substitution.m_SubstitutableSubgraph.GetLayers();
 
-    BOOST_TEST(substitutableSubgraphLayers.front() + 0, expectedSubstitutableLayers.front() + 0);
-    BOOST_TEST(substitutableSubgraphLayers.front() + 1, expectedSubstitutableLayers.front() + 1);
-    BOOST_TEST(substitutableSubgraphLayers.front() + 2, expectedSubstitutableLayers.front() + 2);
-    BOOST_TEST(substitutableSubgraphLayers.front() + 3, expectedSubstitutableLayers.front() + 3);
-    BOOST_TEST(substitutableSubgraphLayers.front() + 4, expectedSubstitutableLayers.front() + 4);
+    CHECK_EQ(substitutableSubgraphLayers.front() + 0, expectedSubstitutableLayers.front() + 0);
+    CHECK_EQ(substitutableSubgraphLayers.front() + 1, expectedSubstitutableLayers.front() + 1);
+    CHECK_EQ(substitutableSubgraphLayers.front() + 2, expectedSubstitutableLayers.front() + 2);
+    CHECK_EQ(substitutableSubgraphLayers.front() + 3, expectedSubstitutableLayers.front() + 3);
+    CHECK_EQ(substitutableSubgraphLayers.front() + 4, expectedSubstitutableLayers.front() + 4);
 
     // --------------------------
     // Check the failed subgraphs
     // --------------------------
 
-    BOOST_TEST(optimizationViews.GetFailedSubgraphs().empty());
+    CHECK(optimizationViews.GetFailedSubgraphs().empty());
 
     // -----------------------------
     // Check the untouched subgraphs
     // -----------------------------
 
-    BOOST_TEST(optimizationViews.GetUntouchedSubgraphs().empty());
+    CHECK(optimizationViews.GetUntouchedSubgraphs().empty());
 }
 
 // The input subgraph contains both supported and unsupported layers
@@ -844,32 +843,32 @@
 
     // Create a partially supported subgraph
     SubgraphViewSelector::SubgraphViewPtr subgraphPtr = BuildPartiallySupportedSubgraph(graph, layersInGraph);
-    BOOST_TEST((subgraphPtr != nullptr));
+    CHECK((subgraphPtr != nullptr));
 
     const SubgraphView::InputSlots&  subgraphInputSlots  = subgraphPtr->GetInputSlots();
     const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots();
     const SubgraphView::Layers&      subgraphLayers      = subgraphPtr->GetLayers();
 
-    BOOST_TEST(subgraphInputSlots.size()  == 1);
-    BOOST_TEST(subgraphOutputSlots.size() == 1);
-    BOOST_TEST(subgraphLayers.size()      == 5);
+    CHECK(subgraphInputSlots.size()  == 1);
+    CHECK(subgraphOutputSlots.size() == 1);
+    CHECK(subgraphLayers.size()      == 5);
 
-    BOOST_TEST(Contains(layersInGraph, "conv1 layer"));
-    BOOST_TEST(Contains(layersInGraph, "pooling1 layer"));
-    BOOST_TEST(Contains(layersInGraph, "pooling2 layer"));
-    BOOST_TEST(Contains(layersInGraph, "conv2 layer"));
-    BOOST_TEST(Contains(layersInGraph, "pooling3 layer"));
+    CHECK(Contains(layersInGraph, "conv1 layer"));
+    CHECK(Contains(layersInGraph, "pooling1 layer"));
+    CHECK(Contains(layersInGraph, "pooling2 layer"));
+    CHECK(Contains(layersInGraph, "conv2 layer"));
+    CHECK(Contains(layersInGraph, "pooling3 layer"));
 
     // Create a mock backend object
     MockBackendInitialiser initialiser; // Register the Mock Backend
     auto backendObjPtr = CreateBackendObject(MockBackendId());
-    BOOST_TEST((backendObjPtr != nullptr));
+    CHECK((backendObjPtr != nullptr));
 
     // Optimize the subgraph
     OptimizationViews optimizationViews;
 
     // Check that the optimization is carried out correctly
-    BOOST_CHECK_NO_THROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
+    CHECK_NOTHROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
 
     // ========================================================================
     // The expected results are:
@@ -883,7 +882,7 @@
     // -----------------------
 
     OptimizationViews::Substitutions substitutions = optimizationViews.GetSubstitutions();
-    BOOST_TEST(substitutions.size() == 2);
+    CHECK(substitutions.size() == 2);
     // Sort into a consistent order
     std::sort(substitutions.begin(), substitutions.end(), [](auto s1, auto s2) {
         return strcmp(s1.m_SubstitutableSubgraph.GetLayers().front()->GetName(),
@@ -925,7 +924,7 @@
     // --------------------------
 
     OptimizationViews::Subgraphs failedSubgraphs = optimizationViews.GetFailedSubgraphs();
-    BOOST_TEST(failedSubgraphs.size() == 2);
+    CHECK(failedSubgraphs.size() == 2);
     // Sort into a consistent order
     std::sort(failedSubgraphs.begin(), failedSubgraphs.end(), [](auto s1, auto s2) {
         return strcmp(s1.GetLayers().front()->GetName(), s2.GetLayers().front()->GetName()) < 0;
@@ -963,7 +962,7 @@
     // Check the untouched subgraphs
     // -----------------------------
 
-    BOOST_TEST(optimizationViews.GetUntouchedSubgraphs().empty());
+    CHECK(optimizationViews.GetUntouchedSubgraphs().empty());
 }
 
 // The input subgraph contains only unoptimizable layers ("unoptimizable" is added to the layer's name)
@@ -974,28 +973,28 @@
 
     // Create a fully unoptimizable subgraph
     SubgraphViewSelector::SubgraphViewPtr subgraphPtr = BuildFullyUnoptimizableSubgraph1(graph, layersInGraph);
-    BOOST_TEST((subgraphPtr != nullptr));
+    CHECK((subgraphPtr != nullptr));
 
     const SubgraphView::InputSlots&  subgraphInputSlots  = subgraphPtr->GetInputSlots();
     const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots();
     const SubgraphView::Layers&      subgraphLayers      = subgraphPtr->GetLayers();
 
-    BOOST_TEST(subgraphInputSlots.size()  == 1);
-    BOOST_TEST(subgraphOutputSlots.size() == 1);
-    BOOST_TEST(subgraphLayers.size()      == 1);
+    CHECK(subgraphInputSlots.size()  == 1);
+    CHECK(subgraphOutputSlots.size() == 1);
+    CHECK(subgraphLayers.size()      == 1);
 
-    BOOST_TEST(Contains(layersInGraph, "conv layer unoptimizable"));
+    CHECK(Contains(layersInGraph, "conv layer unoptimizable"));
 
     // Create a mock backend object
     MockBackendInitialiser initialiser; // Register the Mock Backend
     auto backendObjPtr = CreateBackendObject(MockBackendId());
-    BOOST_TEST((backendObjPtr != nullptr));
+    CHECK((backendObjPtr != nullptr));
 
     // Optimize the subgraph
     OptimizationViews optimizationViews;
 
     // Check that the optimization is carried out correctly
-    BOOST_CHECK_NO_THROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
+    CHECK_NOTHROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
 
     // ============================================================================
     // The expected results are:
@@ -1008,20 +1007,20 @@
     // Check the substitutions
     // -----------------------
 
-    BOOST_TEST(optimizationViews.GetSubstitutions().empty());
+    CHECK(optimizationViews.GetSubstitutions().empty());
 
     // --------------------------
     // Check the failed subgraphs
     // --------------------------
 
-    BOOST_TEST(optimizationViews.GetFailedSubgraphs().empty());
+    CHECK(optimizationViews.GetFailedSubgraphs().empty());
 
     // -----------------------------
     // Check the untouched subgraphs
     // -----------------------------
 
     const OptimizationViews::Subgraphs& untouchedSubgraphs = optimizationViews.GetUntouchedSubgraphs();
-    BOOST_TEST(untouchedSubgraphs.size() == 1);
+    CHECK(untouchedSubgraphs.size() == 1);
 
     CheckUntouchedSubgraph(untouchedSubgraphs.at(0),
                            { subgraphInputSlots.size(), subgraphOutputSlots.size(), subgraphLayers.size() },
@@ -1038,32 +1037,32 @@
 
     // Create a partially optimizable subgraph
     SubgraphViewSelector::SubgraphViewPtr subgraphPtr = BuildPartiallyOptimizableSubgraph1(graph, layersInGraph);
-    BOOST_TEST((subgraphPtr != nullptr));
+    CHECK((subgraphPtr != nullptr));
 
     const SubgraphView::InputSlots&  subgraphInputSlots  = subgraphPtr->GetInputSlots();
     const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots();
     const SubgraphView::Layers&      subgraphLayers      = subgraphPtr->GetLayers();
 
-    BOOST_TEST(subgraphInputSlots.size()  == 1);
-    BOOST_TEST(subgraphOutputSlots.size() == 1);
-    BOOST_TEST(subgraphLayers.size()      == 5);
+    CHECK(subgraphInputSlots.size()  == 1);
+    CHECK(subgraphOutputSlots.size() == 1);
+    CHECK(subgraphLayers.size()      == 5);
 
-    BOOST_TEST(Contains(layersInGraph, "conv1 layer"));
-    BOOST_TEST(Contains(layersInGraph, "conv2 layer unoptimizable"));
-    BOOST_TEST(Contains(layersInGraph, "conv3 layer"));
-    BOOST_TEST(Contains(layersInGraph, "conv4 layer unoptimizable"));
-    BOOST_TEST(Contains(layersInGraph, "conv5 layer"));
+    CHECK(Contains(layersInGraph, "conv1 layer"));
+    CHECK(Contains(layersInGraph, "conv2 layer unoptimizable"));
+    CHECK(Contains(layersInGraph, "conv3 layer"));
+    CHECK(Contains(layersInGraph, "conv4 layer unoptimizable"));
+    CHECK(Contains(layersInGraph, "conv5 layer"));
 
     // Create a mock backend object
     MockBackendInitialiser initialiser; // Register the Mock Backend
     auto backendObjPtr = CreateBackendObject(MockBackendId());
-    BOOST_TEST((backendObjPtr != nullptr));
+    CHECK((backendObjPtr != nullptr));
 
     // Optimize the subgraph
     OptimizationViews optimizationViews;
 
     // Check that the optimization is carried out correctly
-    BOOST_CHECK_NO_THROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
+    CHECK_NOTHROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
 
     // ===============================================================================
     // The expected results are:
@@ -1077,7 +1076,7 @@
     // -----------------------
 
     OptimizationViews::Substitutions substitutions = optimizationViews.GetSubstitutions();
-    BOOST_TEST(substitutions.size() == 3);
+    CHECK(substitutions.size() == 3);
     // Sort into a consistent order
     std::sort(substitutions.begin(), substitutions.end(),
         [](auto s1, auto s2) { return strcmp(s1.m_SubstitutableSubgraph.GetLayers().front()->GetName(),
@@ -1122,14 +1121,14 @@
     // Check the failed subgraphs
     // --------------------------
 
-    BOOST_TEST(optimizationViews.GetFailedSubgraphs().empty());
+    CHECK(optimizationViews.GetFailedSubgraphs().empty());
 
     // -----------------------------
     // Check the untouched subgraphs
     // -----------------------------
 
     OptimizationViews::Subgraphs untouchedSubgraphs = optimizationViews.GetUntouchedSubgraphs();
-    BOOST_TEST(untouchedSubgraphs.size() == 2);
+    CHECK(untouchedSubgraphs.size() == 2);
     // Sort into a consistent order
     std::sort(untouchedSubgraphs.begin(), untouchedSubgraphs.end(), [](auto s1, auto s2) {
         return strcmp(s1.GetLayers().front()->GetName(), s2.GetLayers().front()->GetName()) < 0;
@@ -1172,31 +1171,31 @@
 
     // Create a partially optimizable subgraph
     SubgraphViewSelector::SubgraphViewPtr subgraphPtr = BuildPartiallyOptimizableSubgraph2(graph, layersInGraph);
-    BOOST_TEST((subgraphPtr != nullptr));
+    CHECK((subgraphPtr != nullptr));
 
     const SubgraphView::InputSlots&  subgraphInputSlots  = subgraphPtr->GetInputSlots();
     const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots();
     const SubgraphView::Layers&      subgraphLayers      = subgraphPtr->GetLayers();
 
-    BOOST_TEST(subgraphInputSlots.size()  == 2);
-    BOOST_TEST(subgraphOutputSlots.size() == 1);
-    BOOST_TEST(subgraphLayers.size()      == 4);
+    CHECK(subgraphInputSlots.size()  == 2);
+    CHECK(subgraphOutputSlots.size() == 1);
+    CHECK(subgraphLayers.size()      == 4);
 
-    BOOST_TEST(Contains(layersInGraph, "conv1 layer"));
-    BOOST_TEST(Contains(layersInGraph, "conv2 layer unoptimizable"));
-    BOOST_TEST(Contains(layersInGraph, "conv3 layer"));
-    BOOST_TEST(Contains(layersInGraph, "add layer"));
+    CHECK(Contains(layersInGraph, "conv1 layer"));
+    CHECK(Contains(layersInGraph, "conv2 layer unoptimizable"));
+    CHECK(Contains(layersInGraph, "conv3 layer"));
+    CHECK(Contains(layersInGraph, "add layer"));
 
     // Create a mock backend object
     MockBackendInitialiser initialiser; // Register the Mock Backend
     auto backendObjPtr = CreateBackendObject(MockBackendId());
-    BOOST_TEST((backendObjPtr != nullptr));
+    CHECK((backendObjPtr != nullptr));
 
     // Optimize the subgraph
     OptimizationViews optimizationViews;
 
     // Check that the optimization is carried out correctly
-    BOOST_CHECK_NO_THROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
+    CHECK_NOTHROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
 
     // ==============================================================================
     // The expected results are:
@@ -1210,7 +1209,7 @@
     // -----------------------
 
     const OptimizationViews::Substitutions& substitutions = optimizationViews.GetSubstitutions();
-    BOOST_TEST(substitutions.size() == 1);
+    CHECK(substitutions.size() == 1);
 
     ExpectedSubgraphSize expectedSubstitutableSubgraphSizes{ 2, 1, 3 };
     ExpectedSubgraphSize expectedReplacementSubgraphSizes{ 2, 1, 1 };
@@ -1241,14 +1240,14 @@
     // Check the failed subgraphs
     // --------------------------
 
-    BOOST_TEST(optimizationViews.GetFailedSubgraphs().empty());
+    CHECK(optimizationViews.GetFailedSubgraphs().empty());
 
     // -----------------------------
     // Check the untouched subgraphs
     // -----------------------------
 
     const OptimizationViews::Subgraphs& untouchedSubgraphs = optimizationViews.GetUntouchedSubgraphs();
-    BOOST_TEST(untouchedSubgraphs.size() == 1);
+    CHECK(untouchedSubgraphs.size() == 1);
 
     std::vector<ExpectedSubgraphSize> expectedUntouchedSubgraphSizes{ { 1, 1, 1 } };
     std::vector<SubgraphView::InputSlots> expectedUntouchedInputSlots
@@ -1276,15 +1275,15 @@
 
 } // Anonymous namespace
 
-BOOST_AUTO_TEST_SUITE(OptimizeSubGraph)
+TEST_SUITE("OptimizeSubGraph")
+{
+TEST_CASE("FullyUnsupportedSubgraph1")     { FullyUnsupporteSubgraphTestImpl1();      }
+TEST_CASE("FullyUnsupportedSubgraph2")     { FullyUnsupporteSubgraphTestImpl2();      }
+TEST_CASE("FullyOptimizableSubgraph1")     { FullyOptimizableSubgraphTestImpl1();     }
+TEST_CASE("FullyOptimizableSubgraph2")     { FullyOptimizableSubgraphTestImpl2();     }
+TEST_CASE("PartiallySupportedSubgraph")    { PartiallySupportedSubgraphTestImpl();    }
+TEST_CASE("FullyUnoptimizableSubgraph")    { FullyUnoptimizableSubgraphTestImpl1();   }
+TEST_CASE("PartiallyOptimizableSubgraph1") { PartiallyOptimizableSubgraphTestImpl1(); }
+TEST_CASE("PartiallyOptimizableSubgraph2") { PartiallyOptimizableSubgraphTestImpl2(); }
 
-BOOST_AUTO_TEST_CASE(FullyUnsupportedSubgraph1)     { FullyUnsupporteSubgraphTestImpl1();      }
-BOOST_AUTO_TEST_CASE(FullyUnsupportedSubgraph2)     { FullyUnsupporteSubgraphTestImpl2();      }
-BOOST_AUTO_TEST_CASE(FullyOptimizableSubgraph1)     { FullyOptimizableSubgraphTestImpl1();     }
-BOOST_AUTO_TEST_CASE(FullyOptimizableSubgraph2)     { FullyOptimizableSubgraphTestImpl2();     }
-BOOST_AUTO_TEST_CASE(PartiallySupportedSubgraph)    { PartiallySupportedSubgraphTestImpl();    }
-BOOST_AUTO_TEST_CASE(FullyUnoptimizableSubgraph)    { FullyUnoptimizableSubgraphTestImpl1();   }
-BOOST_AUTO_TEST_CASE(PartiallyOptimizableSubgraph1) { PartiallyOptimizableSubgraphTestImpl1(); }
-BOOST_AUTO_TEST_CASE(PartiallyOptimizableSubgraph2) { PartiallyOptimizableSubgraphTestImpl2(); }
-
-BOOST_AUTO_TEST_SUITE_END()
+}
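Note on the suite conversion used throughout this patch: the BOOST_AUTO_TEST_SUITE / BOOST_AUTO_TEST_SUITE_END pair becomes doctest's block-scoped TEST_SUITE, so the trailing "}" above closes the suite rather than a macro. A minimal sketch of the pattern, with illustrative names that are not part of this patch:

    #include <doctest/doctest.h>

    TEST_SUITE("ExampleSuite")
    {
    TEST_CASE("ExampleCase")
    {
        CHECK(1 + 1 == 2);
    }
    } // end of TEST_SUITE("ExampleSuite")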
diff --git a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
index 66d166f..2c74690 100644
--- a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
@@ -3,17 +3,16 @@
 // SPDX-License-Identifier: MIT
 //
 
-
 #include <Graph.hpp>
 #include <Network.hpp>
 
 #include <reference/RefWorkloadFactory.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(OptimizedNetwork)
-
-BOOST_AUTO_TEST_CASE(SerializeToDot)
+TEST_SUITE("OptimizedNetwork")
+{
+TEST_CASE("SerializeToDot")
 {
     // build up the structure of the network
     armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -62,10 +61,10 @@
         "    " << addId << " -> " << outputId << " [label=< [4] >];\n"
         "}\n";
 
-    BOOST_TEST(ss.str() == expected.str());
+    CHECK(ss.str() == expected.str());
 }
 
-BOOST_AUTO_TEST_CASE(OptimizeValidateDeviceNonSupportLayerNoFallback)
+TEST_CASE("OptimizeValidateDeviceNonSupportLayerNoFallback")
 {
     // build up the structure of the network
     armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -93,16 +92,16 @@
     try
     {
         Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
-        BOOST_FAIL("Should have thrown an exception.");
+        FAIL("Should have thrown an exception.");
     }
     catch (const armnn::InvalidArgumentException& e)
     {
         // Different exceptions are thrown on different backends
     }
-    BOOST_CHECK(errMessages.size() > 0);
+    CHECK(errMessages.size() > 0);
 }
 
-BOOST_AUTO_TEST_CASE(OptimizeValidateDeviceNonSupportLayerWithFallback)
+TEST_CASE("OptimizeValidateDeviceNonSupportLayerWithFallback")
 {
     // build up the structure of the network
     armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -126,7 +125,7 @@
 
     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
-    BOOST_REQUIRE(optNet);
+    REQUIRE(optNet);
 
     armnn::Graph& graph = GetGraphForTesting(optNet.get());
     graph.AllocateDynamicBuffers();
@@ -139,19 +138,19 @@
 #if defined(ARMCOMPUTENEON_ENABLED)
         if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
         {
-            BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
+            CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
         }
         else if (layer->GetType() == armnn::LayerType::Normalization)
         {
-            BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
+            CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
         }
 #else
-        BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
+        CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
 #endif
     }
 }
 
-BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDevice)
+TEST_CASE("OptimizeValidateWorkloadsUndefinedComputeDevice")
 {
     const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);
 
@@ -213,16 +212,16 @@
     try
     {
         Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
-        BOOST_FAIL("Should have thrown an exception.");
+        FAIL("Should have thrown an exception.");
     }
     catch (const armnn::InvalidArgumentException& e)
     {
         // Different exceptions are thrown on different backends
     }
-    BOOST_CHECK(errMessages.size() > 0);
+    CHECK(errMessages.size() > 0);
 }
 
-BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDeviceWithFallback)
+TEST_CASE("OptimizeValidateWorkloadsUndefinedComputeDeviceWithFallback")
 {
     const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);
 
@@ -281,7 +280,7 @@
     std::vector<armnn::BackendId> backends = { armnn::Compute::Undefined, armnn::Compute::CpuRef };
 
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
-    BOOST_CHECK(optNet);
+    CHECK(optNet);
 
     armnn::Graph& graph = GetGraphForTesting(optNet.get());
     graph.AllocateDynamicBuffers();
@@ -290,13 +289,13 @@
     armnn::RefWorkloadFactory fact;
     for (auto&& layer : graph)
     {
-        BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
-        BOOST_CHECK_NO_THROW(
+        CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
+        CHECK_NOTHROW(
             layer->CreateWorkload(fact));
     }
 }
 
-BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsDuplicateComputeDeviceWithFallback)
+TEST_CASE("OptimizeValidateWorkloadsDuplicateComputeDeviceWithFallback")
 {
     // build up the structure of the network
     armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -323,7 +322,7 @@
                                              armnn::Compute::CpuRef };
 
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
-    BOOST_REQUIRE(optNet);
+    REQUIRE(optNet);
 
     armnn::Graph& graph = GetGraphForTesting(optNet.get());
     graph.AllocateDynamicBuffers();
@@ -338,25 +337,25 @@
 #if defined(ARMCOMPUTENEON_ENABLED)
         if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
         {
-            BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
+            CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
         }
         else if (layer->GetType() == armnn::LayerType::Normalization)
         {
-            BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
+            CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
         }
 #elif defined(ARMCOMPUTECL_ENABLED)
         if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
         {
-            BOOST_CHECK(layer->GetBackendId() == armnn::Compute::GpuAcc);
+            CHECK(layer->GetBackendId() == armnn::Compute::GpuAcc);
         }
         else if (layer->GetType() == armnn::LayerType::Normalization)
         {
-            BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
+            CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
         }
 #else
-        BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
+        CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
 #endif
     }
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
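The mapping for the aborting assertions in this file is direct: BOOST_FAIL becomes FAIL and BOOST_REQUIRE becomes REQUIRE, both of which stop the current test case on failure. A minimal sketch of the expected-exception pattern used above, with a hypothetical helper standing in for the real Optimize call:

    REQUIRE(optNet);            // aborts the test case if the optimized network is null
    try
    {
        DoSomethingInvalid();   // hypothetical call expected to throw
        FAIL("Should have thrown an exception.");
    }
    catch (const armnn::InvalidArgumentException&)
    {
        // expected on this path
    }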
diff --git a/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp
index 0dc1e78..e11553d 100644
--- a/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp
@@ -10,6 +10,8 @@
 
 #include <backendsCommon/test/CommonTestUtils.hpp>
 
+#include <doctest/doctest.h>
+
 namespace
 {
 template<typename armnn::DataType DataType>
@@ -56,7 +58,7 @@
 
     INetworkPtr net = CreatePreluNetwork<ArmnnType>(inputInfo, alphaInfo, outputInfo);
 
-    BOOST_TEST_CHECKPOINT("Create a network");
+    CHECK(net);
 
     std::map<int, std::vector<T>> inputTensorData          = { { 0, inputData }, { 1, alphaData} };
     std::map<int, std::vector<T>> expectedOutputTensorData = { { 0, expectedOutputData } };
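The BOOST_TEST_CHECKPOINT calls in these end-to-end helpers are replaced with CHECK(net), which asserts that the created network is non-null instead of recording a checkpoint. If checkpoint-style context is wanted, doctest's INFO macro attaches a message that is reported only when a later assertion in the same scope fails; a minimal sketch under that assumption:

    INFO("Create a network");    // reported only if an assertion below fails
    CHECK(net != nullptr);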
diff --git a/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp
index 9949824..281bed1 100644
--- a/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp
@@ -11,7 +11,7 @@
 #include <armnn/INetwork.hpp>
 #include <armnn/LstmParams.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 namespace
 {
@@ -260,11 +260,11 @@
     constexpr int8_t toleranceInt8 = 1;
     for (unsigned int i = 0u; i < outputStateOutResult.size(); ++i)
     {
-        BOOST_TEST(IsCloseEnough(outputStateOutVector[i], outputStateOutResult[i], toleranceInt8));
+        CHECK(IsCloseEnough(outputStateOutVector[i], outputStateOutResult[i], toleranceInt8));
     }
 
     for (unsigned int i = 0u; i < outputResult.size(); ++i)
     {
-        BOOST_TEST(IsCloseEnough(outputVector[i], outputResult[i], toleranceInt8));
+        CHECK(IsCloseEnough(outputVector[i], outputResult[i], toleranceInt8));
     }
 }
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
index c68051c..a2fadc7 100644
--- a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
@@ -17,7 +17,7 @@
 
 #include <test/TensorHelpers.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <type_traits>
 
@@ -196,8 +196,6 @@
     // Builds up the structure of the network
     armnn::INetworkPtr net = CreateQuantizedLstmNetwork(inputDesc.GetShape(), outputDesc.GetShape());
 
-    BOOST_TEST_CHECKPOINT("create a network");
-
     IRuntime::CreationOptions options;
     IRuntimePtr runtime(IRuntime::Create(options));
 
@@ -232,12 +230,12 @@
     constexpr int16_t toleranceInt16 = 2;
     for (unsigned int i = 0u; i < cellStateOutResult.size(); ++i)
     {
-        BOOST_CHECK(IsCloseEnough(cellStateOutVector[i], cellStateOutResult[i], toleranceInt16));
+        CHECK(IsCloseEnough(cellStateOutVector[i], cellStateOutResult[i], toleranceInt16));
     }
 
     constexpr uint8_t toleranceUint8 = 1;
     for (unsigned int i = 0u; i < outputStateOutResult.size(); ++i)
     {
-        BOOST_TEST(IsCloseEnough(outputStateOutVector[i], outputStateOutResult[i], toleranceUint8));
+        CHECK(IsCloseEnough(outputStateOutVector[i], outputStateOutResult[i], toleranceUint8));
     }
 }
diff --git a/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp
index a67bd46..461b3b9 100644
--- a/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp
@@ -12,6 +12,8 @@
 
 #include <ResolveType.hpp>
 
+#include <doctest/doctest.h>
+
 namespace
 {
 
@@ -50,7 +52,7 @@
 
     armnn::INetworkPtr network = CreateRankNetwork(inputInfo, outputInfo);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(network);
 
     std::map<int, std::vector<T>> inputTensorData   = {{ 0, inputData }};
     std::map<int, std::vector<int32_t>> expectedOutputTensorData = {{ 0, expectedOutputData }};
diff --git a/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp
index cde85ca..aa7af11 100644
--- a/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp
@@ -14,8 +14,6 @@
 
 #include <backendsCommon/test/CommonTestUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
-
 #include <map>
 #include <vector>
 
diff --git a/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp
index 6d1a7b0..4e5baad 100644
--- a/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp
@@ -16,7 +16,7 @@
 
 #include <test/TestUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 namespace
 {
@@ -81,7 +81,7 @@
             dataLayout,
             blockSize);
 
-    BOOST_TEST_CHECKPOINT("Create a network");
+    CHECK(net);
 
     std::map<int, std::vector<float>> inputTensorData = { { 0, inputData } };
     std::map<int, std::vector<float>> expectedOutputTensorData = { { 0, expectedOutputData } };
diff --git a/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp
index 257a81b..64e24e5 100644
--- a/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp
@@ -12,7 +12,7 @@
 
 #include <backendsCommon/test/CommonTestUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <vector>
 
@@ -86,7 +86,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateSplitterNetwork<ArmnnType>(inputShape, outputShapes, splitAxis, numSplit);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     // Creates structures for input & output.
     std::vector<T> inputData{ 1, 2, 3, 4 };
@@ -114,7 +114,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateSplitterNetwork<ArmnnType>(inputShape, outputShapes, splitAxis, numSplit);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     // Creates structures for input & output.
     std::vector<T> inputData{
@@ -149,7 +149,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateSplitterNetwork<ArmnnType>(inputShape, outputShapes, splitAxis, numSplit);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     // Creates structures for input & output.
     std::vector<T> inputData{
@@ -187,7 +187,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateSplitterNetwork<ArmnnType>(inputShape, outputShapes, splitAxis, numSplit);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     // Creates structures for input & output.
     std::vector<T> inputData{
@@ -235,7 +235,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateSplitterNetwork<ArmnnType>(inputShape, outputShapes, splitAxis, numSplit);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     // Creates structures for input & output.
     std::vector<T> inputData{
@@ -283,7 +283,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateSplitterNetwork<ArmnnType>(inputShape, outputShapes, splitAxis, numSplit);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     // Creates structures for input & output.
     std::vector<T> inputData{
@@ -323,7 +323,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateSplitterNetwork<ArmnnType>(inputShape, outputShapes, splitAxis, numSplit);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     // Creates structures for input & output.
     std::vector<T> inputData{
@@ -403,7 +403,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateSplitterNetwork<ArmnnType>(inputShape, outputShapes, splitAxis, numSplit);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     // Creates structures for input & output.
     std::vector<T> inputData{
@@ -483,7 +483,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateSplitterNetwork<ArmnnType>(inputShape, outputShapes, splitAxis, numSplit);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     // Creates structures for input & output.
     std::vector<T> inputData{
@@ -562,7 +562,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateSplitterNetwork<ArmnnType>(inputShape, outputShapes, splitAxis, numSplit);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     // Creates structures for input & output.
     std::vector<T> inputData{
diff --git a/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp b/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
index 0ad6bc4..a552a6a 100644
--- a/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
+++ b/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
@@ -14,7 +14,7 @@
 #include <AsyncExecutionCallback.hpp>
 #include <backendsCommon/test/CommonTestUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <vector>
 
@@ -109,7 +109,7 @@
             std::vector<TOutput> out = outputStorageVec[i].at(it.first);
             for (unsigned int j = 0; j < out.size(); ++j)
             {
-                BOOST_CHECK(Compare<ArmnnOType>(it.second[j], out[j], tolerance) == true);
+                CHECK(Compare<ArmnnOType>(it.second[j], out[j], tolerance) == true);
             }
         }
     }
@@ -197,7 +197,7 @@
             cb->Wait();
             
             // Checks the results.
-            BOOST_CHECK(cb->GetStatus() == Status::Success);
+            CHECK(cb->GetStatus() == Status::Success);
         }
     }
 
@@ -207,7 +207,7 @@
 
         for (unsigned int i = 0; i < out.size(); ++i)
         {
-            BOOST_CHECK(Compare<ArmnnOType>(it.second[i], out[i], tolerance) == true);
+            CHECK(Compare<ArmnnOType>(it.second[i], out[i], tolerance) == true);
         }
     }
 }
@@ -282,8 +282,7 @@
                                                            ellipsisMask,
                                                            newAxisMask);
 
-    BOOST_TEST_CHECKPOINT("create a network");
-
+    CHECK(net);
     // Creates structures for input & output.
     std::vector<T> inputData{
             1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f,
@@ -337,7 +336,7 @@
                                                            ellipsisMask,
                                                            newAxisMask);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     // Creates structures for input & output.
     std::vector<T> inputData1{
diff --git a/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp
index 57fc200..133829c 100644
--- a/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp
@@ -14,8 +14,6 @@
 
 #include <backendsCommon/test/CommonTestUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
-
 #include <map>
 #include <vector>
 
diff --git a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
index 182c913..2034a65 100644
--- a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
+++ b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
@@ -13,13 +13,13 @@
 #include <reference/workloads/RefWorkloads.hpp>
 #include <reference/RefWorkloadFactory.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(WorkloadInfoValidation)
-
-BOOST_AUTO_TEST_CASE(BatchNormalizationQueueDescriptor_Validate_DifferentQuantizationData)
+TEST_SUITE("WorkloadInfoValidation")
+{
+TEST_CASE("BatchNormalizationQueueDescriptor_Validate_DifferentQuantizationData")
 {
     TensorShape inputShape { 1, 3, 2, 2 };
     TensorShape outputShape { 1, 3, 2, 2 };
@@ -42,18 +42,18 @@
     invalidData.m_Beta= &sameTensor;
     invalidData.m_Gamma = &sameTensor;
 
-    BOOST_CHECK_NO_THROW(RefBatchNormalizationWorkload(invalidData, invalidInfo));
+    CHECK_NOTHROW(RefBatchNormalizationWorkload(invalidData, invalidInfo));
 }
 
-BOOST_AUTO_TEST_CASE(QueueDescriptor_Validate_WrongNumOfInputsOutputs)
+TEST_CASE("QueueDescriptor_Validate_WrongNumOfInputsOutputs")
 {
     InputQueueDescriptor invalidData;
     WorkloadInfo invalidInfo;
     //Invalid argument exception is expected, because no inputs and no outputs were defined.
-    BOOST_CHECK_THROW(RefWorkloadFactory().CreateInput(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(RefWorkloadFactory().CreateInput(invalidData, invalidInfo), armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(RefPooling2dFloat32Workload_Validate_WrongDimTensor)
+TEST_CASE("RefPooling2dFloat32Workload_Validate_WrongDimTensor")
 {
     armnn::TensorInfo inputTensorInfo;
     armnn::TensorInfo outputTensorInfo;
@@ -71,10 +71,10 @@
     AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
 
     // Invalid argument exception is expected, input tensor has to be 4D.
-    BOOST_CHECK_THROW(RefPooling2dWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(RefPooling2dWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(SoftmaxQueueDescriptor_Validate_WrongInputHeight)
+TEST_CASE("SoftmaxQueueDescriptor_Validate_WrongInputHeight")
 {
     unsigned int inputHeight = 1;
     unsigned int inputWidth = 1;
@@ -102,10 +102,10 @@
     AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
 
     //Invalid argument exception is expected, because height != 1.
-    BOOST_CHECK_THROW(RefSoftmaxWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(RefSoftmaxWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(FullyConnectedQueueDescriptor_Validate_RequiredDataMissing)
+TEST_CASE("FullyConnectedQueueDescriptor_Validate_RequiredDataMissing")
 {
     unsigned int inputWidth = 1;
     unsigned int inputHeight = 1;
@@ -149,11 +149,11 @@
 
     //Invalid argument exception is expected, because not all required fields have been provided.
     //In particular inputsData[0], outputsData[0] and weightsData can not be null.
-    BOOST_CHECK_THROW(RefFullyConnectedWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(RefFullyConnectedWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
 }
 
 
-BOOST_AUTO_TEST_CASE(NormalizationQueueDescriptor_Validate_WrongInputHeight)
+TEST_CASE("NormalizationQueueDescriptor_Validate_WrongInputHeight")
 {
     constexpr unsigned int inputNum = 5;
     constexpr unsigned int inputHeight   = 32;
@@ -197,10 +197,10 @@
     invalidData.m_Parameters.m_K               = kappa;
 
     //Invalid argument exception is expected, because input height != output height.
-    BOOST_CHECK_THROW(RefNormalizationWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(RefNormalizationWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(SplitterQueueDescriptor_Validate_WrongWindow)
+TEST_CASE("SplitterQueueDescriptor_Validate_WrongWindow")
 {
     constexpr unsigned int inputNum = 1;
     constexpr unsigned int inputHeight   = 32;
@@ -233,16 +233,15 @@
     armnn::SplitterQueueDescriptor::ViewOrigin window(wOrigin);
     invalidData.m_ViewOrigins.push_back(window);
 
-    BOOST_TEST_INFO("Invalid argument exception is expected, because split window dimensionality does not "
-        "match input.");
-    BOOST_CHECK_THROW(RefSplitterWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    INFO("Invalid argument exception is expected, because split window dimensionality does not match input.");
+    CHECK_THROWS_AS(RefSplitterWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
 
     // Invalid, since window extends past the boundary of input tensor.
     std::vector<unsigned int> wOrigin3 = {0, 0, 15, 0};
     armnn::SplitterQueueDescriptor::ViewOrigin window3(wOrigin3);
     invalidData.m_ViewOrigins[0] = window3;
-    BOOST_TEST_INFO("Invalid argument exception is expected (wOrigin3[2]+ outputHeight > inputHeight");
-    BOOST_CHECK_THROW(RefSplitterWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    INFO("Invalid argument exception is expected (wOrigin3[2]+ outputHeight > inputHeight");
+    CHECK_THROWS_AS(RefSplitterWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
 
 
     std::vector<unsigned int> wOrigin4 = {0, 0, 0, 0};
@@ -253,12 +252,12 @@
     armnn::SplitterQueueDescriptor::ViewOrigin window5(wOrigin4);
     invalidData.m_ViewOrigins.push_back(window5);
 
-    BOOST_TEST_INFO("Invalid exception due to number of split windows not matching number of outputs.");
-    BOOST_CHECK_THROW(RefSplitterWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    INFO("Invalid exception due to number of split windows not matching number of outputs.");
+    CHECK_THROWS_AS(RefSplitterWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
 }
 
 
-BOOST_AUTO_TEST_CASE(ConcatQueueDescriptor_Validate_WrongWindow)
+TEST_CASE("ConcatQueueDescriptor_Validate_WrongWindow")
 {
     constexpr unsigned int inputNum = 1;
     constexpr unsigned int inputChannels = 3;
@@ -291,16 +290,15 @@
     armnn::ConcatQueueDescriptor::ViewOrigin window(wOrigin);
     invalidData.m_ViewOrigins.push_back(window);
 
-    BOOST_TEST_INFO("Invalid argument exception is expected, because merge window dimensionality does not "
-        "match input.");
-    BOOST_CHECK_THROW(RefConcatWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    INFO("Invalid argument exception is expected, because merge window dimensionality does not match input.");
+    CHECK_THROWS_AS(RefConcatWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
 
     // Invalid, since window extends past the boundary of output tensor.
     std::vector<unsigned int> wOrigin3 = {0, 0, 15, 0};
     armnn::ConcatQueueDescriptor::ViewOrigin window3(wOrigin3);
     invalidData.m_ViewOrigins[0] = window3;
-    BOOST_TEST_INFO("Invalid argument exception is expected (wOrigin3[2]+ inputHeight > outputHeight");
-    BOOST_CHECK_THROW(RefConcatWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    INFO("Invalid argument exception is expected (wOrigin3[2]+ inputHeight > outputHeight");
+    CHECK_THROWS_AS(RefConcatWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
 
 
     std::vector<unsigned int> wOrigin4 = {0, 0, 0, 0};
@@ -311,11 +309,11 @@
     armnn::ConcatQueueDescriptor::ViewOrigin window5(wOrigin4);
     invalidData.m_ViewOrigins.push_back(window5);
 
-    BOOST_TEST_INFO("Invalid exception due to number of merge windows not matching number of inputs.");
-    BOOST_CHECK_THROW(RefConcatWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    INFO("Invalid exception due to number of merge windows not matching number of inputs.");
+    CHECK_THROWS_AS(RefConcatWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(AdditionQueueDescriptor_Validate_InputNumbers)
+TEST_CASE("AdditionQueueDescriptor_Validate_InputNumbers")
 {
     armnn::TensorInfo input1TensorInfo;
     armnn::TensorInfo input2TensorInfo;
@@ -336,20 +334,20 @@
     AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
 
     // Too few inputs.
-    BOOST_CHECK_THROW(RefAdditionWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(RefAdditionWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
 
     AddInputToWorkload(invalidData, invalidInfo, input2TensorInfo, nullptr);
 
     // Correct.
-    BOOST_CHECK_NO_THROW(RefAdditionWorkload<>(invalidData, invalidInfo));
+    CHECK_NOTHROW(RefAdditionWorkload<>(invalidData, invalidInfo));
 
     AddInputToWorkload(invalidData, invalidInfo, input3TensorInfo, nullptr);
 
     // Too many inputs.
-    BOOST_CHECK_THROW(RefAdditionWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(RefAdditionWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(AdditionQueueDescriptor_Validate_InputShapes)
+TEST_CASE("AdditionQueueDescriptor_Validate_InputShapes")
 {
     armnn::TensorInfo input1TensorInfo;
     armnn::TensorInfo input2TensorInfo;
@@ -371,7 +369,7 @@
         AddInputToWorkload(invalidData, invalidInfo, input2TensorInfo, nullptr);
         AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
 
-        BOOST_CHECK_THROW(RefAdditionWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
+        CHECK_THROWS_AS(RefAdditionWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
     }
 
     // Output size not compatible with input sizes.
@@ -388,11 +386,11 @@
         AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
 
         // Output differs.
-        BOOST_CHECK_THROW(RefAdditionWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
+        CHECK_THROWS_AS(RefAdditionWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
     }
 }
 
-BOOST_AUTO_TEST_CASE(MultiplicationQueueDescriptor_Validate_InputTensorDimensionMismatch)
+TEST_CASE("MultiplicationQueueDescriptor_Validate_InputTensorDimensionMismatch")
 {
     armnn::TensorInfo input0TensorInfo;
     armnn::TensorInfo input1TensorInfo;
@@ -423,7 +421,7 @@
         AddInputToWorkload(invalidData, invalidInfo, input0TensorInfo, nullptr);
         AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);
 
-        BOOST_CHECK_THROW(RefMultiplicationWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
+        CHECK_THROWS_AS(RefMultiplicationWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
     }
 
     // Checks dimension consistency for input and output tensors.
@@ -448,11 +446,11 @@
         AddInputToWorkload(invalidData, invalidInfo, input0TensorInfo, nullptr);
         AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);
 
-        BOOST_CHECK_THROW(RefMultiplicationWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
+        CHECK_THROWS_AS(RefMultiplicationWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
     }
 }
 
-BOOST_AUTO_TEST_CASE(ReshapeQueueDescriptor_Validate_MismatchingNumElements)
+TEST_CASE("ReshapeQueueDescriptor_Validate_MismatchingNumElements")
 {
     armnn::TensorInfo inputTensorInfo;
     armnn::TensorInfo outputTensorInfo;
@@ -471,11 +469,11 @@
     AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
 
     // InvalidArgumentException is expected, because the number of elements doesn't match.
-    BOOST_CHECK_THROW(RefReshapeWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(RefReshapeWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
 }
 
 
-BOOST_AUTO_TEST_CASE(LstmQueueDescriptor_Validate)
+TEST_CASE("LstmQueueDescriptor_Validate")
 {
     armnn::DataType dataType = armnn::DataType::Float32;
 
@@ -568,61 +566,61 @@
     data.m_Parameters.m_LayerNormEnabled = true;
 
     // check wrong number of outputs
-    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
     AddOutputToWorkload(data, info, outputTensorInfo, nullptr);
 
     // check wrong cifg parameter configuration
     data.m_Parameters.m_CifgEnabled = true;
     armnn::TensorInfo scratchBufferTensorInfo2({batchSize, numUnits * 3}, dataType, qScale, qOffset);
     SetWorkloadOutput(data, info, 0, scratchBufferTensorInfo2, nullptr);
-    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
     data.m_Parameters.m_CifgEnabled = false;
     SetWorkloadOutput(data, info, 0, scratchBufferTensorInfo, nullptr);
 
     // check wrong inputGateBias configuration
     data.m_InputGateBias = nullptr;
-    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
     data.m_InputGateBias = &inputGateBiasTensor;
 
     // check inconsistent projection parameters
     data.m_Parameters.m_ProjectionEnabled = false;
-    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
     data.m_Parameters.m_ProjectionEnabled = true;
     data.m_ProjectionWeights = nullptr;
-    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
     data.m_ProjectionWeights = &projectionWeightsTensor;
 
     // check missing input layer normalisation weights
     data.m_InputLayerNormWeights = nullptr;
-    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
     data.m_InputLayerNormWeights = &inputLayerNormWeightsTensor;
 
     // layer norm disabled but normalisation weights are present
     data.m_Parameters.m_LayerNormEnabled = false;
-    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
     data.m_Parameters.m_LayerNormEnabled = true;
 
     // check invalid outputTensor shape
     armnn::TensorInfo incorrectOutputTensorInfo({batchSize, outputSize + 1}, dataType, qScale, qOffset);
     SetWorkloadOutput(data, info, 3, incorrectOutputTensorInfo, nullptr);
-    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
     SetWorkloadOutput(data, info, 3, outputTensorInfo, nullptr);
 
     // check invalid cell clipping parameters
     data.m_Parameters.m_ClippingThresCell = -1.0f;
-    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
     data.m_Parameters.m_ClippingThresCell = 0.0f;
 
     // check invalid projection clipping parameters
     data.m_Parameters.m_ClippingThresProj = -1.0f;
-    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
     data.m_Parameters.m_ClippingThresProj = 0.0f;
 
     // check correct configuration
-    BOOST_CHECK_NO_THROW(data.Validate(info));
+    CHECK_NOTHROW(data.Validate(info));
 }
 
-BOOST_AUTO_TEST_CASE(BiasPerAxisQuantization_Validate)
+TEST_CASE("BiasPerAxisQuantization_Validate")
 {
     constexpr unsigned int nInput  = 1u;
     constexpr unsigned int cInput  = 3u;
@@ -667,7 +665,7 @@
     ScopedTensorHandle biasHandle1(biasInfo1);
     queueDescriptor.m_Bias = &biasHandle1;
 
-    BOOST_CHECK_NO_THROW(queueDescriptor.Validate(workloadInfo));
+    CHECK_NOTHROW(queueDescriptor.Validate(workloadInfo));
 
     // Test 2: wrong per-axis quantization values
     const std::vector<float> biasPerAxisScales2 = { 4.00f, 5.00f };
@@ -676,7 +674,7 @@
     ScopedTensorHandle biasHandle2(biasInfo2);
     queueDescriptor.m_Bias = &biasHandle2;
 
-    BOOST_CHECK_NO_THROW(queueDescriptor.Validate(workloadInfo));
+    CHECK_NOTHROW(queueDescriptor.Validate(workloadInfo));
 
     // Test 3: mismatched number of quantization scales
     const std::vector<float> biasPerAxisScales3 = { 3.75f, 5.25f, 5.25f };
@@ -685,7 +683,7 @@
     ScopedTensorHandle biasHandle3(biasInfo3);
     queueDescriptor.m_Bias = &biasHandle3;
 
-    BOOST_CHECK_THROW(queueDescriptor.Validate(workloadInfo), InvalidArgumentException);
+    CHECK_THROWS_AS(queueDescriptor.Validate(workloadInfo), InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
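For the exception checks in this file the conversion is mechanical: BOOST_CHECK_THROW becomes CHECK_THROWS_AS, BOOST_CHECK_NO_THROW becomes CHECK_NOTHROW, and BOOST_TEST_INFO becomes INFO. A minimal sketch of the combined pattern, with a hypothetical workload type standing in for the reference workloads:

    INFO("Invalid argument exception is expected for a malformed descriptor");
    CHECK_THROWS_AS(SomeRefWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
    CHECK_NOTHROW(SomeRefWorkload(validData, validInfo));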
diff --git a/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
index 9720475..0539cd1 100644
--- a/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
@@ -14,6 +14,8 @@
 
 #include <test/TensorHelpers.hpp>
 
+#include <doctest/doctest.h>
+
 namespace
 {
 
@@ -69,7 +71,7 @@
 
     std::cout.rdbuf(coutStreambuf);
 
-    BOOST_TEST(oss.str() == expectedStringOutput);
+    CHECK(oss.str() == expectedStringOutput);
 
     CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
 
diff --git a/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
index 143f9e0..2472c34 100644
--- a/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
@@ -18,6 +18,8 @@
 
 #include <test/TensorHelpers.hpp>
 
+#include <doctest/doctest.h>
+
 namespace
 {
 
@@ -221,25 +223,25 @@
                                  expectedDetectionBoxes,
                                  outputBoxesHandle->GetShape(),
                                  detectionBoxesInfo.GetShape());
-    BOOST_TEST(result.m_Result, result.m_Message.str());
+    CHECK_MESSAGE(result.m_Result, result.m_Message.str());
 
     result = CompareTensors(actualDetectionClassesOutput,
                             expectedDetectionClasses,
                             classesHandle->GetShape(),
                             detectionClassesInfo.GetShape());
-    BOOST_TEST(result.m_Result, result.m_Message.str());
+    CHECK_MESSAGE(result.m_Result, result.m_Message.str());
 
     result = CompareTensors(actualDetectionScoresOutput,
                             expectedDetectionScores,
                             outputScoresHandle->GetShape(),
                             detectionScoresInfo.GetShape());
-    BOOST_TEST(result.m_Result, result.m_Message.str());
+    CHECK_MESSAGE(result.m_Result, result.m_Message.str());
 
     result = CompareTensors(actualNumDetectionOutput,
                             expectedNumDetections,
                             numDetectionHandle->GetShape(),
                             numDetectionInfo.GetShape());
-    BOOST_TEST(result.m_Result, result.m_Message.str());
+    CHECK_MESSAGE(result.m_Result, result.m_Message.str());
 }
 
 template<armnn::DataType QuantizedType, typename RawType = armnn::ResolveType<QuantizedType>>
diff --git a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
index 11003a2..035c592 100644
--- a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
@@ -20,6 +20,7 @@
 
 #include <test/TensorHelpers.hpp>
 
+#include <doctest/doctest.h>
 namespace
 {
 
@@ -45,11 +46,11 @@
 
     // check shape and compare values
     auto result = CompareTensors(batchVec, expectedOutput, expectedShape, expectedShape);
-    BOOST_TEST(result.m_Result, result.m_Message.str());
+    CHECK_MESSAGE(result.m_Result, result.m_Message.str());
 
     // check if iterator is back at start position
     batchVecEncoder->Set(1.0f);
-    BOOST_TEST(batchVec[0] == 1.0f);
+    CHECK(batchVec[0] == 1.0f);
 }
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -72,11 +73,11 @@
 
     // check shape and compare values
     auto result = CompareTensors(input, expectedOutput, expectedShape, expectedShape);
-    BOOST_TEST(result.m_Result, result.m_Message.str());
+    CHECK_MESSAGE(result.m_Result, result.m_Message.str());
 
     // check if iterator is back at start position
     outputEncoder->Set(1.0f);
-    BOOST_TEST(input[0] == 1.0f);
+    CHECK(input[0] == 1.0f);
 
 }
 
@@ -100,11 +101,11 @@
 
     // check shape and compare values
     auto result = CompareTensors(input, expectedOutput, expectedShape, expectedShape);
-    BOOST_TEST(result.m_Result, result.m_Message.str());
+    CHECK_MESSAGE(result.m_Result, result.m_Message.str());
 
     // check if iterator is back at start position
     outputEncoder->Set(1.0f);
-    BOOST_TEST(input[0] == 1.0f);
+    CHECK(input[0] == 1.0f);
 }
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -129,11 +130,11 @@
 
     // check shape and compare values
     auto result = CompareTensors(batchVec, expectedOutput, expectedShape, expectedShape);
-    BOOST_TEST(result.m_Result, result.m_Message.str());
+    CHECK_MESSAGE(result.m_Result, result.m_Message.str());
 
     // check if iterator is back at start position
     batchVecEncoder->Set(1.0f);
-    BOOST_TEST(batchVec[0] == 1.0f);
+    CHECK(batchVec[0] == 1.0f);
 }
 
 // Lstm Layer tests:
diff --git a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
index cd77572..dae7483 100644
--- a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
@@ -20,7 +20,7 @@
 
 #include <test/TensorHelpers.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <string>
 #include <utility>
diff --git a/src/backends/cl/test/ClContextSerializerTests.cpp b/src/backends/cl/test/ClContextSerializerTests.cpp
index 1fc0fb9..1135e11 100644
--- a/src/backends/cl/test/ClContextSerializerTests.cpp
+++ b/src/backends/cl/test/ClContextSerializerTests.cpp
@@ -7,7 +7,7 @@
 
 #include <cl/test/ClContextControlFixture.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <fstream>
 
@@ -66,9 +66,7 @@
 
 } // anonymous namespace
 
-BOOST_FIXTURE_TEST_SUITE(ClContextSerializer, ClContextControlFixture)
-
-BOOST_AUTO_TEST_CASE(ClContextSerializerTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "ClContextSerializerTest")
 {
     // Get tmp directory and create blank file.
     fs::path filePath = armnnUtils::Filesystem::NamedTempFile("Armnn-CachedNetworkFileTest-TempFile.bin");
@@ -101,24 +99,24 @@
             *net1, backends, runtime->GetDeviceSpec(), optimizerOptions1);
     armnn::IOptimizedNetworkPtr optNet2 = armnn::Optimize(
             *net2, backends, runtime->GetDeviceSpec(), optimizerOptions2);
-    BOOST_CHECK(optNet1);
-    BOOST_CHECK(optNet2);
+    CHECK(optNet1);
+    CHECK(optNet2);
 
     // Cached file should be empty until net1 is loaded into runtime.
-    BOOST_TEST(fs::is_empty(filePathString));
+    CHECK(fs::is_empty(filePathString));
 
     // Load net1 into the runtime.
     armnn::NetworkId netId1;
-    BOOST_TEST(runtime->LoadNetwork(netId1, std::move(optNet1)) == armnn::Status::Success);
+    CHECK(runtime->LoadNetwork(netId1, std::move(optNet1)) == armnn::Status::Success);
 
     // File should now exist and not be empty. It has been serialized.
-    BOOST_TEST(fs::exists(filePathString));
+    CHECK(fs::exists(filePathString));
     std::vector<char> dataSerialized = ReadBinaryFile(filePathString);
-    BOOST_TEST(dataSerialized.size() != 0);
+    CHECK(dataSerialized.size() != 0);
 
     // Load net2 into the runtime using file and deserialize.
     armnn::NetworkId netId2;
-    BOOST_TEST(runtime->LoadNetwork(netId2, std::move(optNet2)) == armnn::Status::Success);
+    CHECK(runtime->LoadNetwork(netId2, std::move(optNet2)) == armnn::Status::Success);
 
     // Run inference and get output data.
     std::vector<uint8_t> outputData1(5);
@@ -128,11 +126,8 @@
     RunInference(netId2, runtime, outputData2);
 
     // Compare outputs from both networks.
-    BOOST_CHECK_EQUAL_COLLECTIONS(outputData1.begin(), outputData1.end(),
-                                  outputData2.begin(), outputData2.end());
+    CHECK(std::equal(outputData1.begin(), outputData1.end(), outputData2.begin(), outputData2.end()));
 
     // Remove temp file created.
     fs::remove(filePath);
 }
-
-BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index 7602cbb..4e40328 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -21,14 +21,16 @@
 #include <cl/workloads/ClWorkloads.hpp>
 #include <cl/workloads/ClWorkloadUtils.hpp>
 
+#include <doctest/doctest.h>
+
 armnn::PredicateResult CompareIClTensorHandleShape(IClTensorHandle* tensorHandle,
                                                    std::initializer_list<unsigned int> expectedDimensions)
 {
     return CompareTensorHandleShape<IClTensorHandle>(tensorHandle, expectedDimensions);
 }
 
-BOOST_FIXTURE_TEST_SUITE(CreateWorkloadCl, ClContextControlFixture)
-
+TEST_SUITE("CreateWorkloadCl")
+{
 template <armnn::DataType DataType>
 static void ClCreateActivationWorkloadTest()
 {
@@ -44,18 +46,18 @@
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
     auto predResult = CompareIClTensorHandleShape(inputHandle, {1, 1});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 
     predResult = CompareIClTensorHandleShape(outputHandle, {1, 1});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CreateActivationFloatWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateActivationFloatWorkload")
 {
     ClCreateActivationWorkloadTest<armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateActivationFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateActivationFloat16Workload")
 {
     ClCreateActivationWorkloadTest<armnn::DataType::Float16>();
 }
@@ -78,14 +80,14 @@
     auto inputHandle2 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[1]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
     auto predResult = CompareIClTensorHandleShape(inputHandle1, {2, 3});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     predResult = CompareIClTensorHandleShape(inputHandle2, {2, 3});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     predResult = CompareIClTensorHandleShape(outputHandle, {2, 3});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateAdditionFloatWorkload")
 {
     ClCreateElementwiseWorkloadTest<ClAdditionWorkload,
                                     AdditionQueueDescriptor,
@@ -93,7 +95,7 @@
                                     armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateAdditionFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateAdditionFloat16Workload")
 {
     ClCreateElementwiseWorkloadTest<ClAdditionWorkload,
                                     AdditionQueueDescriptor,
@@ -101,7 +103,7 @@
                                     armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSubtractionFloatWorkload")
 {
     ClCreateElementwiseWorkloadTest<ClSubtractionWorkload,
                                     SubtractionQueueDescriptor,
@@ -109,7 +111,7 @@
                                     armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSubtractionFloat16Workload")
 {
     ClCreateElementwiseWorkloadTest<ClSubtractionWorkload,
                                     SubtractionQueueDescriptor,
@@ -117,7 +119,7 @@
                                     armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateMultiplicationFloatWorkloadTest")
 {
     ClCreateElementwiseWorkloadTest<ClMultiplicationWorkload,
                                     MultiplicationQueueDescriptor,
@@ -125,7 +127,7 @@
                                     armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateMultiplicationFloat16WorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateMultiplicationFloat16WorkloadTest")
 {
     ClCreateElementwiseWorkloadTest<ClMultiplicationWorkload,
                                     MultiplicationQueueDescriptor,
@@ -133,7 +135,7 @@
                                     armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8WorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateMultiplicationUint8WorkloadTest")
 {
     ClCreateElementwiseWorkloadTest<ClMultiplicationWorkload,
                                     MultiplicationQueueDescriptor,
@@ -141,7 +143,7 @@
                                     armnn::DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateDivisionFloatWorkloadTest")
 {
     ClCreateElementwiseWorkloadTest<ClDivisionWorkload,
                                     DivisionQueueDescriptor,
@@ -149,7 +151,7 @@
                                     armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateDivisionFloat16WorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateDivisionFloat16WorkloadTest")
 {
     ClCreateElementwiseWorkloadTest<ClDivisionWorkload,
                                     DivisionQueueDescriptor,
@@ -174,13 +176,13 @@
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
     auto predResult = CompareIClTensorHandleShape(inputHandle, {2, 3});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 
     predResult = CompareIClTensorHandleShape(outputHandle, {2, 3});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CreateRsqrtFloat32WorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateRsqrtFloat32WorkloadTest")
 {
     ClCreateElementwiseUnaryWorkloadTest<ClRsqrtWorkload, RsqrtQueueDescriptor, armnn::DataType::Float32>(
         UnaryOperation::Rsqrt);
@@ -206,43 +208,43 @@
     {
         case DataLayout::NHWC:
             predResult = CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 });
-            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
             predResult = CompareIClTensorHandleShape(outputHandle, { 2, 4, 4, 3 });
-            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
             break;
         default: // NCHW
             predResult = CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 });
-            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
             predResult = CompareIClTensorHandleShape(outputHandle, { 2, 3, 4, 4 });
-            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     }
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatNchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateBatchNormalizationFloatNchwWorkload")
 {
     ClCreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloatWorkload,
                                            armnn::DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16NchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateBatchNormalizationFloat16NchwWorkload")
 {
     ClCreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloatWorkload,
                                            armnn::DataType::Float16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatNhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateBatchNormalizationFloatNhwcWorkload")
 {
     ClCreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloatWorkload,
                                            armnn::DataType::Float32>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationNhwcFloat16NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateBatchNormalizationNhwcFloat16NhwcWorkload")
 {
     ClCreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloatWorkload,
                                            armnn::DataType::Float16>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvertFp16ToFp32Workload")
 {
     Graph graph;
     ClWorkloadFactory factory =
@@ -254,14 +256,14 @@
     auto inputHandle  = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
     auto predResult = CompareIClTensorHandleShape(inputHandle, {1, 3, 2, 3});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     predResult = CompareIClTensorHandleShape(outputHandle, {1, 3, 2, 3});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
-    BOOST_TEST((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
-    BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32));
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
+    CHECK((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
+    CHECK((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32));
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvertFp32ToFp16Workload")
 {
     Graph graph;
     ClWorkloadFactory factory =
@@ -274,11 +276,11 @@
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
     auto predResult = CompareIClTensorHandleShape(inputHandle, {1, 3, 2, 3});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     predResult = CompareIClTensorHandleShape(outputHandle, {1, 3, 2, 3});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
-    BOOST_TEST((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32));
-    BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
+    CHECK((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32));
+    CHECK((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
 }
 
 template <typename Convolution2dWorkloadType, typename armnn::DataType DataType>
@@ -301,31 +303,31 @@
     Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
     auto inputHandle  = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST((inputHandle->GetShape() == inputShape));
-    BOOST_TEST((outputHandle->GetShape() == outputShape));
+    CHECK((inputHandle->GetShape() == inputShape));
+    CHECK((outputHandle->GetShape() == outputShape));
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvolution2dFloatNchwWorkload")
 {
     ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvolution2dFloatNhwcWorkload")
 {
     ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvolution2dFloat16NchwWorkload")
 {
     ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvolution2dFloat16NhwcWorkload")
 {
     ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFastMathEnabledWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvolution2dFastMathEnabledWorkload")
 {
     Graph graph;
 
@@ -353,7 +355,7 @@
     ARMNN_ASSERT(conv2dWorkload->GetConvolutionMethod() == arm_compute::ConvolutionMethod::WINOGRAD);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvolution2dClCompiledContextWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvolution2dClCompiledContextWorkload")
 {
     using namespace armnn;
 
@@ -434,7 +436,7 @@
 
 
     // Check built programs are empty in context
-    BOOST_TEST(clCompileContext.get_built_programs().empty());
+    CHECK(clCompileContext.get_built_programs().empty());
 
     auto workload = std::make_unique<ClConvolution2dWorkload>(queueDescriptor,
                                                               workloadInfo,
@@ -442,7 +444,7 @@
                                                               clCompileContext);
     ARMNN_ASSERT(workload != nullptr);
     // Check built programs are not empty in context
-    BOOST_TEST(!clCompileContext.get_built_programs().empty());
+    CHECK(!clCompileContext.get_built_programs().empty());
 }
 
 template <typename DepthwiseConvolutionWorkloadType, typename armnn::DataType DataType>
@@ -465,11 +467,11 @@
     TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
                                                                : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
 
-    BOOST_TEST((inputHandle->GetShape() == inputShape));
-    BOOST_TEST((outputHandle->GetShape() == outputShape));
+    CHECK((inputHandle->GetShape() == inputShape));
+    CHECK((outputHandle->GetShape() == outputShape));
 }
 
-BOOST_AUTO_TEST_CASE(CreateDepthwiseConvolutionFloat32NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateDepthwiseConvolutionFloat32NhwcWorkload")
 {
     ClDepthwiseConvolutionWorkloadTest<ClDepthwiseConvolutionWorkload, DataType::Float32>(DataLayout::NHWC);
 }
@@ -488,22 +490,22 @@
     auto inputHandle  = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
     auto predResult = CompareIClTensorHandleShape(inputHandle, {2, 3, 6, 6});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     predResult = CompareIClTensorHandleShape(outputHandle, {2, 2, 6, 6});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloatWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateDirectConvolution2dFloatWorkload")
 {
     ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateDirectConvolution2dFloat16Workload")
 {
     ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dUint8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateDirectConvolution2dUint8Workload")
 {
     ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::QAsymmU8>();
 }
@@ -523,18 +525,18 @@
     auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
     auto predResult = CompareIClTensorHandleShape(inputHandle, {3, 1, 4, 5});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     predResult = CompareIClTensorHandleShape(outputHandle, {3, 7});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
 
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloatWorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateFullyConnectedFloatWorkloadTest")
 {
     ClCreateFullyConnectedWorkloadTest<ClFullyConnectedWorkload, armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat16WorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateFullyConnectedFloat16WorkloadTest")
 {
     ClCreateFullyConnectedWorkloadTest<ClFullyConnectedWorkload, armnn::DataType::Float16>();
 }
@@ -558,26 +560,26 @@
     TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({3, 5, 5, 1})
                                                                : std::initializer_list<unsigned int>({3, 1, 5, 5});
 
-    BOOST_TEST((inputHandle->GetShape() == inputShape));
-    BOOST_TEST((outputHandle->GetShape() == outputShape));
+    CHECK((inputHandle->GetShape() == inputShape));
+    CHECK((outputHandle->GetShape() == outputShape));
 }
 
-BOOST_AUTO_TEST_CASE(CreateNormalizationFloat32NchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateNormalizationFloat32NchwWorkload")
 {
     ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateNormalizationFloat16NchwWorkload")
 {
     ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateNormalizationFloat32NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateNormalizationFloat32NhwcWorkload")
 {
     ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateNormalizationFloat16NhwcWorkload")
 {
     ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
 }
@@ -601,26 +603,26 @@
     auto inputHandle  = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
-    BOOST_TEST((inputHandle->GetShape() == inputShape));
-    BOOST_TEST((outputHandle->GetShape() == outputShape));
+    CHECK((inputHandle->GetShape() == inputShape));
+    CHECK((outputHandle->GetShape() == outputShape));
 }
 
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreatePooling2dFloatNchwWorkload")
 {
     ClPooling2dWorkloadTest<armnn::DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreatePooling2dFloatNhwcWorkload")
 {
     ClPooling2dWorkloadTest<armnn::DataType::Float32>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16NchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreatePooling2dFloat16NchwWorkload")
 {
     ClPooling2dWorkloadTest<armnn::DataType::Float16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreatePooling2dFloat16NhwcWorkload")
 {
     ClPooling2dWorkloadTest<armnn::DataType::Float16>(DataLayout::NHWC);
 }
@@ -647,22 +649,22 @@
     auto alphaHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[1]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
-    BOOST_TEST((inputHandle->GetShape() == inputShape));
-    BOOST_TEST((alphaHandle->GetShape() == alphaShape));
-    BOOST_TEST((outputHandle->GetShape() == outputShape));
+    CHECK((inputHandle->GetShape() == inputShape));
+    CHECK((alphaHandle->GetShape() == alphaShape));
+    CHECK((outputHandle->GetShape() == outputShape));
 }
 
-BOOST_AUTO_TEST_CASE(CreatePreluFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreatePreluFloat16Workload")
 {
     ClCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::Float16);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePreluFloatWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreatePreluFloatWorkload")
 {
     ClCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::Float32);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreatePreluUint8Workload")
 {
     ClCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::QAsymmU8);
 }
@@ -682,22 +684,22 @@
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
     auto predResult = CompareIClTensorHandleShape(inputHandle, {4, 1});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     predResult = CompareIClTensorHandleShape(outputHandle, {1, 4});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CreateReshapeFloatWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateReshapeFloatWorkload")
 {
     ClCreateReshapeWorkloadTest<armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateReshapeFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateReshapeFloat16Workload")
 {
     ClCreateReshapeWorkloadTest<armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateReshapeUint8Workload")
 {
     ClCreateReshapeWorkloadTest<armnn::DataType::QAsymmU8>();
 }
@@ -729,28 +731,28 @@
     }
 
     auto predResult = CompareIClTensorHandleShape(inputHandle, {4, 1});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     predResult = CompareIClTensorHandleShape(outputHandle, {4, 1});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
 
-BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32WorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSoftmaxFloat32WorkloadTest")
 {
     ClSoftmaxWorkloadTest<ClSoftmaxWorkload, armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16WorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSoftmaxFloat16WorkloadTest")
 {
     ClSoftmaxWorkloadTest<ClSoftmaxWorkload, armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSoftmaxQAsymmU8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSoftmaxQAsymmU8Workload")
 {
     ClSoftmaxWorkloadTest<ClSoftmaxWorkload, armnn::DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSoftmaxQAsymmS8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSoftmaxQAsymmS8Workload")
 {
     ClSoftmaxWorkloadTest<ClSoftmaxWorkload, armnn::DataType::QAsymmS8>();
 }
@@ -768,27 +770,27 @@
     SplitterQueueDescriptor queueDescriptor = workload->GetData();
     auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto predResult = CompareIClTensorHandleShape(inputHandle, {5, 7, 7});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 
     auto outputHandle1 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[1]);
     predResult = CompareIClTensorHandleShape(outputHandle1, {2, 7, 7});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 
     auto outputHandle2 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[2]);
     predResult = CompareIClTensorHandleShape(outputHandle2, {2, 7, 7});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 
     auto outputHandle0 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
     predResult = CompareIClTensorHandleShape(outputHandle0, {1, 7, 7});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterFloatWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSplitterFloatWorkload")
 {
     ClSplitterWorkloadTest<armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSplitterFloat16Workload")
 {
     ClSplitterWorkloadTest<armnn::DataType::Float16>();
 }
@@ -819,35 +821,35 @@
     armnn::ClSubTensorHandle* mIn0 = dynamic_cast<armnn::ClSubTensorHandle*>(wlConcat->GetData().m_Inputs[0]);
     armnn::ClSubTensorHandle* mIn1 = dynamic_cast<armnn::ClSubTensorHandle*>(wlConcat->GetData().m_Inputs[1]);
 
-    BOOST_TEST(sOut0);
-    BOOST_TEST(sOut1);
-    BOOST_TEST(mIn0);
-    BOOST_TEST(mIn1);
+    CHECK(sOut0);
+    CHECK(sOut1);
+    CHECK(mIn0);
+    CHECK(mIn1);
 
     //Flipped order of inputs/outputs.
     bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);
-    BOOST_TEST(validDataPointers);
+    CHECK(validDataPointers);
 
 
     //Also make sure that the inputs are subtensors of one tensor and outputs are sub tensors of another tensor.
     bool validSubTensorParents = (mIn0->GetTensor().parent() == mIn1->GetTensor().parent())
                                     && (sOut0->GetTensor().parent() == sOut1->GetTensor().parent());
 
-    BOOST_TEST(validSubTensorParents);
+    CHECK(validSubTensorParents);
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloatWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSplitterConcatFloatWorkload")
 {
     ClSplitterConcatTest<armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSplitterConcatFloat16Workload")
 {
     ClSplitterConcatTest<armnn::DataType::Float16>();
 }
 
 
-BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputs)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSingleOutputMultipleInputs")
 {
     // Test that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer.
     // We create a splitter with two outputs; each of those outputs is used by two different activation layers.
@@ -875,24 +877,24 @@
     armnn::ClSubTensorHandle* activ1_1Im = dynamic_cast<armnn::ClSubTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);
 
 
-    BOOST_TEST(sOut0);
-    BOOST_TEST(sOut1);
-    BOOST_TEST(activ0_0Im);
-    BOOST_TEST(activ0_1Im);
-    BOOST_TEST(activ1_0Im);
-    BOOST_TEST(activ1_1Im);
+    CHECK(sOut0);
+    CHECK(sOut1);
+    CHECK(activ0_0Im);
+    CHECK(activ0_1Im);
+    CHECK(activ1_0Im);
+    CHECK(activ1_1Im);
 
     bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
                              (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);
 
-    BOOST_TEST(validDataPointers);
+    CHECK(validDataPointers);
 }
 
 #if defined(ARMNNREF_ENABLED)
 
 // This test unit needs the reference backend; it's not available if the reference backend is not built
 
-BOOST_AUTO_TEST_CASE(CreateMemCopyWorkloadsCl)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateMemCopyWorkloadsCl")
 {
     ClWorkloadFactory factory =
         ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
@@ -922,26 +924,26 @@
     TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({ 5, 20, 50, 67 })
                                                                : std::initializer_list<unsigned int>({ 5, 50, 67, 20 });
 
-    BOOST_TEST((inputHandle->GetShape() == inputShape));
-    BOOST_TEST((outputHandle->GetShape() == outputShape));
+    CHECK((inputHandle->GetShape() == inputShape));
+    CHECK((outputHandle->GetShape() == outputShape));
 }
 
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloatNchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateL2NormalizationFloatNchwWorkload")
 {
     ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloatNhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateL2NormalizationFloatNhwcWorkload")
 {
     ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateL2NormalizationFloat16NchwWorkload")
 {
     ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateL2NormalizationFloat16NhwcWorkload")
 {
     ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
 }
@@ -961,12 +963,12 @@
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
     auto predResult = CompareIClTensorHandleShape(inputHandle, {4, 1});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     predResult = CompareIClTensorHandleShape(outputHandle, {4, 1});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CreateLogSoftmaxFloat32WorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateLogSoftmaxFloat32WorkloadTest")
 {
     ClCreateLogSoftmaxWorkloadTest<ClLogSoftmaxWorkload, armnn::DataType::Float32>();
 }
@@ -984,12 +986,12 @@
     auto inputHandle  = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[1]);
     auto predResult = CompareIClTensorHandleShape(inputHandle, {2, 2});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     predResult = CompareIClTensorHandleShape(outputHandle, {2, 4});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CreateLSTMWorkloadFloatWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateLSTMWorkloadFloatWorkload")
 {
     ClCreateLstmWorkloadTest<ClLstmFloatWorkload>();
 }
@@ -1013,44 +1015,44 @@
     {
         case DataLayout::NHWC:
             predResult = CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 });
-            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
             predResult = CompareIClTensorHandleShape(outputHandle, { 2, 2, 2, 3 });
-            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
             break;
         default: // DataLayout::NCHW
             predResult = CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 });
-            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
             predResult = CompareIClTensorHandleShape(outputHandle, { 2, 3, 2, 2 });
-            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     }
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeFloat32NchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateResizeFloat32NchwWorkload")
 {
     ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeFloat16NchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateResizeFloat16NchwWorkload")
 {
     ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeUint8NchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateResizeUint8NchwWorkload")
 {
     ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeFloat32NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateResizeFloat32NhwcWorkload")
 {
     ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeFloat16NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateResizeFloat16NhwcWorkload")
 {
     ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeUint8NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateResizeUint8NhwcWorkload")
 {
     ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
 }
@@ -1071,22 +1073,22 @@
 
     // The first dimension (batch size) in both input and output is singular thus it has been reduced by ACL.
     auto predResult = CompareIClTensorHandleShape(inputHandle, {  1, 3, 7, 4 });
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     predResult = CompareIClTensorHandleShape(outputHandle, { 1, 4 });
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CreateMeanFloat32Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateMeanFloat32Workload")
 {
     ClMeanWorkloadTest<ClMeanWorkload, armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateMeanFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateMeanFloat16Workload")
 {
     ClMeanWorkloadTest<ClMeanWorkload, armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateMeanUint8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateMeanUint8Workload")
 {
     ClMeanWorkloadTest<ClMeanWorkload, armnn::DataType::QAsymmU8>();
 }
@@ -1107,39 +1109,39 @@
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
     auto predResult = CompareIClTensorHandleShape(inputHandle0, { 2, 3, 2, 5 });
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     predResult = CompareIClTensorHandleShape(inputHandle1, { 2, 3, 2, 5 });
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     predResult = CompareIClTensorHandleShape(outputHandle, outputShape);
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConcatDim0Float32Workload")
 {
     ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConcatDim1Float32Workload")
 {
     ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConcatDim3Float32Workload")
 {
     ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConcatDim0Uint8Workload")
 {
     ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QAsymmU8>({ 4, 3, 2, 5 }, 0);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConcatDim1Uint8Workload")
 {
     ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 6, 2, 5 }, 1);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConcatDim3Uint8Workload")
 {
     ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 }, 3);
 }
@@ -1158,27 +1160,27 @@
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
     auto predResult = CompareIClTensorHandleShape(inputHandle, { 1, 2, 2, 1 });
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     predResult = CompareIClTensorHandleShape(outputHandle, { 1, 1, 1, 4 });
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthFloat32Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSpaceToDepthFloat32Workload")
 {
     ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSpaceToDepthFloat16Workload")
 {
     ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthQAsymm8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSpaceToDepthQAsymm8Workload")
 {
     ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthQSymm16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSpaceToDepthQSymm16Workload")
 {
     ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::QSymmS16>();
 }
@@ -1206,24 +1208,24 @@
     {
         auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[i]);
         auto predResult1 = CompareIClTensorHandleShape(inputHandle, inputShape);
-        BOOST_TEST(predResult1.m_Result, predResult1.m_Message.str());
+        CHECK_MESSAGE(predResult1.m_Result, predResult1.m_Message.str());
     }
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
     auto predResult2 = CompareIClTensorHandleShape(outputHandle, outputShape);
-    BOOST_TEST(predResult2.m_Result, predResult2.m_Message.str());
+    CHECK_MESSAGE(predResult2.m_Result, predResult2.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CreateStackFloat32Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateStackFloat32Workload")
 {
     ClCreateStackWorkloadTest<armnn::DataType::Float32>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
 }
 
-BOOST_AUTO_TEST_CASE(CreateStackFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateStackFloat16Workload")
 {
     ClCreateStackWorkloadTest<armnn::DataType::Float16>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
 }
 
-BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateStackUint8Workload")
 {
     ClCreateStackWorkloadTest<armnn::DataType::QAsymmU8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
 }
@@ -1239,19 +1241,19 @@
     QLstmQueueDescriptor queueDescriptor = workload->GetData();
 
     IAclTensorHandle* inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    BOOST_TEST((inputHandle->GetShape() == TensorShape({2, 4})));
-    BOOST_TEST((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8_SIGNED));
+    CHECK((inputHandle->GetShape() == TensorShape({2, 4})));
+    CHECK((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8_SIGNED));
 
     IAclTensorHandle* cellStateOutHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
-    BOOST_TEST((cellStateOutHandle->GetShape() == TensorShape({2, 4})));
-    BOOST_TEST((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16));
+    CHECK((cellStateOutHandle->GetShape() == TensorShape({2, 4})));
+    CHECK((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16));
 
     IAclTensorHandle* outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[2]);
-    BOOST_TEST((outputHandle->GetShape() == TensorShape({2, 4})));
-    BOOST_TEST((outputHandle->GetDataType() == arm_compute::DataType::QASYMM8_SIGNED));
+    CHECK((outputHandle->GetShape() == TensorShape({2, 4})));
+    CHECK((outputHandle->GetDataType() == arm_compute::DataType::QASYMM8_SIGNED));
 }
 
-BOOST_AUTO_TEST_CASE(CreateQLstmWorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateQLstmWorkloadTest")
 {
     ClCreateQLstmWorkloadTest<ClQLstmWorkload>();
 }
@@ -1270,29 +1272,29 @@
     QuantizedLstmQueueDescriptor queueDescriptor = workload->GetData();
 
     IAclTensorHandle* inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    BOOST_TEST((inputHandle->GetShape() == TensorShape({2, 2})));
-    BOOST_TEST((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8));
+    CHECK((inputHandle->GetShape() == TensorShape({2, 2})));
+    CHECK((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8));
 
     IAclTensorHandle* cellStateInHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
-    BOOST_TEST((cellStateInHandle->GetShape() == TensorShape({2, 4})));
-    BOOST_TEST((cellStateInHandle->GetDataType() == arm_compute::DataType::QSYMM16));
+    CHECK((cellStateInHandle->GetShape() == TensorShape({2, 4})));
+    CHECK((cellStateInHandle->GetDataType() == arm_compute::DataType::QSYMM16));
 
     IAclTensorHandle* outputStateInHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[2]);
-    BOOST_TEST((outputStateInHandle->GetShape() == TensorShape({2, 4})));
-    BOOST_TEST((outputStateInHandle->GetDataType() == arm_compute::DataType::QASYMM8));
+    CHECK((outputStateInHandle->GetShape() == TensorShape({2, 4})));
+    CHECK((outputStateInHandle->GetDataType() == arm_compute::DataType::QASYMM8));
 
     IAclTensorHandle* cellStateOutHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST((cellStateOutHandle->GetShape() == TensorShape({2, 4})));
-    BOOST_TEST((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16));
+    CHECK((cellStateOutHandle->GetShape() == TensorShape({2, 4})));
+    CHECK((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16));
 
     IAclTensorHandle* outputStateOutHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
-    BOOST_TEST((outputStateOutHandle->GetShape() == TensorShape({2, 4})));
-    BOOST_TEST((outputStateOutHandle->GetDataType() == arm_compute::DataType::QASYMM8));
+    CHECK((outputStateOutHandle->GetShape() == TensorShape({2, 4})));
+    CHECK((outputStateOutHandle->GetDataType() == arm_compute::DataType::QASYMM8));
 }
 
-BOOST_AUTO_TEST_CASE(CreateQuantizedLstmWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateQuantizedLstmWorkload")
 {
     ClCreateQuantizedLstmWorkloadTest<ClQuantizedLstmWorkload>();
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/cl/test/ClEndToEndTests.cpp b/src/backends/cl/test/ClEndToEndTests.cpp
index edee368..9e0137e 100644
--- a/src/backends/cl/test/ClEndToEndTests.cpp
+++ b/src/backends/cl/test/ClEndToEndTests.cpp
@@ -21,14 +21,14 @@
 #include <backendsCommon/test/SplitterEndToEndTestImpl.hpp>
 #include <backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(ClEndToEnd)
-
-std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::GpuAcc};
+TEST_SUITE("ClEndToEnd")
+{
+std::vector<armnn::BackendId> clDefaultBackends = {armnn::Compute::GpuAcc};
 
 // Abs
-BOOST_AUTO_TEST_CASE(ClAbsEndToEndTestFloat32)
+TEST_CASE("ClAbsEndToEndTestFloat32")
 {
     std::vector<float> expectedOutput =
     {
@@ -36,482 +36,482 @@
         3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
     };
 
-    ElementwiseUnarySimpleEndToEnd<armnn::DataType::Float32>(defaultBackends,
+    ElementwiseUnarySimpleEndToEnd<armnn::DataType::Float32>(clDefaultBackends,
                                                              UnaryOperation::Abs,
                                                              expectedOutput);
 }
 
 // Constant
-BOOST_AUTO_TEST_CASE(ConstantUsage_Cl_Float32)
+TEST_CASE("ConstantUsage_Cl_Float32")
 {
-    ConstantUsageFloat32Test(defaultBackends);
+    ConstantUsageFloat32Test(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim0Test)
+TEST_CASE("ClConcatEndToEndDim0Test")
 {
-    ConcatDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ConcatDim0EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim0Uint8Test)
+TEST_CASE("ClConcatEndToEndDim0Uint8Test")
 {
-    ConcatDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ConcatDim0EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim1Test)
+TEST_CASE("ClConcatEndToEndDim1Test")
 {
-    ConcatDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ConcatDim1EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim1Uint8Test)
+TEST_CASE("ClConcatEndToEndDim1Uint8Test")
 {
-    ConcatDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ConcatDim1EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim3Test)
+TEST_CASE("ClConcatEndToEndDim3Test")
 {
-    ConcatDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ConcatDim3EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim3Uint8Test)
+TEST_CASE("ClConcatEndToEndDim3Uint8Test")
 {
-    ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
 // DepthToSpace
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwFloat32)
+TEST_CASE("DephtToSpaceEndToEndNchwFloat32")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NCHW);
+    DepthToSpaceEndToEnd<armnn::DataType::Float32>(clDefaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwFloat16)
+TEST_CASE("DephtToSpaceEndToEndNchwFloat16")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::Float16>(defaultBackends, armnn::DataLayout::NCHW);
+    DepthToSpaceEndToEnd<armnn::DataType::Float16>(clDefaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwUint8)
+TEST_CASE("DephtToSpaceEndToEndNchwUint8")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
+    DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwInt16)
+TEST_CASE("DephtToSpaceEndToEndNchwInt16")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
+    DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(clDefaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat32)
+TEST_CASE("DephtToSpaceEndToEndNhwcFloat32")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NHWC);
+    DepthToSpaceEndToEnd<armnn::DataType::Float32>(clDefaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat16)
+TEST_CASE("DephtToSpaceEndToEndNhwcFloat16")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::Float16>(defaultBackends, armnn::DataLayout::NHWC);
+    DepthToSpaceEndToEnd<armnn::DataType::Float16>(clDefaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcUint8)
+TEST_CASE("DephtToSpaceEndToEndNhwcUint8")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
+    DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcInt16)
+TEST_CASE("DephtToSpaceEndToEndNhwcInt16")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
+    DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(clDefaultBackends, armnn::DataLayout::NHWC);
 }
 
 // Dequantize
-BOOST_AUTO_TEST_CASE(DequantizeEndToEndSimpleTest)
+TEST_CASE("DequantizeEndToEndSimpleTest")
 {
-    DequantizeEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
+    DequantizeEndToEndSimple<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(DequantizeEndToEndOffsetTest)
+TEST_CASE("DequantizeEndToEndOffsetTest")
 {
-    DequantizeEndToEndOffset<armnn::DataType::QAsymmU8>(defaultBackends);
+    DequantizeEndToEndOffset<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClStridedSliceInvalidSliceEndToEndTest)
+TEST_CASE("ClStridedSliceInvalidSliceEndToEndTest")
 {
-    StridedSliceInvalidSliceEndToEndTest(defaultBackends);
+    StridedSliceInvalidSliceEndToEndTest(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClEluEndToEndTestFloat32)
+TEST_CASE("ClEluEndToEndTestFloat32")
 {
-    EluEndToEndTest<armnn::DataType::Float32>(defaultBackends);
+    EluEndToEndTest<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClEluEndToEndTestFloat16)
+TEST_CASE("ClEluEndToEndTestFloat16")
 {
-    EluEndToEndTest<armnn::DataType::Float16>(defaultBackends);
+    EluEndToEndTest<armnn::DataType::Float16>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClGreaterSimpleEndToEndTest)
+TEST_CASE("ClGreaterSimpleEndToEndTest")
 {
     const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0,  1, 1, 1, 1,
                                                 0, 0, 0, 0,  0, 0, 0, 0 });
 
-    ComparisonSimpleEndToEnd<armnn::DataType::Float32>(defaultBackends,
+    ComparisonSimpleEndToEnd<armnn::DataType::Float32>(clDefaultBackends,
                                                        ComparisonOperation::Greater,
                                                        expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(ClGreaterSimpleEndToEndUint8Test)
+TEST_CASE("ClGreaterSimpleEndToEndUint8Test")
 {
     const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0,  1, 1, 1, 1,
                                                 0, 0, 0, 0,  0, 0, 0, 0 });
 
-    ComparisonSimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
+    ComparisonSimpleEndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends,
                                                                ComparisonOperation::Greater,
                                                                expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(ClGreaterBroadcastEndToEndTest)
+TEST_CASE("ClGreaterBroadcastEndToEndTest")
 {
     const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
                                                 1, 1, 1, 1, 1, 1 });
 
-    ComparisonBroadcastEndToEnd<armnn::DataType::Float32>(defaultBackends,
+    ComparisonBroadcastEndToEnd<armnn::DataType::Float32>(clDefaultBackends,
                                                           ComparisonOperation::Greater,
                                                           expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(ClGreaterBroadcastEndToEndUint8Test)
+TEST_CASE("ClGreaterBroadcastEndToEndUint8Test")
 {
     const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
                                                 1, 1, 1, 1, 1, 1 });
 
-    ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
+    ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends,
                                                                   ComparisonOperation::Greater,
                                                                   expectedOutput);
 }
 
 // HardSwish
-BOOST_AUTO_TEST_CASE(ClHardSwishEndToEndTestFloat32)
+TEST_CASE("ClHardSwishEndToEndTestFloat32")
 {
-    HardSwishEndToEndTest<armnn::DataType::Float32>(defaultBackends);
+    HardSwishEndToEndTest<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClHardSwishEndToEndTestFloat16)
+TEST_CASE("ClHardSwishEndToEndTestFloat16")
 {
-    HardSwishEndToEndTest<armnn::DataType::Float16>(defaultBackends);
+    HardSwishEndToEndTest<armnn::DataType::Float16>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClHardSwishEndToEndTestQAsymmS8)
+TEST_CASE("ClHardSwishEndToEndTestQAsymmS8")
 {
-    HardSwishEndToEndTest<armnn::DataType::QAsymmS8>(defaultBackends);
+    HardSwishEndToEndTest<armnn::DataType::QAsymmS8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClHardSwishEndToEndTestQAsymmU8)
+TEST_CASE("ClHardSwishEndToEndTestQAsymmU8")
 {
-    HardSwishEndToEndTest<armnn::DataType::QAsymmU8>(defaultBackends);
+    HardSwishEndToEndTest<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClHardSwishEndToEndTestQSymmS16)
+TEST_CASE("ClHardSwishEndToEndTestQSymmS16")
 {
-    HardSwishEndToEndTest<armnn::DataType::QSymmS16>(defaultBackends);
+    HardSwishEndToEndTest<armnn::DataType::QSymmS16>(clDefaultBackends);
 }
 
 // InstanceNormalization
-BOOST_AUTO_TEST_CASE(ClInstanceNormalizationNhwcEndToEndTest1)
+TEST_CASE("ClInstanceNormalizationNhwcEndToEndTest1")
 {
-    InstanceNormalizationNhwcEndToEndTest1(defaultBackends);
+    InstanceNormalizationNhwcEndToEndTest1(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClInstanceNormalizationNchwEndToEndTest1)
+TEST_CASE("ClInstanceNormalizationNchwEndToEndTest1")
 {
-    InstanceNormalizationNchwEndToEndTest1(defaultBackends);
+    InstanceNormalizationNchwEndToEndTest1(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClInstanceNormalizationNhwcEndToEndTest2)
+TEST_CASE("ClInstanceNormalizationNhwcEndToEndTest2")
 {
-    InstanceNormalizationNhwcEndToEndTest2(defaultBackends);
+    InstanceNormalizationNhwcEndToEndTest2(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClInstanceNormalizationNchwEndToEndTest2)
+TEST_CASE("ClInstanceNormalizationNchwEndToEndTest2")
 {
-    InstanceNormalizationNchwEndToEndTest2(defaultBackends);
+    InstanceNormalizationNchwEndToEndTest2(clDefaultBackends);
 }
 
 // Fill
-BOOST_AUTO_TEST_CASE(ClFillEndToEndTest)
+TEST_CASE("ClFillEndToEndTest")
 {
-    FillEndToEnd<armnn::DataType::Float32>(defaultBackends);
+    FillEndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefFillEndToEndTestFloat16)
+TEST_CASE("RefFillEndToEndTestFloat16")
 {
-    FillEndToEnd<armnn::DataType::Float16>(defaultBackends);
+    FillEndToEnd<armnn::DataType::Float16>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClFillEndToEndTestInt32)
+TEST_CASE("ClFillEndToEndTestInt32")
 {
-    FillEndToEnd<armnn::DataType::Signed32>(defaultBackends);
+    FillEndToEnd<armnn::DataType::Signed32>(clDefaultBackends);
 }
 
 // Prelu
-BOOST_AUTO_TEST_CASE(ClPreluEndToEndFloat32Test)
+TEST_CASE("ClPreluEndToEndFloat32Test")
 {
-    PreluEndToEndNegativeTest<armnn::DataType::Float32>(defaultBackends);
+    PreluEndToEndNegativeTest<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClPreluEndToEndTestUint8)
+TEST_CASE("ClPreluEndToEndTestUint8")
 {
-    PreluEndToEndPositiveTest<armnn::DataType::QAsymmU8>(defaultBackends);
+    PreluEndToEndPositiveTest<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSpaceToDepthNhwcEndToEndTest1)
+TEST_CASE("ClSpaceToDepthNhwcEndToEndTest1")
 {
-    SpaceToDepthNhwcEndToEndTest1(defaultBackends);
+    SpaceToDepthNhwcEndToEndTest1(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSpaceToDepthNchwEndToEndTest1)
+TEST_CASE("ClSpaceToDepthNchwEndToEndTest1")
 {
-    SpaceToDepthNchwEndToEndTest1(defaultBackends);
+    SpaceToDepthNchwEndToEndTest1(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSpaceToDepthNhwcEndToEndTest2)
+TEST_CASE("ClSpaceToDepthNhwcEndToEndTest2")
 {
-    SpaceToDepthNhwcEndToEndTest2(defaultBackends);
+    SpaceToDepthNhwcEndToEndTest2(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSpaceToDepthNchwEndToEndTest2)
+TEST_CASE("ClSpaceToDepthNchwEndToEndTest2")
 {
-    SpaceToDepthNchwEndToEndTest2(defaultBackends);
+    SpaceToDepthNchwEndToEndTest2(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter1dEndToEndTest)
+TEST_CASE("ClSplitter1dEndToEndTest")
 {
-    Splitter1dEndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter1dEndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter1dEndToEndUint8Test)
+TEST_CASE("ClSplitter1dEndToEndUint8Test")
 {
-    Splitter1dEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter1dEndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter2dDim0EndToEndTest)
+TEST_CASE("ClSplitter2dDim0EndToEndTest")
 {
-    Splitter2dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter2dDim0EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter2dDim1EndToEndTest)
+TEST_CASE("ClSplitter2dDim1EndToEndTest")
 {
-    Splitter2dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter2dDim1EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter2dDim0EndToEndUint8Test)
+TEST_CASE("ClSplitter2dDim0EndToEndUint8Test")
 {
-    Splitter2dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter2dDim0EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter2dDim1EndToEndUint8Test)
+TEST_CASE("ClSplitter2dDim1EndToEndUint8Test")
 {
-    Splitter2dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter2dDim1EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter3dDim0EndToEndTest)
+TEST_CASE("ClSplitter3dDim0EndToEndTest")
 {
-    Splitter3dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter3dDim0EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter3dDim1EndToEndTest)
+TEST_CASE("ClSplitter3dDim1EndToEndTest")
 {
-    Splitter3dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter3dDim1EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter3dDim2EndToEndTest)
+TEST_CASE("ClSplitter3dDim2EndToEndTest")
 {
-    Splitter3dDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter3dDim2EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter3dDim0EndToEndUint8Test)
+TEST_CASE("ClSplitter3dDim0EndToEndUint8Test")
 {
-    Splitter3dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter3dDim0EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter3dDim1EndToEndUint8Test)
+TEST_CASE("ClSplitter3dDim1EndToEndUint8Test")
 {
-    Splitter3dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter3dDim1EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter3dDim2EndToEndUint8Test)
+TEST_CASE("ClSplitter3dDim2EndToEndUint8Test")
 {
-    Splitter3dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter3dDim2EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter4dDim0EndToEndTest)
+TEST_CASE("ClSplitter4dDim0EndToEndTest")
 {
-    Splitter4dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter4dDim0EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter4dDim1EndToEndTest)
+TEST_CASE("ClSplitter4dDim1EndToEndTest")
 {
-    Splitter4dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter4dDim1EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter4dDim2EndToEndTest)
+TEST_CASE("ClSplitter4dDim2EndToEndTest")
 {
-    Splitter4dDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter4dDim2EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter4dDim3EndToEndTest)
+TEST_CASE("ClSplitter4dDim3EndToEndTest")
 {
-    Splitter4dDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter4dDim3EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter4dDim0EndToEndUint8Test)
+TEST_CASE("ClSplitter4dDim0EndToEndUint8Test")
 {
-    Splitter4dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter4dDim0EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter4dDim1EndToEndUint8Test)
+TEST_CASE("ClSplitter4dDim1EndToEndUint8Test")
 {
-    Splitter4dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter4dDim1EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter4dDim2EndToEndUint8Test)
+TEST_CASE("ClSplitter4dDim2EndToEndUint8Test")
 {
-    Splitter4dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter4dDim2EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter4dDim3EndToEndUint8Test)
+TEST_CASE("ClSplitter4dDim3EndToEndUint8Test")
 {
-    Splitter4dDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter4dDim3EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
 // TransposeConvolution2d
-BOOST_AUTO_TEST_CASE(ClTransposeConvolution2dEndToEndFloatNchwTest)
+TEST_CASE("ClTransposeConvolution2dEndToEndFloatNchwTest")
 {
     TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
-        defaultBackends, armnn::DataLayout::NCHW);
+        clDefaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(ClTransposeConvolution2dEndToEndUint8NchwTest)
+TEST_CASE("ClTransposeConvolution2dEndToEndUint8NchwTest")
 {
     TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
-        defaultBackends, armnn::DataLayout::NCHW);
+        clDefaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(ClTransposeConvolution2dEndToEndFloatNhwcTest)
+TEST_CASE("ClTransposeConvolution2dEndToEndFloatNhwcTest")
 {
     TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
-        defaultBackends, armnn::DataLayout::NHWC);
+        clDefaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(ClTransposeConvolution2dEndToEndUint8NhwcTest)
+TEST_CASE("ClTransposeConvolution2dEndToEndUint8NhwcTest")
 {
     TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
-        defaultBackends, armnn::DataLayout::NHWC);
+        clDefaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(ClQuantizedLstmEndToEndTest)
+TEST_CASE("ClQuantizedLstmEndToEndTest")
 {
-    QuantizedLstmEndToEnd(defaultBackends);
+    QuantizedLstmEndToEnd(clDefaultBackends);
 }
 
 // ArgMinMax
-BOOST_AUTO_TEST_CASE(ClArgMaxSimpleTest)
+TEST_CASE("ClArgMaxSimpleTest")
 {
-    ArgMaxEndToEndSimple<armnn::DataType::Float32>(defaultBackends);
+    ArgMaxEndToEndSimple<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMinSimpleTest)
+TEST_CASE("ClArgMinSimpleTest")
 {
-    ArgMinEndToEndSimple<armnn::DataType::Float32>(defaultBackends);
+    ArgMinEndToEndSimple<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMaxAxis0Test)
+TEST_CASE("ClArgMaxAxis0Test")
 {
-    ArgMaxAxis0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMaxAxis0EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMinAxis0Test)
+TEST_CASE("ClArgMinAxis0Test")
 {
-    ArgMinAxis0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMinAxis0EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMaxAxis1Test)
+TEST_CASE("ClArgMaxAxis1Test")
 {
-    ArgMaxAxis1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMaxAxis1EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMinAxis1Test)
+TEST_CASE("ClArgMinAxis1Test")
 {
-    ArgMinAxis1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMinAxis1EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMaxAxis2Test)
+TEST_CASE("ClArgMaxAxis2Test")
 {
-    ArgMaxAxis2EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMaxAxis2EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMinAxis2Test)
+TEST_CASE("ClArgMinAxis2Test")
 {
-    ArgMinAxis2EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMinAxis2EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMaxAxis3Test)
+TEST_CASE("ClArgMaxAxis3Test")
 {
-    ArgMaxAxis3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMaxAxis3EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMinAxis3Test)
+TEST_CASE("ClArgMinAxis3Test")
 {
-    ArgMinAxis3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMinAxis3EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMaxSimpleTestQAsymmU8)
+TEST_CASE("ClArgMaxSimpleTestQAsymmU8")
 {
-    ArgMaxEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMaxEndToEndSimple<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMinSimpleTestQAsymmU8)
+TEST_CASE("ClArgMinSimpleTestQAsymmU8")
 {
-    ArgMinEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMinEndToEndSimple<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMaxAxis0TestQAsymmU8)
+TEST_CASE("ClArgMaxAxis0TestQAsymmU8")
 {
-    ArgMaxAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMaxAxis0EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMinAxis0TestQAsymmU8)
+TEST_CASE("ClArgMinAxis0TestQAsymmU8")
 {
-    ArgMinAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMinAxis0EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMaxAxis1TestQAsymmU8)
+TEST_CASE("ClArgMaxAxis1TestQAsymmU8")
 {
-    ArgMaxAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMaxAxis1EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMinAxis1TestQAsymmU8)
+TEST_CASE("ClArgMinAxis1TestQAsymmU8")
 {
-    ArgMinAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMinAxis1EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMaxAxis2TestQAsymmU8)
+TEST_CASE("ClArgMaxAxis2TestQAsymmU8")
 {
-    ArgMaxAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMaxAxis2EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMinAxis2TestQAsymmU8)
+TEST_CASE("ClArgMinAxis2TestQAsymmU8")
 {
-    ArgMinAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMinAxis2EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMaxAxis3TestQAsymmU8)
+TEST_CASE("ClArgMaxAxis3TestQAsymmU8")
 {
-    ArgMaxAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMaxAxis3EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMinAxis3TestQAsymmU8)
+TEST_CASE("ClArgMinAxis3TestQAsymmU8")
 {
-    ArgMinAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMinAxis3EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClQLstmEndToEndTest)
+TEST_CASE("ClQLstmEndToEndTest")
 {
-    QLstmEndToEnd(defaultBackends);
+    QLstmEndToEnd(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/cl/test/ClFallbackTests.cpp b/src/backends/cl/test/ClFallbackTests.cpp
index 183b8ca..7721206 100644
--- a/src/backends/cl/test/ClFallbackTests.cpp
+++ b/src/backends/cl/test/ClFallbackTests.cpp
@@ -7,11 +7,11 @@
 
 #include <test/GraphUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(ClFallback)
-
-BOOST_AUTO_TEST_CASE(ClImportEnabledFallbackToNeon)
+TEST_SUITE("ClFallback")
+{
+TEST_CASE("ClImportEnabledFallbackToNeon")
 {
     using namespace armnn;
 
@@ -62,18 +62,18 @@
     armnn::Layer* const layer6 = GetFirstLayerWithName(graph, "output");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layer0, layer1));
-    BOOST_TEST(CheckOrder(graph, layer1, layer2));
-    BOOST_TEST(CheckOrder(graph, layer2, layer3));
-    BOOST_TEST(CheckOrder(graph, layer3, layer4));
-    BOOST_TEST(CheckOrder(graph, layer4, layer5));
-    BOOST_TEST(CheckOrder(graph, layer5, layer6));
+    CHECK(CheckOrder(graph, layer0, layer1));
+    CHECK(CheckOrder(graph, layer1, layer2));
+    CHECK(CheckOrder(graph, layer2, layer3));
+    CHECK(CheckOrder(graph, layer3, layer4));
+    CHECK(CheckOrder(graph, layer4, layer5));
+    CHECK(CheckOrder(graph, layer5, layer6));
 
     // Use memory import between backends
-    BOOST_TEST((layer4->GetType() == LayerType::MemCopy));
+    CHECK((layer4->GetType() == LayerType::MemCopy));
 
     // Correctly use backend hint
-    BOOST_TEST((layer5->GetBackendId() == Compute::CpuAcc ));
+    CHECK((layer5->GetBackendId() == Compute::CpuAcc ));
 
     // Load it into the runtime. It should pass.
     NetworkId netId;
@@ -109,14 +109,14 @@
     size_t space = totalBytes + alignment + alignment;
     auto inputData0 = std::make_unique<uint8_t[]>(space);
     void* alignedInputPtr0 = inputData0.get();
-    BOOST_CHECK(std::align(alignment, totalBytes, alignedInputPtr0, space));
+    CHECK(std::align(alignment, totalBytes, alignedInputPtr0, space));
 
     auto* intputPtr0 = reinterpret_cast<float*>(alignedInputPtr0);
     std::copy(inputValue0.begin(), inputValue0.end(), intputPtr0);
 
     auto inputData1 = std::make_unique<uint8_t[]>(space);
     void* alignedInputPtr1 = inputData1.get();
-    BOOST_CHECK(std::align(alignment, totalBytes, alignedInputPtr1, space));
+    CHECK(std::align(alignment, totalBytes, alignedInputPtr1, space));
 
     auto* intputPtr1 = reinterpret_cast<float*>(alignedInputPtr1);
     std::copy(inputValue1.begin(), inputValue1.end(), intputPtr1);
@@ -145,19 +145,19 @@
 
     // Executed Subtraction using CpuAcc
     std::size_t found = dump.find("NeonSubtractionWorkload_Execute");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contain CopyMemGeneric
     found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Check output is as expected
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
 
     runtime->UnloadNetwork(netId);
 }
 
-BOOST_AUTO_TEST_CASE(ClImportDisabledFallbackToNeon)
+TEST_CASE("ClImportDisabledFallbackToNeon")
 {
     using namespace armnn;
 
@@ -207,18 +207,18 @@
     armnn::Layer* const layer6 = GetFirstLayerWithName(graph, "output");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layer0, layer1));
-    BOOST_TEST(CheckOrder(graph, layer1, layer2));
-    BOOST_TEST(CheckOrder(graph, layer2, layer3));
-    BOOST_TEST(CheckOrder(graph, layer3, layer4));
-    BOOST_TEST(CheckOrder(graph, layer4, layer5));
-    BOOST_TEST(CheckOrder(graph, layer5, layer6));
+    CHECK(CheckOrder(graph, layer0, layer1));
+    CHECK(CheckOrder(graph, layer1, layer2));
+    CHECK(CheckOrder(graph, layer2, layer3));
+    CHECK(CheckOrder(graph, layer3, layer4));
+    CHECK(CheckOrder(graph, layer4, layer5));
+    CHECK(CheckOrder(graph, layer5, layer6));
 
     // Use memory import between backends
-    BOOST_TEST((layer4->GetType() == LayerType::MemCopy));
+    CHECK((layer4->GetType() == LayerType::MemCopy));
 
     // Correctly use backend hint
-    BOOST_TEST((layer5->GetBackendId() == Compute::CpuAcc ));
+    CHECK((layer5->GetBackendId() == Compute::CpuAcc ));
 
     // Load it into the runtime. It should pass.
     NetworkId netId;
@@ -269,17 +269,17 @@
 
     // Executed Subtraction using CpuAcc
     std::size_t found = dump.find("NeonSubtractionWorkload_Execute");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contain CopyMemGeneric
     found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Check output is as expected
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(ClImportEnabledFallbackSubgraphToNeon)
+TEST_CASE("ClImportEnabledFallbackSubgraphToNeon")
 {
     using namespace armnn;
 
@@ -342,21 +342,21 @@
     armnn::Layer* const layer8 = GetFirstLayerWithName(graph, "output");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layer0, layer1));
-    BOOST_TEST(CheckOrder(graph, layer1, layer2));
-    BOOST_TEST(CheckOrder(graph, layer2, layer3));
-    BOOST_TEST(CheckOrder(graph, layer3, layer4));
-    BOOST_TEST(CheckOrder(graph, layer4, layer5));
-    BOOST_TEST(CheckOrder(graph, layer5, layer6));
-    BOOST_TEST(CheckOrder(graph, layer6, layer7));
-    BOOST_TEST(CheckOrder(graph, layer7, layer8));
+    CHECK(CheckOrder(graph, layer0, layer1));
+    CHECK(CheckOrder(graph, layer1, layer2));
+    CHECK(CheckOrder(graph, layer2, layer3));
+    CHECK(CheckOrder(graph, layer3, layer4));
+    CHECK(CheckOrder(graph, layer4, layer5));
+    CHECK(CheckOrder(graph, layer5, layer6));
+    CHECK(CheckOrder(graph, layer6, layer7));
+    CHECK(CheckOrder(graph, layer7, layer8));
 
     // Use memory import between backends
-    BOOST_TEST((layer4->GetType() == LayerType::MemCopy));
-    BOOST_TEST((layer6->GetType() == LayerType::MemCopy));
+    CHECK((layer4->GetType() == LayerType::MemCopy));
+    CHECK((layer6->GetType() == LayerType::MemCopy));
 
     // Correctly use backend hint
-    BOOST_TEST((layer5->GetBackendId() == Compute::CpuAcc ));
+    CHECK((layer5->GetBackendId() == Compute::CpuAcc ));
 
     // Load it into the runtime. It should pass.
     NetworkId netId;
@@ -388,14 +388,14 @@
     size_t space = totalBytes + alignment + alignment;
     auto inputData0 = std::make_unique<uint8_t[]>(space);
     void* alignedInputPtr0 = inputData0.get();
-    BOOST_CHECK(std::align(alignment, totalBytes, alignedInputPtr0, space));
+    CHECK(std::align(alignment, totalBytes, alignedInputPtr0, space));
 
     auto* intputPtr0 = reinterpret_cast<float*>(alignedInputPtr0);
     std::copy(inputValue0.begin(), inputValue0.end(), intputPtr0);
 
     auto inputData1 = std::make_unique<uint8_t[]>(space);
     void* alignedInputPtr1 = inputData1.get();
-    BOOST_CHECK(std::align(alignment, totalBytes, alignedInputPtr1, space));
+    CHECK(std::align(alignment, totalBytes, alignedInputPtr1, space));
 
     auto* intputPtr1 = reinterpret_cast<float*>(alignedInputPtr1);
     std::copy(inputValue1.begin(), inputValue1.end(), intputPtr1);
@@ -424,23 +424,23 @@
 
     // Executed Subtraction using CpuAcc
     std::size_t found = dump.find("NeonSubtractionWorkload_Execute");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Correctly switch back to GpuAcc
     found = dump.find("ClPooling2dWorkload_Execute");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contain CopyMemGeneric
     found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Check output is as expected
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
 
     runtime->UnloadNetwork(netId);
 }
 
-BOOST_AUTO_TEST_CASE(ClImportDisableFallbackSubgraphToNeon)
+TEST_CASE("ClImportDisableFallbackSubgraphToNeon")
 {
     using namespace armnn;
 
@@ -498,21 +498,21 @@
     armnn::Layer* const layer8 = GetFirstLayerWithName(graph, "output");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layer0, layer1));
-    BOOST_TEST(CheckOrder(graph, layer1, layer2));
-    BOOST_TEST(CheckOrder(graph, layer2, layer3));
-    BOOST_TEST(CheckOrder(graph, layer3, layer4));
-    BOOST_TEST(CheckOrder(graph, layer4, layer5));
-    BOOST_TEST(CheckOrder(graph, layer5, layer6));
-    BOOST_TEST(CheckOrder(graph, layer6, layer7));
-    BOOST_TEST(CheckOrder(graph, layer7, layer8));
+    CHECK(CheckOrder(graph, layer0, layer1));
+    CHECK(CheckOrder(graph, layer1, layer2));
+    CHECK(CheckOrder(graph, layer2, layer3));
+    CHECK(CheckOrder(graph, layer3, layer4));
+    CHECK(CheckOrder(graph, layer4, layer5));
+    CHECK(CheckOrder(graph, layer5, layer6));
+    CHECK(CheckOrder(graph, layer6, layer7));
+    CHECK(CheckOrder(graph, layer7, layer8));
 
     // Use memory import between backends
-    BOOST_TEST((layer4->GetType() == LayerType::MemCopy));
-    BOOST_TEST((layer6->GetType() == LayerType::MemCopy));
+    CHECK((layer4->GetType() == LayerType::MemCopy));
+    CHECK((layer6->GetType() == LayerType::MemCopy));
 
     // Correctly use backend hint
-    BOOST_TEST((layer5->GetBackendId() == Compute::CpuAcc ));
+    CHECK((layer5->GetBackendId() == Compute::CpuAcc ));
 
     // Load it into the runtime. It should pass.
     NetworkId netId;
@@ -560,18 +560,18 @@
 
     // Executed Subtraction using CpuAcc
     std::size_t found = dump.find("NeonSubtractionWorkload_Execute");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Correctly switch back to GpuAcc
     found = dump.find("ClPooling2dWorkload_Execute");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contain CopyMemGeneric
     found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Check output is as expected
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/cl/test/ClImportTensorHandleFactoryTests.cpp b/src/backends/cl/test/ClImportTensorHandleFactoryTests.cpp
index 0c6a9c6..fee40fd 100644
--- a/src/backends/cl/test/ClImportTensorHandleFactoryTests.cpp
+++ b/src/backends/cl/test/ClImportTensorHandleFactoryTests.cpp
@@ -3,14 +3,17 @@
 // SPDX-License-Identifier: MIT
 //
 
+#include <armnn/utility/Assert.hpp>
+
 #include <cl/ClImportTensorHandleFactory.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(ClImportTensorHandleFactoryTests)
+TEST_SUITE("ClImportTensorHandleFactoryTests")
+{
 using namespace armnn;
 
-BOOST_AUTO_TEST_CASE(ImportTensorFactoryAskedToCreateManagedTensorThrowsException)
+TEST_CASE("ImportTensorFactoryAskedToCreateManagedTensorThrowsException")
 {
     // Create the factory to import tensors.
     ClImportTensorHandleFactory factory(static_cast<MemorySourceFlags>(MemorySource::Malloc),
@@ -18,11 +21,11 @@
     TensorInfo tensorInfo;
     // This factory is designed to import the memory of tensors. Asking for a handle that requires
     // a memory manager should result in an exception.
-    BOOST_REQUIRE_THROW(factory.CreateTensorHandle(tensorInfo, true), InvalidArgumentException);
-    BOOST_REQUIRE_THROW(factory.CreateTensorHandle(tensorInfo, DataLayout::NCHW, true), InvalidArgumentException);
+    REQUIRE_THROWS_AS(factory.CreateTensorHandle(tensorInfo, true), InvalidArgumentException);
+    REQUIRE_THROWS_AS(factory.CreateTensorHandle(tensorInfo, DataLayout::NCHW, true), InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(ImportTensorFactoryCreateMallocTensorHandle)
+TEST_CASE("ImportTensorFactoryCreateMallocTensorHandle")
 {
     // Create the factory to import tensors.
     ClImportTensorHandleFactory factory(static_cast<MemorySourceFlags>(MemorySource::Malloc),
@@ -32,24 +35,24 @@
     // Start with the TensorInfo factory method. Create an import tensor handle and verify the data is
     // passed through correctly.
     auto tensorHandle = factory.CreateTensorHandle(tensorInfo);
-    BOOST_ASSERT(tensorHandle);
-    BOOST_ASSERT(tensorHandle->GetImportFlags() == static_cast<MemorySourceFlags>(MemorySource::Malloc));
-    BOOST_ASSERT(tensorHandle->GetShape() == tensorShape);
+    ARMNN_ASSERT(tensorHandle);
+    ARMNN_ASSERT(tensorHandle->GetImportFlags() == static_cast<MemorySourceFlags>(MemorySource::Malloc));
+    ARMNN_ASSERT(tensorHandle->GetShape() == tensorShape);
 
     // Same method but explicitly specifying isManaged = false.
     tensorHandle = factory.CreateTensorHandle(tensorInfo, false);
-    BOOST_CHECK(tensorHandle);
-    BOOST_ASSERT(tensorHandle->GetImportFlags() == static_cast<MemorySourceFlags>(MemorySource::Malloc));
-    BOOST_ASSERT(tensorHandle->GetShape() == tensorShape);
+    CHECK(tensorHandle);
+    ARMNN_ASSERT(tensorHandle->GetImportFlags() == static_cast<MemorySourceFlags>(MemorySource::Malloc));
+    ARMNN_ASSERT(tensorHandle->GetShape() == tensorShape);
 
     // Now try TensorInfo and DataLayout factory method.
     tensorHandle = factory.CreateTensorHandle(tensorInfo, DataLayout::NHWC);
-    BOOST_CHECK(tensorHandle);
-    BOOST_ASSERT(tensorHandle->GetImportFlags() == static_cast<MemorySourceFlags>(MemorySource::Malloc));
-    BOOST_ASSERT(tensorHandle->GetShape() == tensorShape);
+    CHECK(tensorHandle);
+    ARMNN_ASSERT(tensorHandle->GetImportFlags() == static_cast<MemorySourceFlags>(MemorySource::Malloc));
+    ARMNN_ASSERT(tensorHandle->GetShape() == tensorShape);
 }
 
-BOOST_AUTO_TEST_CASE(CreateSubtensorOfImportTensor)
+TEST_CASE("CreateSubtensorOfImportTensor")
 {
     // Create the factory to import tensors.
     ClImportTensorHandleFactory factory(static_cast<MemorySourceFlags>(MemorySource::Malloc),
@@ -63,12 +66,12 @@
     // Starting at an offset of 1x1.
     uint32_t origin[4] = { 1, 1, 0, 0 };
     auto subTensor     = factory.CreateSubTensorHandle(*tensorHandle, subTensorShape, origin);
-    BOOST_CHECK(subTensor);
-    BOOST_ASSERT(subTensor->GetShape() == subTensorShape);
-    BOOST_ASSERT(subTensor->GetParent() == tensorHandle.get());
+    CHECK(subTensor);
+    ARMNN_ASSERT(subTensor->GetShape() == subTensorShape);
+    ARMNN_ASSERT(subTensor->GetParent() == tensorHandle.get());
 }
 
-BOOST_AUTO_TEST_CASE(CreateSubtensorNonZeroXYIsInvalid)
+TEST_CASE("CreateSubtensorNonZeroXYIsInvalid")
 {
     // Create the factory to import tensors.
     ClImportTensorHandleFactory factory(static_cast<MemorySourceFlags>(MemorySource::Malloc),
@@ -84,10 +87,10 @@
     uint32_t origin[4] = { 0, 0, 1, 1 };
     auto subTensor     = factory.CreateSubTensorHandle(*tensorHandle, subTensorShape, origin);
     // We expect a nullptr.
-    BOOST_ASSERT(subTensor == nullptr);
+    ARMNN_ASSERT(subTensor == nullptr);
 }
 
-BOOST_AUTO_TEST_CASE(CreateSubtensorXYMustMatchParent)
+TEST_CASE("CreateSubtensorXYMustMatchParent")
 {
     // Create the factory to import tensors.
     ClImportTensorHandleFactory factory(static_cast<MemorySourceFlags>(MemorySource::Malloc),
@@ -102,10 +105,10 @@
     uint32_t origin[4] = { 1, 1, 0, 0 };
     auto subTensor     = factory.CreateSubTensorHandle(*tensorHandle, subTensorShape, origin);
     // We expect a nullptr.
-    BOOST_ASSERT(subTensor == nullptr);
+    ARMNN_ASSERT(subTensor == nullptr);
 }
 
-BOOST_AUTO_TEST_CASE(CreateSubtensorMustBeSmallerThanParent)
+TEST_CASE("CreateSubtensorMustBeSmallerThanParent")
 {
     // Create the factory to import tensors.
     ClImportTensorHandleFactory factory(static_cast<MemorySourceFlags>(MemorySource::Malloc),
@@ -119,7 +122,7 @@
     uint32_t origin[4] = { 1, 1, 0, 0 };
     // This should result in a nullptr.
     auto subTensor = factory.CreateSubTensorHandle(*tensorHandle, subTensorShape, origin);
-    BOOST_ASSERT(subTensor == nullptr);
+    ARMNN_ASSERT(subTensor == nullptr);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/cl/test/ClImportTensorHandleTests.cpp b/src/backends/cl/test/ClImportTensorHandleTests.cpp
index 3c8bd67..931729a 100644
--- a/src/backends/cl/test/ClImportTensorHandleTests.cpp
+++ b/src/backends/cl/test/ClImportTensorHandleTests.cpp
@@ -9,16 +9,17 @@
 #include <cl/ClImportTensorHandleFactory.hpp>
 #include <cl/test/ClContextControlFixture.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
+
 
 #include <armnn/IRuntime.hpp>
 #include <armnn/INetwork.hpp>
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(ClImportTensorHandleTests)
-
-BOOST_FIXTURE_TEST_CASE(ClMallocImport, ClContextControlFixture)
+TEST_SUITE("ClImportTensorHandleTests")
+{
+TEST_CASE_FIXTURE(ClContextControlFixture, "ClMallocImport")
 {
     ClImportTensorHandleFactory handleFactory(static_cast<MemorySourceFlags>(MemorySource::Malloc),
                                               static_cast<MemorySourceFlags>(MemorySource::Malloc));
@@ -44,10 +45,10 @@
     size_t space = totalBytes + alignment + alignment;
     auto testData = std::make_unique<uint8_t[]>(space);
     void* alignedPtr = testData.get();
-    BOOST_CHECK(std::align(alignment, totalBytes, alignedPtr, space));
+    CHECK(std::align(alignment, totalBytes, alignedPtr, space));
 
     // Import memory
-    BOOST_CHECK(handle->Import(alignedPtr, armnn::MemorySource::Malloc));
+    CHECK(handle->Import(alignedPtr, armnn::MemorySource::Malloc));
 
     // Input with negative values
     auto* typedPtr = reinterpret_cast<float*>(alignedPtr);
@@ -60,11 +61,11 @@
     // Validate result by checking that the output has no negative values
     for(unsigned int i = 0; i < numElements; ++i)
     {
-        BOOST_TEST(typedPtr[i] >= 0);
+        CHECK(typedPtr[i] >= 0);
     }
 }
 
-BOOST_FIXTURE_TEST_CASE(ClIncorrectMemorySourceImport, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "ClIncorrectMemorySourceImport")
 {
     ClImportTensorHandleFactory handleFactory(static_cast<MemorySourceFlags>(MemorySource::Malloc),
                                               static_cast<MemorySourceFlags>(MemorySource::Malloc));
@@ -84,13 +85,13 @@
     size_t space = totalBytes + alignment + alignment;
     auto testData = std::make_unique<uint8_t[]>(space);
     void* alignedPtr = testData.get();
-    BOOST_CHECK(std::align(alignment, totalBytes, alignedPtr, space));
+    CHECK(std::align(alignment, totalBytes, alignedPtr, space));
 
     // Import memory
-    BOOST_CHECK_THROW(handle->Import(alignedPtr, armnn::MemorySource::Undefined), MemoryImportException);
+    CHECK_THROWS_AS(handle->Import(alignedPtr, armnn::MemorySource::Undefined), MemoryImportException);
 }
 
-BOOST_FIXTURE_TEST_CASE(ClInvalidMemorySourceImport, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "ClInvalidMemorySourceImport")
 {
     MemorySource invalidMemSource = static_cast<MemorySource>(256);
     ClImportTensorHandleFactory handleFactory(static_cast<MemorySourceFlags>(invalidMemSource),
@@ -108,10 +109,10 @@
     };
 
     // Import non-support memory
-    BOOST_CHECK_THROW(handle->Import(inputData.data(), invalidMemSource), MemoryImportException);
+    CHECK_THROWS_AS(handle->Import(inputData.data(), invalidMemSource), MemoryImportException);
 }
 
-BOOST_FIXTURE_TEST_CASE(ClImportEndToEnd, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "ClImportEndToEnd")
 {
     // Create runtime in which test will run
     IRuntime::CreationOptions options;
@@ -143,7 +144,7 @@
     optOptions.m_ImportEnabled = true;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
-    BOOST_CHECK(optNet);
+    CHECK(optNet);
 
     // Loads it into the runtime.
     NetworkId netId;
@@ -158,7 +159,7 @@
     size_t space = totalBytes + alignment + alignment;
     auto inputData = std::make_unique<uint8_t[]>(space);
     void* alignedInputPtr = inputData.get();
-    BOOST_CHECK(std::align(alignment, totalBytes, alignedInputPtr, space));
+    CHECK(std::align(alignment, totalBytes, alignedInputPtr, space));
 
     // Input with negative values
     auto* intputPtr = reinterpret_cast<float*>(alignedInputPtr);
@@ -166,7 +167,7 @@
 
     auto outputData = std::make_unique<uint8_t[]>(space);
     void* alignedOutputPtr = outputData.get();
-    BOOST_CHECK(std::align(alignment, totalBytes, alignedOutputPtr, space));
+    CHECK(std::align(alignment, totalBytes, alignedOutputPtr, space));
     auto* outputPtr = reinterpret_cast<float*>(alignedOutputPtr);
     std::fill_n(outputPtr, numElements, -10.0f);
 
@@ -192,26 +193,26 @@
 
     // Contains ActivationWorkload
     std::size_t found = dump.find("ActivationWorkload");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contains SyncMemGeneric
     found = dump.find("SyncMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Does not contain CopyMemGeneric
     found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found == std::string::npos);
+    CHECK(found == std::string::npos);
 
     runtime->UnloadNetwork(netId);
 
     // Check output is as expected
     // Validate result by checking that the output has no negative values
     auto* outputResult = reinterpret_cast<float*>(alignedOutputPtr);
-    BOOST_TEST(outputResult);
+    CHECK(outputResult);
     for(unsigned int i = 0; i < numElements; ++i)
     {
-        BOOST_TEST(outputResult[i] >= 0);
+        CHECK(outputResult[i] >= 0);
     }
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/cl/test/ClJsonPrinterTests.cpp b/src/backends/cl/test/ClJsonPrinterTests.cpp
index d188a8e..2c24a53 100644
--- a/src/backends/cl/test/ClJsonPrinterTests.cpp
+++ b/src/backends/cl/test/ClJsonPrinterTests.cpp
@@ -8,16 +8,13 @@
 #include <cl/test/ClContextControlFixture.hpp>
 #include <backendsCommon/test/JsonPrinterTestImpl.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <vector>
 
-BOOST_FIXTURE_TEST_SUITE(ClJsonPrinter, ClProfilingContextControlFixture)
-
-BOOST_AUTO_TEST_CASE(SoftmaxProfilerJsonPrinterGpuAccTest)
+TEST_CASE_FIXTURE(ClProfilingContextControlFixture, "SoftmaxProfilerJsonPrinterGpuAccTest")
 {
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     RunSoftmaxProfilerJsonPrinterTest(backends);
-}
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/backends/cl/test/ClLayerSupportTests.cpp b/src/backends/cl/test/ClLayerSupportTests.cpp
index 794a45f..b18da11 100644
--- a/src/backends/cl/test/ClLayerSupportTests.cpp
+++ b/src/backends/cl/test/ClLayerSupportTests.cpp
@@ -16,176 +16,176 @@
 #include <backendsCommon/test/IsLayerSupportedTestImpl.hpp>
 #include <backendsCommon/test/LayerTests.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(ClLayerSupport)
-
-BOOST_FIXTURE_TEST_CASE(IsLayerSupportedFloat16Cl, ClContextControlFixture)
+TEST_SUITE("ClLayerSupport")
+{
+TEST_CASE_FIXTURE(ClContextControlFixture, "IsLayerSupportedFloat16Cl")
 {
     armnn::ClWorkloadFactory factory =
         ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
     IsLayerSupportedTests<armnn::ClWorkloadFactory, armnn::DataType::Float16>(&factory);
 }
 
-BOOST_FIXTURE_TEST_CASE(IsLayerSupportedFloat32Cl, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "IsLayerSupportedFloat32Cl")
 {
     armnn::ClWorkloadFactory factory =
         ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
     IsLayerSupportedTests<armnn::ClWorkloadFactory, armnn::DataType::Float32>(&factory);
 }
 
-BOOST_FIXTURE_TEST_CASE(IsLayerSupportedQAsymmU8Cl, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "IsLayerSupportedQAsymmU8Cl")
 {
     armnn::ClWorkloadFactory factory =
         ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
     IsLayerSupportedTests<armnn::ClWorkloadFactory, armnn::DataType::QAsymmU8>(&factory);
 }
 
-BOOST_FIXTURE_TEST_CASE(IsLayerSupportedQAsymmS8Cl, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "IsLayerSupportedQAsymmS8Cl")
 {
     armnn::ClWorkloadFactory factory =
         ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
     IsLayerSupportedTests<armnn::ClWorkloadFactory, armnn::DataType::QAsymmS8>(&factory);
 }
 
-BOOST_FIXTURE_TEST_CASE(IsLayerSupportedQSymmS8Cl, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "IsLayerSupportedQSymmS8Cl")
 {
     armnn::ClWorkloadFactory factory =
         ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
     IsLayerSupportedTests<armnn::ClWorkloadFactory, armnn::DataType::QSymmS8>(&factory);
 }
 
-BOOST_FIXTURE_TEST_CASE(IsConvertFp16ToFp32SupportedCl, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "IsConvertFp16ToFp32SupportedCl")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::ClWorkloadFactory, armnn::ConvertFp16ToFp32Layer,
       armnn::DataType::Float16, armnn::DataType::Float32>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_FIXTURE_TEST_CASE(IsConvertFp16ToFp32SupportedFp32InputCl, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "IsConvertFp16ToFp32SupportedFp32InputCl")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::ClWorkloadFactory, armnn::ConvertFp16ToFp32Layer,
       armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Input should be Float16");
+    CHECK(!result);
+    CHECK_EQ(reasonIfUnsupported, "Input should be Float16");
 }
 
-BOOST_FIXTURE_TEST_CASE(IsConvertFp16ToFp32SupportedFp16OutputCl, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "IsConvertFp16ToFp32SupportedFp16OutputCl")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::ClWorkloadFactory, armnn::ConvertFp16ToFp32Layer,
       armnn::DataType::Float16, armnn::DataType::Float16>(reasonIfUnsupported);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Output should be Float32");
+    CHECK(!result);
+    CHECK_EQ(reasonIfUnsupported, "Output should be Float32");
 }
 
-BOOST_FIXTURE_TEST_CASE(IsConvertFp32ToFp16SupportedCl, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "IsConvertFp32ToFp16SupportedCl")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::ClWorkloadFactory, armnn::ConvertFp32ToFp16Layer,
       armnn::DataType::Float32, armnn::DataType::Float16>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_FIXTURE_TEST_CASE(IsConvertFp32ToFp16SupportedFp16InputCl, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "IsConvertFp32ToFp16SupportedFp16InputCl")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::ClWorkloadFactory, armnn::ConvertFp32ToFp16Layer,
       armnn::DataType::Float16, armnn::DataType::Float16>(reasonIfUnsupported);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Input should be Float32");
+    CHECK(!result);
+    CHECK_EQ(reasonIfUnsupported, "Input should be Float32");
 }
 
-BOOST_FIXTURE_TEST_CASE(IsConvertFp32ToFp16SupportedFp32OutputCl, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "IsConvertFp32ToFp16SupportedFp32OutputCl")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::ClWorkloadFactory, armnn::ConvertFp32ToFp16Layer,
       armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Output should be Float16");
+    CHECK(!result);
+    CHECK_EQ(reasonIfUnsupported, "Output should be Float16");
 }
 
-BOOST_FIXTURE_TEST_CASE(IsLogicalBinarySupportedCl, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "IsLogicalBinarySupportedCl")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsLogicalBinaryLayerSupportedTests<armnn::ClWorkloadFactory,
       armnn::DataType::Boolean, armnn::DataType::Boolean>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_FIXTURE_TEST_CASE(IsLogicalBinaryBroadcastSupportedCl, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "IsLogicalBinaryBroadcastSupportedCl")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsLogicalBinaryLayerBroadcastSupportedTests<armnn::ClWorkloadFactory,
       armnn::DataType::Boolean, armnn::DataType::Boolean>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_FIXTURE_TEST_CASE(IsMeanSupportedCl, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "IsMeanSupportedCl")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsMeanLayerSupportedTests<armnn::ClWorkloadFactory,
       armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_AUTO_TEST_CASE(IsConstantSupportedCl)
+TEST_CASE("IsConstantSupportedCl")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
             armnn::DataType::Float16>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
             armnn::DataType::Float32>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
             armnn::DataType::QAsymmU8>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
             armnn::DataType::Boolean>(reasonIfUnsupported);
-    BOOST_CHECK(!result);
+    CHECK(!result);
 
     result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
             armnn::DataType::QSymmS16>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
             armnn::DataType::QSymmS8>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
             armnn::DataType::QAsymmS8>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
             armnn::DataType::BFloat16>(reasonIfUnsupported);
-    BOOST_CHECK(!result);
+    CHECK(!result);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index 918ef03..1c3c831 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -19,12 +19,13 @@
 #include <arm_compute/core/CL/CLKernelLibrary.h>
 #include <arm_compute/runtime/CL/CLScheduler.h>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <iostream>
 #include <string>
 
-BOOST_FIXTURE_TEST_SUITE(Compute_ArmComputeCl, ClContextControlFixture)
+TEST_SUITE("Compute_ArmComputeCl")
+{
 
 using namespace armnn;
 
@@ -34,1267 +35,1870 @@
 // UNIT tests
 
 // Activation
-ARMNN_AUTO_TEST_CASE_WITH_THF(ConstantLinearActivation, ConstantLinearActivationTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ConstantLinearActivation, ClContextControlFixture, ConstantLinearActivationTest)
 
 // Sigmoid Activation / Logistic
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleSigmoid, SimpleSigmoidTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleSigmoidUint8, SimpleSigmoidUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleSigmoid, ClContextControlFixture, SimpleSigmoidTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleSigmoidUint8, ClContextControlFixture, SimpleSigmoidUint8Test)
 
 // BoundedReLU Activation
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReLu1, BoundedReLuUpperAndLowerBoundTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReLu6, BoundedReLuUpperBoundOnlyTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReLu1Uint8, BoundedReLuUint8UpperAndLowerBoundTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReLu6Uint8, BoundedReLuUint8UpperBoundOnlyTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ReLu1, ClContextControlFixture, BoundedReLuUpperAndLowerBoundTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ReLu6, ClContextControlFixture, BoundedReLuUpperBoundOnlyTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ReLu1Uint8, ClContextControlFixture, BoundedReLuUint8UpperAndLowerBoundTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ReLu6Uint8, ClContextControlFixture, BoundedReLuUint8UpperBoundOnlyTest)
 
 // ReLU Activation
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReLu, ReLuTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReLuUint8, ReLuUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ReLu, ClContextControlFixture, ReLuTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ReLuUint8, ClContextControlFixture, ReLuUint8Test)
 
 // SoftReLU Activation
-ARMNN_AUTO_TEST_CASE_WITH_THF(SoftReLu, SoftReLuTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SoftReLu, ClContextControlFixture, SoftReLuTest)
 
 // LeakyReLU Activation
-ARMNN_AUTO_TEST_CASE_WITH_THF(LeakyReLu, LeakyReLuTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LeakyReLu, ClContextControlFixture, LeakyReLuTest)
 
 // Abs Activation
-ARMNN_AUTO_TEST_CASE_WITH_THF(Abs, AbsTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Abs, ClContextControlFixture, AbsTest)
 
 // Sqrt Activation
-ARMNN_AUTO_TEST_CASE_WITH_THF(Sqrt, SqrtTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SqrtNN, SqrtNNTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Sqrt, ClContextControlFixture, SqrtTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SqrtNN, ClContextControlFixture, SqrtNNTest)
 
 // Square Activation
-ARMNN_AUTO_TEST_CASE_WITH_THF(Square, SquareTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Square, ClContextControlFixture, SquareTest)
 
 // Tanh Activation
-ARMNN_AUTO_TEST_CASE_WITH_THF(Tanh, TanhTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Tanh, ClContextControlFixture, TanhTest)
 
 // Elu Activation
-ARMNN_AUTO_TEST_CASE_WITH_THF(Elu, EluTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Elu, ClContextControlFixture, EluTest)
 
 // Batch To Space
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcFloat321, BatchToSpaceNdNhwcTest1<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcFloat322, BatchToSpaceNdNhwcTest2<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcFloat323, BatchToSpaceNdNhwcTest3<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNhwcFloat321,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNhwcTest1<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNhwcFloat322,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNhwcTest2<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNhwcFloat323,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNhwcTest3<DataType::Float32>)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNchwFloat321, BatchToSpaceNdNchwTest1<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNchwFloat322, BatchToSpaceNdNchwTest2<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNchwFloat323, BatchToSpaceNdNchwTest3<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNchwFloat321,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNchwTest1<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNchwFloat322,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNchwTest2<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNchwFloat323,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNchwTest3<DataType::Float32>)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcInt1, BatchToSpaceNdNhwcTest1<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcInt2, BatchToSpaceNdNhwcTest2<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcInt3, BatchToSpaceNdNhwcTest3<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNhwcInt1,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNhwcTest1<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNhwcInt2,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNhwcTest2<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNhwcInt3,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNhwcTest3<DataType::QAsymmS8>)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNchwInt1, BatchToSpaceNdNchwTest1<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNchwInt2, BatchToSpaceNdNchwTest2<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNchwInt3, BatchToSpaceNdNchwTest3<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNchwInt1,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNchwTest1<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNchwInt2,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNchwTest2<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNchwInt3,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNchwTest3<DataType::QAsymmS8>)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcUint1, BatchToSpaceNdNhwcTest1<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcUint2, BatchToSpaceNdNhwcTest2<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcUint3, BatchToSpaceNdNhwcTest3<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNhwcUint1,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNhwcTest1<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNhwcUint2,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNhwcTest2<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNhwcUint3,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNhwcTest3<DataType::QAsymmU8>)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNchwUint1, BatchToSpaceNdNchwTest1<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNchwUint2, BatchToSpaceNdNchwTest2<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNchwUint3, BatchToSpaceNdNchwTest3<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNchwUint1,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNchwTest1<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNchwUint2,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNchwTest2<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNchwUint3,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNchwTest3<DataType::QAsymmU8>)
 
 // Fully Connected
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFullyConnected, FullyConnectedFloat32Test, false, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, true, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFullyConnectedWithTranspose, FullyConnectedFloat32Test, false, true)
-ARMNN_AUTO_TEST_CASE_WITH_THF(FullyConnectedUint8, FullyConnectedTest<DataType::QAsymmU8>, false, true)
-ARMNN_AUTO_TEST_CASE_WITH_THF(FullyConnectedBiasedUint8, FullyConnectedTest<DataType::QAsymmU8>, true, true)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleFullyConnected,
+                                 ClContextControlFixture,
+                                 FullyConnectedFloat32Test,
+                                 false,
+                                 false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleFullyConnectedWithBias,
+                                 ClContextControlFixture,
+                                 FullyConnectedFloat32Test,
+                                 true,
+                                 false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleFullyConnectedWithTranspose,
+                                 ClContextControlFixture,
+                                 FullyConnectedFloat32Test,
+                                 false,
+                                 true)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(FullyConnectedUint8,
+                                 ClContextControlFixture,
+                                 FullyConnectedTest<DataType::QAsymmU8>,
+                                 false,
+                                 true)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(FullyConnectedBiasedUint8,
+                                 ClContextControlFixture,
+                                 FullyConnectedTest<DataType::QAsymmU8>,
+                                 true,
+                                 true)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(FullyConnectedLarge, FullyConnectedLargeTest, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(FullyConnectedLarge,
+                                 ClContextControlFixture,
+                                 FullyConnectedLargeTest,
+                                 false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(FullyConnectedLargeTransposed,
+                                 ClContextControlFixture,
+                                 FullyConnectedLargeTest,
+                                 true)
 
 // Convolution
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution1d, Convolution1dTest, true)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleConvolution1d,
+                                 ClContextControlFixture,
+                                 Convolution1dTest,
+                                 true)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2d, SimpleConvolution2d3x5Test, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2dNhwc, SimpleConvolution2d3x5Test, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2d3x3Uint8, SimpleConvolution2d3x3Uint8Test, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2d3x3Uint8Nhwc, SimpleConvolution2d3x3Uint8Test, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedConvolution2d, SimpleConvolution2d3x5Test, false, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedConvolution2dNhwc, SimpleConvolution2d3x5Test, false, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedConvolution2dStride2x2Nhwc,
-                     SimpleConvolution2d3x3Stride2x2Test, false, DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleConvolution2d,
+                                 ClContextControlFixture,
+                                 SimpleConvolution2d3x5Test,
+                                 true,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleConvolution2dNhwc,
+                                 ClContextControlFixture,
+                                 SimpleConvolution2d3x5Test,
+                                 true,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleConvolution2d3x3Uint8,
+                                 ClContextControlFixture,
+                                 SimpleConvolution2d3x3Uint8Test,
+                                 true,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleConvolution2d3x3Uint8Nhwc,
+                                 ClContextControlFixture,
+                                 SimpleConvolution2d3x3Uint8Test,
+                                 true,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedConvolution2d,
+                                 ClContextControlFixture,
+                                 SimpleConvolution2d3x5Test,
+                                 false,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedConvolution2dNhwc,
+                                 ClContextControlFixture,
+                                 SimpleConvolution2d3x5Test,
+                                 false,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedConvolution2dStride2x2Nhwc,
+                                 ClContextControlFixture,
+                                 SimpleConvolution2d3x3Stride2x2Test,
+                                 false,
+                                 DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedConvolution2dSquare, SimpleConvolution2d3x3Test, false, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2dAsymmetricPadding,
-                              Convolution2dAsymmetricPaddingTest,
-                              DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedConvolution2dSquare,
+                                 ClContextControlFixture,
+                                 SimpleConvolution2d3x3Test,
+                                 false,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleConvolution2dAsymmetricPadding,
+                                 ClContextControlFixture,
+                                 Convolution2dAsymmetricPaddingTest,
+                                 DataLayout::NCHW)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedConvolution2dSquareNhwc, SimpleConvolution2d3x3Test, false, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2dAsymmetricPaddingNhwc,
-                     Convolution2dAsymmetricPaddingTest,
-                     DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedConvolution2dSquareNhwc,
+                                 ClContextControlFixture,
+                                 SimpleConvolution2d3x3Test,
+                                 false,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleConvolution2dAsymmetricPaddingNhwc,
+                                 ClContextControlFixture,
+                                 Convolution2dAsymmetricPaddingTest,
+                                 DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2dSquareNhwc, SimpleConvolution2d3x3NhwcTest, false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleConvolution2dSquareNhwc,
+                                 ClContextControlFixture,
+                                 SimpleConvolution2d3x3NhwcTest,
+                                 false)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3,
-                     Convolution2d3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
-                     false,
-                     DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3Nhwc,
-                     Convolution2d3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
-                     false,
-                     DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3Uint8,
-                     Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
-                     false,
-                     DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3NhwcUint8,
-                     Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
-                     false,
-                     DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3,
-                     Convolution2d2x3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
-                     false,
-                     DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3Nhwc,
-                     Convolution2d2x3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
-                     false,
-                     DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3Uint8,
-                     Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
-                     false,
-                     DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3NhwcUint8,
-                     Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
-                     false,
-                     DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3,
-                     Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::Float32, DataType::Float32>,
-                     false,
-                     DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Nhwc,
-                     Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::Float32, DataType::Float32>,
-                     false,
-                     DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Uint8,
-                     Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmU8, DataType::Signed32>,
-                     false,
-                     DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcUint8,
-                     Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmU8, DataType::Signed32>,
-                     false,
-                     DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Convolution2d3x3Dilation3x3,
+                                 ClContextControlFixture,
+                                 Convolution2d3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
+                                 false,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Convolution2d3x3Dilation3x3Nhwc,
+                                 ClContextControlFixture,
+                                 Convolution2d3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
+                                 false,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Convolution2d3x3Dilation3x3Uint8,
+                                 ClContextControlFixture,
+                                 Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
+                                 false,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Convolution2d3x3Dilation3x3NhwcUint8,
+                                 ClContextControlFixture,
+                                 Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
+                                 false,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Convolution2d2x3x3Dilation3x3,
+                                 ClContextControlFixture,
+                                 Convolution2d2x3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
+                                 false,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Convolution2d2x3x3Dilation3x3Nhwc,
+                                 ClContextControlFixture,
+                                 Convolution2d2x3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
+                                 false,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Convolution2d2x3x3Dilation3x3Uint8,
+                                 ClContextControlFixture,
+                                 Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
+                                 false,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Convolution2d2x3x3Dilation3x3NhwcUint8,
+                                 ClContextControlFixture,
+                                 Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
+                                 false,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3,
+        ClContextControlFixture,
+        Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::Float32, DataType::Float32>,
+        false,
+        DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Nhwc,
+        ClContextControlFixture,
+        Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::Float32, DataType::Float32>,
+        false,
+        DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Uint8,
+        ClContextControlFixture,
+        Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmU8, DataType::Signed32>,
+        false,
+        DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcUint8,
+        ClContextControlFixture,
+        Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmU8, DataType::Signed32>,
+        false,
+        DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2dPerAxisQuantTestNchw, Convolution2dPerAxisQuantTest, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2dPerAxisQuantTestNhwc, Convolution2dPerAxisQuantTest, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Convolution2dPerAxisQuantTestNchw,
+                                 ClContextControlFixture,
+                                 Convolution2dPerAxisQuantTest,
+                                 DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Convolution2dPerAxisQuantTestNhwc,
+                                 ClContextControlFixture,
+                                 Convolution2dPerAxisQuantTest,
+                                 DataLayout::NHWC);
 
 // Depthwise Convolution
-ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul1,
-                     DepthwiseConvolution2dDepthMul1Test, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1,
-                     DepthwiseConvolution2dDepthMul1Test, false, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul1Uint8,
-                     DepthwiseConvolution2dDepthMul1Uint8Test, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1Uint8,
-                     DepthwiseConvolution2dDepthMul1Uint8Test, false, DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(DepthwiseConvolution2dDepthMul1,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dDepthMul1Test,
+                                 true,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dDepthMul1Test,
+                                 false,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(DepthwiseConvolution2dDepthMul1Uint8,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dDepthMul1Uint8Test,
+                                 true,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1Uint8,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dDepthMul1Uint8Test,
+                                 false,
+                                 DataLayout::NCHW)
 
 // NHWC Depthwise Convolution
-ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul1Nhwc,
-                     DepthwiseConvolution2dDepthMul1Test, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1Nhwc,
-                     DepthwiseConvolution2dDepthMul1Test, false, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul1Uint8Nhwc,
-                     DepthwiseConvolution2dDepthMul1Uint8Test, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1Uint8Nhwc,
-                     DepthwiseConvolution2dDepthMul1Uint8Test, false, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleDepthwiseConvolution2d3x3Dilation3x3Nhwc,
-                     SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(DepthwiseConvolution2dDepthMul1Nhwc,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dDepthMul1Test,
+                                 true,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1Nhwc,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dDepthMul1Test,
+                                 false,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(DepthwiseConvolution2dDepthMul1Uint8Nhwc,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dDepthMul1Uint8Test,
+                                 true,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1Uint8Nhwc,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dDepthMul1Uint8Test,
+                                 false,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleDepthwiseConvolution2d3x3Dilation3x3Nhwc,
+                                 ClContextControlFixture,
+                                 SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest)
 
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthNhwc, DepthwiseConvolution2dDepthNhwcTest, false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(DepthwiseConvolution2dDepthNhwc,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dDepthNhwcTest,
+                                 false)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dAsymmetric,
-                     DepthwiseConvolution2dAsymmetricTest, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dAsymmetric,
-                     DepthwiseConvolution2dAsymmetricTest, false, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dAsymmetricNhwc,
-                     DepthwiseConvolution2dAsymmetricTest, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dAsymmetricNhwc,
-                     DepthwiseConvolution2dAsymmetricTest, false, DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(DepthwiseConvolution2dAsymmetric,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dAsymmetricTest,
+                                 true,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedDepthwiseConvolution2dAsymmetric,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dAsymmetricTest,
+                                 false,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(DepthwiseConvolution2dAsymmetricNhwc,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dAsymmetricTest,
+                                 true,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedDepthwiseConvolution2dAsymmetricNhwc,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dAsymmetricTest,
+                                 false,
+                                 DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul64, DepthwiseConvolution2dDepthMul64Test);
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(DepthwiseConvolution2dDepthMul64,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dDepthMul64Test);
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dPerAxisQuantTestNchw, DepthwiseConvolution2dPerAxisQuantTest,
-                     DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dPerAxisQuantTestNhwc, DepthwiseConvolution2dPerAxisQuantTest,
-                     DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(DepthwiseConvolution2dPerAxisQuantTestNchw,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dPerAxisQuantTest,
+                                 DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(DepthwiseConvolution2dPerAxisQuantTestNhwc,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dPerAxisQuantTest,
+                                 DataLayout::NHWC);
 
 // Splitter
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleSplitterFloat32, SplitterFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleSplitterUint8, SplitterUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleSplitterFloat32, ClContextControlFixture, SplitterFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleSplitterUint8, ClContextControlFixture, SplitterUint8Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(CopyViaSplitterFloat32, CopyViaSplitterFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(CopyViaSplitterUint8, CopyViaSplitterUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(CopyViaSplitterFloat32, ClContextControlFixture, CopyViaSplitterFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(CopyViaSplitterUint8, ClContextControlFixture, CopyViaSplitterUint8Test)
 
 // Concat
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConcat, ConcatTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatUint8, ConcatUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatUint8DifferentInputOutputQParam,
-                     ConcatDifferentInputOutputQParamTest<DataType::QAsymmU8>, false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleConcat, ClContextControlFixture, ConcatTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ConcatUint8, ClContextControlFixture, ConcatUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ConcatUint8DifferentInputOutputQParam,
+                                 ClContextControlFixture,
+                                 ConcatDifferentInputOutputQParamTest<DataType::QAsymmU8>,
+                                 false)
 
 // Normalization
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleNormalizationAcross, SimpleNormalizationAcrossTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleNormalizationWithin, SimpleNormalizationWithinTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleNormalizationAcrossNhwc, SimpleNormalizationAcrossNhwcTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AcrossChannelNormalization, AcrossChannelNormalizationTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleNormalizationAcross, ClContextControlFixture, SimpleNormalizationAcrossTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleNormalizationWithin, ClContextControlFixture, SimpleNormalizationWithinTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleNormalizationAcrossNhwc,
+                                 ClContextControlFixture,
+                                 SimpleNormalizationAcrossNhwcTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AcrossChannelNormalization, ClContextControlFixture, AcrossChannelNormalizationTest)
 
 // Pooling
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleMaxPooling2dSize3x3Stride2x4, SimpleMaxPooling2dSize3x3Stride2x4Test, true)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleMaxPooling2dSize3x3Stride2x4Uint8,
-                              SimpleMaxPooling2dSize3x3Stride2x4Uint8Test, true)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleMaxPooling2dSize3x3Stride2x4,
+                                 ClContextControlFixture,
+                                 SimpleMaxPooling2dSize3x3Stride2x4Test,
+                                 true)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleMaxPooling2dSize3x3Stride2x4Uint8,
+                                 ClContextControlFixture,
+                                 SimpleMaxPooling2dSize3x3Stride2x4Uint8Test,
+                                 true)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(IgnorePaddingSimpleMaxPooling2d, IgnorePaddingSimpleMaxPooling2dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(IgnorePaddingSimpleMaxPooling2dUint8, IgnorePaddingSimpleMaxPooling2dUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(IgnorePaddingMaxPooling2dSize3, IgnorePaddingMaxPooling2dSize3Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(IgnorePaddingMaxPooling2dSize3Uint8, IgnorePaddingMaxPooling2dSize3Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(IgnorePaddingSimpleMaxPooling2d,
+                                 ClContextControlFixture,
+                                 IgnorePaddingSimpleMaxPooling2dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(IgnorePaddingSimpleMaxPooling2dUint8,
+                                 ClContextControlFixture,
+                                 IgnorePaddingSimpleMaxPooling2dUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(IgnorePaddingMaxPooling2dSize3,
+                                 ClContextControlFixture,
+                                 IgnorePaddingMaxPooling2dSize3Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(IgnorePaddingMaxPooling2dSize3Uint8,
+                                 ClContextControlFixture,
+                                 IgnorePaddingMaxPooling2dSize3Uint8Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(IgnorePaddingSimpleAveragePooling2d, IgnorePaddingSimpleAveragePooling2dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(IgnorePaddingSimpleAveragePooling2dUint8, IgnorePaddingSimpleAveragePooling2dUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(IgnorePaddingSimpleAveragePooling2dNoPadding,
-                              IgnorePaddingSimpleAveragePooling2dNoPaddingTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(IgnorePaddingSimpleAveragePooling2dNoPaddingUint8,
-                              IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(IgnorePaddingAveragePooling2dSize3, IgnorePaddingAveragePooling2dSize3Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(IgnorePaddingAveragePooling2dSize3Uint8, IgnorePaddingAveragePooling2dSize3Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(IgnorePaddingSimpleAveragePooling2d,
+                                 ClContextControlFixture,
+                                 IgnorePaddingSimpleAveragePooling2dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(IgnorePaddingSimpleAveragePooling2dUint8,
+                                 ClContextControlFixture,
+                                 IgnorePaddingSimpleAveragePooling2dUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(IgnorePaddingSimpleAveragePooling2dNoPadding,
+                                 ClContextControlFixture,
+                                 IgnorePaddingSimpleAveragePooling2dNoPaddingTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(IgnorePaddingSimpleAveragePooling2dNoPaddingUint8,
+                                 ClContextControlFixture,
+                                 IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(IgnorePaddingAveragePooling2dSize3,
+                                 ClContextControlFixture,
+                                 IgnorePaddingAveragePooling2dSize3Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(IgnorePaddingAveragePooling2dSize3Uint8,
+                                 ClContextControlFixture,
+                                 IgnorePaddingAveragePooling2dSize3Uint8Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(IgnorePaddingSimpleL2Pooling2d, IgnorePaddingSimpleL2Pooling2dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UNSUPPORTED_IgnorePaddingSimpleL2Pooling2dUint8, IgnorePaddingSimpleL2Pooling2dUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(IgnorePaddingL2Pooling2dSize3, IgnorePaddingL2Pooling2dSize3Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UNSUPPORTED_IgnorePaddingL2Pooling2dSize3Uint8, IgnorePaddingL2Pooling2dSize3Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(IgnorePaddingSimpleL2Pooling2d,
+                                 ClContextControlFixture,
+                                 IgnorePaddingSimpleL2Pooling2dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UNSUPPORTED_IgnorePaddingSimpleL2Pooling2dUint8,
+                                 ClContextControlFixture,
+                                 IgnorePaddingSimpleL2Pooling2dUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(IgnorePaddingL2Pooling2dSize3,
+                                 ClContextControlFixture,
+                                 IgnorePaddingL2Pooling2dSize3Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UNSUPPORTED_IgnorePaddingL2Pooling2dSize3Uint8,
+                                 ClContextControlFixture,
+                                 IgnorePaddingL2Pooling2dSize3Uint8Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleMaxPooling2d, SimpleMaxPooling2dTest, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleMaxPooling2dNhwc, SimpleMaxPooling2dTest, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleMaxPooling2dUint8, SimpleMaxPooling2dUint8Test, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleMaxPooling2dUint8Nhwc, SimpleMaxPooling2dUint8Test, DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleMaxPooling2d,
+                                 ClContextControlFixture,
+                                 SimpleMaxPooling2dTest,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleMaxPooling2dNhwc,
+                                 ClContextControlFixture,
+                                 SimpleMaxPooling2dTest,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleMaxPooling2dUint8,
+                                 ClContextControlFixture,
+                                 SimpleMaxPooling2dUint8Test,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleMaxPooling2dUint8Nhwc,
+                                 ClContextControlFixture,
+                                 SimpleMaxPooling2dUint8Test,
+                                 DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleAveragePooling2d, SimpleAveragePooling2dTest, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleAveragePooling2dNhwc, SimpleAveragePooling2dTest, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleAveragePooling2dUint8, SimpleAveragePooling2dUint8Test, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleAveragePooling2dUint8Nhwc, SimpleAveragePooling2dUint8Test, DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleAveragePooling2d,
+                                 ClContextControlFixture,
+                                 SimpleAveragePooling2dTest,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleAveragePooling2dNhwc,
+                                 ClContextControlFixture,
+                                 SimpleAveragePooling2dTest,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleAveragePooling2dUint8,
+                                 ClContextControlFixture,
+                                 SimpleAveragePooling2dUint8Test,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleAveragePooling2dUint8Nhwc,
+                                 ClContextControlFixture,
+                                 SimpleAveragePooling2dUint8Test,
+                                 DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(IgnorePaddingAveragePooling2dSize3x2Stride2x2,
-                              IgnorePaddingAveragePooling2dSize3x2Stride2x2Test,
-                              false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(IgnorePaddingAveragePooling2dSize3x2Stride2x2NoPadding,
-                              IgnorePaddingAveragePooling2dSize3x2Stride2x2Test,
-                              true)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LargeTensorsAveragePooling2d, LargeTensorsAveragePooling2dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LargeTensorsAveragePooling2dUint8, LargeTensorsAveragePooling2dUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(IgnorePaddingAveragePooling2dSize3x2Stride2x2,
+                                 ClContextControlFixture,
+                                 IgnorePaddingAveragePooling2dSize3x2Stride2x2Test,
+                                 false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(IgnorePaddingAveragePooling2dSize3x2Stride2x2NoPadding,
+                                 ClContextControlFixture,
+                                 IgnorePaddingAveragePooling2dSize3x2Stride2x2Test,
+                                 true)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LargeTensorsAveragePooling2d,
+                                 ClContextControlFixture,
+                                 LargeTensorsAveragePooling2dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LargeTensorsAveragePooling2dUint8,
+                                 ClContextControlFixture,
+                                 LargeTensorsAveragePooling2dUint8Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleL2Pooling2d, SimpleL2Pooling2dTest, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleL2Pooling2dNhwc, SimpleL2Pooling2dTest, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UNSUPPORTED_SimpleL2Pooling2dUint8, SimpleL2Pooling2dUint8Test, DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleL2Pooling2d,
+                                 ClContextControlFixture,
+                                 SimpleL2Pooling2dTest,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleL2Pooling2dNhwc,
+                                 ClContextControlFixture,
+                                 SimpleL2Pooling2dTest,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UNSUPPORTED_SimpleL2Pooling2dUint8,
+                                 ClContextControlFixture,
+                                 SimpleL2Pooling2dUint8Test,
+                                 DataLayout::NCHW)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2Pooling2dSize3Stride1, L2Pooling2dSize3Stride1Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UNSUPPORTED_L2Pooling2dSize3Stride1Uint8, L2Pooling2dSize3Stride1Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2Pooling2dSize3Stride3, L2Pooling2dSize3Stride3Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UNSUPPORTED_L2Pooling2dSize3Stride3Uint8, L2Pooling2dSize3Stride3Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2Pooling2dSize3Stride4, L2Pooling2dSize3Stride4Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UNSUPPORTED_L2Pooling2dSize3Stride4Uint8, L2Pooling2dSize3Stride4Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2Pooling2dSize7, L2Pooling2dSize7Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UNSUPPORTED_L2Pooling2dSize7Uint8, L2Pooling2dSize7Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2Pooling2dSize9, L2Pooling2dSize9Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UNSUPPORTED_L2Pooling2dSize9Uint8, L2Pooling2dSize9Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2Pooling2dSize3Stride1, ClContextControlFixture, L2Pooling2dSize3Stride1Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UNSUPPORTED_L2Pooling2dSize3Stride1Uint8,
+                                 ClContextControlFixture,
+                                 L2Pooling2dSize3Stride1Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2Pooling2dSize3Stride3, ClContextControlFixture, L2Pooling2dSize3Stride3Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UNSUPPORTED_L2Pooling2dSize3Stride3Uint8,
+                                 ClContextControlFixture,
+                                 L2Pooling2dSize3Stride3Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2Pooling2dSize3Stride4, ClContextControlFixture, L2Pooling2dSize3Stride4Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UNSUPPORTED_L2Pooling2dSize3Stride4Uint8,
+                                 ClContextControlFixture,
+                                 L2Pooling2dSize3Stride4Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2Pooling2dSize7, ClContextControlFixture, L2Pooling2dSize7Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UNSUPPORTED_L2Pooling2dSize7Uint8,
+                                 ClContextControlFixture,
+                                 L2Pooling2dSize7Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2Pooling2dSize9, ClContextControlFixture, L2Pooling2dSize9Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UNSUPPORTED_L2Pooling2dSize9Uint8, ClContextControlFixture, L2Pooling2dSize9Uint8Test)
 
 // Add
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleAdd, AdditionTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Add5d, Addition5dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AddBroadcast1Element, AdditionBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AddBroadcast, AdditionBroadcastTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleAdd, ClContextControlFixture, AdditionTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Add5d, ClContextControlFixture, Addition5dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AddBroadcast1Element, ClContextControlFixture, AdditionBroadcast1ElementTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AddBroadcast, ClContextControlFixture, AdditionBroadcastTest)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(AdditionUint8, AdditionUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AddBroadcastUint8, AdditionBroadcastUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AddBroadcast1ElementUint8, AdditionBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AdditionUint8, ClContextControlFixture, AdditionUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AddBroadcastUint8, ClContextControlFixture, AdditionBroadcastUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AddBroadcast1ElementUint8,
+                                 ClContextControlFixture,
+                                 AdditionBroadcast1ElementUint8Test)
 
 // Sub
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleSub, SubtractionTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SubBroadcast1Element, SubtractionBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SubBroadcast, SubtractionBroadcastTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleSub, ClContextControlFixture, SubtractionTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SubBroadcast1Element, ClContextControlFixture, SubtractionBroadcast1ElementTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SubBroadcast, ClContextControlFixture, SubtractionBroadcastTest)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(SubtractionUint8, SubtractionUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SubBroadcastUint8, SubtractionBroadcastUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SubBroadcast1ElementUint8, SubtractionBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SubtractionUint8, ClContextControlFixture, SubtractionUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SubBroadcastUint8, ClContextControlFixture, SubtractionBroadcastUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SubBroadcast1ElementUint8,
+                                 ClContextControlFixture,
+                                 SubtractionBroadcast1ElementUint8Test)
 
 // Div
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleDivision, DivisionTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(DivisionByZero, DivisionByZeroTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(DivisionBroadcast1Element, DivisionBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(DivisionBroadcast1DVector, DivisionBroadcast1DVectorTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleDivision, ClContextControlFixture, DivisionTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(DivisionByZero, ClContextControlFixture, DivisionByZeroTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(DivisionBroadcast1Element, ClContextControlFixture, DivisionBroadcast1ElementTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(DivisionBroadcast1DVector, ClContextControlFixture, DivisionBroadcast1DVectorTest)
 // NOTE: quantized division is not supported by CL and not required by the
 //       android NN api
 
 // Mul
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleMultiplication, MultiplicationTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MultiplicationBroadcast1Element, MultiplicationBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MultiplicationBroadcast1DVector, MultiplicationBroadcast1DVectorTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MultiplicationUint8, MultiplicationUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MultiplicationBroadcast1ElementUint8, MultiplicationBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MultiplicationBroadcast1DVectorUint8, MultiplicationBroadcast1DVectorUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Multiplication5d, Multiplication5dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleMultiplication, ClContextControlFixture, MultiplicationTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MultiplicationBroadcast1Element,
+                                 ClContextControlFixture,
+                                 MultiplicationBroadcast1ElementTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MultiplicationBroadcast1DVector,
+                                 ClContextControlFixture,
+                                 MultiplicationBroadcast1DVectorTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MultiplicationUint8, ClContextControlFixture, MultiplicationUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MultiplicationBroadcast1ElementUint8,
+                                 ClContextControlFixture,
+                                 MultiplicationBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MultiplicationBroadcast1DVectorUint8,
+                                 ClContextControlFixture,
+                                 MultiplicationBroadcast1DVectorUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Multiplication5d, ClContextControlFixture, Multiplication5dTest)
 
 // Batch Norm
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchNormFloat32, BatchNormFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchNormFloat32Nhwc, BatchNormFloat32NhwcTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchNormFloat32, ClContextControlFixture, BatchNormFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchNormFloat32Nhwc, ClContextControlFixture, BatchNormFloat32NhwcTest)
 
 // Rank
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1Float16,  RankDimSize1Test<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1Float32,  RankDimSize1Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1QAsymmU8, RankDimSize1Test<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1Signed32, RankDimSize1Test<DataType::Signed32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1QSymmS16, RankDimSize1Test<DataType::QSymmS16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1QAsymmS8, RankDimSize1Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize1Float16, ClContextControlFixture, RankDimSize1Test<DataType::Float16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize1Float32, ClContextControlFixture, RankDimSize1Test<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize1QAsymmU8, ClContextControlFixture, RankDimSize1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize1Signed32, ClContextControlFixture, RankDimSize1Test<DataType::Signed32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize1QSymmS16, ClContextControlFixture, RankDimSize1Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize1QAsymmS8, ClContextControlFixture, RankDimSize1Test<DataType::QAsymmS8>)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2Float16,  RankDimSize2Test<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2Float32,  RankDimSize2Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2QAsymmU8, RankDimSize2Test<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2Signed32, RankDimSize2Test<DataType::Signed32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2QSymmS16, RankDimSize2Test<DataType::QSymmS16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2QAsymmS8, RankDimSize2Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize2Float16, ClContextControlFixture, RankDimSize2Test<DataType::Float16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize2Float32, ClContextControlFixture, RankDimSize2Test<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize2QAsymmU8, ClContextControlFixture, RankDimSize2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize2Signed32, ClContextControlFixture, RankDimSize2Test<DataType::Signed32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize2QSymmS16, ClContextControlFixture, RankDimSize2Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize2QAsymmS8, ClContextControlFixture, RankDimSize2Test<DataType::QAsymmS8>)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3Float16,  RankDimSize3Test<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3Float32,  RankDimSize3Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3QAsymmU8, RankDimSize3Test<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3Signed32, RankDimSize3Test<DataType::Signed32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3QSymmS16, RankDimSize3Test<DataType::QSymmS16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3QAsymmS8, RankDimSize3Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize3Float16, ClContextControlFixture, RankDimSize3Test<DataType::Float16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize3Float32, ClContextControlFixture, RankDimSize3Test<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize3QAsymmU8, ClContextControlFixture, RankDimSize3Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize3Signed32, ClContextControlFixture, RankDimSize3Test<DataType::Signed32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize3QSymmS16, ClContextControlFixture, RankDimSize3Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize3QAsymmS8, ClContextControlFixture, RankDimSize3Test<DataType::QAsymmS8>)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4Float16,  RankDimSize4Test<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4Float32,  RankDimSize4Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4QAsymmU8, RankDimSize4Test<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4Signed32, RankDimSize4Test<DataType::Signed32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4QSymmS16, RankDimSize4Test<DataType::QSymmS16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4QAsymmS8, RankDimSize4Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize4Float16, ClContextControlFixture, RankDimSize4Test<DataType::Float16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize4Float32, ClContextControlFixture, RankDimSize4Test<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize4QAsymmU8, ClContextControlFixture, RankDimSize4Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize4Signed32, ClContextControlFixture, RankDimSize4Test<DataType::Signed32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize4QSymmS16, ClContextControlFixture, RankDimSize4Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize4QAsymmS8, ClContextControlFixture, RankDimSize4Test<DataType::QAsymmS8>)
 
 // InstanceNormalization
-ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat32Nchw, InstanceNormFloat32Test, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat16Nchw, InstanceNormFloat16Test, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(InstanceNormFloat32Nchw,
+                                 ClContextControlFixture,
+                                 InstanceNormFloat32Test,
+                                 DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(InstanceNormFloat16Nchw,
+                                 ClContextControlFixture,
+                                 InstanceNormFloat16Test,
+                                 DataLayout::NCHW);
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat32Nhwc, InstanceNormFloat32Test, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat16Nhwc, InstanceNormFloat16Test, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(InstanceNormFloat32Nhwc,
+                                 ClContextControlFixture,
+                                 InstanceNormFloat32Test,
+                                 DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(InstanceNormFloat16Nhwc,
+                                 ClContextControlFixture,
+                                 InstanceNormFloat16Test,
+                                 DataLayout::NHWC);
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat32Nchw2, InstanceNormFloat32Test2, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat16Nchw2, InstanceNormFloat16Test2, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(InstanceNormFloat32Nchw2,
+                                 ClContextControlFixture,
+                                 InstanceNormFloat32Test2,
+                                 DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(InstanceNormFloat16Nchw2,
+                                 ClContextControlFixture,
+                                 InstanceNormFloat16Test2,
+                                 DataLayout::NCHW);
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat32Nhwc2, InstanceNormFloat32Test2, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat16Nhwc2, InstanceNormFloat16Test2, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(InstanceNormFloat32Nhwc2,
+                                 ClContextControlFixture,
+                                 InstanceNormFloat32Test2,
+                                 DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(InstanceNormFloat16Nhwc2,
+                                 ClContextControlFixture,
+                                 InstanceNormFloat16Test2,
+                                 DataLayout::NHWC);
 
 // L2 Normalization
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization1d, L2Normalization1dTest, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization2d, L2Normalization2dTest, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization3d, L2Normalization3dTest, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization4d, L2Normalization4dTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2Normalization1d, ClContextControlFixture, L2Normalization1dTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2Normalization2d, ClContextControlFixture, L2Normalization2dTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2Normalization3d, ClContextControlFixture, L2Normalization3dTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2Normalization4d, ClContextControlFixture, L2Normalization4dTest, DataLayout::NCHW)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization1dNhwc, L2Normalization1dTest, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization2dNhwc, L2Normalization2dTest, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization3dNhwc, L2Normalization3dTest, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization4dNhwc, L2Normalization4dTest, DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2Normalization1dNhwc,
+                                 ClContextControlFixture,
+                                 L2Normalization1dTest,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2Normalization2dNhwc,
+                                 ClContextControlFixture,
+                                 L2Normalization2dTest,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2Normalization3dNhwc,
+                                 ClContextControlFixture,
+                                 L2Normalization3dTest,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2Normalization4dNhwc,
+                                 ClContextControlFixture,
+                                 L2Normalization4dTest,
+                                 DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization2dShape, L2Normalization2dShapeTest);
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2Normalization2dShape, ClContextControlFixture, L2Normalization2dShapeTest);
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2NormalizationDefaultEpsilon, L2NormalizationDefaultEpsilonTest, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2NormalizationNonDefaultEpsilon, L2NormalizationNonDefaultEpsilonTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2NormalizationDefaultEpsilon,
+                                 ClContextControlFixture,
+                                 L2NormalizationDefaultEpsilonTest,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2NormalizationNonDefaultEpsilon,
+                                 ClContextControlFixture,
+                                 L2NormalizationNonDefaultEpsilonTest,
+                                 DataLayout::NCHW)
 
 // Constant
-ARMNN_AUTO_TEST_CASE_WITH_THF(Constant, ConstantTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ConstantUint8, ConstantUint8SimpleQuantizationScaleNoOffsetTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Constant, ClContextControlFixture, ConstantTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ConstantUint8,
+                                 ClContextControlFixture,
+                                 ConstantUint8SimpleQuantizationScaleNoOffsetTest)
 
 // Concat
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat1d, Concat1dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat1dUint8, Concat1dUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat1d, ClContextControlFixture, Concat1dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat1dUint8, ClContextControlFixture, Concat1dUint8Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim0, Concat2dDim0Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim0Uint8, Concat2dDim0Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim1, Concat2dDim1Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim1Uint8, Concat2dDim1Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat2dDim0, ClContextControlFixture, Concat2dDim0Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat2dDim0Uint8, ClContextControlFixture, Concat2dDim0Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat2dDim1, ClContextControlFixture, Concat2dDim1Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat2dDim1Uint8, ClContextControlFixture, Concat2dDim1Uint8Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim0DiffInputDims, Concat2dDim0DiffInputDimsTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim0DiffInputDimsUint8, Concat2dDim0DiffInputDimsUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim1DiffInputDims, Concat2dDim1DiffInputDimsTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim1DiffInputDimsUint8, Concat2dDim1DiffInputDimsUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat2dDim0DiffInputDims,
+                                 ClContextControlFixture,
+                                 Concat2dDim0DiffInputDimsTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat2dDim0DiffInputDimsUint8,
+                                 ClContextControlFixture,
+                                 Concat2dDim0DiffInputDimsUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat2dDim1DiffInputDims,
+                                 ClContextControlFixture,
+                                 Concat2dDim1DiffInputDimsTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat2dDim1DiffInputDimsUint8,
+                                 ClContextControlFixture,
+                                 Concat2dDim1DiffInputDimsUint8Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim0, Concat3dDim0Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim0Uint8, Concat3dDim0Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim1, Concat3dDim1Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim1Uint8, Concat3dDim1Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim2, Concat3dDim2Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim2Uint8, Concat3dDim2Uint8Test, false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat3dDim0, ClContextControlFixture, Concat3dDim0Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat3dDim0Uint8, ClContextControlFixture, Concat3dDim0Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat3dDim1, ClContextControlFixture, Concat3dDim1Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat3dDim1Uint8, ClContextControlFixture, Concat3dDim1Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat3dDim2, ClContextControlFixture, Concat3dDim2Test, false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat3dDim2Uint8, ClContextControlFixture, Concat3dDim2Uint8Test, false)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim0DiffInputDims, Concat3dDim0DiffInputDimsTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim0DiffInputDimsUint8, Concat3dDim0DiffInputDimsUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim1DiffInputDims, Concat3dDim1DiffInputDimsTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim1DiffInputDimsUint8, Concat3dDim1DiffInputDimsUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim2DiffInputDims, Concat3dDim2DiffInputDimsTest, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim2DiffInputDimsUint8, Concat3dDim2DiffInputDimsUint8Test, false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat3dDim0DiffInputDims, ClContextControlFixture, Concat3dDim0DiffInputDimsTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat3dDim0DiffInputDimsUint8,
+                                 ClContextControlFixture,
+                                 Concat3dDim0DiffInputDimsUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat3dDim1DiffInputDims,
+                                 ClContextControlFixture,
+                                 Concat3dDim1DiffInputDimsTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat3dDim1DiffInputDimsUint8,
+                                 ClContextControlFixture,
+                                 Concat3dDim1DiffInputDimsUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat3dDim2DiffInputDims,
+                                 ClContextControlFixture,
+                                 Concat3dDim2DiffInputDimsTest,
+                                 false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat3dDim2DiffInputDimsUint8,
+                                 ClContextControlFixture,
+                                 Concat3dDim2DiffInputDimsUint8Test,
+                                 false)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim0, Concat4dDim0Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim1, Concat4dDim1Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim3, Concat4dDim3Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim0Uint8, Concat4dDim0Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim1Uint8, Concat4dDim1Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim3Uint8, Concat4dDim3Uint8Test, false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat4dDim0, ClContextControlFixture, Concat4dDim0Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat4dDim1, ClContextControlFixture, Concat4dDim1Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat4dDim3, ClContextControlFixture, Concat4dDim3Test, false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat4dDim0Uint8, ClContextControlFixture, Concat4dDim0Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat4dDim1Uint8, ClContextControlFixture, Concat4dDim1Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat4dDim3Uint8, ClContextControlFixture, Concat4dDim3Uint8Test, false)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim0, Concat4dDiffShapeDim0Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim1, Concat4dDiffShapeDim1Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim3, Concat4dDiffShapeDim3Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim0Uint8, Concat4dDiffShapeDim0Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim1Uint8, Concat4dDiffShapeDim1Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim3Uint8, Concat4dDiffShapeDim3Uint8Test, false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat4dDiffShapeDim0, ClContextControlFixture, Concat4dDiffShapeDim0Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat4dDiffShapeDim1, ClContextControlFixture, Concat4dDiffShapeDim1Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat4dDiffShapeDim3, ClContextControlFixture, Concat4dDiffShapeDim3Test, false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat4dDiffShapeDim0Uint8, ClContextControlFixture, Concat4dDiffShapeDim0Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat4dDiffShapeDim1Uint8, ClContextControlFixture, Concat4dDiffShapeDim1Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat4dDiffShapeDim3Uint8,
+                                 ClContextControlFixture,
+                                 Concat4dDiffShapeDim3Uint8Test,
+                                 false)
 
 // DepthToSpace
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat32_1, DepthToSpaceTest1<DataType::Float32>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat32_2, DepthToSpaceTest2<DataType::Float32>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat32_3, DepthToSpaceTest3<DataType::Float32>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat32_4, DepthToSpaceTest4<DataType::Float32>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwFloat32_1,
+    ClContextControlFixture, DepthToSpaceTest1<DataType::Float32>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwFloat32_2,
+    ClContextControlFixture, DepthToSpaceTest2<DataType::Float32>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwFloat32_3,
+    ClContextControlFixture, DepthToSpaceTest3<DataType::Float32>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwFloat32_4,
+    ClContextControlFixture, DepthToSpaceTest4<DataType::Float32>, DataLayout::NCHW);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_1, DepthToSpaceTest1<DataType::Float16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_2, DepthToSpaceTest2<DataType::Float16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwFloat16_1,
+    ClContextControlFixture, DepthToSpaceTest1<DataType::Float16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwFloat16_2,
+    ClContextControlFixture, DepthToSpaceTest2<DataType::Float16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwFloat16_3,
+    ClContextControlFixture, DepthToSpaceTest3<DataType::Float16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwFloat16_4,
+    ClContextControlFixture, DepthToSpaceTest4<DataType::Float16>, DataLayout::NCHW);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_1, DepthToSpaceTest1<DataType::QAsymmS8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_2, DepthToSpaceTest2<DataType::QAsymmS8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_3, DepthToSpaceTest3<DataType::QAsymmS8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_4, DepthToSpaceTest4<DataType::QAsymmS8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwInt8_1,
+    ClContextControlFixture, DepthToSpaceTest1<DataType::QAsymmS8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwInt8_2,
+    ClContextControlFixture, DepthToSpaceTest2<DataType::QAsymmS8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwInt8_3,
+    ClContextControlFixture, DepthToSpaceTest3<DataType::QAsymmS8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwInt8_4,
+    ClContextControlFixture, DepthToSpaceTest4<DataType::QAsymmS8>, DataLayout::NCHW);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_4, DepthToSpaceTest4<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwUint8_1,
+    ClContextControlFixture, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwUint8_2,
+    ClContextControlFixture, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwUint8_3,
+    ClContextControlFixture, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwUint8_4,
+    ClContextControlFixture, DepthToSpaceTest4<DataType::QAsymmU8>, DataLayout::NCHW);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_1, DepthToSpaceTest1<DataType::QSymmS16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_2, DepthToSpaceTest2<DataType::QSymmS16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_3, DepthToSpaceTest3<DataType::QSymmS16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_4, DepthToSpaceTest4<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwInt16_1,
+    ClContextControlFixture, DepthToSpaceTest1<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwInt16_2,
+    ClContextControlFixture, DepthToSpaceTest2<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwInt16_3,
+    ClContextControlFixture, DepthToSpaceTest3<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwInt16_4,
+    ClContextControlFixture, DepthToSpaceTest4<DataType::QSymmS16>, DataLayout::NCHW);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat32_1, DepthToSpaceTest1<DataType::Float32>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat32_2, DepthToSpaceTest2<DataType::Float32>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat32_3, DepthToSpaceTest3<DataType::Float32>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat32_4, DepthToSpaceTest4<DataType::Float32>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcFloat32_1,
+    ClContextControlFixture, DepthToSpaceTest1<DataType::Float32>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcFloat32_2,
+    ClContextControlFixture, DepthToSpaceTest2<DataType::Float32>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcFloat32_3,
+    ClContextControlFixture, DepthToSpaceTest3<DataType::Float32>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcFloat32_4,
+    ClContextControlFixture, DepthToSpaceTest4<DataType::Float32>, DataLayout::NHWC);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_1, DepthToSpaceTest1<DataType::Float16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_2, DepthToSpaceTest2<DataType::Float16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcFloat16_1,
+    ClContextControlFixture, DepthToSpaceTest1<DataType::Float16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcFloat16_2,
+    ClContextControlFixture, DepthToSpaceTest2<DataType::Float16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcFloat16_3,
+    ClContextControlFixture, DepthToSpaceTest3<DataType::Float16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcFloat16_4,
+    ClContextControlFixture, DepthToSpaceTest4<DataType::Float16>, DataLayout::NHWC);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_1, DepthToSpaceTest1<DataType::QAsymmS8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_2, DepthToSpaceTest2<DataType::QAsymmS8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_3, DepthToSpaceTest3<DataType::QAsymmS8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_4, DepthToSpaceTest4<DataType::QAsymmS8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcInt8_1,
+    ClContextControlFixture, DepthToSpaceTest1<DataType::QAsymmS8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcInt8_2,
+    ClContextControlFixture, DepthToSpaceTest2<DataType::QAsymmS8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcInt8_3,
+    ClContextControlFixture, DepthToSpaceTest3<DataType::QAsymmS8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcInt8_4,
+    ClContextControlFixture, DepthToSpaceTest4<DataType::QAsymmS8>, DataLayout::NHWC);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_4, DepthToSpaceTest4<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcUint8_1,
+    ClContextControlFixture, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcUint8_2,
+    ClContextControlFixture, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcUint8_3,
+    ClContextControlFixture, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcUint8_4,
+    ClContextControlFixture, DepthToSpaceTest4<DataType::QAsymmU8>, DataLayout::NHWC);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_1, DepthToSpaceTest1<DataType::QSymmS16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_2, DepthToSpaceTest2<DataType::QSymmS16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_3, DepthToSpaceTest3<DataType::QSymmS16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_4, DepthToSpaceTest4<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcInt16_1,
+    ClContextControlFixture, DepthToSpaceTest1<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcInt16_2,
+    ClContextControlFixture, DepthToSpaceTest2<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcInt16_3,
+    ClContextControlFixture, DepthToSpaceTest3<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcInt16_4,
+    ClContextControlFixture, DepthToSpaceTest4<DataType::QSymmS16>, DataLayout::NHWC);
 
 // Fill
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFill, SimpleFillTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFillF16, SimpleFillTest<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFillS32, SimpleFillTest<DataType::Signed32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleFill, ClContextControlFixture, SimpleFillTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleFillF16, ClContextControlFixture, SimpleFillTest<DataType::Float16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleFillS32, ClContextControlFixture, SimpleFillTest<DataType::Signed32>)
 
 // FloorPreluUint8
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFloor, SimpleFloorTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleFloor, ClContextControlFixture, SimpleFloorTest<DataType::Float32>)
 
 // Gather
-ARMNN_AUTO_TEST_CASE_WITH_THF(Gather1dParamsFloat32, Gather1dParamsFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Gather1dParamsUint8, Gather1dParamsUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(GatherMultiDimParamsFloat32, GatherMultiDimParamsFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(GatherMultiDimParamsUint8, GatherMultiDimParamsUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Gather1dParamsFloat32, ClContextControlFixture, Gather1dParamsFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Gather1dParamsUint8, ClContextControlFixture, Gather1dParamsUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(GatherMultiDimParamsFloat32, ClContextControlFixture, GatherMultiDimParamsFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(GatherMultiDimParamsUint8, ClContextControlFixture, GatherMultiDimParamsUint8Test)
 
 // Reshape
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleReshapeFloat32, SimpleReshapeTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleReshapeInt8, SimpleReshapeTest<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleReshapeUint8, SimpleReshapeTest<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Reshape5d, Reshape5dTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReshapeBoolean, ReshapeBooleanTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleReshapeFloat32, ClContextControlFixture, SimpleReshapeTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleReshapeInt8, ClContextControlFixture, SimpleReshapeTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleReshapeUint8, ClContextControlFixture, SimpleReshapeTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Reshape5d, ClContextControlFixture, Reshape5dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ReshapeBoolean, ClContextControlFixture, ReshapeBooleanTest)
 
 // Pad
-ARMNN_AUTO_TEST_CASE_WITH_THF(PadFloat322d, PadFloat322dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PadFloat322dCustomPadding, PadFloat322dCustomPaddingTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PadFloat323d, PadFloat323dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PadFloat324d, PadFloat324dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PadFloat322d, ClContextControlFixture, PadFloat322dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PadFloat322dCustomPadding, ClContextControlFixture, PadFloat322dCustomPaddingTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PadFloat323d, ClContextControlFixture, PadFloat323dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PadFloat324d, ClContextControlFixture, PadFloat324dTest)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(PadUint82d, PadUint82dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PadUint82dCustomPadding, PadUint82dCustomPaddingTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PadUint83d, PadUint83dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PadUint84d, PadUint84dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PadUint82d, ClContextControlFixture, PadUint82dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PadUint82dCustomPadding, ClContextControlFixture, PadUint82dCustomPaddingTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PadUint83d, ClContextControlFixture, PadUint83dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PadUint84d, ClContextControlFixture, PadUint84dTest)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(Pad2dQSymm16, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 0.0f)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Pad2dQSymm16CustomPadding, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 1.0f)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Pad3dQSymm16, Pad3dTestCommon<DataType::QSymmS16>, 2.0f, 0)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Pad4dQSymm16, Pad4dTestCommon<DataType::QSymmS16>, 2.0f, 0)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Pad2dQSymm16,
+    ClContextControlFixture, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 0.0f)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Pad2dQSymm16CustomPadding,
+    ClContextControlFixture, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 1.0f)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Pad3dQSymm16, ClContextControlFixture, Pad3dTestCommon<DataType::QSymmS16>, 2.0f, 0)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Pad4dQSymm16, ClContextControlFixture, Pad4dTestCommon<DataType::QSymmS16>, 2.0f, 0)
 
 // PReLU
-ARMNN_AUTO_TEST_CASE_WITH_THF(PreluFloat32, PreluTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PreluUint8,   PreluTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PreluFloat32, ClContextControlFixture, PreluTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PreluUint8, ClContextControlFixture, PreluTest<DataType::QAsymmU8>)
 
 // Permute
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimplePermuteFloat32, SimplePermuteTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PermuteFloat32ValueSet1Test, PermuteValueSet1Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PermuteFloat32ValueSet2Test, PermuteValueSet2Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PermuteFloat32ValueSet3Test, PermuteValueSet3Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimplePermuteQASymmS8, SimplePermuteTest<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PermuteQASymmS8ValueSet1Test, PermuteValueSet1Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PermuteQASymmS8ValueSet2Test, PermuteValueSet2Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PermuteQASymmS8ValueSet3Test, PermuteValueSet3Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimplePermuteQASymm8, SimplePermuteTest<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PermuteQASymm8ValueSet1Test, PermuteValueSet1Test<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PermuteQASymm8ValueSet2Test, PermuteValueSet2Test<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PermuteQASymm8ValueSet3Test, PermuteValueSet3Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimplePermuteFloat32, ClContextControlFixture, SimplePermuteTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    PermuteFloat32ValueSet1Test, ClContextControlFixture, PermuteValueSet1Test<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    PermuteFloat32ValueSet2Test, ClContextControlFixture, PermuteValueSet2Test<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    PermuteFloat32ValueSet3Test, ClContextControlFixture, PermuteValueSet3Test<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SimplePermuteQASymmS8, ClContextControlFixture, SimplePermuteTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    PermuteQASymmS8ValueSet1Test, ClContextControlFixture, PermuteValueSet1Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    PermuteQASymmS8ValueSet2Test, ClContextControlFixture, PermuteValueSet2Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    PermuteQASymmS8ValueSet3Test, ClContextControlFixture, PermuteValueSet3Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SimplePermuteQASymm8, ClContextControlFixture, SimplePermuteTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    PermuteQASymm8ValueSet1Test, ClContextControlFixture, PermuteValueSet1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    PermuteQASymm8ValueSet2Test, ClContextControlFixture, PermuteValueSet2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    PermuteQASymm8ValueSet3Test, ClContextControlFixture, PermuteValueSet3Test<DataType::QAsymmU8>)
 
 // Lstm
-ARMNN_AUTO_TEST_CASE_WITH_THF(LstmLayerFloat32WithCifgWithPeepholeNoProjection,
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LstmLayerFloat32WithCifgWithPeepholeNoProjection, ClContextControlFixture,
                               LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LstmLayerFloat32NoCifgNoPeepholeNoProjection,
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LstmLayerFloat32NoCifgNoPeepholeNoProjection, ClContextControlFixture,
                               LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LstmLayerFloat32NoCifgWithPeepholeWithProjection,
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LstmLayerFloat32NoCifgWithPeepholeWithProjection, ClContextControlFixture,
                               LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNorm,
-                              LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNorm,
+                                 ClContextControlFixture,
+                                 LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest)
 
 // QLstm
-ARMNN_AUTO_TEST_CASE_WITH_THF(QLstm, QLstmTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(QLstm1, QLstmTest1)
-ARMNN_AUTO_TEST_CASE_WITH_THF(QLstm2, QLstmTest2)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(QLstm, ClContextControlFixture, QLstmTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(QLstm1, ClContextControlFixture, QLstmTest1)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(QLstm2, ClContextControlFixture, QLstmTest2)
 
 // QuantizedLstm
-ARMNN_AUTO_TEST_CASE_WITH_THF(QuantizedLstm, QuantizedLstmTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(QuantizedLstm, ClContextControlFixture, QuantizedLstmTest)
 
 // Convert from Float16 to Float32
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvertFp16ToFp32, SimpleConvertFp16ToFp32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleConvertFp16ToFp32, ClContextControlFixture, SimpleConvertFp16ToFp32Test)
 // Convert from Float32 to Float16
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvertFp32ToFp16, SimpleConvertFp32ToFp16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleConvertFp32ToFp16, ClContextControlFixture, SimpleConvertFp32ToFp16Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(AdditionAfterMaxPool, AdditionAfterMaxPoolTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AdditionAfterMaxPool, ClContextControlFixture, AdditionAfterMaxPoolTest)
 
 //Max
-ARMNN_AUTO_TEST_CASE_WITH_THF(MaximumSimple, MaximumSimpleTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MaximumBroadcast1Element, MaximumBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MaximumBroadcast1DVector, MaximumBroadcast1DVectorTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MaximumUint8, MaximumUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MaximumBroadcast1ElementUint8, MaximumBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MaximumBroadcast1DVectorUint8, MaximumBroadcast1DVectorUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MaximumSimple, ClContextControlFixture, MaximumSimpleTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MaximumBroadcast1Element, ClContextControlFixture, MaximumBroadcast1ElementTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MaximumBroadcast1DVector, ClContextControlFixture, MaximumBroadcast1DVectorTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MaximumUint8, ClContextControlFixture, MaximumUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    MaximumBroadcast1ElementUint8, ClContextControlFixture, MaximumBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    MaximumBroadcast1DVectorUint8, ClContextControlFixture, MaximumBroadcast1DVectorUint8Test)
 
 // Mean
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanSimpleFloat32, MeanSimpleTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanSimpleAxisFloat32, MeanSimpleAxisTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanKeepDimsFloat32, MeanKeepDimsTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanMultipleDimsFloat32, MeanMultipleDimsTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanVts1Float32, MeanVts1Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanVts2Float32, MeanVts2Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanVts3Float32, MeanVts3Test<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MeanSimpleFloat32, ClContextControlFixture, MeanSimpleTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MeanSimpleAxisFloat32, ClContextControlFixture, MeanSimpleAxisTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MeanKeepDimsFloat32, ClContextControlFixture, MeanKeepDimsTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    MeanMultipleDimsFloat32, ClContextControlFixture, MeanMultipleDimsTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MeanVts1Float32, ClContextControlFixture, MeanVts1Test<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MeanVts2Float32, ClContextControlFixture, MeanVts2Test<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MeanVts3Float32, ClContextControlFixture, MeanVts3Test<DataType::Float32>)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanSimpleQuantisedAsymmS8, MeanSimpleTest<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanSimpleAxisQuantisedAsymmS8, MeanSimpleAxisTest<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanKeepDimsQuantisedAsymmS8, MeanKeepDimsTest<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanMultipleDimsQuantisedAsymmS8, MeanMultipleDimsTest<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanVts1QuantisedAsymmS8, MeanVts1Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanVts2QuantisedAsymmS8, MeanVts2Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanVts3QuantisedAsymmS8, MeanVts3Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    MeanSimpleQuantisedAsymmS8, ClContextControlFixture, MeanSimpleTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    MeanSimpleAxisQuantisedAsymmS8, ClContextControlFixture, MeanSimpleAxisTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    MeanKeepDimsQuantisedAsymmS8, ClContextControlFixture, MeanKeepDimsTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    MeanMultipleDimsQuantisedAsymmS8, ClContextControlFixture, MeanMultipleDimsTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MeanVts1QuantisedAsymmS8, ClContextControlFixture, MeanVts1Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MeanVts2QuantisedAsymmS8, ClContextControlFixture, MeanVts2Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MeanVts3QuantisedAsymmS8, ClContextControlFixture, MeanVts3Test<DataType::QAsymmS8>)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanSimpleQuantisedAsymm8, MeanSimpleTest<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanSimpleAxisQuantisedAsymm8, MeanSimpleAxisTest<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanKeepDimsQuantisedAsymm8, MeanKeepDimsTest<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanMultipleDimsQuantisedAsymm8, MeanMultipleDimsTest<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanVts1QuantisedAsymm8, MeanVts1Test<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanVts2QuantisedAsymm8, MeanVts2Test<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanVts3QuantisedAsymm8, MeanVts3Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    MeanSimpleQuantisedAsymm8, ClContextControlFixture, MeanSimpleTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    MeanSimpleAxisQuantisedAsymm8, ClContextControlFixture, MeanSimpleAxisTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    MeanKeepDimsQuantisedAsymm8, ClContextControlFixture, MeanKeepDimsTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    MeanMultipleDimsQuantisedAsymm8, ClContextControlFixture, MeanMultipleDimsTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MeanVts1QuantisedAsymm8, ClContextControlFixture, MeanVts1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MeanVts2QuantisedAsymm8, ClContextControlFixture, MeanVts2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MeanVts3QuantisedAsymm8, ClContextControlFixture, MeanVts3Test<DataType::QAsymmU8>)
 
 // Minimum
-ARMNN_AUTO_TEST_CASE_WITH_THF(MinimumBroadcast1Element1, MinimumBroadcast1ElementTest1)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MinimumBroadcast1Element2, MinimumBroadcast1ElementTest2)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MinimumBroadcast1DVectorUint8, MinimumBroadcast1DVectorUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MinimumBroadcast1Element1, ClContextControlFixture, MinimumBroadcast1ElementTest1)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MinimumBroadcast1Element2, ClContextControlFixture, MinimumBroadcast1ElementTest2)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    MinimumBroadcast1DVectorUint8, ClContextControlFixture, MinimumBroadcast1DVectorUint8Test)
 
 // Equal
-ARMNN_AUTO_TEST_CASE_WITH_THF(EqualSimple,            EqualSimpleTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(EqualBroadcast1Element, EqualBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(EqualBroadcast1dVector, EqualBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(EqualSimple, ClContextControlFixture, EqualSimpleTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(EqualBroadcast1Element, ClContextControlFixture, EqualBroadcast1ElementTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(EqualBroadcast1dVector, ClContextControlFixture, EqualBroadcast1dVectorTest)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(EqualSimpleFloat16,            EqualSimpleFloat16Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(EqualBroadcast1ElementFloat16, EqualBroadcast1ElementFloat16Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(EqualBroadcast1dVectorFloat16, EqualBroadcast1dVectorFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(EqualSimpleFloat16, ClContextControlFixture, EqualSimpleFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    EqualBroadcast1ElementFloat16, ClContextControlFixture, EqualBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    EqualBroadcast1dVectorFloat16, ClContextControlFixture, EqualBroadcast1dVectorFloat16Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(EqualSimpleUint8,            EqualSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(EqualBroadcast1ElementUint8, EqualBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(EqualBroadcast1dVectorUint8, EqualBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(EqualSimpleUint8, ClContextControlFixture, EqualSimpleUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(EqualBroadcast1ElementUint8, ClContextControlFixture, EqualBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(EqualBroadcast1dVectorUint8, ClContextControlFixture, EqualBroadcast1dVectorUint8Test)
 
 // Greater
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterSimple,            GreaterSimpleTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterBroadcast1Element, GreaterBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterBroadcast1dVector, GreaterBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(GreaterSimple, ClContextControlFixture, GreaterSimpleTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(GreaterBroadcast1Element, ClContextControlFixture, GreaterBroadcast1ElementTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(GreaterBroadcast1dVector, ClContextControlFixture, GreaterBroadcast1dVectorTest)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterSimpleFloat16,            GreaterSimpleFloat16Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterBroadcast1ElementFloat16, GreaterBroadcast1ElementFloat16Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterBroadcast1dVectorFloat16, GreaterBroadcast1dVectorFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(GreaterSimpleFloat16, ClContextControlFixture, GreaterSimpleFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    GreaterBroadcast1ElementFloat16, ClContextControlFixture, GreaterBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    GreaterBroadcast1dVectorFloat16, ClContextControlFixture, GreaterBroadcast1dVectorFloat16Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterSimpleUint8,            GreaterSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterBroadcast1ElementUint8, GreaterBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterBroadcast1dVectorUint8, GreaterBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(GreaterSimpleUint8, ClContextControlFixture, GreaterSimpleUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    GreaterBroadcast1ElementUint8, ClContextControlFixture, GreaterBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    GreaterBroadcast1dVectorUint8, ClContextControlFixture, GreaterBroadcast1dVectorUint8Test)
 
 // GreaterOrEqual
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualSimple,            GreaterOrEqualSimpleTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualBroadcast1Element, GreaterOrEqualBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualBroadcast1dVector, GreaterOrEqualBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(GreaterOrEqualSimple, ClContextControlFixture, GreaterOrEqualSimpleTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    GreaterOrEqualBroadcast1Element, ClContextControlFixture, GreaterOrEqualBroadcast1ElementTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    GreaterOrEqualBroadcast1dVector, ClContextControlFixture, GreaterOrEqualBroadcast1dVectorTest)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualSimpleFloat16,            GreaterOrEqualSimpleFloat16Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualBroadcast1ElementFloat16, GreaterOrEqualBroadcast1ElementFloat16Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualBroadcast1dVectorFloat16, GreaterOrEqualBroadcast1dVectorFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    GreaterOrEqualSimpleFloat16, ClContextControlFixture, GreaterOrEqualSimpleFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    GreaterOrEqualBroadcast1ElementFloat16, ClContextControlFixture, GreaterOrEqualBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    GreaterOrEqualBroadcast1dVectorFloat16, ClContextControlFixture, GreaterOrEqualBroadcast1dVectorFloat16Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualSimpleUint8,            GreaterOrEqualSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualBroadcast1ElementUint8, GreaterOrEqualBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualBroadcast1dVectorUint8, GreaterOrEqualBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(GreaterOrEqualSimpleUint8, ClContextControlFixture, GreaterOrEqualSimpleUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    GreaterOrEqualBroadcast1ElementUint8, ClContextControlFixture, GreaterOrEqualBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    GreaterOrEqualBroadcast1dVectorUint8, ClContextControlFixture, GreaterOrEqualBroadcast1dVectorUint8Test)
 
 // Less
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessSimple,            LessSimpleTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessBroadcast1Element, LessBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessBroadcast1dVector, LessBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LessSimple, ClContextControlFixture, LessSimpleTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LessBroadcast1Element, ClContextControlFixture, LessBroadcast1ElementTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LessBroadcast1dVector, ClContextControlFixture, LessBroadcast1dVectorTest)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessSimpleFloat16,            LessSimpleFloat16Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessBroadcast1ElementFloat16, LessBroadcast1ElementFloat16Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessBroadcast1dVectorFloat16, LessBroadcast1dVectorFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LessSimpleFloat16, ClContextControlFixture, LessSimpleFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    LessBroadcast1ElementFloat16, ClContextControlFixture, LessBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    LessBroadcast1dVectorFloat16, ClContextControlFixture, LessBroadcast1dVectorFloat16Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessSimpleUint8,            LessSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessBroadcast1ElementUint8, LessBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessBroadcast1dVectorUint8, LessBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LessSimpleUint8, ClContextControlFixture, LessSimpleUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LessBroadcast1ElementUint8, ClContextControlFixture, LessBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LessBroadcast1dVectorUint8, ClContextControlFixture, LessBroadcast1dVectorUint8Test)
 
 // LessOrEqual
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualSimple,            LessOrEqualSimpleTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualBroadcast1Element, LessOrEqualBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualBroadcast1dVector, LessOrEqualBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LessOrEqualSimple, ClContextControlFixture, LessOrEqualSimpleTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    LessOrEqualBroadcast1Element, ClContextControlFixture, LessOrEqualBroadcast1ElementTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    LessOrEqualBroadcast1dVector, ClContextControlFixture, LessOrEqualBroadcast1dVectorTest)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualSimpleFloat16,            LessOrEqualSimpleFloat16Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualBroadcast1ElementFloat16, LessOrEqualBroadcast1ElementFloat16Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualBroadcast1dVectorFloat16, LessOrEqualBroadcast1dVectorFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LessOrEqualSimpleFloat16, ClContextControlFixture, LessOrEqualSimpleFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    LessOrEqualBroadcast1ElementFloat16, ClContextControlFixture, LessOrEqualBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    LessOrEqualBroadcast1dVectorFloat16, ClContextControlFixture, LessOrEqualBroadcast1dVectorFloat16Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualSimpleUint8,            LessOrEqualSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualBroadcast1ElementUint8, LessOrEqualBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualBroadcast1dVectorUint8, LessOrEqualBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LessOrEqualSimpleUint8, ClContextControlFixture, LessOrEqualSimpleUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    LessOrEqualBroadcast1ElementUint8, ClContextControlFixture, LessOrEqualBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    LessOrEqualBroadcast1dVectorUint8, ClContextControlFixture, LessOrEqualBroadcast1dVectorUint8Test)
 
 // NotEqual
-ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualSimple,            NotEqualSimpleTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualBroadcast1Element, NotEqualBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualBroadcast1dVector, NotEqualBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(NotEqualSimple, ClContextControlFixture, NotEqualSimpleTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(NotEqualBroadcast1Element, ClContextControlFixture, NotEqualBroadcast1ElementTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(NotEqualBroadcast1dVector, ClContextControlFixture, NotEqualBroadcast1dVectorTest)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualSimpleFloat16,            NotEqualSimpleFloat16Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualBroadcast1ElementFloat16, NotEqualBroadcast1ElementFloat16Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualBroadcast1dVectorFloat16, NotEqualBroadcast1dVectorFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(NotEqualSimpleFloat16, ClContextControlFixture, NotEqualSimpleFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    NotEqualBroadcast1ElementFloat16, ClContextControlFixture, NotEqualBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    NotEqualBroadcast1dVectorFloat16, ClContextControlFixture, NotEqualBroadcast1dVectorFloat16Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualSimpleUint8,            NotEqualSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualBroadcast1ElementUint8, NotEqualBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualBroadcast1dVectorUint8, NotEqualBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(NotEqualSimpleUint8, ClContextControlFixture, NotEqualSimpleUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    NotEqualBroadcast1ElementUint8, ClContextControlFixture, NotEqualBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    NotEqualBroadcast1dVectorUint8, ClContextControlFixture, NotEqualBroadcast1dVectorUint8Test)
 
 // Softmax
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleSoftmaxBeta1, SimpleSoftmaxTest, 1.0f)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleSoftmaxBeta2, SimpleSoftmaxTest, 2.0f)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleSoftmaxBeta1Uint8, SimpleSoftmaxUint8Test, 1.0f)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleSoftmaxBeta2Uint8, SimpleSoftmaxUint8Test, 2.0f)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleSoftmaxBeta1, ClContextControlFixture, SimpleSoftmaxTest, 1.0f)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleSoftmaxBeta2, ClContextControlFixture, SimpleSoftmaxTest, 2.0f)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleSoftmaxBeta1Uint8, ClContextControlFixture, SimpleSoftmaxUint8Test, 1.0f)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleSoftmaxBeta2Uint8, ClContextControlFixture, SimpleSoftmaxUint8Test, 2.0f)
 
 // LogSoftmax
-ARMNN_AUTO_TEST_CASE_WITH_THF(LogSoftmaxFloat32_1, LogSoftmaxTest1<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogSoftmaxFloat32_1, ClContextControlFixture, LogSoftmaxTest1<DataType::Float32>)
 
 // Space To Batch Nd
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdSimpleFloat32, SpaceToBatchNdSimpleFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdMultiChannelsFloat32, SpaceToBatchNdMultiChannelsFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdMultiBlockFloat32, SpaceToBatchNdMultiBlockFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdPaddingFloat32, SpaceToBatchNdPaddingFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SpaceToBatchNdSimpleFloat32, ClContextControlFixture, SpaceToBatchNdSimpleFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SpaceToBatchNdMultiChannelsFloat32, ClContextControlFixture, SpaceToBatchNdMultiChannelsFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SpaceToBatchNdMultiBlockFloat32, ClContextControlFixture, SpaceToBatchNdMultiBlockFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SpaceToBatchNdPaddingFloat32, ClContextControlFixture, SpaceToBatchNdPaddingFloat32Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdSimpleUint8, SpaceToBatchNdSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdMultiChannelsUint8, SpaceToBatchNdMultiChannelsUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdMultiBlockUint8, SpaceToBatchNdMultiBlockUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdPaddingUint8, SpaceToBatchNdPaddingUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SpaceToBatchNdSimpleUint8, ClContextControlFixture, SpaceToBatchNdSimpleUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SpaceToBatchNdMultiChannelsUint8, ClContextControlFixture, SpaceToBatchNdMultiChannelsUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SpaceToBatchNdMultiBlockUint8, ClContextControlFixture, SpaceToBatchNdMultiBlockUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SpaceToBatchNdPaddingUint8, ClContextControlFixture, SpaceToBatchNdPaddingUint8Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdSimpleNhwcFloat32, SpaceToBatchNdSimpleNhwcFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdMultiChannelsNhwcFloat32, SpaceToBatchNdMultiChannelsNhwcFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdMultiBlockNhwcFloat32, SpaceToBatchNdMultiBlockNhwcFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdPaddingNhwcFloat32, SpaceToBatchNdPaddingNhwcFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SpaceToBatchNdSimpleNhwcFloat32, ClContextControlFixture, SpaceToBatchNdSimpleNhwcFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SpaceToBatchNdMultiChannelsNhwcFloat32, ClContextControlFixture, SpaceToBatchNdMultiChannelsNhwcFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SpaceToBatchNdMultiBlockNhwcFloat32, ClContextControlFixture, SpaceToBatchNdMultiBlockNhwcFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SpaceToBatchNdPaddingNhwcFloat32, ClContextControlFixture, SpaceToBatchNdPaddingNhwcFloat32Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdSimpleNhwcUint8, SpaceToBatchNdSimpleNhwcUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdMultiChannelsNhwcUint8, SpaceToBatchNdMultiChannelsNhwcUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdMultiBlockNhwcUint8, SpaceToBatchNdMultiBlockNhwcUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdPaddingNhwcUint8, SpaceToBatchNdPaddingNhwcUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SpaceToBatchNdSimpleNhwcUint8, ClContextControlFixture, SpaceToBatchNdSimpleNhwcUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SpaceToBatchNdMultiChannelsNhwcUint8, ClContextControlFixture, SpaceToBatchNdMultiChannelsNhwcUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SpaceToBatchNdMultiBlockNhwcUint8, ClContextControlFixture, SpaceToBatchNdMultiBlockNhwcUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SpaceToBatchNdPaddingNhwcUint8, ClContextControlFixture, SpaceToBatchNdPaddingNhwcUint8Test)
 
 // Space To Depth
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToDepthNhwcAsymmQ8, SpaceToDepthNhwcAsymmQ8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToDepthNchwAsymmQ8, SpaceToDepthNchwAsymmQ8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SpaceToDepthNhwcAsymmQ8, ClContextControlFixture, SpaceToDepthNhwcAsymmQ8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SpaceToDepthNchwAsymmQ8, ClContextControlFixture, SpaceToDepthNchwAsymmQ8Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToDepthNhwx1Float32, SpaceToDepthNhwcFloat32Test1)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToDepthNchw1Float32, SpaceToDepthNchwFloat32Test1)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SpaceToDepthNhwx1Float32, ClContextControlFixture, SpaceToDepthNhwcFloat32Test1)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SpaceToDepthNchw1Float32, ClContextControlFixture, SpaceToDepthNchwFloat32Test1)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToDepthNhwc2Float32, SpaceToDepthNhwcFloat32Test2)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToDepthNchw2Float32, SpaceToDepthNchwFloat32Test2)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SpaceToDepthNhwc2Float32, ClContextControlFixture, SpaceToDepthNhwcFloat32Test2)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SpaceToDepthNchw2Float32, ClContextControlFixture, SpaceToDepthNchwFloat32Test2)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToDepthNhwcQSymm16, SpaceToDepthNhwcQSymm16Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToDepthNchwQSymm16, SpaceToDepthNchwQSymm16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SpaceToDepthNhwcQSymm16, ClContextControlFixture, SpaceToDepthNhwcQSymm16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SpaceToDepthNchwQSymm16, ClContextControlFixture, SpaceToDepthNchwQSymm16Test)
 
 // Stack
-ARMNN_AUTO_TEST_CASE_WITH_THF(Stack0Axis,           StackAxis0Float32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StackOutput4DAxis1,   StackOutput4DAxis1Float32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StackOutput4DAxis2,   StackOutput4DAxis2Float32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StackOutput4DAxis3,   StackOutput4DAxis3Float32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StackOutput3DInputs3, StackOutput3DInputs3Float32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StackOutput5D,        StackOutput5DFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StackFloat16,         StackFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Stack0Axis, ClContextControlFixture, StackAxis0Float32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StackOutput4DAxis1, ClContextControlFixture, StackOutput4DAxis1Float32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StackOutput4DAxis2, ClContextControlFixture, StackOutput4DAxis2Float32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StackOutput4DAxis3, ClContextControlFixture, StackOutput4DAxis3Float32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StackOutput3DInputs3, ClContextControlFixture, StackOutput3DInputs3Float32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StackOutput5D, ClContextControlFixture, StackOutput5DFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StackFloat16, ClContextControlFixture, StackFloat16Test)
 
 // Slice
-ARMNN_AUTO_TEST_CASE(Slice4dFloat32, Slice4dFloat32Test)
-ARMNN_AUTO_TEST_CASE(Slice3dFloat32, Slice3dFloat32Test)
-ARMNN_AUTO_TEST_CASE(Slice2dFloat32, Slice2dFloat32Test)
-ARMNN_AUTO_TEST_CASE(Slice1dFloat32, Slice1dFloat32Test)
-ARMNN_AUTO_TEST_CASE(Slice4dUint8, Slice4dUint8Test)
-ARMNN_AUTO_TEST_CASE(Slice3dUint8, Slice3dUint8Test)
-ARMNN_AUTO_TEST_CASE(Slice2dUint8, Slice2dUint8Test)
-ARMNN_AUTO_TEST_CASE(Slice1dUint8, Slice1dUint8Test)
-ARMNN_AUTO_TEST_CASE(Slice4dInt16, Slice4dInt16Test)
-ARMNN_AUTO_TEST_CASE(Slice3dInt16, Slice3dInt16Test)
-ARMNN_AUTO_TEST_CASE(Slice2dInt16, Slice2dInt16Test)
-ARMNN_AUTO_TEST_CASE(Slice1dInt16, Slice1dInt16Test)
+ARMNN_AUTO_TEST_FIXTURE(Slice4dFloat32, ClContextControlFixture, Slice4dFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE(Slice3dFloat32, ClContextControlFixture, Slice3dFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE(Slice2dFloat32, ClContextControlFixture, Slice2dFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE(Slice1dFloat32, ClContextControlFixture, Slice1dFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE(Slice4dUint8, ClContextControlFixture, Slice4dUint8Test)
+ARMNN_AUTO_TEST_FIXTURE(Slice3dUint8, ClContextControlFixture, Slice3dUint8Test)
+ARMNN_AUTO_TEST_FIXTURE(Slice2dUint8, ClContextControlFixture, Slice2dUint8Test)
+ARMNN_AUTO_TEST_FIXTURE(Slice1dUint8, ClContextControlFixture, Slice1dUint8Test)
+ARMNN_AUTO_TEST_FIXTURE(Slice4dInt16, ClContextControlFixture, Slice4dInt16Test)
+ARMNN_AUTO_TEST_FIXTURE(Slice3dInt16, ClContextControlFixture, Slice3dInt16Test)
+ARMNN_AUTO_TEST_FIXTURE(Slice2dInt16, ClContextControlFixture, Slice2dInt16Test)
+ARMNN_AUTO_TEST_FIXTURE(Slice1dInt16, ClContextControlFixture, Slice1dInt16Test)
 
 // Strided Slice
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice4dFloat32, StridedSlice4dFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice4dReverseFloat32, StridedSlice4dReverseFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSliceSimpleStrideFloat32, StridedSliceSimpleStrideFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSliceSimpleRangeMaskFloat32, StridedSliceSimpleRangeMaskFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSliceShrinkAxisMaskFloat32, StridedSliceShrinkAxisMaskFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSliceShrinkAxisMaskCTSFloat32, StridedSliceShrinkAxisMaskCTSFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition0Dim3Float32,
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSlice4dFloat32, ClContextControlFixture, StridedSlice4dFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    StridedSlice4dReverseFloat32, ClContextControlFixture, StridedSlice4dReverseFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    StridedSliceSimpleStrideFloat32, ClContextControlFixture, StridedSliceSimpleStrideFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    StridedSliceSimpleRangeMaskFloat32, ClContextControlFixture, StridedSliceSimpleRangeMaskFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    StridedSliceShrinkAxisMaskFloat32, ClContextControlFixture, StridedSliceShrinkAxisMaskFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    StridedSliceShrinkAxisMaskCTSFloat32, ClContextControlFixture, StridedSliceShrinkAxisMaskCTSFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition0Dim3Float32, ClContextControlFixture,
                      StridedSliceShrinkAxisMaskBitPosition0Dim3Float32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition0Float32, StridedSliceShrinkAxisMaskBitPosition0Float32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition1Float32, StridedSliceShrinkAxisMaskBitPosition1Float32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition2Float32, StridedSliceShrinkAxisMaskBitPosition2Float32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition3Float32, StridedSliceShrinkAxisMaskBitPosition3Float32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition0And1Float32, StridedSliceShrinkAxisMaskBitPosition0And1Float32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition0And2Float32, StridedSliceShrinkAxisMaskBitPosition0And2Float32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition0And3Float32, StridedSliceShrinkAxisMaskBitPosition0And3Float32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition0And1And3Float32, StridedSliceShrinkAxisMaskBitPosition0And1And3Float32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dFloat32, StridedSlice3dFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dReverseFloat32, StridedSlice3dReverseFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice2dFloat32, StridedSlice2dFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice2dReverseFloat32, StridedSlice2dReverseFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition0Float32,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition0Float32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition1Float32,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition1Float32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition2Float32,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition2Float32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition3Float32,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition3Float32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition0And1Float32,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition0And1Float32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition0And2Float32,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition0And2Float32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition0And3Float32,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition0And3Float32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition0And1And3Float32,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition0And1And3Float32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSlice3dFloat32,
+                                 ClContextControlFixture,
+                                 StridedSlice3dFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    StridedSlice3dReverseFloat32, ClContextControlFixture, StridedSlice3dReverseFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    StridedSlice2dFloat32, ClContextControlFixture, StridedSlice2dFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    StridedSlice2dReverseFloat32, ClContextControlFixture, StridedSlice2dReverseFloat32Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice4dUint8, StridedSlice4dUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice4dReverseUint8, StridedSlice4dReverseUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSliceSimpleStrideUint8, StridedSliceSimpleStrideUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSliceSimpleRangeMaskUint8, StridedSliceSimpleRangeMaskUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSliceShrinkAxisMaskUint8, StridedSliceShrinkAxisMaskUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition0Dim3Uint8, StridedSliceShrinkAxisMaskBitPosition0Dim3Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition0Uint8, StridedSliceShrinkAxisMaskBitPosition0Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition1Uint8, StridedSliceShrinkAxisMaskBitPosition1Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition2Uint8, StridedSliceShrinkAxisMaskBitPosition2Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition3Uint8, StridedSliceShrinkAxisMaskBitPosition3Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition0And1Uint8, StridedSliceShrinkAxisMaskBitPosition0And1Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition0And2Uint8, StridedSliceShrinkAxisMaskBitPosition0And2Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition0And3Uint8, StridedSliceShrinkAxisMaskBitPosition0And3Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition0And1And3Uint8, StridedSliceShrinkAxisMaskBitPosition0And1And3Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dUint8, StridedSlice3dUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dReverseUint8, StridedSlice3dReverseUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice2dUint8, StridedSlice2dUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice2dReverseUint8, StridedSlice2dReverseUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSlice4dUint8, ClContextControlFixture, StridedSlice4dUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    StridedSlice4dReverseUint8, ClContextControlFixture, StridedSlice4dReverseUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    StridedSliceSimpleStrideUint8, ClContextControlFixture, StridedSliceSimpleStrideUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    StridedSliceSimpleRangeMaskUint8, ClContextControlFixture, StridedSliceSimpleRangeMaskUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    StridedSliceShrinkAxisMaskUint8, ClContextControlFixture, StridedSliceShrinkAxisMaskUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition0Dim3Uint8,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition0Dim3Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition0Uint8,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition0Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition1Uint8,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition1Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition2Uint8,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition2Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition3Uint8,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition3Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition0And1Uint8,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition0And1Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition0And2Uint8,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition0And2Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition0And3Uint8,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition0And3Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition0And1And3Uint8,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition0And1And3Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSlice3dUint8, ClContextControlFixture, StridedSlice3dUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSlice3dReverseUint8, ClContextControlFixture, StridedSlice3dReverseUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSlice2dUint8, ClContextControlFixture, StridedSlice2dUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSlice2dReverseUint8, ClContextControlFixture, StridedSlice2dReverseUint8Test)
 
 // Resize Bilinear - NCHW
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeBilinear,
-                              SimpleResizeBilinearTest<DataType::Float32>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeBilinearInt8,
-                              SimpleResizeBilinearTest<DataType::QAsymmS8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeBilinearUint8,
-                              SimpleResizeBilinearTest<DataType::QAsymmU8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearNop,
-                              ResizeBilinearNopTest<DataType::Float32>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearNopInt8,
-                              ResizeBilinearNopTest<DataType::QAsymmS8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearNopUint8,
-                              ResizeBilinearNopTest<DataType::QAsymmU8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearSqMin,
-                              ResizeBilinearSqMinTest<DataType::Float32>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearSqMinInt8,
-                              ResizeBilinearSqMinTest<DataType::QAsymmS8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearSqMinUint8,
-                              ResizeBilinearSqMinTest<DataType::QAsymmU8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearMin,
-                              ResizeBilinearMinTest<DataType::Float32>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearMinInt8,
-                              ResizeBilinearMinTest<DataType::QAsymmS8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearMinUint8,
-                              ResizeBilinearMinTest<DataType::QAsymmU8>,
-                              DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleResizeBilinear,
+                                 ClContextControlFixture,
+                                 SimpleResizeBilinearTest<DataType::Float32>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleResizeBilinearInt8,
+                                 ClContextControlFixture,
+                                 SimpleResizeBilinearTest<DataType::QAsymmS8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleResizeBilinearUint8,
+                                 ClContextControlFixture,
+                                 SimpleResizeBilinearTest<DataType::QAsymmU8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearNop,
+                                 ClContextControlFixture,
+                                 ResizeBilinearNopTest<DataType::Float32>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearNopInt8,
+                                 ClContextControlFixture,
+                                 ResizeBilinearNopTest<DataType::QAsymmS8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearNopUint8,
+                                 ClContextControlFixture,
+                                 ResizeBilinearNopTest<DataType::QAsymmU8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearSqMin,
+                                 ClContextControlFixture,
+                                 ResizeBilinearSqMinTest<DataType::Float32>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearSqMinInt8,
+                                 ClContextControlFixture,
+                                 ResizeBilinearSqMinTest<DataType::QAsymmS8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearSqMinUint8,
+                                 ClContextControlFixture,
+                                 ResizeBilinearSqMinTest<DataType::QAsymmU8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearMin,
+                                 ClContextControlFixture,
+                                 ResizeBilinearMinTest<DataType::Float32>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearMinInt8,
+                                 ClContextControlFixture,
+                                 ResizeBilinearMinTest<DataType::QAsymmS8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearMinUint8,
+                                 ClContextControlFixture,
+                                 ResizeBilinearMinTest<DataType::QAsymmU8>,
+                                 DataLayout::NCHW)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(HalfPixelCentersResizeBilinear,
-                              HalfPixelCentersResizeBilinearTest<DataType::Float32>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AlignCornersResizeBilinear,
-                              AlignCornersResizeBilinearTest<DataType::Float32>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(HalfPixelCentersResizeBilinearInt8,
-                              HalfPixelCentersResizeBilinearTest<DataType::QAsymmS8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AlignCornersResizeBilinearInt8,
-                              AlignCornersResizeBilinearTest<DataType::QAsymmS8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(HalfPixelCentersResizeBilinearUint8,
-                              HalfPixelCentersResizeBilinearTest<DataType::QAsymmU8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AlignCornersResizeBilinearUint8,
-                              AlignCornersResizeBilinearTest<DataType::QAsymmU8>,
-                              DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(HalfPixelCentersResizeBilinear,
+                                 ClContextControlFixture,
+                                 HalfPixelCentersResizeBilinearTest<DataType::Float32>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AlignCornersResizeBilinear,
+                                 ClContextControlFixture,
+                                 AlignCornersResizeBilinearTest<DataType::Float32>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(HalfPixelCentersResizeBilinearInt8,
+                                 ClContextControlFixture,
+                                 HalfPixelCentersResizeBilinearTest<DataType::QAsymmS8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AlignCornersResizeBilinearInt8,
+                                 ClContextControlFixture,
+                                 AlignCornersResizeBilinearTest<DataType::QAsymmS8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(HalfPixelCentersResizeBilinearUint8,
+                                 ClContextControlFixture,
+                                 HalfPixelCentersResizeBilinearTest<DataType::QAsymmU8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AlignCornersResizeBilinearUint8,
+                                 ClContextControlFixture,
+                                 AlignCornersResizeBilinearTest<DataType::QAsymmU8>,
+                                 DataLayout::NCHW)
 
 // Resize Bilinear - NHWC
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearNopNhwc,
-                              ResizeBilinearNopTest<DataType::Float32>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearNopInt8Nhwc,
-                              ResizeBilinearNopTest<DataType::QAsymmS8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearNopUint8Nhwc,
-                              ResizeBilinearNopTest<DataType::QAsymmU8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeBilinearNhwc,
-                              SimpleResizeBilinearTest<DataType::Float32>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeBilinearInt8Nhwc,
-                              SimpleResizeBilinearTest<DataType::QAsymmS8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeBilinearUint8Nhwc,
-                              SimpleResizeBilinearTest<DataType::QAsymmU8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearSqMinNhwc,
-                              ResizeBilinearSqMinTest<DataType::Float32>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearSqMinInt8Nhwc,
-                              ResizeBilinearSqMinTest<DataType::QAsymmS8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearSqMinUint8Nhwc,
-                              ResizeBilinearSqMinTest<DataType::QAsymmU8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearMinNhwc,
-                              ResizeBilinearMinTest<DataType::Float32>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearMinInt8Nhwc,
-                              ResizeBilinearMinTest<DataType::QAsymmS8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearMinUint8Nhwc,
-                              ResizeBilinearMinTest<DataType::QAsymmU8>,
-                              DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearNopNhwc,
+                                 ClContextControlFixture,
+                                 ResizeBilinearNopTest<DataType::Float32>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearNopInt8Nhwc,
+                                 ClContextControlFixture,
+                                 ResizeBilinearNopTest<DataType::QAsymmS8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearNopUint8Nhwc,
+                                 ClContextControlFixture,
+                                 ResizeBilinearNopTest<DataType::QAsymmU8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleResizeBilinearNhwc,
+                                 ClContextControlFixture,
+                                 SimpleResizeBilinearTest<DataType::Float32>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleResizeBilinearInt8Nhwc,
+                                 ClContextControlFixture,
+                                 SimpleResizeBilinearTest<DataType::QAsymmS8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleResizeBilinearUint8Nhwc,
+                                 ClContextControlFixture,
+                                 SimpleResizeBilinearTest<DataType::QAsymmU8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearSqMinNhwc,
+                                 ClContextControlFixture,
+                                 ResizeBilinearSqMinTest<DataType::Float32>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearSqMinInt8Nhwc,
+                                 ClContextControlFixture,
+                                 ResizeBilinearSqMinTest<DataType::QAsymmS8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearSqMinUint8Nhwc,
+                                 ClContextControlFixture,
+                                 ResizeBilinearSqMinTest<DataType::QAsymmU8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearMinNhwc,
+                                 ClContextControlFixture,
+                                 ResizeBilinearMinTest<DataType::Float32>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearMinInt8Nhwc,
+                                 ClContextControlFixture,
+                                 ResizeBilinearMinTest<DataType::QAsymmS8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearMinUint8Nhwc,
+                                 ClContextControlFixture,
+                                 ResizeBilinearMinTest<DataType::QAsymmU8>,
+                                 DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(HalfPixelCentersResizeBilinearNhwc,
-                              HalfPixelCentersResizeBilinearTest<DataType::Float32>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AlignCornersResizeBilinearNhwc,
-                              AlignCornersResizeBilinearTest<DataType::Float32>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(HalfPixelCentersResizeBilinearInt8Nhwc,
-                              HalfPixelCentersResizeBilinearTest<DataType::QAsymmS8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AlignCornersResizeBilinearInt8Nhwc,
-                              AlignCornersResizeBilinearTest<DataType::QAsymmS8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(HalfPixelCentersResizeBilinearUint8Nhwc,
-                              HalfPixelCentersResizeBilinearTest<DataType::QAsymmU8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AlignCornersResizeBilinearUint8Nhwc,
-                              AlignCornersResizeBilinearTest<DataType::QAsymmU8>,
-                              DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(HalfPixelCentersResizeBilinearNhwc,
+                                 ClContextControlFixture,
+                                 HalfPixelCentersResizeBilinearTest<DataType::Float32>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AlignCornersResizeBilinearNhwc,
+                                 ClContextControlFixture,
+                                 AlignCornersResizeBilinearTest<DataType::Float32>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(HalfPixelCentersResizeBilinearInt8Nhwc,
+                                 ClContextControlFixture,
+                                 HalfPixelCentersResizeBilinearTest<DataType::QAsymmS8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AlignCornersResizeBilinearInt8Nhwc,
+                                 ClContextControlFixture,
+                                 AlignCornersResizeBilinearTest<DataType::QAsymmS8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(HalfPixelCentersResizeBilinearUint8Nhwc,
+                                 ClContextControlFixture,
+                                 HalfPixelCentersResizeBilinearTest<DataType::QAsymmU8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AlignCornersResizeBilinearUint8Nhwc,
+                                 ClContextControlFixture,
+                                 AlignCornersResizeBilinearTest<DataType::QAsymmU8>,
+                                 DataLayout::NHWC)
 
 // Resize NearestNeighbor - NCHW
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeNearestNeighbor,
-                              SimpleResizeNearestNeighborTest<DataType::Float32>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeNearestNeighborInt8,
-                              SimpleResizeNearestNeighborTest<DataType::QAsymmS8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeNearestNeighborUint8,
-                              SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborNop,
-                              ResizeNearestNeighborNopTest<DataType::Float32>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborNopInt8,
-                              ResizeNearestNeighborNopTest<DataType::QAsymmS8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborNopUint8,
-                              ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborSqMin,
-                              ResizeNearestNeighborSqMinTest<DataType::Float32>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborSqMinInt8,
-                              ResizeNearestNeighborSqMinTest<DataType::QAsymmS8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborSqMinUint8,
-                              ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborMin,
-                              ResizeNearestNeighborMinTest<DataType::Float32>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborMinInt8,
-                              ResizeNearestNeighborMinTest<DataType::QAsymmS8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborMinUint8,
-                              ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborMag,
-                              ResizeNearestNeighborMagTest<DataType::Float32>,
-                              DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborMagInt8,
-                              ResizeNearestNeighborMagTest<DataType::QAsymmS8>,
-                              DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborMagUint8,
-                              ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
-                              DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleResizeNearestNeighbor,
+                                 ClContextControlFixture,
+                                 SimpleResizeNearestNeighborTest<DataType::Float32>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleResizeNearestNeighborInt8,
+                                 ClContextControlFixture,
+                                 SimpleResizeNearestNeighborTest<DataType::QAsymmS8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleResizeNearestNeighborUint8,
+                                 ClContextControlFixture,
+                                 SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborNop,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborNopTest<DataType::Float32>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborNopInt8,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborNopTest<DataType::QAsymmS8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborNopUint8,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborSqMin,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborSqMinTest<DataType::Float32>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborSqMinInt8,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborSqMinTest<DataType::QAsymmS8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborSqMinUint8,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborMin,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborMinTest<DataType::Float32>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborMinInt8,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborMinTest<DataType::QAsymmS8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborMinUint8,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborMag,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborMagTest<DataType::Float32>,
+                                 DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborMagInt8,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborMagTest<DataType::QAsymmS8>,
+                                 DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborMagUint8,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
+                                 DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(HalfPixelCentersResizeNearestNeighbour,
-                              HalfPixelCentersResizeNearestNeighbourTest<DataType::Float32>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AlignCornersResizeNearestNeighbour,
-                              AlignCornersResizeNearestNeighbourTest<DataType::Float32>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(HalfPixelCentersResizeNearestNeighbourInt8,
-                              HalfPixelCentersResizeNearestNeighbourTest<DataType::QAsymmS8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AlignCornersResizeNearestNeighbourInt8,
-                              AlignCornersResizeNearestNeighbourTest<DataType::QAsymmS8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(HalfPixelCentersResizeNearestNeighbourUint8,
-                              HalfPixelCentersResizeNearestNeighbourTest<DataType::QAsymmU8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AlignCornersResizeNearestNeighbourUint8,
-                              AlignCornersResizeNearestNeighbourTest<DataType::QAsymmU8>,
-                              DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(HalfPixelCentersResizeNearestNeighbour,
+                                 ClContextControlFixture,
+                                 HalfPixelCentersResizeNearestNeighbourTest<DataType::Float32>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AlignCornersResizeNearestNeighbour,
+                                 ClContextControlFixture,
+                                 AlignCornersResizeNearestNeighbourTest<DataType::Float32>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(HalfPixelCentersResizeNearestNeighbourInt8,
+                                 ClContextControlFixture,
+                                 HalfPixelCentersResizeNearestNeighbourTest<DataType::QAsymmS8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AlignCornersResizeNearestNeighbourInt8,
+                                 ClContextControlFixture,
+                                 AlignCornersResizeNearestNeighbourTest<DataType::QAsymmS8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(HalfPixelCentersResizeNearestNeighbourUint8,
+                                 ClContextControlFixture,
+                                 HalfPixelCentersResizeNearestNeighbourTest<DataType::QAsymmU8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AlignCornersResizeNearestNeighbourUint8,
+                                 ClContextControlFixture,
+                                 AlignCornersResizeNearestNeighbourTest<DataType::QAsymmU8>,
+                                 DataLayout::NCHW)
 
 // Resize NearestNeighbor - NHWC
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborNopNhwc,
-                              ResizeNearestNeighborNopTest<DataType::Float32>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborNopInt8Nhwc,
-                              ResizeNearestNeighborNopTest<DataType::QAsymmS8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborNopUint8Nhwc,
-                              ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeNearestNeighborNhwc,
-                              SimpleResizeNearestNeighborTest<DataType::Float32>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeNearestNeighborInt8Nhwc,
-                              SimpleResizeNearestNeighborTest<DataType::QAsymmS8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeNearestNeighborUint8Nhwc,
-                              SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborSqMinNhwc,
-                              ResizeNearestNeighborSqMinTest<DataType::Float32>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborSqMinInt8Nhwc,
-                              ResizeNearestNeighborSqMinTest<DataType::QAsymmS8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborSqMinUint8Nhwc,
-                              ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborMinNhwc,
-                              ResizeNearestNeighborMinTest<DataType::Float32>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborMinInt8Nhwc,
-                              ResizeNearestNeighborMinTest<DataType::QAsymmS8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborMinUint8Nhwc,
-                              ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborMagNhwc,
-                              ResizeNearestNeighborMagTest<DataType::Float32>,
-                              DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborMagInt8Nhwc,
-                              ResizeNearestNeighborMagTest<DataType::QAsymmS8>,
-                              DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborMagUint8Nhwc,
-                              ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
-                              DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborNopNhwc,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborNopTest<DataType::Float32>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborNopInt8Nhwc,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborNopTest<DataType::QAsymmS8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborNopUint8Nhwc,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleResizeNearestNeighborNhwc,
+                                 ClContextControlFixture,
+                                 SimpleResizeNearestNeighborTest<DataType::Float32>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleResizeNearestNeighborInt8Nhwc,
+                                 ClContextControlFixture,
+                                 SimpleResizeNearestNeighborTest<DataType::QAsymmS8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleResizeNearestNeighborUint8Nhwc,
+                                 ClContextControlFixture,
+                                 SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborSqMinNhwc,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborSqMinTest<DataType::Float32>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborSqMinInt8Nhwc,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborSqMinTest<DataType::QAsymmS8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborSqMinUint8Nhwc,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborMinNhwc,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborMinTest<DataType::Float32>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborMinInt8Nhwc,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborMinTest<DataType::QAsymmS8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborMinUint8Nhwc,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborMagNhwc,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborMagTest<DataType::Float32>,
+                                 DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborMagInt8Nhwc,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborMagTest<DataType::QAsymmS8>,
+                                 DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborMagUint8Nhwc,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
+                                 DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(HalfPixelCentersResizeNearestNeighbourNhwc,
-                              HalfPixelCentersResizeNearestNeighbourTest<DataType::Float32>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AlignCornersResizeNearestNeighbourNhwc,
-                              AlignCornersResizeNearestNeighbourTest<DataType::Float32>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(HalfPixelCentersResizeNearestNeighbourInt8Nhwc,
-                              HalfPixelCentersResizeNearestNeighbourTest<DataType::QAsymmS8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AlignCornersResizeNearestNeighbourInt8Nhwc,
-                              AlignCornersResizeNearestNeighbourTest<DataType::QAsymmS8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(HalfPixelCentersResizeNearestNeighbourUint8Nhwc,
-                              HalfPixelCentersResizeNearestNeighbourTest<DataType::QAsymmU8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AlignCornersResizeNearestNeighbourUint8Nhwc,
-                              AlignCornersResizeNearestNeighbourTest<DataType::QAsymmU8>,
-                              DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(HalfPixelCentersResizeNearestNeighbourNhwc,
+                                 ClContextControlFixture,
+                                 HalfPixelCentersResizeNearestNeighbourTest<DataType::Float32>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AlignCornersResizeNearestNeighbourNhwc,
+                                 ClContextControlFixture,
+                                 AlignCornersResizeNearestNeighbourTest<DataType::Float32>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(HalfPixelCentersResizeNearestNeighbourInt8Nhwc,
+                                 ClContextControlFixture,
+                                 HalfPixelCentersResizeNearestNeighbourTest<DataType::QAsymmS8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AlignCornersResizeNearestNeighbourInt8Nhwc,
+                                 ClContextControlFixture,
+                                 AlignCornersResizeNearestNeighbourTest<DataType::QAsymmS8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(HalfPixelCentersResizeNearestNeighbourUint8Nhwc,
+                                 ClContextControlFixture,
+                                 HalfPixelCentersResizeNearestNeighbourTest<DataType::QAsymmU8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AlignCornersResizeNearestNeighbourUint8Nhwc,
+                                 ClContextControlFixture,
+                                 AlignCornersResizeNearestNeighbourTest<DataType::QAsymmU8>,
+                                 DataLayout::NHWC)
 
 // Rsqrt
-ARMNN_AUTO_TEST_CASE_WITH_THF(Rsqrt2d, Rsqrt2dTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Rsqrt3d, Rsqrt3dTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RsqrtZero, RsqrtZeroTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RsqrtNegative, RsqrtNegativeTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Rsqrt2d, ClContextControlFixture, Rsqrt2dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Rsqrt3d, ClContextControlFixture, Rsqrt3dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RsqrtZero, ClContextControlFixture, RsqrtZeroTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RsqrtNegative, ClContextControlFixture, RsqrtNegativeTest<DataType::Float32>)
 
 // Quantize
-ARMNN_AUTO_TEST_CASE_WITH_THF(QuantizeSimpleUint8, QuantizeSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(QuantizeClampUint8, QuantizeClampUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(QuantizeSimpleUint8, ClContextControlFixture, QuantizeSimpleUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(QuantizeClampUint8, ClContextControlFixture, QuantizeClampUint8Test)
 
 // Dequantize
-ARMNN_AUTO_TEST_CASE(DequantizeSimpleUint8, DequantizeSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE(DequantizeOffsetUint8, DequantizeOffsetUint8Test)
-ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt16, DequantizeSimpleInt16Test)
-ARMNN_AUTO_TEST_CASE(DequantizeSimpleUint8ToFp16, DequantizeSimpleUint8ToFp16Test)
-ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt16ToFp16, DequantizeSimpleInt16ToFp16Test)
+ARMNN_AUTO_TEST_FIXTURE(DequantizeSimpleUint8, ClContextControlFixture, DequantizeSimpleUint8Test)
+ARMNN_AUTO_TEST_FIXTURE(DequantizeOffsetUint8, ClContextControlFixture, DequantizeOffsetUint8Test)
+ARMNN_AUTO_TEST_FIXTURE(DequantizeSimpleInt16, ClContextControlFixture, DequantizeSimpleInt16Test)
+ARMNN_AUTO_TEST_FIXTURE(DequantizeSimpleUint8ToFp16, ClContextControlFixture, DequantizeSimpleUint8ToFp16Test)
+ARMNN_AUTO_TEST_FIXTURE(DequantizeSimpleInt16ToFp16, ClContextControlFixture, DequantizeSimpleInt16ToFp16Test)
 
 // Transpose
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleTransposeFloat32, SimpleTransposeTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(TransposeFloat32ValueSet1Test, TransposeValueSet1Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(TransposeFloat32ValueSet2Test, TransposeValueSet2Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(TransposeFloat32ValueSet3Test, TransposeValueSet3Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleTransposeQASymmS8, SimpleTransposeTest<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(TransposeQASymmS8ValueSet1Test, TransposeValueSet1Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(TransposeQASymmS8ValueSet2Test, TransposeValueSet2Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(TransposeQASymmS8ValueSet3Test, TransposeValueSet3Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleTransposeQASymm8, SimpleTransposeTest<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(TransposeQASymm8ValueSet1Test, TransposeValueSet1Test<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(TransposeQASymm8ValueSet2Test, TransposeValueSet2Test<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(TransposeQASymm8ValueSet3Test, TransposeValueSet3Test<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleTransposeQSymm16, SimpleTransposeTest<DataType::QSymmS16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(TransposeQSymm16ValueSet1Test, TransposeValueSet1Test<DataType::QSymmS16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(TransposeQSymm16ValueSet2Test, TransposeValueSet2Test<DataType::QSymmS16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(TransposeQSymm16ValueSet3Test, TransposeValueSet3Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SimpleTransposeFloat32, ClContextControlFixture, SimpleTransposeTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    TransposeFloat32ValueSet1Test, ClContextControlFixture, TransposeValueSet1Test<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    TransposeFloat32ValueSet2Test, ClContextControlFixture, TransposeValueSet2Test<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    TransposeFloat32ValueSet3Test, ClContextControlFixture, TransposeValueSet3Test<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SimpleTransposeQASymmS8, ClContextControlFixture, SimpleTransposeTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    TransposeQASymmS8ValueSet1Test, ClContextControlFixture, TransposeValueSet1Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    TransposeQASymmS8ValueSet2Test, ClContextControlFixture, TransposeValueSet2Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    TransposeQASymmS8ValueSet3Test, ClContextControlFixture, TransposeValueSet3Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SimpleTransposeQASymm8, ClContextControlFixture, SimpleTransposeTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    TransposeQASymm8ValueSet1Test, ClContextControlFixture, TransposeValueSet1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    TransposeQASymm8ValueSet2Test, ClContextControlFixture, TransposeValueSet2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    TransposeQASymm8ValueSet3Test, ClContextControlFixture, TransposeValueSet3Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SimpleTransposeQSymm16, ClContextControlFixture, SimpleTransposeTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    TransposeQSymm16ValueSet1Test, ClContextControlFixture, TransposeValueSet1Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    TransposeQSymm16ValueSet2Test, ClContextControlFixture, TransposeValueSet2Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    TransposeQSymm16ValueSet3Test, ClContextControlFixture, TransposeValueSet3Test<DataType::QSymmS16>)
 
 // TransposeConvolution2d
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleTransposeConvolution2dFloatNchw,
-                              SimpleTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
-                              true,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleTransposeConvolution2dFloatNhwc,
-                              SimpleTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
-                              true,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleTransposeConvolution2dUint8Nchw,
-                              SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
-                              true,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleTransposeConvolution2dUint8Nhwc,
-                              SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
-                              true,
-                              DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleTransposeConvolution2dFloatNchw,
+                                 ClContextControlFixture,
+                                 SimpleTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
+                                 true,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleTransposeConvolution2dFloatNhwc,
+                                 ClContextControlFixture,
+                                 SimpleTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
+                                 true,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleTransposeConvolution2dUint8Nchw,
+                                 ClContextControlFixture,
+                                 SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
+                                 true,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleTransposeConvolution2dUint8Nhwc,
+                                 ClContextControlFixture,
+                                 SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
+                                 true,
+                                 DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedSimpleTransposeConvolution2dFloatNchw,
-                              SimpleTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
-                              false,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedSimpleTransposeConvolution2dFloatNhwc,
-                              SimpleTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
-                              true,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedSimpleTransposeConvolution2dUint8Nchw,
-                              SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
-                              true,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedSimpleTransposeConvolution2dUint8Nhwc,
-                              SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
-                              true,
-                              DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedSimpleTransposeConvolution2dFloatNchw,
+                                 ClContextControlFixture,
+                                 SimpleTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
+                                 false,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedSimpleTransposeConvolution2dFloatNhwc,
+                                 ClContextControlFixture,
+                                 SimpleTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
+                                 true,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedSimpleTransposeConvolution2dUint8Nchw,
+                                 ClContextControlFixture,
+                                 SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
+                                 true,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedSimpleTransposeConvolution2dUint8Nhwc,
+                                 ClContextControlFixture,
+                                 SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
+                                 true,
+                                 DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(PaddedTransposeConvolution2dFloatNchw,
-                              PaddedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
-                              true,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PaddedTransposeConvolution2dFloatNhwc,
-                              PaddedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
-                              true,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PaddedTransposeConvolution2dUint8Nchw,
-                              PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
-                              true,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PaddedTransposeConvolution2dUint8Nhwc,
-                              PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
-                              true,
-                              DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PaddedTransposeConvolution2dFloatNchw,
+                                 ClContextControlFixture,
+                                 PaddedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
+                                 true,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PaddedTransposeConvolution2dFloatNhwc,
+                                 ClContextControlFixture,
+                                 PaddedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
+                                 true,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PaddedTransposeConvolution2dUint8Nchw,
+                                 ClContextControlFixture,
+                                 PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
+                                 true,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PaddedTransposeConvolution2dUint8Nhwc,
+                                 ClContextControlFixture,
+                                 PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
+                                 true,
+                                 DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedPaddedTransposeConvolution2dFloatNchw,
-                              PaddedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
-                              false,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedPaddedTransposeConvolution2dFloatNhwc,
-                              PaddedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
-                              true,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedPaddedTransposeConvolution2dUint8Nchw,
-                              PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
-                              true,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedPaddedTransposeConvolution2dUint8Nhwc,
-                              PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
-                              true,
-                              DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedPaddedTransposeConvolution2dFloatNchw,
+                                 ClContextControlFixture,
+                                 PaddedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
+                                 false,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedPaddedTransposeConvolution2dFloatNhwc,
+                                 ClContextControlFixture,
+                                 PaddedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
+                                 true,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedPaddedTransposeConvolution2dUint8Nchw,
+                                 ClContextControlFixture,
+                                 PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
+                                 true,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedPaddedTransposeConvolution2dUint8Nhwc,
+                                 ClContextControlFixture,
+                                 PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
+                                 true,
+                                 DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedTransposeConvolution2dFloatNchw,
-                              StridedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
-                              true,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedTransposeConvolution2dFloatNhwc,
-                              StridedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
-                              true,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedTransposeConvolution2dUint8Nchw,
-                              StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
-                              true,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedTransposeConvolution2dUint8Nhwc,
-                              StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
-                              true,
-                              DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedTransposeConvolution2dFloatNchw,
+                                 ClContextControlFixture,
+                                 StridedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
+                                 true,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedTransposeConvolution2dFloatNhwc,
+                                 ClContextControlFixture,
+                                 StridedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
+                                 true,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedTransposeConvolution2dUint8Nchw,
+                                 ClContextControlFixture,
+                                 StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
+                                 true,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedTransposeConvolution2dUint8Nhwc,
+                                 ClContextControlFixture,
+                                 StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
+                                 true,
+                                 DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedStridedTransposeConvolution2dFloatNchw,
-                              StridedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
-                              false,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedStridedTransposeConvolution2dFloatNhwc,
-                              StridedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
-                              true,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedStridedTransposeConvolution2dUint8Nchw,
-                              StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
-                              true,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedStridedTransposeConvolution2dUint8Nhwc,
-                              StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
-                              true,
-                              DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedStridedTransposeConvolution2dFloatNchw,
+                                 ClContextControlFixture,
+                                 StridedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
+                                 false,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedStridedTransposeConvolution2dFloatNhwc,
+                                 ClContextControlFixture,
+                                 StridedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
+                                 true,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedStridedTransposeConvolution2dUint8Nchw,
+                                 ClContextControlFixture,
+                                 StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
+                                 true,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedStridedTransposeConvolution2dUint8Nhwc,
+                                 ClContextControlFixture,
+                                 StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
+                                 true,
+                                 DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(MultiChannelTransposeConvolution2dFloatNchw,
-                              MultiChannelTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MultiChannelTransposeConvolution2dFloatNhwc,
-                              MultiChannelTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MultiChannelTransposeConvolution2dUint8Nchw,
-                              MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MultiChannelTransposeConvolution2dUint8Nhwc,
-                              MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
-                              DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MultiChannelTransposeConvolution2dFloatNchw,
+                                 ClContextControlFixture,
+                                 MultiChannelTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MultiChannelTransposeConvolution2dFloatNhwc,
+                                 ClContextControlFixture,
+                                 MultiChannelTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MultiChannelTransposeConvolution2dUint8Nchw,
+                                 ClContextControlFixture,
+                                 MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MultiChannelTransposeConvolution2dUint8Nhwc,
+                                 ClContextControlFixture,
+                                 MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
+                                 DataLayout::NHWC)
 
 // Abs
-ARMNN_AUTO_TEST_CASE_WITH_THF(Abs2d, Abs2dTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Abs3d, Abs3dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Abs2d, ClContextControlFixture, Abs2dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Abs3d, ClContextControlFixture, Abs3dTest<DataType::Float32>)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(AbsZero, AbsZeroTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AbsZero, ClContextControlFixture, AbsZeroTest<DataType::Float32>)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(Abs2dFloat16, Abs2dTest<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Abs3dFloat16, Abs3dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Abs2dFloat16, ClContextControlFixture, Abs2dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Abs3dFloat16, ClContextControlFixture, Abs3dTest<DataType::Float16>)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(AbsZeroFloat16, AbsZeroTest<DataType::Float16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AbsZeroFloat16, ClContextControlFixture, AbsZeroTest<DataType::Float16>)
 
 // ArgMinMax
-ARMNN_AUTO_TEST_CASE_WITH_THF(ArgMinFloat32, ArgMinSimpleTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ArgMaxFloat32, ArgMaxSimpleTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ArgMinChannel, ArgMinChannelTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ArgMaxChannel, ArgMaxChannelTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ArgMaxHeight, ArgMaxHeightTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ArgMinWidth, ArgMinWidthTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ArgMinFloat32, ClContextControlFixture, ArgMinSimpleTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ArgMaxFloat32, ClContextControlFixture, ArgMaxSimpleTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ArgMinChannel, ClContextControlFixture, ArgMinChannelTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ArgMaxChannel, ClContextControlFixture, ArgMaxChannelTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ArgMaxHeight, ClContextControlFixture, ArgMaxHeightTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ArgMinWidth, ClContextControlFixture, ArgMinWidthTest<DataType::Float32>)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(ArgMinQAsymm8, ArgMinSimpleTest<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ArgMaxQAsymm8, ArgMaxSimpleTest<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ArgMinChannelQAsymm8, ArgMinChannelTest<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ArgMaxChannelQAsymm8, ArgMaxChannelTest<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ArgMaxHeightQAsymm8, ArgMaxHeightTest<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ArgMinWidthQAsymm8, ArgMinWidthTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ArgMinQAsymm8, ClContextControlFixture, ArgMinSimpleTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ArgMaxQAsymm8, ClContextControlFixture, ArgMaxSimpleTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ArgMinChannelQAsymm8, ClContextControlFixture, ArgMinChannelTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ArgMaxChannelQAsymm8, ClContextControlFixture, ArgMaxChannelTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ArgMaxHeightQAsymm8, ClContextControlFixture, ArgMaxHeightTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ArgMinWidthQAsymm8, ClContextControlFixture, ArgMinWidthTest<DataType::QAsymmU8>)
 
 // Neg
-ARMNN_AUTO_TEST_CASE_WITH_THF(Neg2d, Neg2dTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Neg3d, Neg3dTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(NegZero, NegZeroTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(NegNegative, NegNegativeTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Neg2dFloat16, Neg2dTest<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Neg3dFloat16, Neg3dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Neg2d, ClContextControlFixture, Neg2dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Neg3d, ClContextControlFixture, Neg3dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(NegZero, ClContextControlFixture, NegZeroTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(NegNegative, ClContextControlFixture, NegNegativeTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Neg2dFloat16, ClContextControlFixture, Neg2dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Neg3dFloat16, ClContextControlFixture, Neg3dTest<DataType::Float16>)
 
 // Exp
-ARMNN_AUTO_TEST_CASE_WITH_THF(Exp2d, Exp2dTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Exo3d, Exp3dTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ExpZero, ExpZeroTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ExpNegative, ExpNegativeTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Exp2dFloat16, Exp2dTest<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Exp3dFloat16, Exp3dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Exp2d, ClContextControlFixture, Exp2dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Exp3d, ClContextControlFixture, Exp3dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ExpZero, ClContextControlFixture, ExpZeroTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ExpNegative, ClContextControlFixture, ExpNegativeTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Exp2dFloat16, ClContextControlFixture, Exp2dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Exp3dFloat16, ClContextControlFixture, Exp3dTest<DataType::Float16>)
 
 // Logical
-ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalNot, LogicalNotTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalNotInt, LogicalNotIntTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogicalNot, ClContextControlFixture, LogicalNotTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogicalNotInt, ClContextControlFixture, LogicalNotIntTest)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAnd, LogicalAndTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAndInt, LogicalAndIntTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAndBroadcast1, LogicalAndBroadcast1Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAndBroadcast2, LogicalAndBroadcast2Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAndBroadcast3, LogicalAndBroadcast3Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogicalAnd, ClContextControlFixture, LogicalAndTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogicalAndInt, ClContextControlFixture, LogicalAndIntTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogicalAndBroadcast1, ClContextControlFixture, LogicalAndBroadcast1Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogicalAndBroadcast2, ClContextControlFixture, LogicalAndBroadcast2Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogicalAndBroadcast3, ClContextControlFixture, LogicalAndBroadcast3Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOr, LogicalOrTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOrInt, LogicalOrIntTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOrBroadcast1, LogicalOrBroadcast1Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOrBroadcast2, LogicalOrBroadcast2Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOrBroadcast3, LogicalOrBroadcast3Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogicalOr, ClContextControlFixture, LogicalOrTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogicalOrInt, ClContextControlFixture, LogicalOrIntTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogicalOrBroadcast1, ClContextControlFixture, LogicalOrBroadcast1Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogicalOrBroadcast2, ClContextControlFixture, LogicalOrBroadcast2Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogicalOrBroadcast3, ClContextControlFixture, LogicalOrBroadcast3Test)
 
 // ReduceSum
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceSumFloat32, ReduceSumSimpleTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceSumSingleAxisFloat32_1, ReduceSumSingleAxisTest1<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceSumSingleAxisFloat32_2, ReduceSumSingleAxisTest2<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceSumSingleAxisFloat32_3, ReduceSumSingleAxisTest3<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ReduceSumFloat32, ClContextControlFixture, ReduceSumSimpleTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    ReduceSumSingleAxisFloat32_1, ClContextControlFixture, ReduceSumSingleAxisTest1<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    ReduceSumSingleAxisFloat32_2, ClContextControlFixture, ReduceSumSingleAxisTest2<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    ReduceSumSingleAxisFloat32_3, ClContextControlFixture, ReduceSumSingleAxisTest3<DataType::Float32>)
 
 // ReduceMax
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceMaxFloat32, ReduceMaxSimpleTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceMaxNegativeAxisFloat32, ReduceMaxNegativeAxisTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceMax2Float32, ReduceMaxSimpleTest2<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ReduceMaxFloat32, ClContextControlFixture, ReduceMaxSimpleTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    ReduceMaxNegativeAxisFloat32, ClContextControlFixture, ReduceMaxNegativeAxisTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ReduceMax2Float32, ClContextControlFixture, ReduceMaxSimpleTest2<DataType::Float32>)
 
 // ReduceMin
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceMinFloat32, ReduceMinSimpleTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceMinNegativeAxisFloat32, ReduceMinNegativeAxisTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ReduceMinFloat32, ClContextControlFixture, ReduceMinSimpleTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    ReduceMinNegativeAxisFloat32, ClContextControlFixture, ReduceMinNegativeAxisTest<DataType::Float32>)
 
 // Cast
-ARMNN_AUTO_TEST_CASE_WITH_THF(CastInt32ToFloat, CastInt32ToFloat2dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(CastFloat16ToFloat32, CastFloat16ToFloat322dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(CastFloatToFloat16, CastFloat32ToFloat162dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(CastFloatToUInt8, CastFloat32ToUInt82dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(CastInt32ToFloat, ClContextControlFixture, CastInt32ToFloat2dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(CastFloat16ToFloat32, ClContextControlFixture, CastFloat16ToFloat322dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(CastFloatToFloat16, ClContextControlFixture, CastFloat32ToFloat162dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(CastFloatToUInt8, ClContextControlFixture, CastFloat32ToUInt82dTest)
 
 #if defined(ARMNNREF_ENABLED)
 
+TEST_CASE_FIXTURE(ClContextControlFixture, "ClContextControlFixture") {}
+
 // The ARMNN_COMPARE_REF_AUTO_TEST_CASE and the ARMNN_COMPARE_REF_FIXTURE_TEST_CASE test units are not available
 // if the reference backend is not built
 
@@ -1386,4 +1990,4 @@
 
 #endif
 
-BOOST_AUTO_TEST_SUITE_END()
+}
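
For reference, the suite/fixture shape these hunks converge on is plain doctest: a TEST_SUITE block with braces, and TEST_CASE_FIXTURE where a per-case fixture is needed. The following is a minimal standalone sketch, assuming the ARMNN_AUTO_TEST_FIXTURE_WITH_THF helpers ultimately reduce to this shape; ExampleSuite, ExampleFixture and the checks are placeholders, not names from this patch.

// Minimal doctest sketch (placeholder names, standalone build only).
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>

// Per-case fixture: constructed before and destroyed after every case that
// names it, so it plays the same role as a Boost.Test fixture.
struct ExampleFixture
{
    int m_Value = 42;
};

TEST_SUITE("ExampleSuite")
{
// BOOST_AUTO_TEST_CASE(PlainCase) -> TEST_CASE("PlainCase")
TEST_CASE("PlainCase")
{
    CHECK(1 + 1 == 2);
}

// BOOST_FIXTURE_TEST_CASE(FixtureCase, ExampleFixture)
//     -> TEST_CASE_FIXTURE(ExampleFixture, "FixtureCase")
// Fixture members are visible directly because the generated test body
// derives from the fixture type.
TEST_CASE_FIXTURE(ExampleFixture, "FixtureCase")
{
    CHECK(m_Value == 42);
}
}
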
diff --git a/src/backends/cl/test/ClMemCopyTests.cpp b/src/backends/cl/test/ClMemCopyTests.cpp
index 1048e73..98b873f 100644
--- a/src/backends/cl/test/ClMemCopyTests.cpp
+++ b/src/backends/cl/test/ClMemCopyTests.cpp
@@ -11,44 +11,44 @@
 #include <reference/RefWorkloadFactory.hpp>
 #include <reference/test/RefWorkloadFactoryHelper.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(ClMemCopy)
-
-BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndGpu)
+TEST_SUITE("ClMemCopy")
+{
+TEST_CASE("CopyBetweenCpuAndGpu")
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::RefWorkloadFactory, armnn::ClWorkloadFactory, armnn::DataType::Float32>(false);
     auto predResult = CompareTensors(result.m_ActualData,  result.m_ExpectedData,
                                      result.m_ActualShape, result.m_ExpectedShape);
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndCpu)
+TEST_CASE("CopyBetweenGpuAndCpu")
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::ClWorkloadFactory, armnn::RefWorkloadFactory, armnn::DataType::Float32>(false);
     auto predResult = CompareTensors(result.m_ActualData,  result.m_ExpectedData,
                                      result.m_ActualShape, result.m_ExpectedShape);
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndGpuWithSubtensors)
+TEST_CASE("CopyBetweenCpuAndGpuWithSubtensors")
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::RefWorkloadFactory, armnn::ClWorkloadFactory, armnn::DataType::Float32>(true);
     auto predResult = CompareTensors(result.m_ActualData,  result.m_ExpectedData,
                                      result.m_ActualShape, result.m_ExpectedShape);
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndCpuWithSubtensors)
+TEST_CASE("CopyBetweenGpuAndCpuWithSubtensors")
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::ClWorkloadFactory, armnn::RefWorkloadFactory, armnn::DataType::Float32>(true);
     auto predResult = CompareTensors(result.m_ActualData,  result.m_ExpectedData,
                                      result.m_ActualShape, result.m_ExpectedShape);
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/cl/test/ClOptimizedNetworkTests.cpp b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
index a41c5f8..d91e9b4 100644
--- a/src/backends/cl/test/ClOptimizedNetworkTests.cpp
+++ b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
@@ -14,12 +14,11 @@
 
 #include <Filesystem.hpp>
 
+#include <doctest/doctest.h>
 
-#include <boost/test/unit_test.hpp>
-
-BOOST_AUTO_TEST_SUITE(ClOptimizedNetwork)
-
-BOOST_AUTO_TEST_CASE(OptimizeValidateGpuDeviceSupportLayerNoFallback)
+TEST_SUITE("ClOptimizedNetwork")
+{
+TEST_CASE("OptimizeValidateGpuDeviceSupportLayerNoFallback")
 {
     // build up the structure of the network
     armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -35,7 +34,7 @@
 
     std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
-    BOOST_CHECK(optNet);
+    CHECK(optNet);
     // validate workloads
     armnn::ClWorkloadFactory fact =
         ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
@@ -43,13 +42,13 @@
     const armnn::Graph& theGraph = GetGraphForTesting(optNet.get());
     for (auto&& layer : theGraph)
     {
-        BOOST_CHECK(layer->GetBackendId() == armnn::Compute::GpuAcc);
-        BOOST_CHECK_NO_THROW(
+        CHECK(layer->GetBackendId() == armnn::Compute::GpuAcc);
+        CHECK_NOTHROW(
             layer->CreateWorkload(fact));
     }
 }
 
-BOOST_AUTO_TEST_CASE(FP16TurboModeTestOnGpuAcc)
+TEST_CASE("FP16TurboModeTestOnGpuAcc")
 {
     // Test to check when Fp16 Turbo mode set
     // it converts the Fp32 network to Fp16 Network
@@ -96,17 +95,17 @@
     const armnn::Graph& graph = GetGraphForTesting(optimizedNet.get());
 
     // Tests that all layers are present in the graph.
-    BOOST_TEST(graph.GetNumLayers() == 5);
+    CHECK(graph.GetNumLayers() == 5);
 
     // Tests that the vertices exist and have correct names.
-    BOOST_TEST(GraphHasNamedLayer(graph, "input layer"));
-    BOOST_TEST(GraphHasNamedLayer(graph, "convert_fp32_to_fp16-0-input layer"));
-    BOOST_TEST(GraphHasNamedLayer(graph, "activation layer"));
-    BOOST_TEST(GraphHasNamedLayer(graph, "convert_fp16_to_fp32-0-output layer"));
-    BOOST_TEST(GraphHasNamedLayer(graph, "output layer"));
+    CHECK(GraphHasNamedLayer(graph, "input layer"));
+    CHECK(GraphHasNamedLayer(graph, "convert_fp32_to_fp16-0-input layer"));
+    CHECK(GraphHasNamedLayer(graph, "activation layer"));
+    CHECK(GraphHasNamedLayer(graph, "convert_fp16_to_fp32-0-output layer"));
+    CHECK(GraphHasNamedLayer(graph, "output layer"));
 }
 
-BOOST_AUTO_TEST_CASE(FastMathEnabledTestOnGpuAcc)
+TEST_CASE("FastMathEnabledTestOnGpuAcc")
 {
     armnn::INetworkPtr net(armnn::INetwork::Create());
 
@@ -127,16 +126,16 @@
     armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
     *net, backends, runtime->GetDeviceSpec(), optimizerOptions);
 
-    BOOST_CHECK(optimizedNet);
+    CHECK(optimizedNet);
 
     auto modelOptionsOut = GetModelOptionsForTesting(optimizedNet.get());
 
-    BOOST_TEST(modelOptionsOut.size() == 1);
-    BOOST_TEST(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
-    BOOST_TEST(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
+    CHECK(modelOptionsOut.size() == 1);
+    CHECK(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
+    CHECK(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
 }
 
-BOOST_AUTO_TEST_CASE(CheckMLGOTuningFile)
+TEST_CASE("CheckMLGOTuningFile")
 {
     class ClBackendContextTestClass : public armnn::ClBackendContext
     {
@@ -202,7 +201,7 @@
     catch (std::exception &e)
     {
         std::cerr << "Unable to write to file at location [" << validFile.c_str() << "] : " << e.what() << std::endl;
-        BOOST_TEST(false);
+        CHECK(false);
     }
 
     armnn::IRuntime::CreationOptions creationOptions1;
@@ -216,7 +215,7 @@
 
     creationOptions1.m_BackendOptions.emplace_back(validOptions);
     ClBackendContextTestClass clBackendContext1(creationOptions1);
-    BOOST_TEST(clBackendContext1.call_reload_from_file());
+    CHECK(clBackendContext1.call_reload_from_file());
 
     armnn::BackendOptions invalidOptions
             {
@@ -229,7 +228,7 @@
     armnn::IRuntime::CreationOptions creationOptions2;
     creationOptions2.m_BackendOptions.emplace_back(invalidOptions);
     ClBackendContextTestClass clBackendContext2(creationOptions2);
-    BOOST_TEST(clBackendContext2.call_reload_from_file() == false);
+    CHECK(clBackendContext2.call_reload_from_file() == false);
 
     armnn::BackendOptions invalidPathOptions
             {
@@ -242,7 +241,7 @@
     armnn::IRuntime::CreationOptions creationOptions3;
     creationOptions3.m_BackendOptions.emplace_back(invalidPathOptions);
     ClBackendContextTestClass clBackendContext3(creationOptions3);
-    BOOST_TEST(clBackendContext3.call_reload_from_file() == false);
+    CHECK(clBackendContext3.call_reload_from_file() == false);
 }
 
-BOOST_AUTO_TEST_SUITE_END();
+}
diff --git a/src/backends/cl/test/ClRuntimeTests.cpp b/src/backends/cl/test/ClRuntimeTests.cpp
index 33e86b6..db01fa7 100644
--- a/src/backends/cl/test/ClRuntimeTests.cpp
+++ b/src/backends/cl/test/ClRuntimeTests.cpp
@@ -11,15 +11,15 @@
 #include <test/ProfilingTestUtils.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #ifdef WITH_VALGRIND
 #include <valgrind/memcheck.h>
 #endif
 
-BOOST_AUTO_TEST_SUITE(ClRuntime)
-
-BOOST_AUTO_TEST_CASE(RuntimeValidateGpuDeviceSupportLayerNoFallback)
+TEST_SUITE("ClRuntime")
+{
+TEST_CASE("RuntimeValidateGpuDeviceSupportLayerNoFallback")
 {
     // build up the structure of the network
     armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -35,17 +35,17 @@
 
     std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
-    BOOST_CHECK(optNet);
+    CHECK(optNet);
 
     // Load it into the runtime. It should success.
     armnn::NetworkId netId;
-    BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == armnn::Status::Success);
+    CHECK(runtime->LoadNetwork(netId, std::move(optNet)) == armnn::Status::Success);
 }
 
 #ifdef ARMNN_LEAK_CHECKING_ENABLED
-BOOST_AUTO_TEST_CASE(RuntimeMemoryLeaksGpuAcc)
+TEST_CASE("RuntimeMemoryLeaksGpuAcc")
 {
-    BOOST_TEST(ARMNN_LEAK_CHECKER_IS_ACTIVE());
+    CHECK(ARMNN_LEAK_CHECKER_IS_ACTIVE());
     armnn::IRuntime::CreationOptions options;
     armnn::RuntimeImpl runtime(options);
     armnn::RuntimeLoadedNetworksReserve(&runtime);
@@ -59,21 +59,21 @@
 
     {
         ARMNN_SCOPED_LEAK_CHECKER("LoadAndUnloadNetworkGpuAcc");
-        BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
+        CHECK(ARMNN_NO_LEAKS_IN_SCOPE());
         // In the second run we check for all remaining memory
         // in use after the network was unloaded. If there is any
         // then it will be treated as a memory leak.
         CreateAndDropDummyNetwork(backends, runtime);
-        BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
-        BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
-        BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
+        CHECK(ARMNN_NO_LEAKS_IN_SCOPE());
+        CHECK(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
+        CHECK(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
     }
 }
 #endif
 
 // Note: this part of the code is due to be removed when we fully trust the gperftools based results.
 #if defined(WITH_VALGRIND)
-BOOST_AUTO_TEST_CASE(RuntimeMemoryUsage)
+TEST_CASE("RuntimeMemoryUsage")
 {
     // From documentation:
 
@@ -135,12 +135,12 @@
     VALGRIND_COUNT_LEAKS(leakedAfter, dubious, reachableAfter, suppressed);
 
     // If we're not running under Valgrind, these vars will have been initialised to 0, so this will always pass.
-    BOOST_TEST(leakedBefore == leakedAfter);
+    CHECK(leakedBefore == leakedAfter);
 
     // Add resonable threshold after and before running valgrind with the ACL clear cache function.
     // TODO Threshold set to 80k until the root cause of the memory leakage is found and fixed. Revert threshold
     // value to 1024 when fixed.
-    BOOST_TEST(static_cast<long>(reachableAfter) - static_cast<long>(reachableBefore) < 81920);
+    CHECK(static_cast<long>(reachableAfter) - static_cast<long>(reachableBefore) < 81920);
 
     // These are needed because VALGRIND_COUNT_LEAKS is a macro that assigns to the parameters
     // so they are assigned to, but still considered unused, causing a warning.
@@ -149,9 +149,9 @@
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(ProfilingPostOptimisationStructureGpuAcc)
+TEST_CASE("ProfilingPostOptimisationStructureGpuAcc")
 {
     VerifyPostOptimisationStructureTestImpl(armnn::Compute::GpuAcc);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/cl/test/Fp16SupportTest.cpp b/src/backends/cl/test/Fp16SupportTest.cpp
index 5afafcb..1974d4d 100644
--- a/src/backends/cl/test/Fp16SupportTest.cpp
+++ b/src/backends/cl/test/Fp16SupportTest.cpp
@@ -13,15 +13,15 @@
 #include <backendsCommon/TensorHandle.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <set>
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(Fp16Support)
-
-BOOST_AUTO_TEST_CASE(Fp16DataTypeSupport)
+TEST_SUITE("Fp16Support")
+{
+TEST_CASE("Fp16DataTypeSupport")
 {
     Graph graph;
 
@@ -40,12 +40,12 @@
     inputLayer2->GetOutputSlot().SetTensorInfo(fp16TensorInfo);
     additionLayer->GetOutputSlot().SetTensorInfo(fp16TensorInfo);
 
-    BOOST_CHECK(inputLayer1->GetOutputSlot(0).GetTensorInfo().GetDataType() == armnn::DataType::Float16);
-    BOOST_CHECK(inputLayer2->GetOutputSlot(0).GetTensorInfo().GetDataType() == armnn::DataType::Float16);
-    BOOST_CHECK(additionLayer->GetOutputSlot(0).GetTensorInfo().GetDataType() == armnn::DataType::Float16);
+    CHECK(inputLayer1->GetOutputSlot(0).GetTensorInfo().GetDataType() == armnn::DataType::Float16);
+    CHECK(inputLayer2->GetOutputSlot(0).GetTensorInfo().GetDataType() == armnn::DataType::Float16);
+    CHECK(additionLayer->GetOutputSlot(0).GetTensorInfo().GetDataType() == armnn::DataType::Float16);
 }
 
-BOOST_AUTO_TEST_CASE(Fp16AdditionTest)
+TEST_CASE("Fp16AdditionTest")
 {
    using namespace half_float::literal;
    // Create runtime in which test will run
@@ -104,7 +104,7 @@
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
 
    // Checks the results.
-   BOOST_TEST(outputData == std::vector<Half>({ 101.0_h, 202.0_h, 303.0_h, 404.0_h})); // Add
+   CHECK(outputData == std::vector<Half>({ 101.0_h, 202.0_h, 303.0_h, 404.0_h})); // Add
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/cl/test/OpenClTimerTest.cpp b/src/backends/cl/test/OpenClTimerTest.cpp
index 7c8e277..0da1db7 100644
--- a/src/backends/cl/test/OpenClTimerTest.cpp
+++ b/src/backends/cl/test/OpenClTimerTest.cpp
@@ -21,7 +21,7 @@
 
 #include <arm_compute/runtime/CL/CLScheduler.h>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <iostream>
 
@@ -38,11 +38,10 @@
     ClContextControl m_ClContextControl;
 };
 
-BOOST_FIXTURE_TEST_SUITE(OpenClTimerBatchNorm, OpenClFixture)
-using FactoryType = ClWorkloadFactory;
-
-BOOST_AUTO_TEST_CASE(OpenClTimerBatchNorm)
+TEST_CASE_FIXTURE(OpenClFixture, "OpenClTimerBatchNorm")
 {
+//using FactoryType = ClWorkloadFactory;
+
     auto memoryManager = ClWorkloadFactoryHelper::GetMemoryManager();
     ClWorkloadFactory workloadFactory = ClWorkloadFactoryHelper::GetFactory(memoryManager);
 
@@ -109,7 +108,7 @@
 
     OpenClTimer openClTimer;
 
-    BOOST_CHECK_EQUAL(openClTimer.GetName(), "OpenClKernelTimer");
+    CHECK_EQ(openClTimer.GetName(), "OpenClKernelTimer");
 
     //Start the timer
     openClTimer.Start();
@@ -120,15 +119,13 @@
     //Stop the timer
     openClTimer.Stop();
 
-    BOOST_CHECK_EQUAL(openClTimer.GetMeasurements().size(), 1);
+    CHECK_EQ(openClTimer.GetMeasurements().size(), 1);
 
-    BOOST_CHECK_EQUAL(openClTimer.GetMeasurements().front().m_Name,
+    CHECK_EQ(openClTimer.GetMeasurements().front().m_Name,
                       "OpenClKernelTimer/0: batchnormalization_layer_nchw GWS[1,3,2]");
 
-    BOOST_CHECK(openClTimer.GetMeasurements().front().m_Value > 0);
+    CHECK(openClTimer.GetMeasurements().front().m_Value > 0);
 
 }
 
-BOOST_AUTO_TEST_SUITE_END()
-
 #endif //aarch64 or x86_64
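
The assertion-level substitutions in the remaining files are mechanical; as a rough standalone guide (placeholder data, not values taken from the tests above):

// Doctest assertion sketch (placeholder data, standalone build only).
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>

#include <string>

TEST_CASE("AssertionMappingSketch")
{
    const std::string name = "OpenClKernelTimer";

    // BOOST_TEST(cond) / BOOST_CHECK(cond) -> CHECK(cond)
    CHECK(!name.empty());

    // BOOST_CHECK_EQUAL(a, b) -> CHECK_EQ(a, b)
    CHECK_EQ(name, "OpenClKernelTimer");

    // BOOST_TEST(cond, message) -> CHECK_MESSAGE(cond, message)
    CHECK_MESSAGE(name.find("Timer") != std::string::npos, "expected a timer name");

    // BOOST_CHECK_NO_THROW(expr) -> CHECK_NOTHROW(expr)
    CHECK_NOTHROW(name.at(0));
}
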
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index a8c0c8a..e3d73be 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -18,8 +18,10 @@
 #include <neon/workloads/NeonWorkloadUtils.hpp>
 #include <neon/workloads/NeonWorkloads.hpp>
 
-BOOST_AUTO_TEST_SUITE(CreateWorkloadNeon)
+#include <doctest/doctest.h>
 
+TEST_SUITE("CreateWorkloadNeon")
+{
 namespace
 {
 
@@ -77,18 +79,18 @@
     ActivationQueueDescriptor queueDescriptor = workload->GetData();
     auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({1, 1}, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 1}, DataType)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo({1, 1}, DataType)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 1}, DataType)));
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateActivationFloat16Workload)
+TEST_CASE("CreateActivationFloat16Workload")
 {
     NeonCreateActivationWorkloadTest<DataType::Float16>();
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(CreateActivationFloatWorkload)
+TEST_CASE("CreateActivationFloatWorkload")
 {
     NeonCreateActivationWorkloadTest<DataType::Float32>();
 }
@@ -109,13 +111,13 @@
     auto inputHandle1 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto inputHandle2 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
     auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({2, 3}, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle2, TensorInfo({2, 3}, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({2, 3}, DataType)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({2, 3}, DataType)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle2, TensorInfo({2, 3}, DataType)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo({2, 3}, DataType)));
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateAdditionFloat16Workload)
+TEST_CASE("CreateAdditionFloat16Workload")
 {
     NeonCreateElementwiseWorkloadTest<NeonAdditionWorkload,
                                       AdditionQueueDescriptor,
@@ -124,7 +126,7 @@
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
+TEST_CASE("CreateAdditionFloatWorkload")
 {
     NeonCreateElementwiseWorkloadTest<NeonAdditionWorkload,
                                       AdditionQueueDescriptor,
@@ -133,7 +135,7 @@
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload)
+TEST_CASE("CreateSubtractionFloat16Workload")
 {
     NeonCreateElementwiseWorkloadTest<NeonSubtractionWorkload,
                                       SubtractionQueueDescriptor,
@@ -142,7 +144,7 @@
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
+TEST_CASE("CreateSubtractionFloatWorkload")
 {
     NeonCreateElementwiseWorkloadTest<NeonSubtractionWorkload,
                                       SubtractionQueueDescriptor,
@@ -150,7 +152,7 @@
                                       DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
+TEST_CASE("CreateSubtractionUint8Workload")
 {
     NeonCreateElementwiseWorkloadTest<NeonSubtractionWorkload,
                                       SubtractionQueueDescriptor,
@@ -159,7 +161,7 @@
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateMultiplicationFloat16Workload)
+TEST_CASE("CreateMultiplicationFloat16Workload")
 {
     NeonCreateElementwiseWorkloadTest<NeonMultiplicationWorkload,
                                       MultiplicationQueueDescriptor,
@@ -168,7 +170,7 @@
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
+TEST_CASE("CreateMultiplicationFloatWorkload")
 {
     NeonCreateElementwiseWorkloadTest<NeonMultiplicationWorkload,
                                       MultiplicationQueueDescriptor,
@@ -176,7 +178,7 @@
                                       DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
+TEST_CASE("CreateMultiplicationUint8Workload")
 {
     NeonCreateElementwiseWorkloadTest<NeonMultiplicationWorkload,
                                       MultiplicationQueueDescriptor,
@@ -184,7 +186,7 @@
                                       DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkloadTest)
+TEST_CASE("CreateDivisionFloatWorkloadTest")
 {
     NeonCreateElementwiseWorkloadTest<NeonDivisionWorkload,
                                       DivisionQueueDescriptor,
@@ -210,28 +212,28 @@
     TensorShape inputShape  = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 4, 4} : TensorShape{2, 4, 4, 3};
     TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 4, 4} : TensorShape{2, 4, 4, 3};
 
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16NchwWorkload)
+TEST_CASE("CreateBatchNormalizationFloat16NchwWorkload")
 {
     NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationWorkload, DataType::Float16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16NhwcWorkload)
+TEST_CASE("CreateBatchNormalizationFloat16NhwcWorkload")
 {
     NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationWorkload, DataType::Float16>(DataLayout::NHWC);
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatNchwWorkload)
+TEST_CASE("CreateBatchNormalizationFloatNchwWorkload")
 {
     NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationWorkload, DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatNhwcWorkload)
+TEST_CASE("CreateBatchNormalizationFloatNhwcWorkload")
 {
     NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationWorkload, DataType::Float32>(DataLayout::NHWC);
 }
@@ -252,33 +254,33 @@
     Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
     auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle,  TensorInfo(outputShape, DataType)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle,  TensorInfo(outputShape, DataType)));
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NchwWorkload)
+TEST_CASE("CreateConvolution2dFloat16NchwWorkload")
 {
     NeonCreateConvolution2dWorkloadTest<DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NhwcWorkload)
+TEST_CASE("CreateConvolution2dFloat16NhwcWorkload")
 {
     NeonCreateConvolution2dWorkloadTest<DataType::Float16>(DataLayout::NHWC);
 }
 
 #endif
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload)
+TEST_CASE("CreateConvolution2dFloatNchwWorkload")
 {
     NeonCreateConvolution2dWorkloadTest<DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
+TEST_CASE("CreateConvolution2dFloatNhwcWorkload")
 {
     NeonCreateConvolution2dWorkloadTest<DataType::Float32>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFastMathEnabledWorkload)
+TEST_CASE("CreateConvolution2dFastMathEnabledWorkload")
 {
     Graph graph;
     using ModelOptions = std::vector<BackendOptions>;
@@ -324,17 +326,17 @@
     TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
                                                                : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
 
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
 }
 
-BOOST_AUTO_TEST_CASE(CreateDepthWiseConvolution2dFloat32NhwcWorkload)
+TEST_CASE("CreateDepthWiseConvolution2dFloat32NhwcWorkload")
 {
     NeonCreateDepthWiseConvolutionWorkloadTest<DataType::Float32>(DataLayout::NHWC);
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateDepthWiseConvolution2dFloat16NhwcWorkload)
+TEST_CASE("CreateDepthWiseConvolution2dFloat16NhwcWorkload")
 {
     NeonCreateDepthWiseConvolutionWorkloadTest<DataType::Float16>(DataLayout::NHWC);
 }
@@ -357,28 +359,28 @@
     // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
     float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
     float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({3, 7}, DataType, outputQScale)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo({3, 7}, DataType, outputQScale)));
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat16Workload)
+TEST_CASE("CreateFullyConnectedFloat16Workload")
 {
     NeonCreateFullyConnectedWorkloadTest<NeonFullyConnectedWorkload, DataType::Float16>();
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloatWorkload)
+TEST_CASE("CreateFullyConnectedFloatWorkload")
 {
     NeonCreateFullyConnectedWorkloadTest<NeonFullyConnectedWorkload, DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedQAsymmU8Workload)
+TEST_CASE("CreateFullyConnectedQAsymmU8Workload")
 {
     NeonCreateFullyConnectedWorkloadTest<NeonFullyConnectedWorkload, DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedQAsymmS8Workload)
+TEST_CASE("CreateFullyConnectedQAsymmS8Workload")
 {
     NeonCreateFullyConnectedWorkloadTest<NeonFullyConnectedWorkload, DataType::QAsymmS8>();
 }
@@ -400,28 +402,28 @@
     TensorShape inputShape  = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 5, 5, 1} : TensorShape{3, 1, 5, 5};
     TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 5, 5, 1} : TensorShape{3, 1, 5, 5};
 
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NchwWorkload)
+TEST_CASE("CreateNormalizationFloat16NchwWorkload")
 {
     NeonCreateNormalizationWorkloadTest<NeonNormalizationFloatWorkload, DataType::Float16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NhwcWorkload)
+TEST_CASE("CreateNormalizationFloat16NhwcWorkload")
 {
     NeonCreateNormalizationWorkloadTest<NeonNormalizationFloatWorkload, DataType::Float16>(DataLayout::NHWC);
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(CreateNormalizationFloatNchwWorkload)
+TEST_CASE("CreateNormalizationFloatNchwWorkload")
 {
     NeonCreateNormalizationWorkloadTest<NeonNormalizationFloatWorkload, DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateNormalizationFloatNhwcWorkload)
+TEST_CASE("CreateNormalizationFloatNhwcWorkload")
 {
     NeonCreateNormalizationWorkloadTest<NeonNormalizationFloatWorkload, DataType::Float32>(DataLayout::NHWC);
 }
@@ -443,33 +445,33 @@
     Pooling2dQueueDescriptor queueDescriptor = workload->GetData();
     auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16Workload)
+TEST_CASE("CreatePooling2dFloat16Workload")
 {
     NeonCreatePooling2dWorkloadTest<DataType::Float16>();
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNchwWorkload)
+TEST_CASE("CreatePooling2dFloatNchwWorkload")
 {
     NeonCreatePooling2dWorkloadTest<DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNhwcWorkload)
+TEST_CASE("CreatePooling2dFloatNhwcWorkload")
 {
     NeonCreatePooling2dWorkloadTest<DataType::Float32>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NchwWorkload)
+TEST_CASE("CreatePooling2dUint8NchwWorkload")
 {
     NeonCreatePooling2dWorkloadTest<DataType::QAsymmU8>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NhwcWorkload)
+TEST_CASE("CreatePooling2dUint8NhwcWorkload")
 {
     NeonCreatePooling2dWorkloadTest<DataType::QAsymmU8>(DataLayout::NHWC);
 }
@@ -495,24 +497,24 @@
     auto inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto alphaHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
     auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, dataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(alphaHandle, TensorInfo(alphaShape, dataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, dataType)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, dataType)));
+    CHECK(TestNeonTensorHandleInfo(alphaHandle, TensorInfo(alphaShape, dataType)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, dataType)));
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-    BOOST_AUTO_TEST_CASE(CreatePreluFloat16Workload)
+TEST_CASE("CreatePreluFloat16Workload")
 {
     NeonCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::Float16);
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(CreatePreluFloatWorkload)
+TEST_CASE("CreatePreluFloatWorkload")
 {
     NeonCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::Float32);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload)
+TEST_CASE("CreatePreluUint8Workload")
 {
     NeonCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::QAsymmU8);
 }
@@ -530,23 +532,23 @@
     ReshapeQueueDescriptor queueDescriptor = workload->GetData();
     auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({4, 1}, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 4}, DataType)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo({4, 1}, DataType)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 4}, DataType)));
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateReshapeFloat16Workload)
+TEST_CASE("CreateReshapeFloat16Workload")
 {
     NeonCreateReshapeWorkloadTest<DataType::Float16>();
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(CreateReshapeFloatWorkload)
+TEST_CASE("CreateReshapeFloatWorkload")
 {
     NeonCreateReshapeWorkloadTest<DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
+TEST_CASE("CreateReshapeUint8Workload")
 {
     NeonCreateReshapeWorkloadTest<DataType::QAsymmU8>();
 }
@@ -569,34 +571,34 @@
     {
         case DataLayout::NHWC:
             predResult = CompareIAclTensorHandleShape(inputHandle, { 2, 4, 4, 3 });
-            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
             predResult = CompareIAclTensorHandleShape(outputHandle, { 2, 2, 2, 3 });
-            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
             break;
         default: // DataLayout::NCHW
             predResult = CompareIAclTensorHandleShape(inputHandle, { 2, 3, 4, 4 });
-            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
             predResult = CompareIAclTensorHandleShape(outputHandle, { 2, 3, 2, 2 });
-            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     }
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeFloat32NchwWorkload)
+TEST_CASE("CreateResizeFloat32NchwWorkload")
 {
     NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeUint8NchwWorkload)
+TEST_CASE("CreateResizeUint8NchwWorkload")
 {
     NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeFloat32NhwcWorkload)
+TEST_CASE("CreateResizeFloat32NhwcWorkload")
 {
     NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeUint8NhwcWorkload)
+TEST_CASE("CreateResizeUint8NhwcWorkload")
 {
     NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
 }
@@ -625,28 +627,28 @@
         tensorInfo.SetQuantizationOffset(-128);
         tensorInfo.SetQuantizationScale(1.f / 256);
     }
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, tensorInfo));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, tensorInfo));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, tensorInfo));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, tensorInfo));
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16Workload)
+TEST_CASE("CreateSoftmaxFloat16Workload")
 {
     NeonCreateSoftmaxWorkloadTest<NeonSoftmaxWorkload, DataType::Float16>();
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(CreateSoftmaxFloatWorkload)
+TEST_CASE("CreateSoftmaxFloatWorkload")
 {
     NeonCreateSoftmaxWorkloadTest<NeonSoftmaxWorkload, DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSoftmaxQAsymmU8Workload)
+TEST_CASE("CreateSoftmaxQAsymmU8Workload")
 {
     NeonCreateSoftmaxWorkloadTest<NeonSoftmaxWorkload, DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSoftmaxQAsymmS8Workload)
+TEST_CASE("CreateSoftmaxQAsymmS8Workload")
 {
     NeonCreateSoftmaxWorkloadTest<NeonSoftmaxWorkload, DataType::QAsymmS8>();
 }
@@ -664,31 +666,31 @@
     auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({ 1, 2, 2, 1 }, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({ 1, 1, 1, 4 }, DataType)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo({ 1, 2, 2, 1 }, DataType)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo({ 1, 1, 1, 4 }, DataType)));
 }
 
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthFloat32Workload)
+TEST_CASE("CreateSpaceToDepthFloat32Workload")
 {
     NeonSpaceToDepthWorkloadTest<NeonSpaceToDepthWorkload, armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthFloat16Workload)
+TEST_CASE("CreateSpaceToDepthFloat16Workload")
 {
     NeonSpaceToDepthWorkloadTest<NeonSpaceToDepthWorkload, armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthQAsymm8Workload)
+TEST_CASE("CreateSpaceToDepthQAsymm8Workload")
 {
     NeonSpaceToDepthWorkloadTest<NeonSpaceToDepthWorkload, armnn::DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthQSymm16Workload)
+TEST_CASE("CreateSpaceToDepthQSymm16Workload")
 {
     NeonSpaceToDepthWorkloadTest<NeonSpaceToDepthWorkload, armnn::DataType::QSymmS16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterWorkload)
+TEST_CASE("CreateSplitterWorkload")
 {
     Graph graph;
     NeonWorkloadFactory factory =
@@ -699,19 +701,19 @@
     // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
     SplitterQueueDescriptor queueDescriptor = workload->GetData();
     auto inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({5, 7, 7}, DataType::Float32)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo({5, 7, 7}, DataType::Float32)));
 
     auto outputHandle0 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle0, TensorInfo({1, 7, 7}, DataType::Float32)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle0, TensorInfo({1, 7, 7}, DataType::Float32)));
 
     auto outputHandle1 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle1, TensorInfo({2, 7, 7}, DataType::Float32)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle1, TensorInfo({2, 7, 7}, DataType::Float32)));
 
     auto outputHandle2 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[2]);
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle2, TensorInfo({2, 7, 7}, DataType::Float32)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle2, TensorInfo({2, 7, 7}, DataType::Float32)));
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterConcat)
+TEST_CASE("CreateSplitterConcat")
 {
     // Tests that it is possible to decide which output of the splitter layer
     // should be linked to which input of the concat layer.
@@ -736,17 +738,17 @@
     armnn::IAclTensorHandle* mIn0 = dynamic_cast<armnn::IAclTensorHandle*>(wlConcat->GetData().m_Inputs[0]);
     armnn::IAclTensorHandle* mIn1 = dynamic_cast<armnn::IAclTensorHandle*>(wlConcat->GetData().m_Inputs[1]);
 
-    BOOST_TEST(sOut0);
-    BOOST_TEST(sOut1);
-    BOOST_TEST(mIn0);
-    BOOST_TEST(mIn1);
+    CHECK(sOut0);
+    CHECK(sOut1);
+    CHECK(mIn0);
+    CHECK(mIn1);
 
     bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);
 
-    BOOST_TEST(validDataPointers);
+    CHECK(validDataPointers);
 }
 
-BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputs)
+TEST_CASE("CreateSingleOutputMultipleInputs")
 {
     // Tests that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer.
     // We created a splitter with two outputs. That each of those outputs is used by two different activation layers
@@ -773,24 +775,24 @@
     armnn::IAclTensorHandle* activ1_1Im = dynamic_cast<armnn::IAclTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);
 
 
-    BOOST_TEST(sOut0);
-    BOOST_TEST(sOut1);
-    BOOST_TEST(activ0_0Im);
-    BOOST_TEST(activ0_1Im);
-    BOOST_TEST(activ1_0Im);
-    BOOST_TEST(activ1_1Im);
+    CHECK(sOut0);
+    CHECK(sOut1);
+    CHECK(activ0_0Im);
+    CHECK(activ0_1Im);
+    CHECK(activ1_0Im);
+    CHECK(activ1_1Im);
 
     bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
                              (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);
 
-    BOOST_TEST(validDataPointers);
+    CHECK(validDataPointers);
 }
 
 #if defined(ARMNNREF_ENABLED)
 
 // This test unit needs the reference backend; it's not available if the reference backend is not built
 
-BOOST_AUTO_TEST_CASE(CreateMemCopyWorkloadsNeon)
+TEST_CASE("CreateMemCopyWorkloadsNeon")
 {
     NeonWorkloadFactory factory =
         NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
@@ -819,28 +821,28 @@
     TensorShape outputShape = (dataLayout == DataLayout::NCHW) ?
                 TensorShape{ 5, 20, 50, 67 } : TensorShape{ 5, 50, 67, 20 };
 
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NchwWorkload)
+TEST_CASE("CreateL2NormalizationFloat16NchwWorkload")
 {
     NeonCreateL2NormalizationWorkloadTest<NeonL2NormalizationFloatWorkload, DataType::Float16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NhwcWorkload)
+TEST_CASE("CreateL2NormalizationFloat16NhwcWorkload")
 {
     NeonCreateL2NormalizationWorkloadTest<NeonL2NormalizationFloatWorkload, DataType::Float16>(DataLayout::NHWC);
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationNchwWorkload)
+TEST_CASE("CreateL2NormalizationNchwWorkload")
 {
     NeonCreateL2NormalizationWorkloadTest<NeonL2NormalizationFloatWorkload, DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationNhwcWorkload)
+TEST_CASE("CreateL2NormalizationNhwcWorkload")
 {
     NeonCreateL2NormalizationWorkloadTest<NeonL2NormalizationFloatWorkload, DataType::Float32>(DataLayout::NHWC);
 }
@@ -860,18 +862,18 @@
     auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
     armnn::TensorInfo tensorInfo({4, 1}, DataType);
 
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, tensorInfo));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, tensorInfo));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, tensorInfo));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, tensorInfo));
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateLogSoftmaxFloat16Workload)
+TEST_CASE("CreateLogSoftmaxFloat16Workload")
 {
     NeonCreateLogSoftmaxWorkloadTest<NeonLogSoftmaxWorkload, DataType::Float16>();
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(CreateLogSoftmaxFloatWorkload)
+TEST_CASE("CreateLogSoftmaxFloatWorkload")
 {
     NeonCreateLogSoftmaxWorkloadTest<NeonLogSoftmaxWorkload, DataType::Float32>();
 }
@@ -890,11 +892,11 @@
     auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
 
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({ 2, 2 }, DataType::Float32)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({ 2, 4 }, DataType::Float32)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo({ 2, 2 }, DataType::Float32)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo({ 2, 4 }, DataType::Float32)));
 }
 
-BOOST_AUTO_TEST_CASE(CreateLSTMWorkloadFloatWorkload)
+TEST_CASE("CreateLSTMWorkloadFloatWorkload")
 {
     NeonCreateLstmWorkloadTest<NeonLstmFloatWorkload>();
 }
@@ -914,37 +916,37 @@
     auto inputHandle1 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
     auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle0, TensorInfo({ 2, 3, 2, 5 }, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({ 2, 3, 2, 5 }, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle0, TensorInfo({ 2, 3, 2, 5 }, DataType)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({ 2, 3, 2, 5 }, DataType)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
+TEST_CASE("CreateConcatDim0Float32Workload")
 {
     NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
+TEST_CASE("CreateConcatDim1Float32Workload")
 {
     NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
+TEST_CASE("CreateConcatDim3Float32Workload")
 {
     NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
+TEST_CASE("CreateConcatDim0Uint8Workload")
 {
     NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QAsymmU8>({ 4, 3, 2, 5 }, 0);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
+TEST_CASE("CreateConcatDim1Uint8Workload")
 {
     NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 6, 2, 5 }, 1);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
+TEST_CASE("CreateConcatDim3Uint8Workload")
 {
     NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 }, 3);
 }
@@ -971,25 +973,25 @@
     for (unsigned int i = 0; i < numInputs; ++i)
     {
         auto inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[i]);
-        BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
+        CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
     }
     auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
 }
 
-BOOST_AUTO_TEST_CASE(CreateStackFloat32Workload)
+TEST_CASE("CreateStackFloat32Workload")
 {
     NeonCreateStackWorkloadTest<armnn::DataType::Float32>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateStackFloat16Workload)
+TEST_CASE("CreateStackFloat16Workload")
 {
     NeonCreateStackWorkloadTest<armnn::DataType::Float16>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
+TEST_CASE("CreateStackUint8Workload")
 {
     NeonCreateStackWorkloadTest<armnn::DataType::QAsymmU8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
 }
@@ -1005,27 +1007,27 @@
     QuantizedLstmQueueDescriptor queueDescriptor = workload->GetData();
 
     IAclTensorHandle* inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    BOOST_TEST((inputHandle->GetShape() == TensorShape({2, 2})));
-    BOOST_TEST((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8));
+    CHECK((inputHandle->GetShape() == TensorShape({2, 2})));
+    CHECK((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8));
 
     IAclTensorHandle* cellStateInHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
-    BOOST_TEST((cellStateInHandle->GetShape() == TensorShape({2, 4})));
-    BOOST_TEST((cellStateInHandle->GetDataType() == arm_compute::DataType::QSYMM16));
+    CHECK((cellStateInHandle->GetShape() == TensorShape({2, 4})));
+    CHECK((cellStateInHandle->GetDataType() == arm_compute::DataType::QSYMM16));
 
     IAclTensorHandle* outputStateInHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[2]);
-    BOOST_TEST((outputStateInHandle->GetShape() == TensorShape({2, 4})));
-    BOOST_TEST((outputStateInHandle->GetDataType() == arm_compute::DataType::QASYMM8));
+    CHECK((outputStateInHandle->GetShape() == TensorShape({2, 4})));
+    CHECK((outputStateInHandle->GetDataType() == arm_compute::DataType::QASYMM8));
 
     IAclTensorHandle* cellStateOutHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST((cellStateOutHandle->GetShape() == TensorShape({2, 4})));
-    BOOST_TEST((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16));
+    CHECK((cellStateOutHandle->GetShape() == TensorShape({2, 4})));
+    CHECK((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16));
 
     IAclTensorHandle* outputStateOutHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
-    BOOST_TEST((outputStateOutHandle->GetShape() == TensorShape({2, 4})));
-    BOOST_TEST((outputStateOutHandle->GetDataType() == arm_compute::DataType::QASYMM8));
+    CHECK((outputStateOutHandle->GetShape() == TensorShape({2, 4})));
+    CHECK((outputStateOutHandle->GetDataType() == arm_compute::DataType::QASYMM8));
 }
 
-BOOST_AUTO_TEST_CASE(CreateQuantizedLstmWorkload)
+TEST_CASE("CreateQuantizedLstmWorkload")
 {
     NeonCreateQuantizedLstmWorkloadTest<NeonQuantizedLstmWorkload>();
 }
@@ -1040,21 +1042,21 @@
     QLstmQueueDescriptor queueDescriptor = workload->GetData();
 
     IAclTensorHandle* inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    BOOST_TEST((inputHandle->GetShape() == TensorShape({2, 4})));
-    BOOST_TEST((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8_SIGNED));
+    CHECK((inputHandle->GetShape() == TensorShape({2, 4})));
+    CHECK((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8_SIGNED));
 
     IAclTensorHandle* cellStateOutHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
-    BOOST_TEST((cellStateOutHandle->GetShape() == TensorShape({2, 4})));
-    BOOST_TEST((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16));
+    CHECK((cellStateOutHandle->GetShape() == TensorShape({2, 4})));
+    CHECK((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16));
 
     IAclTensorHandle* outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[2]);
-    BOOST_TEST((outputHandle->GetShape() == TensorShape({2, 4})));
-    BOOST_TEST((outputHandle->GetDataType() == arm_compute::DataType::QASYMM8_SIGNED));
+    CHECK((outputHandle->GetShape() == TensorShape({2, 4})));
+    CHECK((outputHandle->GetDataType() == arm_compute::DataType::QASYMM8_SIGNED));
 }
 
-BOOST_AUTO_TEST_CASE(CreateQLstmWorkloadTest)
+TEST_CASE("CreateQLstmWorkloadTest")
 {
     NeonCreateQLstmWorkloadTest<NeonQLstmWorkload>();
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
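
For reference only, and not as part of the patch itself: the converted test files above all follow the same doctest layout, with a TEST_SUITE block wrapping the TEST_CASE functions and CHECK/CHECK_MESSAGE standing in for BOOST_TEST. A minimal, self-contained sketch of that layout (hypothetical suite and case names, nothing taken from the ArmNN code base) is:

    // Sketch only: doctest is asked to supply main() here purely so this single
    // file builds into a runnable test binary on its own.
    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
    #include <doctest/doctest.h>

    #include <sstream>
    #include <vector>

    TEST_SUITE("ExampleSuite")
    {

    TEST_CASE("ExampleCase")
    {
        std::vector<int> values = { 1, 2, 3 };

        // Plain boolean assertion, the doctest counterpart of BOOST_TEST/BOOST_CHECK.
        CHECK(values.size() == 3);

        // Assertion with an explanatory message, the counterpart of the
        // two-argument BOOST_TEST(condition, message) form.
        std::stringstream message;
        message << "expected 3 elements, got " << values.size();
        CHECK_MESSAGE(values.size() == 3, message.str());
    }

    }
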
diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp
index dc0a609..5190e2f 100644
--- a/src/backends/neon/test/NeonEndToEndTests.cpp
+++ b/src/backends/neon/test/NeonEndToEndTests.cpp
@@ -22,14 +22,14 @@
 #include <backendsCommon/test/SplitterEndToEndTestImpl.hpp>
 #include <backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(NeonEndToEnd)
-
-std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuAcc};
+TEST_SUITE("NeonEndToEnd")
+{
+std::vector<armnn::BackendId> neonDefaultBackends = {armnn::Compute::CpuAcc};
 
 // Abs
-BOOST_AUTO_TEST_CASE(NeonAbsEndToEndTestFloat32)
+TEST_CASE("NeonAbsEndToEndTestFloat32")
 {
     std::vector<float> expectedOutput =
     {
@@ -37,22 +37,22 @@
         3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
     };
 
-    ElementwiseUnarySimpleEndToEnd<armnn::DataType::Float32>(defaultBackends,
+    ElementwiseUnarySimpleEndToEnd<armnn::DataType::Float32>(neonDefaultBackends,
                                                              UnaryOperation::Abs,
                                                              expectedOutput);
 }
 
 // Constant
-BOOST_AUTO_TEST_CASE(ConstantUsage_Neon_Float32)
+TEST_CASE("ConstantUsage_Neon_Float32")
 {
-    BOOST_TEST(ConstantUsageFloat32Test(defaultBackends));
+    CHECK(ConstantUsageFloat32Test(neonDefaultBackends));
 }
 
 #if defined(ARMNNREF_ENABLED)
 
 // This test unit needs the reference backend; it's not available if the reference backend is not built
 
-BOOST_AUTO_TEST_CASE(FallbackToCpuRef)
+TEST_CASE("FallbackToCpuRef")
 {
     using namespace armnn;
 
@@ -83,519 +83,523 @@
 
     // Load it into the runtime. It should pass.
     NetworkId netId;
-    BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
+    CHECK(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
 }
 
 #endif
 
-BOOST_AUTO_TEST_CASE(NeonGreaterSimpleEndToEndTest)
+TEST_CASE("NeonGreaterSimpleEndToEndTest")
 {
     const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0,  1, 1, 1, 1,
                                                 0, 0, 0, 0,  0, 0, 0, 0 });
 
-    ComparisonSimpleEndToEnd<armnn::DataType::Float32>(defaultBackends,
+    ComparisonSimpleEndToEnd<armnn::DataType::Float32>(neonDefaultBackends,
                                                        ComparisonOperation::Greater,
                                                        expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(NeonGreaterSimpleEndToEndUint8Test)
+TEST_CASE("NeonGreaterSimpleEndToEndUint8Test")
 {
     const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0,  1, 1, 1, 1,
                                                 0, 0, 0, 0,  0, 0, 0, 0 });
 
-    ComparisonSimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
+    ComparisonSimpleEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends,
                                                                ComparisonOperation::Greater,
                                                                expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(NeonGreaterBroadcastEndToEndTest)
+TEST_CASE("NeonGreaterBroadcastEndToEndTest")
 {
     const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
                                                 1, 1, 1, 1, 1, 1 });
 
-    ComparisonBroadcastEndToEnd<armnn::DataType::Float32>(defaultBackends,
+    ComparisonBroadcastEndToEnd<armnn::DataType::Float32>(neonDefaultBackends,
                                                           ComparisonOperation::Greater,
                                                           expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(NeonGreaterBroadcastEndToEndUint8Test)
+TEST_CASE("NeonGreaterBroadcastEndToEndUint8Test")
 {
     const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
                                                 1, 1, 1, 1, 1, 1 });
 
-    ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
+    ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends,
                                                                   ComparisonOperation::Greater,
                                                                   expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim0Test)
+TEST_CASE("NeonConcatEndToEndDim0Test")
 {
-    ConcatDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ConcatDim0EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim0Uint8Test)
+TEST_CASE("NeonConcatEndToEndDim0Uint8Test")
 {
-    ConcatDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ConcatDim0EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim1Test)
+TEST_CASE("NeonConcatEndToEndDim1Test")
 {
-    ConcatDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ConcatDim1EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim1Uint8Test)
+TEST_CASE("NeonConcatEndToEndDim1Uint8Test")
 {
-    ConcatDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ConcatDim1EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim3Test)
+TEST_CASE("NeonConcatEndToEndDim3Test")
 {
-    ConcatDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ConcatDim3EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim3Uint8Test)
+TEST_CASE("NeonConcatEndToEndDim3Uint8Test")
 {
-    ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
 // DepthToSpace
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwFloat32)
+TEST_CASE("DephtToSpaceEndToEndNchwFloat32")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NCHW);
+    DepthToSpaceEndToEnd<armnn::DataType::Float32>(neonDefaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwFloat16)
+TEST_CASE("DephtToSpaceEndToEndNchwFloat16")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::Float16>(defaultBackends, armnn::DataLayout::NCHW);
+    DepthToSpaceEndToEnd<armnn::DataType::Float16>(neonDefaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwUint8)
+TEST_CASE("DephtToSpaceEndToEndNchwUint8")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
+    DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwInt16)
+TEST_CASE("DephtToSpaceEndToEndNchwInt16")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
+    DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(neonDefaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat32)
+TEST_CASE("DephtToSpaceEndToEndNhwcFloat32")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NHWC);
+    DepthToSpaceEndToEnd<armnn::DataType::Float32>(neonDefaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat16)
+TEST_CASE("DephtToSpaceEndToEndNhwcFloat16")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::Float16>(defaultBackends, armnn::DataLayout::NHWC);
+    DepthToSpaceEndToEnd<armnn::DataType::Float16>(neonDefaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcUint8)
+TEST_CASE("DephtToSpaceEndToEndNhwcUint8")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
+    DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcInt16)
+TEST_CASE("DephtToSpaceEndToEndNhwcInt16")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
+    DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(neonDefaultBackends, armnn::DataLayout::NHWC);
 }
 
 // Dequantize
-BOOST_AUTO_TEST_CASE(DequantizeEndToEndSimpleTest)
+TEST_CASE("DequantizeEndToEndSimpleTest")
 {
-    DequantizeEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
+    DequantizeEndToEndSimple<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(DequantizeEndToEndOffsetTest)
+TEST_CASE("DequantizeEndToEndOffsetTest")
 {
-    DequantizeEndToEndOffset<armnn::DataType::QAsymmU8>(defaultBackends);
+    DequantizeEndToEndOffset<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonEluEndToEndTestFloat32)
+TEST_CASE("NeonEluEndToEndTestFloat32")
 {
-    EluEndToEndTest<armnn::DataType::Float32>(defaultBackends);
+    EluEndToEndTest<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonEluEndToEndTestFloat16)
+TEST_CASE("NeonEluEndToEndTestFloat16")
 {
-    EluEndToEndTest<armnn::DataType::Float16>(defaultBackends);
+    EluEndToEndTest<armnn::DataType::Float16>(neonDefaultBackends);
 }
 
 // HardSwish
-BOOST_AUTO_TEST_CASE(NeonHardSwishEndToEndTestFloat32)
+TEST_CASE("NeonHardSwishEndToEndTestFloat32")
 {
-    HardSwishEndToEndTest<armnn::DataType::Float32>(defaultBackends);
+    HardSwishEndToEndTest<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonHardSwishEndToEndTestFloat16)
+TEST_CASE("NeonHardSwishEndToEndTestFloat16")
 {
-    HardSwishEndToEndTest<armnn::DataType::Float16>(defaultBackends);
+    HardSwishEndToEndTest<armnn::DataType::Float16>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonHardSwishEndToEndTestQAsymmS8)
+TEST_CASE("NeonHardSwishEndToEndTestQAsymmS8")
 {
-    HardSwishEndToEndTest<armnn::DataType::QAsymmS8>(defaultBackends);
+    HardSwishEndToEndTest<armnn::DataType::QAsymmS8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonHardSwishEndToEndTestQAsymmU8)
+TEST_CASE("NeonHardSwishEndToEndTestQAsymmU8")
 {
-    HardSwishEndToEndTest<armnn::DataType::QAsymmU8>(defaultBackends);
+    HardSwishEndToEndTest<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonPreluEndToEndFloat32Test)
+TEST_CASE("NeonPreluEndToEndFloat32Test")
 {
-    PreluEndToEndNegativeTest<armnn::DataType::Float32>(defaultBackends);
+    PreluEndToEndNegativeTest<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonPreluEndToEndTestUint8Test)
+TEST_CASE("NeonPreluEndToEndTestUint8Test")
 {
-    PreluEndToEndPositiveTest<armnn::DataType::QAsymmU8>(defaultBackends);
+    PreluEndToEndPositiveTest<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSpaceToDepthNhwcEndToEndTest1)
+TEST_CASE("NeonSpaceToDepthNhwcEndToEndTest1")
 {
-    SpaceToDepthNhwcEndToEndTest1(defaultBackends);
+    SpaceToDepthNhwcEndToEndTest1(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSpaceToDepthNchwEndToEndTest1)
+TEST_CASE("NeonSpaceToDepthNchwEndToEndTest1")
 {
-    SpaceToDepthNchwEndToEndTest1(defaultBackends);
+    SpaceToDepthNchwEndToEndTest1(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSpaceToDepthNhwcEndToEndTest2)
+TEST_CASE("NeonSpaceToDepthNhwcEndToEndTest2")
 {
-    SpaceToDepthNhwcEndToEndTest2(defaultBackends);
+    SpaceToDepthNhwcEndToEndTest2(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSpaceToDepthNchwEndToEndTest2)
+TEST_CASE("NeonSpaceToDepthNchwEndToEndTest2")
 {
-    SpaceToDepthNchwEndToEndTest2(defaultBackends);
+    SpaceToDepthNchwEndToEndTest2(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter1dEndToEndTest)
+TEST_CASE("NeonSplitter1dEndToEndTest")
 {
-    Splitter1dEndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter1dEndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter1dEndToEndUint8Test)
+TEST_CASE("NeonSplitter1dEndToEndUint8Test")
 {
-    Splitter1dEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter1dEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter2dDim0EndToEndTest)
+TEST_CASE("NeonSplitter2dDim0EndToEndTest")
 {
-    Splitter2dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter2dDim0EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter2dDim1EndToEndTest)
+TEST_CASE("NeonSplitter2dDim1EndToEndTest")
 {
-    Splitter2dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter2dDim1EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter2dDim0EndToEndUint8Test)
+TEST_CASE("NeonSplitter2dDim0EndToEndUint8Test")
 {
-    Splitter2dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter2dDim0EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter2dDim1EndToEndUint8Test)
+TEST_CASE("NeonSplitter2dDim1EndToEndUint8Test")
 {
-    Splitter2dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter2dDim1EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter3dDim0EndToEndTest)
+TEST_CASE("NeonSplitter3dDim0EndToEndTest")
 {
-    Splitter3dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter3dDim0EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter3dDim1EndToEndTest)
+TEST_CASE("NeonSplitter3dDim1EndToEndTest")
 {
-    Splitter3dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter3dDim1EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter3dDim2EndToEndTest)
+TEST_CASE("NeonSplitter3dDim2EndToEndTest")
 {
-    Splitter3dDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter3dDim2EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter3dDim0EndToEndUint8Test)
+TEST_CASE("NeonSplitter3dDim0EndToEndUint8Test")
 {
-    Splitter3dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter3dDim0EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter3dDim1EndToEndUint8Test)
+TEST_CASE("NeonSplitter3dDim1EndToEndUint8Test")
 {
-    Splitter3dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter3dDim1EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter3dDim2EndToEndUint8Test)
+TEST_CASE("NeonSplitter3dDim2EndToEndUint8Test")
 {
-    Splitter3dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter3dDim2EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter4dDim0EndToEndTest)
+TEST_CASE("NeonSplitter4dDim0EndToEndTest")
 {
-    Splitter4dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter4dDim0EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter4dDim1EndToEndTest)
+TEST_CASE("NeonSplitter4dDim1EndToEndTest")
 {
-    Splitter4dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter4dDim1EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter4dDim2EndToEndTest)
+TEST_CASE("NeonSplitter4dDim2EndToEndTest")
 {
-    Splitter4dDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter4dDim2EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter4dDim3EndToEndTest)
+TEST_CASE("NeonSplitter4dDim3EndToEndTest")
 {
-    Splitter4dDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter4dDim3EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter4dDim0EndToEndUint8Test)
+TEST_CASE("NeonSplitter4dDim0EndToEndUint8Test")
 {
-    Splitter4dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter4dDim0EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter4dDim1EndToEndUint8Test)
+TEST_CASE("NeonSplitter4dDim1EndToEndUint8Test")
 {
-    Splitter4dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter4dDim1EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter4dDim2EndToEndUint8Test)
+TEST_CASE("NeonSplitter4dDim2EndToEndUint8Test")
 {
-    Splitter4dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter4dDim2EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter4dDim3EndToEndUint8Test)
+TEST_CASE("NeonSplitter4dDim3EndToEndUint8Test")
 {
-    Splitter4dDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter4dDim3EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonQuantizedLstmEndToEndTest)
+TEST_CASE("NeonQuantizedLstmEndToEndTest")
 {
-    QuantizedLstmEndToEnd(defaultBackends);
+    QuantizedLstmEndToEnd(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonTransposeConvolution2dEndToEndFloatNchwTest)
+TEST_CASE("NeonTransposeConvolution2dEndToEndFloatNchwTest")
 {
     TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
-        defaultBackends, armnn::DataLayout::NCHW);
+        neonDefaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(NeonTransposeConvolution2dEndToEndUint8NchwTest)
+TEST_CASE("NeonTransposeConvolution2dEndToEndUint8NchwTest")
 {
     TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
-        defaultBackends, armnn::DataLayout::NCHW);
+        neonDefaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(NeonTransposeConvolution2dEndToEndFloatNhwcTest)
+TEST_CASE("NeonTransposeConvolution2dEndToEndFloatNhwcTest")
 {
     TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
-        defaultBackends, armnn::DataLayout::NHWC);
+        neonDefaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(NeonTransposeConvolution2dEndToEndUint8NhwcTest)
+TEST_CASE("NeonTransposeConvolution2dEndToEndUint8NhwcTest")
 {
     TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
-        defaultBackends, armnn::DataLayout::NHWC);
+        neonDefaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(NeonImportNonAlignedInputPointerTest)
+TEST_CASE("NeonImportNonAlignedInputPointerTest")
 {
-    ImportNonAlignedInputPointerTest(defaultBackends);
+    ImportNonAlignedInputPointerTest(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonExportNonAlignedOutputPointerTest)
+TEST_CASE("NeonExportNonAlignedOutputPointerTest")
 {
-    ExportNonAlignedOutputPointerTest(defaultBackends);
+    ExportNonAlignedOutputPointerTest(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonImportAlignedPointerTest)
+TEST_CASE("NeonImportAlignedPointerTest")
 {
-    ImportAlignedPointerTest(defaultBackends);
+    ImportAlignedPointerTest(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonImportOnlyWorkload)
+TEST_CASE("NeonImportOnlyWorkload")
 {
-    ImportOnlyWorkload(defaultBackends);
+    ImportOnlyWorkload(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonExportOnlyWorkload)
+TEST_CASE("NeonExportOnlyWorkload")
 {
-    ExportOnlyWorkload(defaultBackends);
+    ExportOnlyWorkload(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonImportAndExportWorkload)
+TEST_CASE("NeonImportAndExportWorkload")
 {
-    ImportAndExportWorkload(defaultBackends);
+    ImportAndExportWorkload(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonExportOutputWithSeveralOutputSlotConnectionsTest)
+TEST_CASE("NeonExportOutputWithSeveralOutputSlotConnectionsTest")
 {
-    ExportOutputWithSeveralOutputSlotConnectionsTest(defaultBackends);
+    ExportOutputWithSeveralOutputSlotConnectionsTest(neonDefaultBackends);
 }
 
 // InstanceNormalization
-BOOST_AUTO_TEST_CASE(NeonInstanceNormalizationNchwEndToEndTest1)
+TEST_CASE("NeonInstanceNormalizationNchwEndToEndTest1")
 {
-    InstanceNormalizationNchwEndToEndTest1(defaultBackends);
+    InstanceNormalizationNchwEndToEndTest1(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonInstanceNormalizationNchwEndToEndTest2)
+TEST_CASE("NeonInstanceNormalizationNchwEndToEndTest2")
 {
-    InstanceNormalizationNchwEndToEndTest2(defaultBackends);
+    InstanceNormalizationNchwEndToEndTest2(neonDefaultBackends);
 }
 
 // Fill
-BOOST_AUTO_TEST_CASE(NeonFillEndToEndTest)
+TEST_CASE("NeonFillEndToEndTest")
 {
-    FillEndToEnd<armnn::DataType::Float32>(defaultBackends);
+    FillEndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefFillEndToEndTestFloat16)
+TEST_CASE("RefFillEndToEndTestFloat16")
 {
-    FillEndToEnd<armnn::DataType::Float16>(defaultBackends);
+    FillEndToEnd<armnn::DataType::Float16>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonFillEndToEndTestInt32)
+TEST_CASE("NeonFillEndToEndTestInt32")
 {
-    FillEndToEnd<armnn::DataType::Signed32>(defaultBackends);
+    FillEndToEnd<armnn::DataType::Signed32>(neonDefaultBackends);
 }
 
 // ArgMinMax
-BOOST_AUTO_TEST_CASE(NeonArgMaxSimpleTest)
+TEST_CASE("NeonArgMaxSimpleTest")
 {
-    ArgMaxEndToEndSimple<armnn::DataType::Float32>(defaultBackends);
+    ArgMaxEndToEndSimple<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMinSimpleTest)
+TEST_CASE("NeonArgMinSimpleTest")
 {
-    ArgMinEndToEndSimple<armnn::DataType::Float32>(defaultBackends);
+    ArgMinEndToEndSimple<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMaxAxis0Test)
+TEST_CASE("NeonArgMaxAxis0Test")
 {
-    ArgMaxAxis0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMaxAxis0EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMinAxis0Test)
+TEST_CASE("NeonArgMinAxis0Test")
 {
-    ArgMinAxis0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMinAxis0EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMaxAxis1Test)
+TEST_CASE("NeonArgMaxAxis1Test")
 {
-    ArgMaxAxis1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMaxAxis1EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMinAxis1Test)
+TEST_CASE("NeonArgMinAxis1Test")
 {
-    ArgMinAxis1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMinAxis1EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMaxAxis2Test)
+TEST_CASE("NeonArgMaxAxis2Test")
 {
-    ArgMaxAxis2EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMaxAxis2EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMinAxis2Test)
+TEST_CASE("NeonArgMinAxis2Test")
 {
-    ArgMinAxis2EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMinAxis2EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMaxAxis3Test)
+TEST_CASE("NeonArgMaxAxis3Test")
 {
-    ArgMaxAxis3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMaxAxis3EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMinAxis3Test)
+TEST_CASE("NeonArgMinAxis3Test")
 {
-    ArgMinAxis3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMinAxis3EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMaxSimpleTestQuantisedAsymm8)
+TEST_CASE("NeonArgMaxSimpleTestQuantisedAsymm8")
 {
-    ArgMaxEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMaxEndToEndSimple<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMinSimpleTestQuantisedAsymm8)
+TEST_CASE("NeonArgMinSimpleTestQuantisedAsymm8")
 {
-    ArgMinEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMinEndToEndSimple<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMaxAxis0TestQuantisedAsymm8)
+TEST_CASE("NeonArgMaxAxis0TestQuantisedAsymm8")
 {
-    ArgMaxAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMaxAxis0EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMinAxis0TestQuantisedAsymm8)
+TEST_CASE("NeonArgMinAxis0TestQuantisedAsymm8")
 {
-    ArgMinAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMinAxis0EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMaxAxis1TestQuantisedAsymm8)
+TEST_CASE("NeonArgMaxAxis1TestQuantisedAsymm8")
 {
-    ArgMaxAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMaxAxis1EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMinAxis1TestQuantisedAsymm8)
+TEST_CASE("NeonArgMinAxis1TestQuantisedAsymm8")
 {
-    ArgMinAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMinAxis1EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMaxAxis2TestQuantisedAsymm8)
+TEST_CASE("NeonArgMaxAxis2TestQuantisedAsymm8")
 {
-    ArgMaxAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMaxAxis2EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMinAxis2TestQuantisedAsymm8)
+TEST_CASE("NeonArgMinAxis2TestQuantisedAsymm8")
 {
-    ArgMinAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMinAxis2EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMaxAxis3TestQuantisedAsymm8)
+TEST_CASE("NeonArgMaxAxis3TestQuantisedAsymm8")
 {
-    ArgMaxAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMaxAxis3EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMinAxis3TestQuantisedAsymm8)
+TEST_CASE("NeonArgMinAxis3TestQuantisedAsymm8")
 {
-    ArgMinAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMinAxis3EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonStridedSliceInvalidSliceEndToEndTest)
+TEST_CASE("NeonStridedSliceInvalidSliceEndToEndTest")
 {
-    StridedSliceInvalidSliceEndToEndTest(defaultBackends);
+    StridedSliceInvalidSliceEndToEndTest(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessRegularNmsTest, * boost::unit_test::disabled())
-{
-    std::vector<float> boxEncodings({
-                                        0.0f, 0.0f, 0.0f, 0.0f,
-                                        0.0f, 1.0f, 0.0f, 0.0f,
-                                        0.0f, -1.0f, 0.0f, 0.0f,
-                                        0.0f, 0.0f, 0.0f, 0.0f,
-                                        0.0f, 1.0f, 0.0f, 0.0f,
-                                        0.0f, 0.0f, 0.0f, 0.0f
-                                    });
-    std::vector<float> scores({
-                                  0.0f, 0.9f, 0.8f,
-                                  0.0f, 0.75f, 0.72f,
-                                  0.0f, 0.6f, 0.5f,
-                                  0.0f, 0.93f, 0.95f,
-                                  0.0f, 0.5f, 0.4f,
-                                  0.0f, 0.3f, 0.2f
-                              });
-    std::vector<float> anchors({
-                                   0.5f, 0.5f, 1.0f, 1.0f,
-                                   0.5f, 0.5f, 1.0f, 1.0f,
-                                   0.5f, 0.5f, 1.0f, 1.0f,
-                                   0.5f, 10.5f, 1.0f, 1.0f,
-                                   0.5f, 10.5f, 1.0f, 1.0f,
-                                   0.5f, 100.5f, 1.0f, 1.0f
-                               });
-    DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::Float32>(defaultBackends, boxEncodings, scores, anchors);
-}
+// DISABLED
+//TEST_CASE("NeonDetectionPostProcessRegularNmsTest")
+//{
+//    std::vector<float> boxEncodings({
+//                                        0.0f, 0.0f, 0.0f, 0.0f,
+//                                        0.0f, 1.0f, 0.0f, 0.0f,
+//                                        0.0f, -1.0f, 0.0f, 0.0f,
+//                                        0.0f, 0.0f, 0.0f, 0.0f,
+//                                        0.0f, 1.0f, 0.0f, 0.0f,
+//                                        0.0f, 0.0f, 0.0f, 0.0f
+//                                    });
+//    std::vector<float> scores({
+//                                  0.0f, 0.9f, 0.8f,
+//                                  0.0f, 0.75f, 0.72f,
+//                                  0.0f, 0.6f, 0.5f,
+//                                  0.0f, 0.93f, 0.95f,
+//                                  0.0f, 0.5f, 0.4f,
+//                                  0.0f, 0.3f, 0.2f
+//                              });
+//    std::vector<float> anchors({
+//                                   0.5f, 0.5f, 1.0f, 1.0f,
+//                                   0.5f, 0.5f, 1.0f, 1.0f,
+//                                   0.5f, 0.5f, 1.0f, 1.0f,
+//                                   0.5f, 10.5f, 1.0f, 1.0f,
+//                                   0.5f, 10.5f, 1.0f, 1.0f,
+//                                   0.5f, 100.5f, 1.0f, 1.0f
+//                               });
+//    DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::Float32>(neonDefaultBackends,
+//                                                                     boxEncodings,
+//                                                                     scores,
+//                                                                     anchors);
+//}
 
 inline void QuantizeData(uint8_t* quant, const float* dequant, const TensorInfo& info)
 {
@@ -605,136 +609,141 @@
     }
 }
 
-BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessRegularNmsUint8Test, * boost::unit_test::disabled())
+// DISABLED
+//TEST_CASE("NeonDetectionPostProcessRegularNmsUint8Test")
+//{
+//    armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
+//    armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
+//    armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32);
+//
+//    boxEncodingsInfo.SetQuantizationScale(1.0f);
+//    boxEncodingsInfo.SetQuantizationOffset(1);
+//    scoresInfo.SetQuantizationScale(0.01f);
+//    scoresInfo.SetQuantizationOffset(0);
+//    anchorsInfo.SetQuantizationScale(0.5f);
+//    anchorsInfo.SetQuantizationOffset(0);
+//
+//    std::vector<float> boxEncodings({
+//                                        0.0f, 0.0f, 0.0f, 0.0f,
+//                                        0.0f, 1.0f, 0.0f, 0.0f,
+//                                        0.0f, -1.0f, 0.0f, 0.0f,
+//                                        0.0f, 0.0f, 0.0f, 0.0f,
+//                                        0.0f, 1.0f, 0.0f, 0.0f,
+//                                        0.0f, 0.0f, 0.0f, 0.0f
+//                                    });
+//    std::vector<float> scores({
+//                                  0.0f, 0.9f, 0.8f,
+//                                  0.0f, 0.75f, 0.72f,
+//                                  0.0f, 0.6f, 0.5f,
+//                                  0.0f, 0.93f, 0.95f,
+//                                  0.0f, 0.5f, 0.4f,
+//                                  0.0f, 0.3f, 0.2f
+//                              });
+//    std::vector<float> anchors({
+//                                   0.5f, 0.5f, 1.0f, 1.0f,
+//                                   0.5f, 0.5f, 1.0f, 1.0f,
+//                                   0.5f, 0.5f, 1.0f, 1.0f,
+//                                   0.5f, 10.5f, 1.0f, 1.0f,
+//                                   0.5f, 10.5f, 1.0f, 1.0f,
+//                                   0.5f, 100.5f, 1.0f, 1.0f
+//                               });
+//
+//    std::vector<uint8_t> qBoxEncodings(boxEncodings.size(), 0);
+//    std::vector<uint8_t> qScores(scores.size(), 0);
+//    std::vector<uint8_t> qAnchors(anchors.size(), 0);
+//    QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
+//    QuantizeData(qScores.data(), scores.data(), scoresInfo);
+//    QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
+//    DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends, qBoxEncodings,
+//                                                                             qScores, qAnchors,
+//                                                                             1.0f, 1, 0.01f, 0, 0.5f, 0);
+//}
+// DISABLED
+//TEST_CASE("NeonDetectionPostProcessFastNmsTest")
+//{
+//    std::vector<float> boxEncodings({
+//                                        0.0f, 0.0f, 0.0f, 0.0f,
+//                                        0.0f, 1.0f, 0.0f, 0.0f,
+//                                        0.0f, -1.0f, 0.0f, 0.0f,
+//                                        0.0f, 0.0f, 0.0f, 0.0f,
+//                                        0.0f, 1.0f, 0.0f, 0.0f,
+//                                        0.0f, 0.0f, 0.0f, 0.0f
+//                                    });
+//    std::vector<float> scores({
+//                                  0.0f, 0.9f, 0.8f,
+//                                  0.0f, 0.75f, 0.72f,
+//                                  0.0f, 0.6f, 0.5f,
+//                                  0.0f, 0.93f, 0.95f,
+//                                  0.0f, 0.5f, 0.4f,
+//                                  0.0f, 0.3f, 0.2f
+//                              });
+//    std::vector<float> anchors({
+//                                   0.5f, 0.5f, 1.0f, 1.0f,
+//                                   0.5f, 0.5f, 1.0f, 1.0f,
+//                                   0.5f, 0.5f, 1.0f, 1.0f,
+//                                   0.5f, 10.5f, 1.0f, 1.0f,
+//                                   0.5f, 10.5f, 1.0f, 1.0f,
+//                                   0.5f, 100.5f, 1.0f, 1.0f
+//                               });
+//    DetectionPostProcessFastNmsEndToEnd<armnn::DataType::Float32>(neonDefaultBackends,
+//                                                                  boxEncodings,
+//                                                                  scores,
+//                                                                  anchors);
+//}
+//
+// DISABLED
+//TEST_CASE("NeonDetectionPostProcessFastNmsUint8Test")
+//{
+//    armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
+//    armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
+//    armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32);
+//
+//    boxEncodingsInfo.SetQuantizationScale(1.0f);
+//    boxEncodingsInfo.SetQuantizationOffset(1);
+//    scoresInfo.SetQuantizationScale(0.01f);
+//    scoresInfo.SetQuantizationOffset(0);
+//    anchorsInfo.SetQuantizationScale(0.5f);
+//    anchorsInfo.SetQuantizationOffset(0);
+//
+//    std::vector<float> boxEncodings({
+//                                        0.0f, 0.0f, 0.0f, 0.0f,
+//                                        0.0f, 1.0f, 0.0f, 0.0f,
+//                                        0.0f, -1.0f, 0.0f, 0.0f,
+//                                        0.0f, 0.0f, 0.0f, 0.0f,
+//                                        0.0f, 1.0f, 0.0f, 0.0f,
+//                                        0.0f, 0.0f, 0.0f, 0.0f
+//                                    });
+//    std::vector<float> scores({
+//                                  0.0f, 0.9f, 0.8f,
+//                                  0.0f, 0.75f, 0.72f,
+//                                  0.0f, 0.6f, 0.5f,
+//                                  0.0f, 0.93f, 0.95f,
+//                                  0.0f, 0.5f, 0.4f,
+//                                  0.0f, 0.3f, 0.2f
+//                              });
+//    std::vector<float> anchors({
+//                                   0.5f, 0.5f, 1.0f, 1.0f,
+//                                   0.5f, 0.5f, 1.0f, 1.0f,
+//                                   0.5f, 0.5f, 1.0f, 1.0f,
+//                                   0.5f, 10.5f, 1.0f, 1.0f,
+//                                   0.5f, 10.5f, 1.0f, 1.0f,
+//                                   0.5f, 100.5f, 1.0f, 1.0f
+//                               });
+//
+//    std::vector<uint8_t> qBoxEncodings(boxEncodings.size(), 0);
+//    std::vector<uint8_t> qScores(scores.size(), 0);
+//    std::vector<uint8_t> qAnchors(anchors.size(), 0);
+//    QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
+//    QuantizeData(qScores.data(), scores.data(), scoresInfo);
+//    QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
+//    DetectionPostProcessFastNmsEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends, qBoxEncodings,
+//                                                                          qScores, qAnchors,
+//                                                                          1.0f, 1, 0.01f, 0, 0.5f, 0);
+//}
+
+TEST_CASE("NeonQLstmEndToEndTest")
 {
-    armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
-    armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
-    armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32);
-
-    boxEncodingsInfo.SetQuantizationScale(1.0f);
-    boxEncodingsInfo.SetQuantizationOffset(1);
-    scoresInfo.SetQuantizationScale(0.01f);
-    scoresInfo.SetQuantizationOffset(0);
-    anchorsInfo.SetQuantizationScale(0.5f);
-    anchorsInfo.SetQuantizationOffset(0);
-
-    std::vector<float> boxEncodings({
-                                        0.0f, 0.0f, 0.0f, 0.0f,
-                                        0.0f, 1.0f, 0.0f, 0.0f,
-                                        0.0f, -1.0f, 0.0f, 0.0f,
-                                        0.0f, 0.0f, 0.0f, 0.0f,
-                                        0.0f, 1.0f, 0.0f, 0.0f,
-                                        0.0f, 0.0f, 0.0f, 0.0f
-                                    });
-    std::vector<float> scores({
-                                  0.0f, 0.9f, 0.8f,
-                                  0.0f, 0.75f, 0.72f,
-                                  0.0f, 0.6f, 0.5f,
-                                  0.0f, 0.93f, 0.95f,
-                                  0.0f, 0.5f, 0.4f,
-                                  0.0f, 0.3f, 0.2f
-                              });
-    std::vector<float> anchors({
-                                   0.5f, 0.5f, 1.0f, 1.0f,
-                                   0.5f, 0.5f, 1.0f, 1.0f,
-                                   0.5f, 0.5f, 1.0f, 1.0f,
-                                   0.5f, 10.5f, 1.0f, 1.0f,
-                                   0.5f, 10.5f, 1.0f, 1.0f,
-                                   0.5f, 100.5f, 1.0f, 1.0f
-                               });
-
-    std::vector<uint8_t> qBoxEncodings(boxEncodings.size(), 0);
-    std::vector<uint8_t> qScores(scores.size(), 0);
-    std::vector<uint8_t> qAnchors(anchors.size(), 0);
-    QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
-    QuantizeData(qScores.data(), scores.data(), scoresInfo);
-    QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
-    DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, qBoxEncodings,
-                                                                             qScores, qAnchors,
-                                                                             1.0f, 1, 0.01f, 0, 0.5f, 0);
+    QLstmEndToEnd(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessFastNmsTest, * boost::unit_test::disabled())
-{
-    std::vector<float> boxEncodings({
-                                        0.0f, 0.0f, 0.0f, 0.0f,
-                                        0.0f, 1.0f, 0.0f, 0.0f,
-                                        0.0f, -1.0f, 0.0f, 0.0f,
-                                        0.0f, 0.0f, 0.0f, 0.0f,
-                                        0.0f, 1.0f, 0.0f, 0.0f,
-                                        0.0f, 0.0f, 0.0f, 0.0f
-                                    });
-    std::vector<float> scores({
-                                  0.0f, 0.9f, 0.8f,
-                                  0.0f, 0.75f, 0.72f,
-                                  0.0f, 0.6f, 0.5f,
-                                  0.0f, 0.93f, 0.95f,
-                                  0.0f, 0.5f, 0.4f,
-                                  0.0f, 0.3f, 0.2f
-                              });
-    std::vector<float> anchors({
-                                   0.5f, 0.5f, 1.0f, 1.0f,
-                                   0.5f, 0.5f, 1.0f, 1.0f,
-                                   0.5f, 0.5f, 1.0f, 1.0f,
-                                   0.5f, 10.5f, 1.0f, 1.0f,
-                                   0.5f, 10.5f, 1.0f, 1.0f,
-                                   0.5f, 100.5f, 1.0f, 1.0f
-                               });
-    DetectionPostProcessFastNmsEndToEnd<armnn::DataType::Float32>(defaultBackends, boxEncodings, scores, anchors);
 }
-
-BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessFastNmsUint8Test, * boost::unit_test::disabled())
-{
-    armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
-    armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
-    armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32);
-
-    boxEncodingsInfo.SetQuantizationScale(1.0f);
-    boxEncodingsInfo.SetQuantizationOffset(1);
-    scoresInfo.SetQuantizationScale(0.01f);
-    scoresInfo.SetQuantizationOffset(0);
-    anchorsInfo.SetQuantizationScale(0.5f);
-    anchorsInfo.SetQuantizationOffset(0);
-
-    std::vector<float> boxEncodings({
-                                        0.0f, 0.0f, 0.0f, 0.0f,
-                                        0.0f, 1.0f, 0.0f, 0.0f,
-                                        0.0f, -1.0f, 0.0f, 0.0f,
-                                        0.0f, 0.0f, 0.0f, 0.0f,
-                                        0.0f, 1.0f, 0.0f, 0.0f,
-                                        0.0f, 0.0f, 0.0f, 0.0f
-                                    });
-    std::vector<float> scores({
-                                  0.0f, 0.9f, 0.8f,
-                                  0.0f, 0.75f, 0.72f,
-                                  0.0f, 0.6f, 0.5f,
-                                  0.0f, 0.93f, 0.95f,
-                                  0.0f, 0.5f, 0.4f,
-                                  0.0f, 0.3f, 0.2f
-                              });
-    std::vector<float> anchors({
-                                   0.5f, 0.5f, 1.0f, 1.0f,
-                                   0.5f, 0.5f, 1.0f, 1.0f,
-                                   0.5f, 0.5f, 1.0f, 1.0f,
-                                   0.5f, 10.5f, 1.0f, 1.0f,
-                                   0.5f, 10.5f, 1.0f, 1.0f,
-                                   0.5f, 100.5f, 1.0f, 1.0f
-                               });
-
-    std::vector<uint8_t> qBoxEncodings(boxEncodings.size(), 0);
-    std::vector<uint8_t> qScores(scores.size(), 0);
-    std::vector<uint8_t> qAnchors(anchors.size(), 0);
-    QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
-    QuantizeData(qScores.data(), scores.data(), scoresInfo);
-    QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
-    DetectionPostProcessFastNmsEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, qBoxEncodings,
-                                                                          qScores, qAnchors,
-                                                                          1.0f, 1, 0.01f, 0, 0.5f, 0);
-}
-
-BOOST_AUTO_TEST_CASE(NeonQLstmEndToEndTest)
-{
-    QLstmEndToEnd(defaultBackends);
-}
-
-BOOST_AUTO_TEST_SUITE_END()
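Note on the conversion pattern applied throughout the files below: the Boost suite and case macros map onto doctest's braced TEST_SUITE block and TEST_CASE, and the plain assertion macros map onto CHECK and FAIL. A minimal sketch of the pattern (illustrative names only; it assumes a main() is provided elsewhere in the test binary):

    #include <doctest/doctest.h>

    // BOOST_AUTO_TEST_SUITE(ExampleSuite) ... BOOST_AUTO_TEST_SUITE_END()
    // becomes a braced block; the closing '}' replaces SUITE_END().
    TEST_SUITE("ExampleSuite")
    {
    TEST_CASE("ExampleCase")              // was BOOST_AUTO_TEST_CASE(ExampleCase)
    {
        const int answer = 6 * 7;
        CHECK(answer == 42);              // was BOOST_TEST / BOOST_CHECK
        if (answer != 42)
        {
            FAIL("unexpected value");     // was BOOST_FAIL: aborts the test case
        }
    }
    }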
diff --git a/src/backends/neon/test/NeonFallbackTests.cpp b/src/backends/neon/test/NeonFallbackTests.cpp
index 383a5f6..e7a56a4 100644
--- a/src/backends/neon/test/NeonFallbackTests.cpp
+++ b/src/backends/neon/test/NeonFallbackTests.cpp
@@ -8,24 +8,24 @@
 
 #include <test/GraphUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(NeonFallback)
-
-BOOST_AUTO_TEST_CASE(FallbackImportToCpuAcc)
+TEST_SUITE("NeonFallback")
+{
+TEST_CASE("FallbackImportToCpuAcc")
 {
     using namespace armnn;
 
     // Create a mock backend object
     MockImportBackendInitialiser initialiser; // Register the Mock Backend
     auto backendObjPtr = CreateBackendObject(MockImportBackendId());
-    BOOST_TEST((backendObjPtr != nullptr));
+    CHECK((backendObjPtr != nullptr));
 
     BackendIdSet backendIds = BackendRegistryInstance().GetBackendIds();
     if (backendIds.find("MockRef") == backendIds.end())
     {
         std::string message = "Cannot load MockRef";
-        BOOST_FAIL(message);
+        FAIL(message);
     }
 
     // Create runtime in which test will run and allow fallback to CpuRef.
@@ -73,12 +73,12 @@
     armnn::Layer* const layer6 = GetFirstLayerWithName(graph, "output");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layer0, layer1));
-    BOOST_TEST(CheckOrder(graph, layer1, layer2));
-    BOOST_TEST(CheckOrder(graph, layer2, layer3));
-    BOOST_TEST(CheckOrder(graph, layer3, layer4));
-    BOOST_TEST(CheckOrder(graph, layer4, layer5));
-    BOOST_TEST(CheckOrder(graph, layer5, layer6));
+    CHECK(CheckOrder(graph, layer0, layer1));
+    CHECK(CheckOrder(graph, layer1, layer2));
+    CHECK(CheckOrder(graph, layer2, layer3));
+    CHECK(CheckOrder(graph, layer3, layer4));
+    CHECK(CheckOrder(graph, layer4, layer5));
+    CHECK(CheckOrder(graph, layer5, layer6));
 
     // Load it into the runtime. It should pass.
     NetworkId netId;
@@ -131,37 +131,37 @@
 
     // Contains ImportMemGeneric
     std::size_t found = dump.find("ImportMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contains SyncMemGeneric
     found = dump.find("SyncMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Does not contain CopyMemGeneric
     found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found == std::string::npos);
+    CHECK(found == std::string::npos);
 
     // Use memory import between backends
-    BOOST_TEST((layer4->GetType() == LayerType::MemImport));
+    CHECK((layer4->GetType() == LayerType::MemImport));
 
     // Check output is as expected
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(FallbackPaddingCopyToCpuAcc)
+TEST_CASE("FallbackPaddingCopyToCpuAcc")
 {
     using namespace armnn;
 
     // Create a mock backend object
     MockImportBackendInitialiser initialiser; // Register the Mock Backend
     auto backendObjPtr = CreateBackendObject(MockImportBackendId());
-    BOOST_TEST((backendObjPtr != nullptr));
+    CHECK((backendObjPtr != nullptr));
 
     BackendIdSet backendIds = BackendRegistryInstance().GetBackendIds();
     if (backendIds.find("MockRef") == backendIds.end())
     {
         std::string message = "Cannot load MockRef";
-        BOOST_FAIL(message);
+        FAIL(message);
     }
 
     // Create runtime in which test will run and allow fallback to CpuRef.
@@ -208,11 +208,11 @@
     armnn::Layer* const layer5 = GetFirstLayerWithName(graph, "output");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layer0, layer1));
-    BOOST_TEST(CheckOrder(graph, layer1, layer2));
-    BOOST_TEST(CheckOrder(graph, layer2, layer3));
-    BOOST_TEST(CheckOrder(graph, layer3, layer4));
-    BOOST_TEST(CheckOrder(graph, layer4, layer5));
+    CHECK(CheckOrder(graph, layer0, layer1));
+    CHECK(CheckOrder(graph, layer1, layer2));
+    CHECK(CheckOrder(graph, layer2, layer3));
+    CHECK(CheckOrder(graph, layer3, layer4));
+    CHECK(CheckOrder(graph, layer4, layer5));
 
     // Load it into the runtime. It should pass.
     NetworkId netId;
@@ -261,37 +261,37 @@
 
     // Contains CopyMemGeneric between the backends
     std::size_t found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contains SyncMemGeneric for the output
     found = dump.find("SyncMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Does not contain ImportMemGeneric
     found = dump.find("ImportMemGeneric");
-    BOOST_TEST(found == std::string::npos);
+    CHECK(found == std::string::npos);
 
     // Use memory import between backends
-    BOOST_TEST((layer3->GetType() == LayerType::MemCopy));
+    CHECK((layer3->GetType() == LayerType::MemCopy));
 
     // Check output is as expected
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(FallbackImportFromCpuAcc)
+TEST_CASE("FallbackImportFromCpuAcc")
 {
     using namespace armnn;
 
     // Create a mock backend object
     MockImportBackendInitialiser initialiser; // Register the Mock Backend
     auto backendObjPtr = CreateBackendObject(MockImportBackendId());
-    BOOST_TEST((backendObjPtr != nullptr));
+    CHECK((backendObjPtr != nullptr));
 
     BackendIdSet backendIds = BackendRegistryInstance().GetBackendIds();
     if (backendIds.find("MockRef") == backendIds.end())
     {
         std::string message = "Cannot load MockRef";
-        BOOST_FAIL(message);
+        FAIL(message);
     }
 
     // Create runtime in which test will run and allow fallback to CpuRef.
@@ -339,12 +339,12 @@
     armnn::Layer* const layer6 = GetFirstLayerWithName(graph, "output");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layer0, layer1));
-    BOOST_TEST(CheckOrder(graph, layer1, layer2));
-    BOOST_TEST(CheckOrder(graph, layer2, layer3));
-    BOOST_TEST(CheckOrder(graph, layer3, layer4));
-    BOOST_TEST(CheckOrder(graph, layer4, layer5));
-    BOOST_TEST(CheckOrder(graph, layer5, layer6));
+    CHECK(CheckOrder(graph, layer0, layer1));
+    CHECK(CheckOrder(graph, layer1, layer2));
+    CHECK(CheckOrder(graph, layer2, layer3));
+    CHECK(CheckOrder(graph, layer3, layer4));
+    CHECK(CheckOrder(graph, layer4, layer5));
+    CHECK(CheckOrder(graph, layer5, layer6));
 
     // Load it into the runtime. It should pass.
     NetworkId netId;
@@ -398,37 +398,37 @@
 
     // Contains ImportMemGeneric
     std::size_t found = dump.find("ImportMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contains SyncMemGeneric
     found = dump.find("SyncMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Does not contain CopyMemGeneric
     found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found == std::string::npos);
+    CHECK(found == std::string::npos);
 
     // Use memory import between backends
-    BOOST_TEST((layer4->GetType() == LayerType::MemImport));
+    CHECK((layer4->GetType() == LayerType::MemImport));
 
     // Check output is as expected
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(FallbackPaddingCopyFromCpuAcc)
+TEST_CASE("FallbackPaddingCopyFromCpuAcc")
 {
     using namespace armnn;
 
     // Create a mock backend object
     MockImportBackendInitialiser initialiser; // Register the Mock Backend
     auto backendObjPtr = CreateBackendObject(MockImportBackendId());
-    BOOST_TEST((backendObjPtr != nullptr));
+    CHECK((backendObjPtr != nullptr));
 
     BackendIdSet backendIds = BackendRegistryInstance().GetBackendIds();
     if (backendIds.find("MockRef") == backendIds.end())
     {
         std::string message = "Cannot load MockRef";
-        BOOST_FAIL(message);
+        FAIL(message);
     }
 
     // Create runtime in which test will run and allow fallback to CpuRef.
@@ -475,11 +475,11 @@
     armnn::Layer* const layer5 = GetFirstLayerWithName(graph, "output");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layer0, layer1));
-    BOOST_TEST(CheckOrder(graph, layer1, layer2));
-    BOOST_TEST(CheckOrder(graph, layer2, layer3));
-    BOOST_TEST(CheckOrder(graph, layer3, layer4));
-    BOOST_TEST(CheckOrder(graph, layer4, layer5));
+    CHECK(CheckOrder(graph, layer0, layer1));
+    CHECK(CheckOrder(graph, layer1, layer2));
+    CHECK(CheckOrder(graph, layer2, layer3));
+    CHECK(CheckOrder(graph, layer3, layer4));
+    CHECK(CheckOrder(graph, layer4, layer5));
 
     // Load it into the runtime. It should pass.
     NetworkId netId;
@@ -528,37 +528,37 @@
 
     // Contains CopyMemGeneric between the backends
     std::size_t found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contains SyncMemGeneric for the output
     found = dump.find("SyncMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Does not contain ImportMemGeneric
     found = dump.find("ImportMemGeneric");
-    BOOST_TEST(found == std::string::npos);
+    CHECK(found == std::string::npos);
 
     // Use memory import between backends
-    BOOST_TEST((layer3->GetType() == LayerType::MemCopy));
+    CHECK((layer3->GetType() == LayerType::MemCopy));
 
     // Check output is as expected
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(FallbackDisableImportFromCpuAcc)
+TEST_CASE("FallbackDisableImportFromCpuAcc")
 {
     using namespace armnn;
 
     // Create a mock backend object
     MockImportBackendInitialiser initialiser; // Register the Mock Backend
     auto backendObjPtr = CreateBackendObject(MockImportBackendId());
-    BOOST_TEST((backendObjPtr != nullptr));
+    CHECK((backendObjPtr != nullptr));
 
     BackendIdSet backendIds = BackendRegistryInstance().GetBackendIds();
     if (backendIds.find("MockRef") == backendIds.end())
     {
         std::string message = "Cannot load MockRef";
-        BOOST_FAIL(message);
+        FAIL(message);
     }
 
     // Create runtime in which test will run and allow fallback to CpuRef.
@@ -604,12 +604,12 @@
     armnn::Layer* const layer6 = GetFirstLayerWithName(graph, "output");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layer0, layer1));
-    BOOST_TEST(CheckOrder(graph, layer1, layer2));
-    BOOST_TEST(CheckOrder(graph, layer2, layer3));
-    BOOST_TEST(CheckOrder(graph, layer3, layer4));
-    BOOST_TEST(CheckOrder(graph, layer4, layer5));
-    BOOST_TEST(CheckOrder(graph, layer5, layer6));
+    CHECK(CheckOrder(graph, layer0, layer1));
+    CHECK(CheckOrder(graph, layer1, layer2));
+    CHECK(CheckOrder(graph, layer2, layer3));
+    CHECK(CheckOrder(graph, layer3, layer4));
+    CHECK(CheckOrder(graph, layer4, layer5));
+    CHECK(CheckOrder(graph, layer5, layer6));
 
     // Load it into the runtime. It should pass.
     NetworkId netId;
@@ -663,21 +663,21 @@
 
     // Contains CopyMemGeneric between the backends
     std::size_t found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Does not contain ImportMemGeneric
     found = dump.find("ImportMemGeneric");
-    BOOST_TEST(found == std::string::npos);
+    CHECK(found == std::string::npos);
 
     // Use memory import between backends
-    BOOST_TEST((layer4->GetType() == LayerType::MemCopy));
+    CHECK((layer4->GetType() == LayerType::MemCopy));
 
     // Check output is as expected
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
 }
 
 #if defined(ARMCOMPUTECL_ENABLED)
-BOOST_AUTO_TEST_CASE(NeonImportEnabledFallbackToCl)
+TEST_CASE("NeonImportEnabledFallbackToCl")
 {
     using namespace armnn;
 
@@ -728,18 +728,18 @@
     armnn::Layer* const layer6 = GetFirstLayerWithName(graph, "output");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layer0, layer1));
-    BOOST_TEST(CheckOrder(graph, layer1, layer2));
-    BOOST_TEST(CheckOrder(graph, layer2, layer3));
-    BOOST_TEST(CheckOrder(graph, layer3, layer4));
-    BOOST_TEST(CheckOrder(graph, layer4, layer5));
-    BOOST_TEST(CheckOrder(graph, layer5, layer6));
+    CHECK(CheckOrder(graph, layer0, layer1));
+    CHECK(CheckOrder(graph, layer1, layer2));
+    CHECK(CheckOrder(graph, layer2, layer3));
+    CHECK(CheckOrder(graph, layer3, layer4));
+    CHECK(CheckOrder(graph, layer4, layer5));
+    CHECK(CheckOrder(graph, layer5, layer6));
 
     // Use memory import between backends
-    BOOST_TEST((layer4->GetType() == LayerType::MemCopy));
+    CHECK((layer4->GetType() == LayerType::MemCopy));
 
     // Correctly use backend hint
-    BOOST_TEST((layer5->GetBackendId() == Compute::GpuAcc ));
+    CHECK((layer5->GetBackendId() == Compute::GpuAcc ));
 
     // Load it into the runtime. It should pass.
     NetworkId netId;
@@ -779,7 +779,7 @@
     size_t space = totalBytes + alignment + alignment;
     auto inputData = std::make_unique<uint8_t[]>(space);
     void* alignedInputPtr = inputData.get();
-    BOOST_CHECK(std::align(alignment, totalBytes, alignedInputPtr, space));
+    CHECK(std::align(alignment, totalBytes, alignedInputPtr, space));
 
     auto* intputPtr = reinterpret_cast<float*>(alignedInputPtr);
     std::copy(inputData2.begin(), inputData2.end(), intputPtr);
@@ -808,21 +808,21 @@
 
     // Executed Subtraction using GpuAcc
     std::size_t found = dump.find("ClSubtractionWorkload_Execute");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contains CopyMemGeneric
     found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Check output is as expected
     for(unsigned int i = 0; i < numElements; ++i)
     {
-        BOOST_TEST(outputData[i] == expectedOutput[i]);
+        CHECK(outputData[i] == expectedOutput[i]);
     }
     runtime->UnloadNetwork(netId);
 }
 
-BOOST_AUTO_TEST_CASE(NeonImportDisabledFallbackToCl)
+TEST_CASE("NeonImportDisabledFallbackToCl")
 {
     using namespace armnn;
 
@@ -872,18 +872,18 @@
     armnn::Layer* const layer6 = GetFirstLayerWithName(graph, "output");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layer0, layer1));
-    BOOST_TEST(CheckOrder(graph, layer1, layer2));
-    BOOST_TEST(CheckOrder(graph, layer2, layer3));
-    BOOST_TEST(CheckOrder(graph, layer3, layer4));
-    BOOST_TEST(CheckOrder(graph, layer4, layer5));
-    BOOST_TEST(CheckOrder(graph, layer5, layer6));
+    CHECK(CheckOrder(graph, layer0, layer1));
+    CHECK(CheckOrder(graph, layer1, layer2));
+    CHECK(CheckOrder(graph, layer2, layer3));
+    CHECK(CheckOrder(graph, layer3, layer4));
+    CHECK(CheckOrder(graph, layer4, layer5));
+    CHECK(CheckOrder(graph, layer5, layer6));
 
     // Use memory import between backends
-    BOOST_TEST((layer4->GetType() == LayerType::MemCopy));
+    CHECK((layer4->GetType() == LayerType::MemCopy));
 
     // Correctly use backend hint
-    BOOST_TEST((layer5->GetBackendId() == Compute::GpuAcc ));
+    CHECK((layer5->GetBackendId() == Compute::GpuAcc ));
 
     // Load it into the runtime. It should pass.
     NetworkId netId;
@@ -934,17 +934,17 @@
 
     // Executed Subtraction using GpuAcc
     std::size_t found = dump.find("ClSubtractionWorkload_Execute");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contains CopyMemGeneric
     found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Check output is as expected
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(NeonImportEnabledFallbackSubgraphToCl)
+TEST_CASE("NeonImportEnabledFallbackSubgraphToCl")
 {
     using namespace armnn;
 
@@ -1007,21 +1007,21 @@
     armnn::Layer* const layer8 = GetFirstLayerWithName(graph, "output");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layer0, layer1));
-    BOOST_TEST(CheckOrder(graph, layer1, layer2));
-    BOOST_TEST(CheckOrder(graph, layer2, layer3));
-    BOOST_TEST(CheckOrder(graph, layer3, layer4));
-    BOOST_TEST(CheckOrder(graph, layer4, layer5));
-    BOOST_TEST(CheckOrder(graph, layer5, layer6));
-    BOOST_TEST(CheckOrder(graph, layer6, layer7));
-    BOOST_TEST(CheckOrder(graph, layer7, layer8));
+    CHECK(CheckOrder(graph, layer0, layer1));
+    CHECK(CheckOrder(graph, layer1, layer2));
+    CHECK(CheckOrder(graph, layer2, layer3));
+    CHECK(CheckOrder(graph, layer3, layer4));
+    CHECK(CheckOrder(graph, layer4, layer5));
+    CHECK(CheckOrder(graph, layer5, layer6));
+    CHECK(CheckOrder(graph, layer6, layer7));
+    CHECK(CheckOrder(graph, layer7, layer8));
 
     // Use memory import between backends
-    BOOST_TEST((layer4->GetType() == LayerType::MemCopy));
-    BOOST_TEST((layer6->GetType() == LayerType::MemCopy));
+    CHECK((layer4->GetType() == LayerType::MemCopy));
+    CHECK((layer6->GetType() == LayerType::MemCopy));
 
     // Correctly use backend hint
-    BOOST_TEST((layer5->GetBackendId() == Compute::GpuAcc ));
+    CHECK((layer5->GetBackendId() == Compute::GpuAcc ));
 
     // Load it into the runtime. It should pass.
     NetworkId netId;
@@ -1056,7 +1056,7 @@
     size_t space = totalBytes + alignment + alignment;
     auto inputData = std::make_unique<uint8_t[]>(space);
     void* alignedInputPtr = inputData.get();
-    BOOST_CHECK(std::align(alignment, totalBytes, alignedInputPtr, space));
+    CHECK(std::align(alignment, totalBytes, alignedInputPtr, space));
 
     auto* intputPtr = reinterpret_cast<float*>(alignedInputPtr);
     std::copy(inputData2.begin(), inputData2.end(), intputPtr);
@@ -1085,26 +1085,26 @@
 
     // Executed Subtraction using GpuAcc
     std::size_t found = dump.find("ClSubtractionWorkload_Execute");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Correctly switch back to CpuAcc
     found = dump.find("NeonPooling2dWorkload_Execute");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contains CopyMemGeneric
     found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contains SyncMemGeneric for output
     found = dump.find("SyncMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Check output is as expected
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
     runtime->UnloadNetwork(netId);
 }
 
-BOOST_AUTO_TEST_CASE(NeonImportDisableFallbackSubgraphToCl)
+TEST_CASE("NeonImportDisableFallbackSubgraphToCl")
 {
     using namespace armnn;
 
@@ -1162,21 +1162,21 @@
     armnn::Layer* const layer8 = GetFirstLayerWithName(graph, "output");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layer0, layer1));
-    BOOST_TEST(CheckOrder(graph, layer1, layer2));
-    BOOST_TEST(CheckOrder(graph, layer2, layer3));
-    BOOST_TEST(CheckOrder(graph, layer3, layer4));
-    BOOST_TEST(CheckOrder(graph, layer4, layer5));
-    BOOST_TEST(CheckOrder(graph, layer5, layer6));
-    BOOST_TEST(CheckOrder(graph, layer6, layer7));
-    BOOST_TEST(CheckOrder(graph, layer7, layer8));
+    CHECK(CheckOrder(graph, layer0, layer1));
+    CHECK(CheckOrder(graph, layer1, layer2));
+    CHECK(CheckOrder(graph, layer2, layer3));
+    CHECK(CheckOrder(graph, layer3, layer4));
+    CHECK(CheckOrder(graph, layer4, layer5));
+    CHECK(CheckOrder(graph, layer5, layer6));
+    CHECK(CheckOrder(graph, layer6, layer7));
+    CHECK(CheckOrder(graph, layer7, layer8));
 
     // Use memory import between backends
-    BOOST_TEST((layer4->GetType() == LayerType::MemCopy));
-    BOOST_TEST((layer6->GetType() == LayerType::MemCopy));
+    CHECK((layer4->GetType() == LayerType::MemCopy));
+    CHECK((layer6->GetType() == LayerType::MemCopy));
 
     // Correctly use backend hint
-    BOOST_TEST((layer5->GetBackendId() == Compute::GpuAcc ));
+    CHECK((layer5->GetBackendId() == Compute::GpuAcc ));
 
     // Load it into the runtime. It should pass.
     NetworkId netId;
@@ -1224,19 +1224,19 @@
 
     // Executed Subtraction using GpuAcc
     std::size_t found = dump.find("ClSubtractionWorkload_Execute");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Correctly switch back to CpuAcc
     found = dump.find("NeonPooling2dWorkload_Execute");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contains CopyMemGeneric
     found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Check output is as expected
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
 }
 #endif
 
-BOOST_AUTO_TEST_SUITE_END()
+}
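Note: the comparisons of layer types and backend ids in the fallback tests above keep the extra pair of parentheses that the Boost versions already had, e.g. CHECK((layer4->GetType() == LayerType::MemCopy)). The inner parentheses reduce the comparison to a single bool before the macro expands, so doctest never tries to decompose or stringify the enum operands. A small illustration with a made-up enum:

    #include <doctest/doctest.h>

    enum class LayerKind { MemCopy, MemImport };   // illustrative stand-in, not armnn::LayerType

    TEST_CASE("ParenthesisedComparison")
    {
        LayerKind kind = LayerKind::MemCopy;
        // Inner parentheses: the comparison is evaluated to a bool first,
        // so the assertion macro never needs to print a LayerKind value.
        CHECK((kind == LayerKind::MemCopy));
    }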
diff --git a/src/backends/neon/test/NeonJsonPrinterTests.cpp b/src/backends/neon/test/NeonJsonPrinterTests.cpp
index a8d90fd..6139c75 100644
--- a/src/backends/neon/test/NeonJsonPrinterTests.cpp
+++ b/src/backends/neon/test/NeonJsonPrinterTests.cpp
@@ -7,16 +7,16 @@
 
 #include <backendsCommon/test/JsonPrinterTestImpl.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <vector>
 
-BOOST_AUTO_TEST_SUITE(NeonJsonPrinter)
-
-BOOST_AUTO_TEST_CASE(SoftmaxProfilerJsonPrinterCpuAccTest)
+TEST_SUITE("NeonJsonPrinter")
+{
+TEST_CASE("SoftmaxProfilerJsonPrinterCpuAccTest")
 {
     std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
     RunSoftmaxProfilerJsonPrinterTest(backends);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/backends/neon/test/NeonLayerSupportTests.cpp b/src/backends/neon/test/NeonLayerSupportTests.cpp
index 13a4c73..494c8f9 100644
--- a/src/backends/neon/test/NeonLayerSupportTests.cpp
+++ b/src/backends/neon/test/NeonLayerSupportTests.cpp
@@ -14,132 +14,132 @@
 #include <backendsCommon/test/IsLayerSupportedTestImpl.hpp>
 #include <backendsCommon/test/LayerTests.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(NeonLayerSupport)
-
-BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat16Neon)
+TEST_SUITE("NeonLayerSupport")
+{
+TEST_CASE("IsLayerSupportedFloat16Neon")
 {
     armnn::NeonWorkloadFactory factory =
         NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
     IsLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::DataType::Float16>(&factory);
 }
 
-BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat32Neon)
+TEST_CASE("IsLayerSupportedFloat32Neon")
 {
     armnn::NeonWorkloadFactory factory =
         NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
     IsLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::DataType::Float32>(&factory);
 }
 
-BOOST_AUTO_TEST_CASE(IsLayerSupportedQAsymmU8Neon)
+TEST_CASE("IsLayerSupportedQAsymmU8Neon")
 {
     armnn::NeonWorkloadFactory factory =
         NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
     IsLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::DataType::QAsymmU8>(&factory);
 }
 
-BOOST_AUTO_TEST_CASE(IsLayerSupportedQAsymmS8Neon)
+TEST_CASE("IsLayerSupportedQAsymmS8Neon")
 {
     armnn::NeonWorkloadFactory factory =
         NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
     IsLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::DataType::QAsymmS8>(&factory);
 }
 
-BOOST_AUTO_TEST_CASE(IsLayerSupportedQSymmS8Neon)
+TEST_CASE("IsLayerSupportedQSymmS8Neon")
 {
     armnn::NeonWorkloadFactory factory =
         NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
     IsLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::DataType::QSymmS8>(&factory);
 }
 
-BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedNeon)
+TEST_CASE("IsConvertFp16ToFp32SupportedNeon")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::ConvertFp16ToFp32Layer,
       armnn::DataType::Float16, armnn::DataType::Float32>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedNeon)
+TEST_CASE("IsConvertFp32ToFp16SupportedNeon")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::ConvertFp32ToFp16Layer,
       armnn::DataType::Float32, armnn::DataType::Float16>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_AUTO_TEST_CASE(IsLogicalBinarySupportedNeon)
+TEST_CASE("IsLogicalBinarySupportedNeon")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsLogicalBinaryLayerSupportedTests<armnn::NeonWorkloadFactory,
       armnn::DataType::Boolean, armnn::DataType::Boolean>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_AUTO_TEST_CASE(IsLogicalBinaryBroadcastSupportedNeon)
+TEST_CASE("IsLogicalBinaryBroadcastSupportedNeon")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsLogicalBinaryLayerBroadcastSupportedTests<armnn::NeonWorkloadFactory,
       armnn::DataType::Boolean, armnn::DataType::Boolean>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_AUTO_TEST_CASE(IsMeanSupportedNeon)
+TEST_CASE("IsMeanSupportedNeon")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsMeanLayerSupportedTests<armnn::NeonWorkloadFactory,
       armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_AUTO_TEST_CASE(IsConstantSupportedNeon)
+TEST_CASE("IsConstantSupportedNeon")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
             armnn::DataType::Float16>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
             armnn::DataType::Float32>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
             armnn::DataType::QAsymmU8>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
             armnn::DataType::Boolean>(reasonIfUnsupported);
-    BOOST_CHECK(!result);
+    CHECK(!result);
 
     result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
             armnn::DataType::QSymmS16>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
             armnn::DataType::QSymmS8>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
             armnn::DataType::QAsymmS8>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
             armnn::DataType::BFloat16>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index d12817e..edc8cb9 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -14,10 +14,10 @@
 #include <backendsCommon/test/ActivationFixture.hpp>
 #include <backendsCommon/test/LayerTests.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(Compute_ArmComputeNeon)
-
+TEST_SUITE("Compute_ArmComputeNeon")
+{
 using namespace armnn;
 
 using FactoryType = NeonWorkloadFactory;
@@ -308,7 +308,7 @@
 }
 }
 
-BOOST_AUTO_TEST_CASE(DepthwiseConv2dUtils)
+TEST_CASE("DepthwiseConv2dUtils")
 {
     const DataType dataType = DataType::Float32;
 
@@ -323,73 +323,73 @@
     // Strides supported: 1,2,3
     descriptor = MakeDepthwiseConv2dDesc(1, 1);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+    CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
                                                             weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(1, 2);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+    CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
                                                             weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(1, 3);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+    CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
                                                             weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(2, 1);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+    CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
                                                             weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(2, 2);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+    CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
                                                             weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(2, 3);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+    CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
                                                             weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(3, 1);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+    CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
                                                             weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(3, 2);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+    CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
                                                             weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(3, 3);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+    CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
                                                             weightsInfo3x3, biasesInfo));
 
     // Supported stride 4
     descriptor = MakeDepthwiseConv2dDesc(4, 1);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+    CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
                                                             weightsInfo3x3, biasesInfo));
 
     // Supported weights shape 1x1
     TensorInfo weightsInfo1x1({ 1, 1, 1, 1 }, DataType::Float32);
     descriptor = MakeDepthwiseConv2dDesc(1, 1);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo1x1, descriptor, dataType);
-    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+    CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
                                                             weightsInfo1x1, biasesInfo));
 
     // Supported shape 2x2
     TensorInfo weightsInfo2x2({ 1, 1, 2, 2 }, DataType::Float32);
     descriptor = MakeDepthwiseConv2dDesc(1, 1);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo2x2, descriptor, dataType);
-    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+    CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
                                                             weightsInfo2x2, biasesInfo));
 
     // Asymmetric padding
     descriptor = MakeDepthwiseConv2dDesc(1, 1, 1, 1, 2, 1, 2);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+    CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
                                                             weightsInfo3x3, biasesInfo));
 }
 
@@ -1498,4 +1498,4 @@
 
 #endif
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/neon/test/NeonLayerTests_NDK_Bug.cpp b/src/backends/neon/test/NeonLayerTests_NDK_Bug.cpp
index 1fef439..5a65b15 100644
--- a/src/backends/neon/test/NeonLayerTests_NDK_Bug.cpp
+++ b/src/backends/neon/test/NeonLayerTests_NDK_Bug.cpp
@@ -8,8 +8,10 @@
 #include <neon/NeonWorkloadFactory.hpp>
 #include <test/UnitTests.hpp>
 
-BOOST_AUTO_TEST_SUITE(Compute_ArmComputeNeon)
+#include <doctest/doctest.h>
 
+TEST_SUITE("Compute_ArmComputeNeon")
+{
 using namespace armnn;
 
 using FactoryType = NeonWorkloadFactory;
@@ -44,4 +46,4 @@
 
 #endif
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/neon/test/NeonMemCopyTests.cpp b/src/backends/neon/test/NeonMemCopyTests.cpp
index 2bb9e3d..0485092 100644
--- a/src/backends/neon/test/NeonMemCopyTests.cpp
+++ b/src/backends/neon/test/NeonMemCopyTests.cpp
@@ -12,44 +12,44 @@
 #include <reference/RefWorkloadFactory.hpp>
 #include <reference/test/RefWorkloadFactoryHelper.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(NeonMemCopy)
-
-BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndNeon)
+TEST_SUITE("NeonMemCopy")
+{
+TEST_CASE("CopyBetweenCpuAndNeon")
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::RefWorkloadFactory, armnn::NeonWorkloadFactory, armnn::DataType::Float32>(false);
     auto predResult = CompareTensors(result.m_ActualData,  result.m_ExpectedData,
                                      result.m_ActualShape, result.m_ExpectedShape);
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndCpu)
+TEST_CASE("CopyBetweenNeonAndCpu")
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::NeonWorkloadFactory, armnn::RefWorkloadFactory, armnn::DataType::Float32>(false);
     auto predResult = CompareTensors(result.m_ActualData,  result.m_ExpectedData,
                                      result.m_ActualShape, result.m_ExpectedShape);
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndNeonWithSubtensors)
+TEST_CASE("CopyBetweenCpuAndNeonWithSubtensors")
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::RefWorkloadFactory, armnn::NeonWorkloadFactory, armnn::DataType::Float32>(true);
     auto predResult = CompareTensors(result.m_ActualData,  result.m_ExpectedData,
                                      result.m_ActualShape, result.m_ExpectedShape);
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndCpuWithSubtensors)
+TEST_CASE("CopyBetweenNeonAndCpuWithSubtensors")
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::NeonWorkloadFactory, armnn::RefWorkloadFactory, armnn::DataType::Float32>(true);
     auto predResult = CompareTensors(result.m_ActualData,  result.m_ExpectedData,
                                      result.m_ActualShape, result.m_ExpectedShape);
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
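Note: the two-argument form BOOST_TEST(condition, message) used by the MemCopy tests maps to CHECK_MESSAGE(condition, message); the message is reported only when the condition fails. A minimal sketch with an invented diagnostic string:

    #include <doctest/doctest.h>
    #include <sstream>

    TEST_CASE("MessageOnFailure")
    {
        std::ostringstream why;
        why << "tensors differ at element " << 3;   // illustrative diagnostic only
        const bool comparisonPassed = true;
        // was BOOST_TEST(comparisonPassed, why.str())
        CHECK_MESSAGE(comparisonPassed, why.str());
    }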
diff --git a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
index 4944c31..9b448b2 100644
--- a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
+++ b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
@@ -10,11 +10,11 @@
 
 #include <neon/NeonWorkloadFactory.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(NeonOptimizedNetwork)
-
-BOOST_AUTO_TEST_CASE(OptimizeValidateCpuAccDeviceSupportLayerNoFallback)
+TEST_SUITE("NeonOptimizedNetwork")
+{
+TEST_CASE("OptimizeValidateCpuAccDeviceSupportLayerNoFallback")
 {
     // build up the structure of the network
     armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -30,7 +30,7 @@
 
     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
-    BOOST_CHECK(optNet);
+    CHECK(optNet);
     // validate workloads
     armnn::NeonWorkloadFactory fact =
         NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
@@ -38,13 +38,13 @@
     armnn::Graph& graph = GetGraphForTesting(optNet.get());
     for (auto&& layer : graph)
     {
-        BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
-        BOOST_CHECK_NO_THROW(
+        CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
+        CHECK_NOTHROW(
             layer->CreateWorkload(fact));
     }
 }
 
-BOOST_AUTO_TEST_CASE(OptimizeValidateDeviceNonSupportLayerNoFallback)
+TEST_CASE("OptimizeValidateDeviceNonSupportLayerNoFallback")
 {
     // build up the structure of the network
     armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -72,16 +72,16 @@
     try
     {
         Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
-        BOOST_FAIL("Should have thrown an exception.");
+        FAIL("Should have thrown an exception.");
     }
     catch (const armnn::InvalidArgumentException& e)
     {
         // Different exceptions are thrown on different backends
     }
-    BOOST_CHECK(errMessages.size() > 0);
+    CHECK(errMessages.size() > 0);
 }
 
-BOOST_AUTO_TEST_CASE(FastMathEnabledTestOnCpuAcc)
+TEST_CASE("FastMathEnabledTestOnCpuAcc")
 {
     armnn::INetworkPtr net(armnn::INetwork::Create());
 
@@ -102,16 +102,16 @@
     armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
     *net, backends, runtime->GetDeviceSpec(), optimizerOptions);
 
-    BOOST_CHECK(optimizedNet);
+    CHECK(optimizedNet);
 
     auto modelOptionsOut = GetModelOptionsForTesting(optimizedNet.get());
 
-    BOOST_TEST(modelOptionsOut.size() == 1);
-    BOOST_TEST(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
-    BOOST_TEST(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
+    CHECK(modelOptionsOut.size() == 1);
+    CHECK(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
+    CHECK(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
 }
 
-BOOST_AUTO_TEST_CASE(NumberOfThreadsTestOnCpuAcc)
+TEST_CASE("NumberOfThreadsTestOnCpuAcc")
 {
     armnn::INetworkPtr net(armnn::INetwork::Create());
 
@@ -134,15 +134,15 @@
     armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
             *net, backends, runtime->GetDeviceSpec(), optimizerOptions);
 
-    BOOST_CHECK(optimizedNet);
+    CHECK(optimizedNet);
     std::unique_ptr<armnn::Graph> graphPtr;
     armnn::OptimizedNetworkImpl impl(std::move(graphPtr), optimizerOptions.m_ModelOptions);
 
     auto modelOptionsOut = impl.GetModelOptions();
 
-    BOOST_TEST(modelOptionsOut.size() == 1);
-    BOOST_TEST(modelOptionsOut[0].GetOption(0).GetName() == "NumberOfThreads");
-    BOOST_TEST(modelOptionsOut[0].GetOption(0).GetValue().AsUnsignedInt() == numberOfThreads);
+    CHECK(modelOptionsOut.size() == 1);
+    CHECK(modelOptionsOut[0].GetOption(0).GetName() == "NumberOfThreads");
+    CHECK(modelOptionsOut[0].GetOption(0).GetValue().AsUnsignedInt() == numberOfThreads);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/backends/neon/test/NeonRuntimeTests.cpp b/src/backends/neon/test/NeonRuntimeTests.cpp
index 27361dd..ee5666d 100644
--- a/src/backends/neon/test/NeonRuntimeTests.cpp
+++ b/src/backends/neon/test/NeonRuntimeTests.cpp
@@ -10,11 +10,11 @@
 #include <backendsCommon/test/RuntimeTestImpl.hpp>
 #include <test/ProfilingTestUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(NeonRuntime)
-
-BOOST_AUTO_TEST_CASE(RuntimeValidateCpuAccDeviceSupportLayerNoFallback)
+TEST_SUITE("NeonRuntime")
+{
+TEST_CASE("RuntimeValidateCpuAccDeviceSupportLayerNoFallback")
 {
     // build up the structure of the network
     armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -30,17 +30,17 @@
 
     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
-    BOOST_CHECK(optNet);
+    CHECK(optNet);
 
     // Load it into the runtime. It should succeed.
     armnn::NetworkId netId;
-    BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == armnn::Status::Success);
+    CHECK(runtime->LoadNetwork(netId, std::move(optNet)) == armnn::Status::Success);
 }
 
 #ifdef ARMNN_LEAK_CHECKING_ENABLED
-BOOST_AUTO_TEST_CASE(RuntimeMemoryLeaksCpuAcc)
+TEST_CASE("RuntimeMemoryLeaksCpuAcc")
 {
-    BOOST_TEST(ARMNN_LEAK_CHECKER_IS_ACTIVE());
+    CHECK(ARMNN_LEAK_CHECKER_IS_ACTIVE());
     armnn::IRuntime::CreationOptions options;
     armnn::RuntimeImpl runtime(options);
     armnn::RuntimeLoadedNetworksReserve(&runtime);
@@ -54,21 +54,21 @@
 
     {
         ARMNN_SCOPED_LEAK_CHECKER("LoadAndUnloadNetworkCpuAcc");
-        BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
+        CHECK(ARMNN_NO_LEAKS_IN_SCOPE());
         // In the second run we check for all remaining memory
         // in use after the network was unloaded. If there is any
         // then it will be treated as a memory leak.
         CreateAndDropDummyNetwork(backends, runtime);
-        BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
-        BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
-        BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
+        CHECK(ARMNN_NO_LEAKS_IN_SCOPE());
+        CHECK(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
+        CHECK(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
     }
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(ProfilingPostOptimisationStructureCpuAcc)
+TEST_CASE("ProfilingPostOptimisationStructureCpuAcc")
 {
     VerifyPostOptimisationStructureTestImpl(armnn::Compute::CpuAcc);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/backends/neon/test/NeonTensorHandleTests.cpp b/src/backends/neon/test/NeonTensorHandleTests.cpp
index 0e24e95..eabf3c8 100644
--- a/src/backends/neon/test/NeonTensorHandleTests.cpp
+++ b/src/backends/neon/test/NeonTensorHandleTests.cpp
@@ -15,13 +15,14 @@
 #include <arm_compute/runtime/Allocator.h>
 #include <backendsCommon/test/CommonTestUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 #include <armnn/utility/Assert.hpp>
 
-BOOST_AUTO_TEST_SUITE(NeonTensorHandleTests)
+TEST_SUITE("NeonTensorHandleTests")
+{
 using namespace armnn;
 
-BOOST_AUTO_TEST_CASE(NeonTensorHandleGetCapabilitiesNoPadding)
+TEST_CASE("NeonTensorHandleGetCapabilitiesNoPadding")
 {
     std::shared_ptr<NeonMemoryManager> memoryManager = std::make_shared<NeonMemoryManager>();
     NeonTensorHandleFactory handleFactory(memoryManager);
@@ -43,18 +44,18 @@
     std::vector<Capability> capabilities = handleFactory.GetCapabilities(input,
                                                                          softmax,
                                                                          CapabilityClass::PaddingRequired);
-    BOOST_TEST(capabilities.empty());
+    CHECK(capabilities.empty());
 
     // No padding required for Softmax
     capabilities = handleFactory.GetCapabilities(softmax, output, CapabilityClass::PaddingRequired);
-    BOOST_TEST(capabilities.empty());
+    CHECK(capabilities.empty());
 
     // No padding required for output
     capabilities = handleFactory.GetCapabilities(output, nullptr, CapabilityClass::PaddingRequired);
-    BOOST_TEST(capabilities.empty());
+    CHECK(capabilities.empty());
 }
 
-BOOST_AUTO_TEST_CASE(NeonTensorHandleGetCapabilitiesPadding)
+TEST_CASE("NeonTensorHandleGetCapabilitiesPadding")
 {
     std::shared_ptr<NeonMemoryManager> memoryManager = std::make_shared<NeonMemoryManager>();
     NeonTensorHandleFactory handleFactory(memoryManager);
@@ -75,20 +76,20 @@
     std::vector<Capability> capabilities = handleFactory.GetCapabilities(input,
                                                                          pooling,
                                                                          CapabilityClass::PaddingRequired);
-    BOOST_TEST(capabilities.empty());
+    CHECK(capabilities.empty());
 
     // No padding required for output
     capabilities = handleFactory.GetCapabilities(output, nullptr, CapabilityClass::PaddingRequired);
-    BOOST_TEST(capabilities.empty());
+    CHECK(capabilities.empty());
 
     // Padding required for Pooling2d
     capabilities = handleFactory.GetCapabilities(pooling, output, CapabilityClass::PaddingRequired);
-    BOOST_TEST(capabilities.size() == 1);
-    BOOST_TEST((capabilities[0].m_CapabilityClass == CapabilityClass::PaddingRequired));
-    BOOST_TEST(capabilities[0].m_Value);
+    CHECK(capabilities.size() == 1);
+    CHECK((capabilities[0].m_CapabilityClass == CapabilityClass::PaddingRequired));
+    CHECK(capabilities[0].m_Value);
 }
 
-BOOST_AUTO_TEST_CASE(ConcatOnXorYSubTensorsNoPaddingRequiredTest)
+TEST_CASE("ConcatOnXorYSubTensorsNoPaddingRequiredTest")
 {
     armnn::INetworkPtr net(armnn::INetwork::Create());
 
@@ -163,7 +164,7 @@
     }
 }
 
-BOOST_AUTO_TEST_CASE(ConcatonXorYPaddingRequiredTest)
+TEST_CASE("ConcatonXorYPaddingRequiredTest")
 {
     armnn::INetworkPtr net(armnn::INetwork::Create());
 
@@ -246,7 +247,7 @@
     ARMNN_ASSERT(numberOfSubTensors == 0);
 }
 
-BOOST_AUTO_TEST_CASE(SplitteronXorYNoPaddingRequiredTest)
+TEST_CASE("SplitteronXorYNoPaddingRequiredTest")
 {
     using namespace armnn;
 
@@ -443,14 +444,14 @@
         std::vector<float> out = outputStorage.at(it.first);
         for (unsigned int i = 0; i < out.size(); ++i)
         {
-            BOOST_CHECK_MESSAGE(Compare<armnn::DataType::Float32>(it.second[i], out[i], tolerance) == true,
+            CHECK_MESSAGE(Compare<armnn::DataType::Float32>(it.second[i], out[i], tolerance) == true,
                     "Actual output: " << out[i] << ". Expected output:" << it.second[i]);
 
         }
     }
 }
 
-BOOST_AUTO_TEST_CASE(SplitteronXorYPaddingRequiredTest)
+TEST_CASE("SplitteronXorYPaddingRequiredTest")
 {
     using namespace armnn;
 
@@ -618,14 +619,14 @@
         std::vector<float> out = outputStorage.at(it.first);
         for (unsigned int i = 0; i < out.size(); ++i)
         {
-            BOOST_CHECK_MESSAGE(Compare<armnn::DataType::Float32>(it.second[i], out[i], tolerance) == true,
+            CHECK_MESSAGE(Compare<armnn::DataType::Float32>(it.second[i], out[i], tolerance) == true,
                     "Actual output: " << out[i] << ". Expected output:" << it.second[i]);
 
         }
     }
 }
 
-BOOST_AUTO_TEST_CASE(NeonTensorHandleFactoryMemoryManaged)
+TEST_CASE("NeonTensorHandleFactoryMemoryManaged")
 {
     std::shared_ptr<NeonMemoryManager> memoryManager = std::make_shared<NeonMemoryManager>(
         std::make_unique<arm_compute::Allocator>(),
@@ -641,31 +642,31 @@
     memoryManager->Acquire();
     {
         float* buffer = reinterpret_cast<float*>(handle->Map());
-        BOOST_CHECK(buffer != nullptr); // Yields a valid pointer
+        CHECK(buffer != nullptr); // Yields a valid pointer
         buffer[0] = 1.5f;
         buffer[1] = 2.5f;
-        BOOST_CHECK(buffer[0] == 1.5f); // Memory is writable and readable
-        BOOST_CHECK(buffer[1] == 2.5f); // Memory is writable and readable
+        CHECK(buffer[0] == 1.5f); // Memory is writable and readable
+        CHECK(buffer[1] == 2.5f); // Memory is writable and readable
     }
     memoryManager->Release();
 
     memoryManager->Acquire();
     {
         float* buffer = reinterpret_cast<float*>(handle->Map());
-        BOOST_CHECK(buffer != nullptr); // Yields a valid pointer
+        CHECK(buffer != nullptr); // Yields a valid pointer
         buffer[0] = 3.5f;
         buffer[1] = 4.5f;
-        BOOST_CHECK(buffer[0] == 3.5f); // Memory is writable and readable
-        BOOST_CHECK(buffer[1] == 4.5f); // Memory is writable and readable
+        CHECK(buffer[0] == 3.5f); // Memory is writable and readable
+        CHECK(buffer[1] == 4.5f); // Memory is writable and readable
     }
     memoryManager->Release();
 
     float testPtr[2] = { 2.5f, 5.5f };
     // Cannot import as import is disabled
-    BOOST_CHECK_THROW(handle->Import(static_cast<void*>(testPtr), MemorySource::Malloc), MemoryImportException);
+    CHECK_THROWS_AS(handle->Import(static_cast<void*>(testPtr), MemorySource::Malloc), MemoryImportException);
 }
 
-BOOST_AUTO_TEST_CASE(NeonTensorHandleFactoryImport)
+TEST_CASE("NeonTensorHandleFactoryImport")
 {
     std::shared_ptr<NeonMemoryManager> memoryManager = std::make_shared<NeonMemoryManager>(
         std::make_unique<arm_compute::Allocator>(),
@@ -680,25 +681,25 @@
     memoryManager->Acquire();
 
     // No buffer allocated when import is enabled
-    BOOST_CHECK((PolymorphicDowncast<NeonTensorHandle*>(handle.get()))->GetTensor().buffer() == nullptr);
+    CHECK((PolymorphicDowncast<NeonTensorHandle*>(handle.get()))->GetTensor().buffer() == nullptr);
 
     float testPtr[2] = { 2.5f, 5.5f };
     // Correctly import
-    BOOST_CHECK(handle->Import(static_cast<void*>(testPtr), MemorySource::Malloc));
+    CHECK(handle->Import(static_cast<void*>(testPtr), MemorySource::Malloc));
     float* buffer = reinterpret_cast<float*>(handle->Map());
-    BOOST_CHECK(buffer != nullptr); // Yields a valid pointer after import
-    BOOST_CHECK(buffer == testPtr); // buffer is pointing to testPtr
+    CHECK(buffer != nullptr); // Yields a valid pointer after import
+    CHECK(buffer == testPtr); // buffer is pointing to testPtr
     // Memory is writable and readable with correct value
-    BOOST_CHECK(buffer[0] == 2.5f);
-    BOOST_CHECK(buffer[1] == 5.5f);
+    CHECK(buffer[0] == 2.5f);
+    CHECK(buffer[1] == 5.5f);
     buffer[0] = 3.5f;
     buffer[1] = 10.0f;
-    BOOST_CHECK(buffer[0] == 3.5f);
-    BOOST_CHECK(buffer[1] == 10.0f);
+    CHECK(buffer[0] == 3.5f);
+    CHECK(buffer[1] == 10.0f);
     memoryManager->Release();
 }
 
-BOOST_AUTO_TEST_CASE(NeonTensorHandleSupportsInPlaceComputation)
+TEST_CASE("NeonTensorHandleSupportsInPlaceComputation")
 {
     std::shared_ptr<NeonMemoryManager> memoryManager = std::make_shared<NeonMemoryManager>();
     NeonTensorHandleFactory handleFactory(memoryManager);
@@ -707,4 +708,4 @@
     ARMNN_ASSERT(handleFactory.SupportsInPlaceComputation());
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
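Note: the exception assertions follow the same one-to-one mapping, BOOST_CHECK_NO_THROW to CHECK_NOTHROW (workload creation in NeonOptimizedNetworkTests above) and BOOST_CHECK_THROW to CHECK_THROWS_AS (the tensor handle Import check above). A sketch with illustrative lambdas, using std::runtime_error in place of the ArmNN exception types:

    #include <doctest/doctest.h>
    #include <stdexcept>

    TEST_CASE("ExceptionAssertions")
    {
        auto throwing    = []() { throw std::runtime_error("import not supported"); };
        auto nonThrowing = []() { return 42; };

        CHECK_NOTHROW(nonThrowing());                       // was BOOST_CHECK_NO_THROW
        CHECK_THROWS_AS(throwing(), std::runtime_error);    // was BOOST_CHECK_THROW
    }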
diff --git a/src/backends/neon/test/NeonTimerTest.cpp b/src/backends/neon/test/NeonTimerTest.cpp
index df014d5..d2bb97c 100644
--- a/src/backends/neon/test/NeonTimerTest.cpp
+++ b/src/backends/neon/test/NeonTimerTest.cpp
@@ -18,23 +18,23 @@
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <cstdlib>
 #include <algorithm>
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(NeonTimerInstrument)
+TEST_SUITE("NeonTimerInstrument")
+{
 
-
-BOOST_AUTO_TEST_CASE(NeonTimerGetName)
+TEST_CASE("NeonTimerGetName")
 {
     NeonTimer neonTimer;
-    BOOST_CHECK_EQUAL(neonTimer.GetName(), "NeonKernelTimer");
+    CHECK_EQ(std::string(neonTimer.GetName()), "NeonKernelTimer");
 }
 
-BOOST_AUTO_TEST_CASE(NeonTimerMeasure)
+TEST_CASE("NeonTimerMeasure")
 {
     NeonWorkloadFactory workloadFactory =
         NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
@@ -95,19 +95,19 @@
 
     std::vector<Measurement> measurements = neonTimer.GetMeasurements();
 
-    BOOST_CHECK(measurements.size() <= 2);
+    CHECK(measurements.size() <= 2);
     if (measurements.size() > 1)
     {
-        BOOST_CHECK_EQUAL(measurements[0].m_Name, "NeonKernelTimer/0: NEFillBorderKernel");
-        BOOST_CHECK(measurements[0].m_Value > 0.0);
+        CHECK_EQ(measurements[0].m_Name, "NeonKernelTimer/0: NEFillBorderKernel");
+        CHECK(measurements[0].m_Value > 0.0);
     }
     std::ostringstream oss_neon;
     std::ostringstream oss_cpu;
     oss_neon << "NeonKernelTimer/" << measurements.size()-1 << ": NEActivationLayerKernel";
     oss_cpu << "NeonKernelTimer/" << measurements.size()-1 << ": CpuActivationKernel";
-    BOOST_CHECK(measurements[measurements.size()-1].m_Name == oss_neon.str() ||
-                measurements[measurements.size()-1].m_Name == oss_cpu.str());
-    BOOST_CHECK(measurements[measurements.size()-1].m_Value > 0.0);
+    CHECK((measurements[measurements.size()-1].m_Name == oss_neon.str() ||
+                measurements[measurements.size()-1].m_Name == oss_cpu.str()));
+    CHECK(measurements[measurements.size()-1].m_Value > 0.0);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
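
Two doctest details show up in NeonTimerTest.cpp above. doctest's assertion macros decompose the expression they are given and do not accept a top-level || or &&, so the compound name check is wrapped in an extra pair of parentheses. Also, CHECK_EQ on a const char* compares pointers unless DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING is defined, so the timer name is wrapped in std::string to force a value comparison. A small sketch of both patterns (values are placeholders; GetName() is assumed to return a C string, as above):

    #include <doctest/doctest.h>
    #include <string>

    TEST_CASE("NamePlusCompoundConditionSketch")
    {
        const char* name = "NeonKernelTimer";
        CHECK_EQ(std::string(name), "NeonKernelTimer");   // value comparison, not pointer comparison

        const int a = 1;
        const int b = 2;
        CHECK((a == 1 || b == 3));   // extra parentheses: doctest cannot decompose ||
    }
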
diff --git a/src/backends/reference/test/ArgMinMaxTests.cpp b/src/backends/reference/test/ArgMinMaxTests.cpp
index dce15b2..b79a108 100644
--- a/src/backends/reference/test/ArgMinMaxTests.cpp
+++ b/src/backends/reference/test/ArgMinMaxTests.cpp
@@ -5,11 +5,11 @@
 
 #include <reference/workloads/ArgMinMax.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(RefArgMinMax)
-
-BOOST_AUTO_TEST_CASE(ArgMinTest)
+TEST_SUITE("RefArgMinMax")
+{
+TEST_CASE("ArgMinTest")
 {
     const armnn::TensorInfo inputInfo({ 1, 2, 3 } , armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo({ 1, 3 }, armnn::DataType::Signed64);
@@ -25,14 +25,11 @@
                armnn::ArgMinMaxFunction::Min,
                -2);
 
-    BOOST_CHECK_EQUAL_COLLECTIONS(outputValues.begin(),
-                                  outputValues.end(),
-                                  expectedValues.begin(),
-                                  expectedValues.end());
+    CHECK(std::equal(outputValues.begin(), outputValues.end(), expectedValues.begin(), expectedValues.end()));
 
 }
 
-BOOST_AUTO_TEST_CASE(ArgMaxTest)
+TEST_CASE("ArgMaxTest")
 {
     const armnn::TensorInfo inputInfo({ 1, 2, 3 } , armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo({ 1, 3 }, armnn::DataType::Signed64);
@@ -48,11 +45,8 @@
                armnn::ArgMinMaxFunction::Max,
                -2);
 
-    BOOST_CHECK_EQUAL_COLLECTIONS(outputValues.begin(),
-                                  outputValues.end(),
-                                  expectedValues.begin(),
-                                  expectedValues.end());
+    CHECK(std::equal(outputValues.begin(), outputValues.end(), expectedValues.begin(), expectedValues.end()));
 
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
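
doctest has no direct replacement for BOOST_CHECK_EQUAL_COLLECTIONS, so collection comparisons become CHECK(std::equal(first1, last1, first2, last2)). std::equal lives in <algorithm> (included explicitly in some of these files, pulled in transitively elsewhere), and unlike the Boost macro a failure only reports "false" rather than the first mismatching element. A sketch of the pattern with placeholder vectors:

    #include <doctest/doctest.h>
    #include <algorithm>
    #include <vector>

    TEST_CASE("CollectionEqualitySketch")
    {
        const std::vector<int> output   = { 1, 2, 3 };
        const std::vector<int> expected = { 1, 2, 3 };
        // Element-wise comparison collapsed into a single boolean check.
        CHECK(std::equal(output.begin(), output.end(), expected.begin(), expected.end()));
    }
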
diff --git a/src/backends/reference/test/CMakeLists.txt b/src/backends/reference/test/CMakeLists.txt
index c71c9d7..76541cf 100644
--- a/src/backends/reference/test/CMakeLists.txt
+++ b/src/backends/reference/test/CMakeLists.txt
@@ -24,3 +24,4 @@
 target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/backends)
 target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/profiling)
 target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/profiling/common/include)
+target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/third-party/doctest)
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index 0f86e7e..4293ef5 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -10,6 +10,8 @@
 #include <reference/RefWorkloadFactory.hpp>
 #include <reference/workloads/RefWorkloads.hpp>
 
+#include <doctest/doctest.h>
+
 namespace
 {
 
@@ -19,8 +21,8 @@
     auto queueDescriptor = workload->GetData();
     auto inputHandle  = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo));
-    BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
+    CHECK((inputHandle->GetTensorInfo() == inputInfo));
+    CHECK((outputHandle->GetTensorInfo() == outputInfo));
 }
 
 template <typename Workload>
@@ -33,9 +35,9 @@
     auto inputHandle0     = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto inputHandle1     = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[1]);
     auto outputHandle    = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST((inputHandle0->GetTensorInfo() == inputInfo0));
-    BOOST_TEST((inputHandle1->GetTensorInfo() == inputInfo1));
-    BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
+    CHECK((inputHandle0->GetTensorInfo() == inputInfo0));
+    CHECK((inputHandle1->GetTensorInfo() == inputInfo1));
+    CHECK((outputHandle->GetTensorInfo() == outputInfo));
 }
 
 armnn::RefWorkloadFactory GetFactory()
@@ -47,8 +49,8 @@
 
 }
 
-BOOST_AUTO_TEST_SUITE(CreateWorkloadRef)
-
+TEST_SUITE("CreateWorkloadRef")
+{
 template <typename ActivationWorkloadType, armnn::DataType DataType>
 static void RefCreateActivationWorkloadTest()
 {
@@ -62,12 +64,12 @@
         TensorInfo({ 1, 1 }, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload)
+TEST_CASE("CreateActivationFloat32Workload")
 {
     RefCreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateActivationUint8Workload)
+TEST_CASE("CreateActivationUint8Workload")
 {
     RefCreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::QAsymmU8>();
 }
@@ -89,7 +91,7 @@
         TensorInfo({ 2, 3 }, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateSubtractionWorkloadWithBlobTest)
+TEST_CASE("CreateSubtractionWorkloadWithBlobTest")
 {
     Graph graph;
     RefWorkloadFactory factory = GetFactory();
@@ -106,7 +108,7 @@
         TensorInfo({ 2, 3 }, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateAdditionWorkloadWithBlobTest)
+TEST_CASE("CreateAdditionWorkloadWithBlobTest")
 {
     Graph graph;
     RefWorkloadFactory factory = GetFactory();
@@ -122,7 +124,7 @@
         TensorInfo({ 2, 3 }, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateMultiplicationWorkloadWithBlobTest)
+TEST_CASE("CreateMultiplicationWorkloadWithBlobTest")
 {
     Graph              graph;
     RefWorkloadFactory factory  = GetFactory();
@@ -138,7 +140,7 @@
                       TensorInfo({2, 3}, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
+TEST_CASE("CreateAdditionFloatWorkload")
 {
     RefCreateElementwiseWorkloadTest<RefAdditionWorkload<>,
         AdditionQueueDescriptor,
@@ -146,7 +148,7 @@
         armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateAdditionUint8Workload)
+TEST_CASE("CreateAdditionUint8Workload")
 {
     RefCreateElementwiseWorkloadTest<RefAdditionWorkload<>,
         AdditionQueueDescriptor,
@@ -154,7 +156,7 @@
         armnn::DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateAdditionInt16Workload)
+TEST_CASE("CreateAdditionInt16Workload")
 {
     RefCreateElementwiseWorkloadTest<RefAdditionWorkload<>,
         AdditionQueueDescriptor,
@@ -162,7 +164,7 @@
         armnn::DataType::QSymmS16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateAdditionInt32Workload)
+TEST_CASE("CreateAdditionInt32Workload")
 {
     RefCreateElementwiseWorkloadTest<RefAdditionWorkload<int32_t>,
             AdditionQueueDescriptor,
@@ -170,7 +172,7 @@
             armnn::DataType::Signed32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSubtractionFloat32Workload)
+TEST_CASE("CreateSubtractionFloat32Workload")
 {
     RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
         SubtractionQueueDescriptor,
@@ -178,7 +180,7 @@
         armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload)
+TEST_CASE("CreateSubtractionFloat16Workload")
 {
     RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
         SubtractionQueueDescriptor,
@@ -186,7 +188,7 @@
         armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
+TEST_CASE("CreateSubtractionUint8Workload")
 {
     RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
         SubtractionQueueDescriptor,
@@ -194,7 +196,7 @@
         armnn::DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSubtractionInt16Workload)
+TEST_CASE("CreateSubtractionInt16Workload")
 {
     RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
         SubtractionQueueDescriptor,
@@ -202,7 +204,7 @@
         armnn::DataType::QSymmS16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSubtractionInt32Workload)
+TEST_CASE("CreateSubtractionInt32Workload")
 {
     RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<int32_t>,
             SubtractionQueueDescriptor,
@@ -210,7 +212,7 @@
             armnn::DataType::Signed32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
+TEST_CASE("CreateMultiplicationFloatWorkload")
 {
     RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<>,
         MultiplicationQueueDescriptor,
@@ -218,7 +220,7 @@
         armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
+TEST_CASE("CreateMultiplicationUint8Workload")
 {
     RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<>,
         MultiplicationQueueDescriptor,
@@ -226,7 +228,7 @@
         armnn::DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateMultiplicationInt16Workload)
+TEST_CASE("CreateMultiplicationInt16Workload")
 {
     RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<>,
         MultiplicationQueueDescriptor,
@@ -234,7 +236,7 @@
         armnn::DataType::QSymmS16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateMultiplicationInt32Workload)
+TEST_CASE("CreateMultiplicationInt32Workload")
 {
     RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<int32_t>,
             MultiplicationQueueDescriptor,
@@ -242,7 +244,7 @@
             armnn::DataType::Signed32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateDivisionFloat32Workload)
+TEST_CASE("CreateDivisionFloat32Workload")
 {
     RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
         DivisionQueueDescriptor,
@@ -250,7 +252,7 @@
         armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateDivisionFloat16Workload)
+TEST_CASE("CreateDivisionFloat16Workload")
 {
     RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
         DivisionQueueDescriptor,
@@ -258,7 +260,7 @@
         armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateDivisionUint8Workload)
+TEST_CASE("CreateDivisionUint8Workload")
 {
     RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
         DivisionQueueDescriptor,
@@ -266,7 +268,7 @@
         armnn::DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateDivisionInt16Workload)
+TEST_CASE("CreateDivisionInt16Workload")
 {
     RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
         DivisionQueueDescriptor,
@@ -274,7 +276,7 @@
         armnn::DataType::QSymmS16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateDivisionInt32Workload)
+TEST_CASE("CreateDivisionInt32Workload")
 {
     RefCreateElementwiseWorkloadTest<RefDivisionWorkload<int32_t>,
             DivisionQueueDescriptor,
@@ -311,7 +313,7 @@
     CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationWithBlobFloat32Workload)
+TEST_CASE("CreateBatchNormalizationWithBlobFloat32Workload")
 {
     Graph graph;
     RefWorkloadFactory factory = GetFactory();
@@ -329,55 +331,55 @@
     CheckInputOutput(std::move(workload), TensorInfo(inputShape, dataType), TensorInfo(outputShape, dataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32Workload)
+TEST_CASE("CreateBatchNormalizationFloat32Workload")
 {
     RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload,armnn::DataType::Float32>
             (DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32WorkloadNhwc)
+TEST_CASE("CreateBatchNormalizationFloat32WorkloadNhwc")
 {
     RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::Float32>
             (DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16Workload)
+TEST_CASE("CreateBatchNormalizationFloat16Workload")
 {
     RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload,armnn::DataType::Float16>
             (DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16WorkloadNhwc)
+TEST_CASE("CreateBatchNormalizationFloat16WorkloadNhwc")
 {
     RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::Float16>
             (DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8Workload)
+TEST_CASE("CreateBatchNormalizationUint8Workload")
 {
     RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QAsymmU8>
             (DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8WorkloadNhwc)
+TEST_CASE("CreateBatchNormalizationUint8WorkloadNhwc")
 {
     RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QAsymmU8>
             (DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16Workload)
+TEST_CASE("CreateBatchNormalizationInt16Workload")
 {
     RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QSymmS16>
             (DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16WorkloadNhwc)
+TEST_CASE("CreateBatchNormalizationInt16WorkloadNhwc")
 {
     RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QSymmS16>
             (DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Float32Workload)
+TEST_CASE("CreateConvertFp16ToFp32Float32Workload")
 {
     Graph                graph;
     RefWorkloadFactory factory = GetFactory();
@@ -388,7 +390,7 @@
         std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float16), TensorInfo({1, 3, 2, 3}, DataType::Float32));
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Float16Workload)
+TEST_CASE("CreateConvertFp32ToFp16Float16Workload")
 {
     Graph                graph;
     RefWorkloadFactory factory = GetFactory();
@@ -417,17 +419,17 @@
                      TensorInfo(outputShape, DataType::Float32));
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload)
+TEST_CASE("CreateConvolution2dFloatNchwWorkload")
 {
     RefCreateConvolution2dWorkloadTest(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
+TEST_CASE("CreateConvolution2dFloatNhwcWorkload")
 {
     RefCreateConvolution2dWorkloadTest(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvolution2dWithBlobWorkload)
+TEST_CASE("CreateConvolution2dWithBlobWorkload")
 {
     DataLayout dataLayout = DataLayout::NHWC;
     Graph graph;
@@ -464,12 +466,12 @@
                      TensorInfo(outputShape, DataType::Float32));
 }
 
-BOOST_AUTO_TEST_CASE(CreateDepthwiseConvolutionFloat32NhwcWorkload)
+TEST_CASE("CreateDepthwiseConvolutionFloat32NhwcWorkload")
 {
     RefCreateDepthwiseConvolutionWorkloadTest(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(RefCreateFullyConnectedWithBlobWorkloadTest)
+TEST_CASE("RefCreateFullyConnectedWithBlobWorkloadTest")
 {
     Graph graph;
     RefWorkloadFactory factory = GetFactory();
@@ -499,17 +501,17 @@
         TensorInfo({ 3, 7 }, DataType, outputQScale));
 }
 
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadFloat32)
+TEST_CASE("CreateFullyConnectedWorkloadFloat32")
 {
     RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedAsymm8)
+TEST_CASE("CreateFullyConnectedWorkloadQuantisedAsymm8")
 {
     RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedSymm16)
+TEST_CASE("CreateFullyConnectedWorkloadQuantisedSymm16")
 {
     RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QSymmS16>();
 }
@@ -541,32 +543,32 @@
     CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateRefNormalizationFloat32NchwWorkload)
+TEST_CASE("CreateRefNormalizationFloat32NchwWorkload")
 {
     RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateRefNormalizationFloat32NhwcWorkload)
+TEST_CASE("CreateRefNormalizationFloat32NhwcWorkload")
 {
     RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateRefNormalizationUint8NchwWorkload)
+TEST_CASE("CreateRefNormalizationUint8NchwWorkload")
 {
     RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateRefNormalizationUint8NhwcWorkload)
+TEST_CASE("CreateRefNormalizationUint8NhwcWorkload")
 {
     RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateRefNormalizationInt16NchwWorkload)
+TEST_CASE("CreateRefNormalizationInt16NchwWorkload")
 {
     RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateRefNormalizationInt16NhwcWorkload)
+TEST_CASE("CreateRefNormalizationInt16NhwcWorkload")
 {
     RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
 }
@@ -599,32 +601,32 @@
                      TensorInfo(outputShape, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32Workload)
+TEST_CASE("CreatePooling2dFloat32Workload")
 {
     RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32NhwcWorkload)
+TEST_CASE("CreatePooling2dFloat32NhwcWorkload")
 {
     RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePooling2dUint8Workload)
+TEST_CASE("CreatePooling2dUint8Workload")
 {
     RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NhwcWorkload)
+TEST_CASE("CreatePooling2dUint8NhwcWorkload")
 {
     RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePooling2dInt16Workload)
+TEST_CASE("CreatePooling2dInt16Workload")
 {
     RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePooling2dInt16NhwcWorkload)
+TEST_CASE("CreatePooling2dInt16NhwcWorkload")
 {
     RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
 }
@@ -655,22 +657,22 @@
         tensorInfo);
 }
 
-BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32Workload)
+TEST_CASE("CreateSoftmaxFloat32Workload")
 {
     RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16Workload)
+TEST_CASE("CreateSoftmaxFloat16Workload")
 {
     RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedAsymm8Workload)
+TEST_CASE("CreateSoftmaxQuantisedAsymm8Workload")
 {
     RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedSymm16Workload)
+TEST_CASE("CreateSoftmaxQuantisedSymm16Workload")
 {
     RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QSymmS16>();
 }
@@ -685,29 +687,29 @@
     // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
     SplitterQueueDescriptor queueDescriptor = workload->GetData();
     auto inputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo({ 5, 7, 7 }, DataType)));
+    CHECK((inputHandle->GetTensorInfo() == TensorInfo({ 5, 7, 7 }, DataType)));
 
     auto outputHandle0 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST((outputHandle0->GetTensorInfo() == TensorInfo({ 1, 7, 7 }, DataType)));
+    CHECK((outputHandle0->GetTensorInfo() == TensorInfo({ 1, 7, 7 }, DataType)));
 
     auto outputHandle1 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[1]);
-    BOOST_TEST((outputHandle1->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
+    CHECK((outputHandle1->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
 
     auto outputHandle2 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[2]);
-    BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
+    CHECK((outputHandle2->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterFloat32Workload)
+TEST_CASE("CreateSplitterFloat32Workload")
 {
     RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterFloat16Workload)
+TEST_CASE("CreateSplitterFloat16Workload")
 {
     RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterUint8Workload)
+TEST_CASE("CreateSplitterUint8Workload")
 {
     RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::QAsymmU8>();
 }
@@ -735,27 +737,27 @@
     armnn::RefTensorHandle* mIn0 = dynamic_cast<armnn::RefTensorHandle*>(wlConcat->GetData().m_Inputs[0]);
     armnn::RefTensorHandle* mIn1 = dynamic_cast<armnn::RefTensorHandle*>(wlConcat->GetData().m_Inputs[1]);
 
-    BOOST_TEST(sOut0);
-    BOOST_TEST(sOut1);
-    BOOST_TEST(mIn0);
-    BOOST_TEST(mIn1);
+    CHECK(sOut0);
+    CHECK(sOut1);
+    CHECK(mIn0);
+    CHECK(mIn1);
 
     bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);
 
-    BOOST_TEST(validDataPointers);
+    CHECK(validDataPointers);
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat32)
+TEST_CASE("CreateSplitterConcatFloat32")
 {
     RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat16)
+TEST_CASE("CreateSplitterConcatFloat16")
 {
     RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterConcatUint8)
+TEST_CASE("CreateSplitterConcatUint8")
 {
     RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::QAsymmU8>();
 }
@@ -785,26 +787,26 @@
     armnn::RefTensorHandle* activ1_1Im = dynamic_cast<armnn::RefTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);
 
 
-    BOOST_TEST(sOut0);
-    BOOST_TEST(sOut1);
-    BOOST_TEST(activ0_0Im);
-    BOOST_TEST(activ0_1Im);
-    BOOST_TEST(activ1_0Im);
-    BOOST_TEST(activ1_1Im);
+    CHECK(sOut0);
+    CHECK(sOut1);
+    CHECK(activ0_0Im);
+    CHECK(activ0_1Im);
+    CHECK(activ1_0Im);
+    CHECK(activ1_1Im);
 
     bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
                              (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);
 
-    BOOST_TEST(validDataPointers);
+    CHECK(validDataPointers);
 }
 
-BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsFloat32)
+TEST_CASE("CreateSingleOutputMultipleInputsFloat32")
 {
     RefCreateSingleOutputMultipleInputsTest<RefSplitterWorkload, RefActivationWorkload,
         armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsUint8)
+TEST_CASE("CreateSingleOutputMultipleInputsUint8")
 {
     RefCreateSingleOutputMultipleInputsTest<RefSplitterWorkload, RefActivationWorkload,
         armnn::DataType::QAsymmU8>();
@@ -838,27 +840,27 @@
                      TensorInfo(outputShape, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32)
+TEST_CASE("CreateResizeBilinearFloat32")
 {
     RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16)
+TEST_CASE("CreateResizeBilinearFloat16")
 {
     RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8)
+TEST_CASE("CreateResizeBilinearUint8")
 {
     RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeBilinearQuantisedAsymm16)
+TEST_CASE("CreateResizeBilinearQuantisedAsymm16")
 {
     RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32Nhwc)
+TEST_CASE("CreateResizeBilinearFloat32Nhwc")
 {
     RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
 }
@@ -876,22 +878,22 @@
                      TensorInfo({ 1, 1, 1, 1 }, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdFloat32)
+TEST_CASE("CreateBatchToSpaceNdFloat32")
 {
     RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdFloat16)
+TEST_CASE("CreateBatchToSpaceNdFloat16")
 {
     RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdUint8)
+TEST_CASE("CreateBatchToSpaceNdUint8")
 {
     RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdQSymm16)
+TEST_CASE("CreateBatchToSpaceNdQSymm16")
 {
     RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QSymmS16>();
 }
@@ -924,32 +926,32 @@
     CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32)
+TEST_CASE("CreateL2NormalizationFloat32")
 {
     RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32Nhwc)
+TEST_CASE("CreateL2NormalizationFloat32Nhwc")
 {
     RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationInt16)
+TEST_CASE("CreateL2NormalizationInt16")
 {
     RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationInt16Nhwc)
+TEST_CASE("CreateL2NormalizationInt16Nhwc")
 {
     RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationUint8)
+TEST_CASE("CreateL2NormalizationUint8")
 {
     RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationUint8Nhwc)
+TEST_CASE("CreateL2NormalizationUint8Nhwc")
 {
     RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
 }
@@ -968,17 +970,17 @@
         TensorInfo({ 1, 4 }, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadFloat32)
+TEST_CASE("CreateReshapeWorkloadFloat32")
 {
     RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedAsymm8)
+TEST_CASE("CreateReshapeWorkloadQuantisedAsymm8")
 {
     RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedSymm16)
+TEST_CASE("CreateReshapeWorkloadQuantisedSymm16")
 {
     RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QSymmS16>();
 }
@@ -997,52 +999,52 @@
                       TensorInfo(outputShape, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
+TEST_CASE("CreateConcatDim0Float32Workload")
 {
     RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim0Float16Workload)
+TEST_CASE("CreateConcatDim0Float16Workload")
 {
     RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float16>({ 4, 3, 2, 5 }, 0);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
+TEST_CASE("CreateConcatDim0Uint8Workload")
 {
     RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 4, 3, 2, 5 }, 0);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint16Workload)
+TEST_CASE("CreateConcatDim0Uint16Workload")
 {
     RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QSymmS16>({ 4, 3, 2, 5 }, 0);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
+TEST_CASE("CreateConcatDim1Float32Workload")
 {
     RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
+TEST_CASE("CreateConcatDim1Uint8Workload")
 {
     RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 6, 2, 5 }, 1);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim2Float32Workload)
+TEST_CASE("CreateConcatDim2Float32Workload")
 {
     RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 4, 5 }, 2);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim2Uint8Workload)
+TEST_CASE("CreateConcatDim2Uint8Workload")
 {
     RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 4, 5 }, 2);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
+TEST_CASE("CreateConcatDim3Float32Workload")
 {
     RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
+TEST_CASE("CreateConcatDim3Uint8Workload")
 {
     RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 }, 3);
 }
@@ -1057,25 +1059,25 @@
     // Check output is as expected
     auto queueDescriptor = workload->GetData();
     auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
+    CHECK((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
 }
 
-BOOST_AUTO_TEST_CASE(CreateConstantUint8Workload)
+TEST_CASE("CreateConstantUint8Workload")
 {
     RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 });
 }
 
-BOOST_AUTO_TEST_CASE(CreateConstantInt16Workload)
+TEST_CASE("CreateConstantInt16Workload")
 {
     RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QSymmS16>({ 2, 3, 2, 10 });
 }
 
-BOOST_AUTO_TEST_CASE(CreateConstantFloat32Workload)
+TEST_CASE("CreateConstantFloat32Workload")
 {
     RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 });
 }
 
-BOOST_AUTO_TEST_CASE(CreateConstantSigned32Workload)
+TEST_CASE("CreateConstantSigned32Workload")
 {
     RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::Signed32>({ 2, 3, 2, 10 });
 }
@@ -1097,53 +1099,53 @@
     // Check output is as expected
     auto queueDescriptor = workload->GetData();
     auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, dataType)));
+    CHECK((outputHandle->GetTensorInfo() == TensorInfo(outputShape, dataType)));
 }
 
-BOOST_AUTO_TEST_CASE(CreatePreluFloat32Workload)
+TEST_CASE("CreatePreluFloat32Workload")
 {
     RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::Float32);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePreluFloat16Workload)
+TEST_CASE("CreatePreluFloat16Workload")
 {
     RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::Float16);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload)
+TEST_CASE("CreatePreluUint8Workload")
 {
     RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QAsymmU8);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePreluInt16Workload)
+TEST_CASE("CreatePreluInt16Workload")
 {
     RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QSymmS16);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePreluFloat32NoBroadcastWorkload)
+TEST_CASE("CreatePreluFloat32NoBroadcastWorkload")
 {
-    BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
+    CHECK_THROWS_AS(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
                                                  armnn::DataType::Float32),
                       armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePreluFloat16NoBroadcastWorkload)
+TEST_CASE("CreatePreluFloat16NoBroadcastWorkload")
 {
-    BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
+    CHECK_THROWS_AS(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
                                                  armnn::DataType::Float16),
                       armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePreluUint8NoBroadcastWorkload)
+TEST_CASE("CreatePreluUint8NoBroadcastWorkload")
 {
-    BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
+    CHECK_THROWS_AS(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
                                                  armnn::DataType::QAsymmU8),
                       armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePreluInt16NoBroadcastWorkload)
+TEST_CASE("CreatePreluInt16NoBroadcastWorkload")
 {
-    BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
+    CHECK_THROWS_AS(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
                                                  armnn::DataType::QSymmS16),
                       armnn::InvalidArgumentException);
 }
@@ -1161,22 +1163,22 @@
                      TensorInfo({ 1, 1, 1, 4 }, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadFloat32)
+TEST_CASE("CreateSpaceToDepthWorkloadFloat32")
 {
     RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadFloat16)
+TEST_CASE("CreateSpaceToDepthWorkloadFloat16")
 {
     RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQASymm8)
+TEST_CASE("CreateSpaceToDepthWorkloadQASymm8")
 {
     RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQSymm16)
+TEST_CASE("CreateSpaceToDepthWorkloadQSymm16")
 {
     RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QSymmS16>();
 }
@@ -1201,23 +1203,23 @@
     for (unsigned int i = 0; i < numInputs; ++i)
     {
         auto inputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[i]);
-        BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo(inputShape, DataType)));
+        CHECK((inputHandle->GetTensorInfo() == TensorInfo(inputShape, DataType)));
     }
     auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
+    CHECK((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
 }
 
-BOOST_AUTO_TEST_CASE(CreateStackFloat32Workload)
+TEST_CASE("CreateStackFloat32Workload")
 {
     RefCreateStackWorkloadTest<armnn::DataType::Float32>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
 }
 
-BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
+TEST_CASE("CreateStackUint8Workload")
 {
     RefCreateStackWorkloadTest<armnn::DataType::QAsymmU8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
 }
 
-BOOST_AUTO_TEST_CASE(CreateStackUint16Workload)
+TEST_CASE("CreateStackUint16Workload")
 {
     RefCreateStackWorkloadTest<armnn::DataType::QSymmS16>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
 }
@@ -1241,14 +1243,14 @@
     auto cellStateOutHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[1]);
     auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[2]);
 
-    BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo));
-    BOOST_TEST((cellStateOutHandle->GetTensorInfo() == cellStateInfo));
-    BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
+    CHECK((inputHandle->GetTensorInfo() == inputInfo));
+    CHECK((cellStateOutHandle->GetTensorInfo() == cellStateInfo));
+    CHECK((outputHandle->GetTensorInfo() == outputInfo));
 }
 
-BOOST_AUTO_TEST_CASE(CreateQLstmWorkload)
+TEST_CASE("CreateQLstmWorkload")
 {
     RefCreateQLstmWorkloadTest<RefQLstmWorkload>();
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
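
Exception expectations map from BOOST_CHECK_THROW(expr, Exception) to doctest's CHECK_THROWS_AS(expr, Exception), as in the PRELU no-broadcast cases above. A sketch with a hypothetical thrower (not a function from this patch):

    #include <doctest/doctest.h>
    #include <stdexcept>

    void RequireBroadcastable(bool broadcastable)
    {
        if (!broadcastable)
        {
            throw std::invalid_argument("shapes are not broadcastable");
        }
    }

    TEST_CASE("ThrowSketch")
    {
        CHECK_THROWS_AS(RequireBroadcastable(false), std::invalid_argument);
    }
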
diff --git a/src/backends/reference/test/RefDetectionPostProcessTests.cpp b/src/backends/reference/test/RefDetectionPostProcessTests.cpp
index fab6e00..763578b 100644
--- a/src/backends/reference/test/RefDetectionPostProcessTests.cpp
+++ b/src/backends/reference/test/RefDetectionPostProcessTests.cpp
@@ -8,46 +8,46 @@
 #include <armnn/Descriptors.hpp>
 #include <armnn/Types.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(RefDetectionPostProcess)
-
-BOOST_AUTO_TEST_CASE(TopKSortTest)
+TEST_SUITE("RefDetectionPostProcess")
+{
+TEST_CASE("TopKSortTest")
 {
     unsigned int k = 3;
     unsigned int indices[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
     float values[8] = { 0, 7, 6, 5, 4, 3, 2, 500 };
     armnn::TopKSort(k, indices, values, 8);
-    BOOST_TEST(indices[0] == 7);
-    BOOST_TEST(indices[1] == 1);
-    BOOST_TEST(indices[2] == 2);
+    CHECK(indices[0] == 7);
+    CHECK(indices[1] == 1);
+    CHECK(indices[2] == 2);
 }
 
-BOOST_AUTO_TEST_CASE(FullTopKSortTest)
+TEST_CASE("FullTopKSortTest")
 {
     unsigned int k = 8;
     unsigned int indices[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
     float values[8] = { 0, 7, 6, 5, 4, 3, 2, 500 };
     armnn::TopKSort(k, indices, values, 8);
-    BOOST_TEST(indices[0] == 7);
-    BOOST_TEST(indices[1] == 1);
-    BOOST_TEST(indices[2] == 2);
-    BOOST_TEST(indices[3] == 3);
-    BOOST_TEST(indices[4] == 4);
-    BOOST_TEST(indices[5] == 5);
-    BOOST_TEST(indices[6] == 6);
-    BOOST_TEST(indices[7] == 0);
+    CHECK(indices[0] == 7);
+    CHECK(indices[1] == 1);
+    CHECK(indices[2] == 2);
+    CHECK(indices[3] == 3);
+    CHECK(indices[4] == 4);
+    CHECK(indices[5] == 5);
+    CHECK(indices[6] == 6);
+    CHECK(indices[7] == 0);
 }
 
-BOOST_AUTO_TEST_CASE(IouTest)
+TEST_CASE("IouTest")
 {
     float boxI[4] = { 0.0f, 0.0f, 10.0f, 10.0f };
     float boxJ[4] = { 1.0f, 1.0f, 11.0f, 11.0f };
     float iou = armnn::IntersectionOverUnion(boxI, boxJ);
-    BOOST_TEST(iou == 0.68, boost::test_tools::tolerance(0.001));
+    CHECK(iou == doctest::Approx(0.68).epsilon(0.001f));
 }
 
-BOOST_AUTO_TEST_CASE(NmsFunction)
+TEST_CASE("NmsFunction")
 {
     std::vector<float> boxCorners({
         0.0f, 0.0f, 1.0f, 1.0f,
@@ -63,10 +63,10 @@
     std::vector<unsigned int> result =
         armnn::NonMaxSuppression(6, boxCorners, scores, 0.0, 3, 0.5);
 
-    BOOST_TEST(result.size() == 3);
-    BOOST_TEST(result[0] == 3);
-    BOOST_TEST(result[1] == 0);
-    BOOST_TEST(result[2] == 5);
+    CHECK(result.size() == 3);
+    CHECK(result[0] == 3);
+    CHECK(result[1] == 0);
+    CHECK(result[2] == 5);
 }
 
 void DetectionPostProcessTestImpl(bool useRegularNms,
@@ -149,28 +149,22 @@
                                 detectionScores.data(),
                                 numDetections.data());
 
-    BOOST_CHECK_EQUAL_COLLECTIONS(detectionBoxes.begin(),
+    CHECK(std::equal(detectionBoxes.begin(),
                                   detectionBoxes.end(),
                                   expectedDetectionBoxes.begin(),
-                                  expectedDetectionBoxes.end());
+                                  expectedDetectionBoxes.end()));
 
-    BOOST_CHECK_EQUAL_COLLECTIONS(detectionScores.begin(),
-                                  detectionScores.end(),
-                                  expectedDetectionScores.begin(),
-                                  expectedDetectionScores.end());
+    CHECK(std::equal(detectionScores.begin(), detectionScores.end(),
+        expectedDetectionScores.begin(), expectedDetectionScores.end()));
 
-    BOOST_CHECK_EQUAL_COLLECTIONS(detectionClasses.begin(),
-                                  detectionClasses.end(),
-                                  expectedDetectionClasses.begin(),
-                                  expectedDetectionClasses.end());
+    CHECK(std::equal(detectionClasses.begin(), detectionClasses.end(),
+        expectedDetectionClasses.begin(), expectedDetectionClasses.end()));
 
-    BOOST_CHECK_EQUAL_COLLECTIONS(numDetections.begin(),
-                                  numDetections.end(),
-                                  expectedNumDetections.begin(),
-                                  expectedNumDetections.end());
+    CHECK(std::equal(numDetections.begin(), numDetections.end(),
+        expectedNumDetections.begin(), expectedNumDetections.end()));
 }
 
-BOOST_AUTO_TEST_CASE(RegularNmsDetectionPostProcess)
+TEST_CASE("RegularNmsDetectionPostProcess")
 {
     std::vector<float> expectedDetectionBoxes({
         0.0f, 10.0f, 1.0f, 11.0f,
@@ -186,7 +180,7 @@
                                  expectedDetectionScores, expectedNumDetections);
 }
 
-BOOST_AUTO_TEST_CASE(FastNmsDetectionPostProcess)
+TEST_CASE("FastNmsDetectionPostProcess")
 {
     std::vector<float> expectedDetectionBoxes({
         0.0f, 10.0f, 1.0f, 11.0f,
@@ -201,4 +195,4 @@
                                  expectedDetectionScores, expectedNumDetections);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
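
Floating-point tolerances move from boost::test_tools::tolerance to doctest::Approx, as in IouTest above; epsilon() sets a relative tolerance. A sketch with placeholder values:

    #include <doctest/doctest.h>

    TEST_CASE("ApproxSketch")
    {
        const float iou = 0.68f;
        CHECK(iou == doctest::Approx(0.68).epsilon(0.001));   // equal within 0.1% relative tolerance
    }
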
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 910df29..69a2048 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -28,14 +28,14 @@
 #include <backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp>
 #include <backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(RefEndToEnd)
-
+TEST_SUITE("RefEndToEnd")
+{
 std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuRef};
 
 // Abs
-BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestFloat32)
+TEST_CASE("RefAbsEndToEndTestFloat32")
 {
     std::vector<float> expectedOutput =
     {
@@ -48,7 +48,7 @@
                                                              expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestUint8)
+TEST_CASE("RefAbsEndToEndTestUint8")
 {
     // Note the expected output will be implicitly quantized by the below test function
     std::vector<float> expectedOutput =
@@ -62,7 +62,7 @@
                                                                      expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestInt16)
+TEST_CASE("RefAbsEndToEndTestInt16")
 {
     // Note the expected output will be implicitly quantized by the below test function
     std::vector<float> expectedOutput =
@@ -77,17 +77,17 @@
 }
 
 // Constant
-BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Float32)
+TEST_CASE("ConstantUsage_Ref_Float32")
 {
-    BOOST_TEST(ConstantUsageFloat32Test(defaultBackends));
+    CHECK(ConstantUsageFloat32Test(defaultBackends));
 }
 
-BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Uint8)
+TEST_CASE("ConstantUsage_Ref_Uint8")
 {
-    BOOST_TEST(ConstantUsageUint8Test(defaultBackends));
+    CHECK(ConstantUsageUint8Test(defaultBackends));
 }
 
-BOOST_AUTO_TEST_CASE(Unsigned8)
+TEST_CASE("Unsigned8")
 {
     using namespace armnn;
 
@@ -122,7 +122,7 @@
     // Loads it into the runtime.
     NetworkId netId;
     auto error = runtime->LoadNetwork(netId, std::move(optNet));
-    BOOST_TEST(error == Status::Success);
+    CHECK(error == Status::Success);
 
     // Creates structures for input & output.
     std::vector<uint8_t> inputData
@@ -144,14 +144,14 @@
     runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
 
     // Checks the results.
-    BOOST_TEST(outputData[0] == 0);
-    BOOST_TEST(outputData[1] == 0);
-    BOOST_TEST(outputData[2] == 0);
-    BOOST_TEST(outputData[3] == 255); // softmax has been saturated.
-    BOOST_TEST(outputData[4] == 0);
+    CHECK(outputData[0] == 0);
+    CHECK(outputData[1] == 0);
+    CHECK(outputData[2] == 0);
+    CHECK(outputData[3] == 255); // softmax has been saturated.
+    CHECK(outputData[4] == 0);
 }
 
-BOOST_AUTO_TEST_CASE(TrivialAdd)
+TEST_CASE("TrivialAdd")
 {
     // This test was designed to match "AddTwo" in android nn/runtime/test/TestTrivialModel.cpp.
 
@@ -211,21 +211,21 @@
     runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
 
     // Checks the results
-    BOOST_TEST(outputData[0] == 101);
-    BOOST_TEST(outputData[1] == 202);
-    BOOST_TEST(outputData[2] == 303);
-    BOOST_TEST(outputData[3] == 404);
-    BOOST_TEST(outputData[4] == 505);
-    BOOST_TEST(outputData[5] == 606);
-    BOOST_TEST(outputData[6] == 707);
-    BOOST_TEST(outputData[7] == 808);
-    BOOST_TEST(outputData[8] == 909);
-    BOOST_TEST(outputData[9] == 1010);
-    BOOST_TEST(outputData[10] == 1111);
-    BOOST_TEST(outputData[11] == 1212);
+    CHECK(outputData[0] == 101);
+    CHECK(outputData[1] == 202);
+    CHECK(outputData[2] == 303);
+    CHECK(outputData[3] == 404);
+    CHECK(outputData[4] == 505);
+    CHECK(outputData[5] == 606);
+    CHECK(outputData[6] == 707);
+    CHECK(outputData[7] == 808);
+    CHECK(outputData[8] == 909);
+    CHECK(outputData[9] == 1010);
+    CHECK(outputData[10] == 1111);
+    CHECK(outputData[11] == 1212);
 }
 
-BOOST_AUTO_TEST_CASE(MultipleOutputs)
+TEST_CASE("MultipleOutputs")
 {
     using namespace armnn;
 
@@ -306,12 +306,12 @@
     runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
 
     // Checks the results.
-    BOOST_TEST(output1Data == std::vector<float>({ 1.f, 1.f, 1.f, 1.f, 1.f, 0.f, -1.f, -1.f, 1.f, 1.f })); // ReLu1
-    BOOST_TEST(output2Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 6.f, 0.f, 0.f, 0.f, 3.f, 3.f })); // ReLu6
-    BOOST_TEST(output3Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 5.f, 2.f, 2.f, 2.f, 3.f, 3.f })); // [2, 5]
+    CHECK(output1Data == std::vector<float>({ 1.f, 1.f, 1.f, 1.f, 1.f, 0.f, -1.f, -1.f, 1.f, 1.f })); // ReLu1
+    CHECK(output2Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 6.f, 0.f, 0.f, 0.f, 3.f, 3.f })); // ReLu6
+    CHECK(output3Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 5.f, 2.f, 2.f, 2.f, 3.f, 3.f })); // [2, 5]
 }
 
-BOOST_AUTO_TEST_CASE(TrivialMin)
+TEST_CASE("TrivialMin")
 {
     using namespace armnn;
 
@@ -369,13 +369,13 @@
     runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
 
     // Checks the results
-    BOOST_TEST(outputData[0] == 1);
-    BOOST_TEST(outputData[1] == 1);
-    BOOST_TEST(outputData[2] == 3);
-    BOOST_TEST(outputData[3] == 2);
+    CHECK(outputData[0] == 1);
+    CHECK(outputData[1] == 1);
+    CHECK(outputData[2] == 3);
+    CHECK(outputData[3] == 2);
 }
 
-BOOST_AUTO_TEST_CASE(RefEqualSimpleEndToEndTest)
+TEST_CASE("RefEqualSimpleEndToEndTest")
 {
     const std::vector<uint8_t> expectedOutput({ 1, 1, 1, 1,  0, 0, 0, 0,
                                                 0, 0, 0, 0,  1, 1, 1, 1 });
@@ -385,7 +385,7 @@
                                                        expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(RefGreaterSimpleEndToEndTest)
+TEST_CASE("RefGreaterSimpleEndToEndTest")
 {
     const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0,  1, 1, 1, 1,
                                                 0, 0, 0, 0,  0, 0, 0, 0 });
@@ -395,7 +395,7 @@
                                                        expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(RefEqualSimpleEndToEndUint8Test)
+TEST_CASE("RefEqualSimpleEndToEndUint8Test")
 {
     const std::vector<uint8_t> expectedOutput({ 1, 1, 1, 1,  0, 0, 0, 0,
                                                 0, 0, 0, 0,  1, 1, 1, 1 });
@@ -405,7 +405,7 @@
                                                                expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(RefGreaterSimpleEndToEndUint8Test)
+TEST_CASE("RefGreaterSimpleEndToEndUint8Test")
 {
     const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0,  1, 1, 1, 1,
                                                 0, 0, 0, 0,  0, 0, 0, 0 });
@@ -415,7 +415,7 @@
                                                                expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(RefEqualBroadcastEndToEndTest)
+TEST_CASE("RefEqualBroadcastEndToEndTest")
 {
     const std::vector<uint8_t> expectedOutput({ 1, 0, 1, 1, 0, 0,
                                                 0, 0, 0, 0, 0, 0 });
@@ -425,7 +425,7 @@
                                                           expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(RefGreaterBroadcastEndToEndTest)
+TEST_CASE("RefGreaterBroadcastEndToEndTest")
 {
     const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
                                                 1, 1, 1, 1, 1, 1 });
@@ -435,7 +435,7 @@
                                                           expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(RefEqualBroadcastEndToEndUint8Test)
+TEST_CASE("RefEqualBroadcastEndToEndUint8Test")
 {
     const std::vector<uint8_t > expectedOutput({ 1, 0, 1, 1, 0, 0,
                                                  0, 0, 0, 0, 0, 0 });
@@ -445,7 +445,7 @@
                                                                   expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(RefGreaterBroadcastEndToEndUint8Test)
+TEST_CASE("RefGreaterBroadcastEndToEndUint8Test")
 {
     const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
                                                 1, 1, 1, 1, 1, 1 });
@@ -455,249 +455,249 @@
                                                                   expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndFloat32NHWCTest)
+TEST_CASE("RefBatchToSpaceNdEndToEndFloat32NHWCTest")
 {
     BatchToSpaceNdEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndUint8NHWCTest)
+TEST_CASE("RefBatchToSpaceNdEndToEndUint8NHWCTest")
 {
     BatchToSpaceNdEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndQSymm16NHWCTest)
+TEST_CASE("RefBatchToSpaceNdEndToEndQSymm16NHWCTest")
 {
     BatchToSpaceNdEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndFloat32NCHWTest)
+TEST_CASE("RefBatchToSpaceNdEndToEndFloat32NCHWTest")
 {
     BatchToSpaceNdEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndUint8NCHWTest)
+TEST_CASE("RefBatchToSpaceNdEndToEndUint8NCHWTest")
 {
     BatchToSpaceNdEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndQSymm16NCHWTest)
+TEST_CASE("RefBatchToSpaceNdEndToEndQSymm16NCHWTest")
 {
     BatchToSpaceNdEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexFloat32NHWCTest)
+TEST_CASE("RefBatchToSpaceNdEndToEndComplexFloat32NHWCTest")
 {
     BatchToSpaceNdComplexEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexUint8NHWCTest)
+TEST_CASE("RefBatchToSpaceNdEndToEndComplexUint8NHWCTest")
 {
     BatchToSpaceNdComplexEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexQSymm16NHWCTest)
+TEST_CASE("RefBatchToSpaceNdEndToEndComplexQSymm16NHWCTest")
 {
     BatchToSpaceNdComplexEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexFloat32NCHWTest)
+TEST_CASE("RefBatchToSpaceNdEndToEndComplexFloat32NCHWTest")
 {
     BatchToSpaceNdComplexEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexUint8NCHWTest)
+TEST_CASE("RefBatchToSpaceNdEndToEndComplexUint8NCHWTest")
 {
     BatchToSpaceNdComplexEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexQSymm16NCHWTest)
+TEST_CASE("RefBatchToSpaceNdEndToEndComplexQSymm16NCHWTest")
 {
     BatchToSpaceNdComplexEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim0Test)
+TEST_CASE("RefConcatEndToEndDim0Test")
 {
     ConcatDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim0Uint8Test)
+TEST_CASE("RefConcatEndToEndDim0Uint8Test")
 {
     ConcatDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim1Test)
+TEST_CASE("RefConcatEndToEndDim1Test")
 {
     ConcatDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim1Uint8Test)
+TEST_CASE("RefConcatEndToEndDim1Uint8Test")
 {
     ConcatDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim2Test)
+TEST_CASE("RefConcatEndToEndDim2Test")
 {
     ConcatDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim2Uint8Test)
+TEST_CASE("RefConcatEndToEndDim2Uint8Test")
 {
     ConcatDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim3Test)
+TEST_CASE("RefConcatEndToEndDim3Test")
 {
     ConcatDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim3Uint8Test)
+TEST_CASE("RefConcatEndToEndDim3Uint8Test")
 {
     ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefEluEndToEndTestFloat32)
+TEST_CASE("RefEluEndToEndTestFloat32")
 {
     EluEndToEndTest<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefEluEndToEndTestFloat16)
+TEST_CASE("RefEluEndToEndTestFloat16")
 {
     EluEndToEndTest<armnn::DataType::Float16>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefEluEndToEndTestBFloat16)
+TEST_CASE("RefEluEndToEndTestBFloat16")
 {
     EluEndToEndTest<armnn::DataType::BFloat16>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefEluEndToEndTestQAsymmS8)
+TEST_CASE("RefEluEndToEndTestQAsymmS8")
 {
     EluEndToEndTest<armnn::DataType::QAsymmS8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefEluEndToEndTestQAsymmU8)
+TEST_CASE("RefEluEndToEndTestQAsymmU8")
 {
     EluEndToEndTest<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefEluEndToEndTestQSymmS16)
+TEST_CASE("RefEluEndToEndTestQSymmS16")
 {
     EluEndToEndTest<armnn::DataType::QSymmS16>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefFillEndToEndTest)
+TEST_CASE("RefFillEndToEndTest")
 {
     FillEndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefFillEndToEndTestFloat16)
+TEST_CASE("RefFillEndToEndTestFloat16")
 {
     FillEndToEnd<armnn::DataType::Float16>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefFillEndToEndTestInt32)
+TEST_CASE("RefFillEndToEndTestInt32")
 {
     FillEndToEnd<armnn::DataType::Signed32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefFullyConnectedEndToEndTestInt32)
+TEST_CASE("RefFullyConnectedEndToEndTestInt32")
 {
     FullyConnectedWithDynamicWeightsEndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefGatherFloatTest)
+TEST_CASE("RefGatherFloatTest")
 {
     GatherEndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefGatherUint8Test)
+TEST_CASE("RefGatherUint8Test")
 {
     GatherEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefGatherInt16Test)
+TEST_CASE("RefGatherInt16Test")
 {
     GatherEndToEnd<armnn::DataType::QSymmS16>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefGatherMultiDimFloatTest)
+TEST_CASE("RefGatherMultiDimFloatTest")
 {
     GatherMultiDimEndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefGatherMultiDimUint8Test)
+TEST_CASE("RefGatherMultiDimUint8Test")
 {
     GatherMultiDimEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefGatherMultiDimInt16Test)
+TEST_CASE("RefGatherMultiDimInt16Test")
 {
     GatherMultiDimEndToEnd<armnn::DataType::QSymmS16>(defaultBackends);
 }
 
 // DepthToSpace
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwFloat32)
+TEST_CASE("DephtToSpaceEndToEndNchwFloat32")
 {
     DepthToSpaceEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwFloat16)
+TEST_CASE("DephtToSpaceEndToEndNchwFloat16")
 {
     DepthToSpaceEndToEnd<armnn::DataType::Float16>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwUint8)
+TEST_CASE("DephtToSpaceEndToEndNchwUint8")
 {
     DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwInt16)
+TEST_CASE("DephtToSpaceEndToEndNchwInt16")
 {
     DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat32)
+TEST_CASE("DephtToSpaceEndToEndNhwcFloat32")
 {
     DepthToSpaceEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat16)
+TEST_CASE("DephtToSpaceEndToEndNhwcFloat16")
 {
     DepthToSpaceEndToEnd<armnn::DataType::Float16>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcUint8)
+TEST_CASE("DephtToSpaceEndToEndNhwcUint8")
 {
     DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcInt16)
+TEST_CASE("DephtToSpaceEndToEndNhwcInt16")
 {
     DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
 // Dequantize
-BOOST_AUTO_TEST_CASE(DequantizeEndToEndSimpleTest)
+TEST_CASE("DequantizeEndToEndSimpleTest")
 {
     DequantizeEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(DequantizeEndToEndOffsetTest)
+TEST_CASE("DequantizeEndToEndOffsetTest")
 {
     DequantizeEndToEndOffset<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(DequantizeEndToEndSimpleInt16Test)
+TEST_CASE("DequantizeEndToEndSimpleInt16Test")
 {
     DequantizeEndToEndSimple<armnn::DataType::QSymmS16>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(DequantizeEndToEndOffsetInt16Test)
+TEST_CASE("DequantizeEndToEndOffsetInt16Test")
 {
     DequantizeEndToEndOffset<armnn::DataType::QSymmS16>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefDetectionPostProcessRegularNmsTest)
+TEST_CASE("RefDetectionPostProcessRegularNmsTest")
 {
     std::vector<float> boxEncodings({
         0.0f, 0.0f, 0.0f, 0.0f,
@@ -734,7 +734,7 @@
     }
 }
 
-BOOST_AUTO_TEST_CASE(RefDetectionPostProcessRegularNmsUint8Test)
+TEST_CASE("RefDetectionPostProcessRegularNmsUint8Test")
 {
     armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
     armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
@@ -783,7 +783,7 @@
                                                                              1.0f, 1, 0.01f, 0, 0.5f, 0);
 }
 
-BOOST_AUTO_TEST_CASE(RefDetectionPostProcessFastNmsTest)
+TEST_CASE("RefDetectionPostProcessFastNmsTest")
 {
     std::vector<float> boxEncodings({
         0.0f, 0.0f, 0.0f, 0.0f,
@@ -812,7 +812,7 @@
     DetectionPostProcessFastNmsEndToEnd<armnn::DataType::Float32>(defaultBackends, boxEncodings, scores, anchors);
 }
 
-BOOST_AUTO_TEST_CASE(RefDetectionPostProcessFastNmsUint8Test)
+TEST_CASE("RefDetectionPostProcessFastNmsUint8Test")
 {
     armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
     armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
@@ -862,494 +862,493 @@
 }
 
 // HardSwish
-BOOST_AUTO_TEST_CASE(RefHardSwishEndToEndTestFloat32)
+TEST_CASE("RefHardSwishEndToEndTestFloat32")
 {
     HardSwishEndToEndTest<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefHardSwishEndToEndTestFloat16)
+TEST_CASE("RefHardSwishEndToEndTestFloat16")
 {
     HardSwishEndToEndTest<armnn::DataType::Float16>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefHardSwishEndToEndTestBFloat16)
+TEST_CASE("RefHardSwishEndToEndTestBFloat16")
 {
-HardSwishEndToEndTest<armnn::DataType::BFloat16>(defaultBackends);
+    HardSwishEndToEndTest<armnn::DataType::BFloat16>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefHardSwishEndToEndTestQAsymmS8)
+TEST_CASE("RefHardSwishEndToEndTestQAsymmS8")
 {
     HardSwishEndToEndTest<armnn::DataType::QAsymmS8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefHardSwishEndToEndTestQAsymmU8)
+TEST_CASE("RefHardSwishEndToEndTestQAsymmU8")
 {
     HardSwishEndToEndTest<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefHardSwishEndToEndTestQSymmS16)
+TEST_CASE("RefHardSwishEndToEndTestQSymmS16")
 {
     HardSwishEndToEndTest<armnn::DataType::QSymmS16>(defaultBackends);
 }
 
 // LogSoftmax
-BOOST_AUTO_TEST_CASE(RefLogSoftmaxEndToEndTest)
+TEST_CASE("RefLogSoftmaxEndToEndTest")
 {
     LogSoftmaxEndToEndTest(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefPreluEndToEndTestFloat32)
+TEST_CASE("RefPreluEndToEndTestFloat32")
 {
     PreluEndToEndNegativeTest<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefPreluEndToEndTestUint8)
+TEST_CASE("RefPreluEndToEndTestUint8")
 {
     PreluEndToEndPositiveTest<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefPreluEndToEndTestQSymm16)
+TEST_CASE("RefPreluEndToEndTestQSymm16")
 {
     PreluEndToEndPositiveTest<armnn::DataType::QSymmS16>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSpaceToDepthNhwcEndToEndTest1)
+TEST_CASE("RefSpaceToDepthNhwcEndToEndTest1")
 {
     SpaceToDepthNhwcEndToEndTest1(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSpaceToDepthNchwEndToEndTest1)
+TEST_CASE("RefSpaceToDepthNchwEndToEndTest1")
 {
     SpaceToDepthNchwEndToEndTest1(defaultBackends);
-
 }
 
-BOOST_AUTO_TEST_CASE(RefSpaceToDepthNhwcEndToEndTest2)
+TEST_CASE("RefSpaceToDepthNhwcEndToEndTest2")
 {
     SpaceToDepthNhwcEndToEndTest2(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSpaceToDepthNchwEndToEndTest2)
+TEST_CASE("RefSpaceToDepthNchwEndToEndTest2")
 {
     SpaceToDepthNchwEndToEndTest2(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter1dEndToEndTest)
+TEST_CASE("RefSplitter1dEndToEndTest")
 {
     Splitter1dEndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter1dEndToEndUint8Test)
+TEST_CASE("RefSplitter1dEndToEndUint8Test")
 {
     Splitter1dEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter2dDim0EndToEndTest)
+TEST_CASE("RefSplitter2dDim0EndToEndTest")
 {
     Splitter2dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter2dDim1EndToEndTest)
+TEST_CASE("RefSplitter2dDim1EndToEndTest")
 {
     Splitter2dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter2dDim0EndToEndUint8Test)
+TEST_CASE("RefSplitter2dDim0EndToEndUint8Test")
 {
     Splitter2dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter2dDim1EndToEndUint8Test)
+TEST_CASE("RefSplitter2dDim1EndToEndUint8Test")
 {
     Splitter2dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter3dDim0EndToEndTest)
+TEST_CASE("RefSplitter3dDim0EndToEndTest")
 {
     Splitter3dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter3dDim1EndToEndTest)
+TEST_CASE("RefSplitter3dDim1EndToEndTest")
 {
     Splitter3dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter3dDim2EndToEndTest)
+TEST_CASE("RefSplitter3dDim2EndToEndTest")
 {
     Splitter3dDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter3dDim0EndToEndUint8Test)
+TEST_CASE("RefSplitter3dDim0EndToEndUint8Test")
 {
     Splitter3dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter3dDim1EndToEndUint8Test)
+TEST_CASE("RefSplitter3dDim1EndToEndUint8Test")
 {
     Splitter3dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter3dDim2EndToEndUint8Test)
+TEST_CASE("RefSplitter3dDim2EndToEndUint8Test")
 {
     Splitter3dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter4dDim0EndToEndTest)
+TEST_CASE("RefSplitter4dDim0EndToEndTest")
 {
     Splitter4dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter4dDim1EndToEndTest)
+TEST_CASE("RefSplitter4dDim1EndToEndTest")
 {
     Splitter4dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter4dDim2EndToEndTest)
+TEST_CASE("RefSplitter4dDim2EndToEndTest")
 {
     Splitter4dDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter4dDim3EndToEndTest)
+TEST_CASE("RefSplitter4dDim3EndToEndTest")
 {
     Splitter4dDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter4dDim0EndToEndUint8Test)
+TEST_CASE("RefSplitter4dDim0EndToEndUint8Test")
 {
     Splitter4dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter4dDim1EndToEndUint8Test)
+TEST_CASE("RefSplitter4dDim1EndToEndUint8Test")
 {
     Splitter4dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter4dDim2EndToEndUint8Test)
+TEST_CASE("RefSplitter4dDim2EndToEndUint8Test")
 {
     Splitter4dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter4dDim3EndToEndUint8Test)
+TEST_CASE("RefSplitter4dDim3EndToEndUint8Test")
 {
     Splitter4dDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 // TransposeConvolution2d
-BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndFloatNchwTest)
+TEST_CASE("RefTransposeConvolution2dEndToEndFloatNchwTest")
 {
     TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
         defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndUint8NchwTest)
+TEST_CASE("RefTransposeConvolution2dEndToEndUint8NchwTest")
 {
     TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
         defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndInt16NchwTest)
+TEST_CASE("RefTransposeConvolution2dEndToEndInt16NchwTest")
 {
     TransposeConvolution2dEndToEnd<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
         defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndFloatNhwcTest)
+TEST_CASE("RefTransposeConvolution2dEndToEndFloatNhwcTest")
 {
     TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
         defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndUint8NhwcTest)
+TEST_CASE("RefTransposeConvolution2dEndToEndUint8NhwcTest")
 {
     TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
         defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndInt16NhwcTest)
+TEST_CASE("RefTransposeConvolution2dEndToEndInt16NhwcTest")
 {
     TransposeConvolution2dEndToEnd<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
         defaultBackends, armnn::DataLayout::NHWC);
 }
 
 // Resize Bilinear
-BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndFloatNchwTest)
+TEST_CASE("RefResizeBilinearEndToEndFloatNchwTest")
 {
     ResizeBilinearEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndUint8NchwTest)
+TEST_CASE("RefResizeBilinearEndToEndUint8NchwTest")
 {
     ResizeBilinearEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndInt16NchwTest)
+TEST_CASE("RefResizeBilinearEndToEndInt16NchwTest")
 {
     ResizeBilinearEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndFloatNhwcTest)
+TEST_CASE("RefResizeBilinearEndToEndFloatNhwcTest")
 {
     ResizeBilinearEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndUint8NhwcTest)
+TEST_CASE("RefResizeBilinearEndToEndUint8NhwcTest")
 {
     ResizeBilinearEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndInt16NhwcTest)
+TEST_CASE("RefResizeBilinearEndToEndInt16NhwcTest")
 {
     ResizeBilinearEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
 // Resize NearestNeighbor
-BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndFloatNchwTest)
+TEST_CASE("RefResizeNearestNeighborEndToEndFloatNchwTest")
 {
     ResizeNearestNeighborEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndUint8NchwTest)
+TEST_CASE("RefResizeNearestNeighborEndToEndUint8NchwTest")
 {
     ResizeNearestNeighborEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndInt16NchwTest)
+TEST_CASE("RefResizeNearestNeighborEndToEndInt16NchwTest")
 {
     ResizeNearestNeighborEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndFloatNhwcTest)
+TEST_CASE("RefResizeNearestNeighborEndToEndFloatNhwcTest")
 {
     ResizeNearestNeighborEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndUint8NhwcTest)
+TEST_CASE("RefResizeNearestNeighborEndToEndUint8NhwcTest")
 {
     ResizeNearestNeighborEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndInt16NhwcTest)
+TEST_CASE("RefResizeNearestNeighborEndToEndInt16NhwcTest")
 {
     ResizeNearestNeighborEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
 // InstanceNormalization
-BOOST_AUTO_TEST_CASE(RefInstanceNormalizationNhwcEndToEndTest1)
+TEST_CASE("RefInstanceNormalizationNhwcEndToEndTest1")
 {
     InstanceNormalizationNhwcEndToEndTest1(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefInstanceNormalizationNchwEndToEndTest1)
+TEST_CASE("RefInstanceNormalizationNchwEndToEndTest1")
 {
     InstanceNormalizationNchwEndToEndTest1(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefInstanceNormalizationNhwcEndToEndTest2)
+TEST_CASE("RefInstanceNormalizationNhwcEndToEndTest2")
 {
     InstanceNormalizationNhwcEndToEndTest2(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefInstanceNormalizationNchwEndToEndTest2)
+TEST_CASE("RefInstanceNormalizationNchwEndToEndTest2")
 {
     InstanceNormalizationNchwEndToEndTest2(defaultBackends);
 }
 
 // ArgMinMax
-BOOST_AUTO_TEST_CASE(RefArgMaxSimpleTest)
+TEST_CASE("RefArgMaxSimpleTest")
 {
     ArgMaxEndToEndSimple<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMaxSimpleUint8Test)
+TEST_CASE("RefArgMaxSimpleUint8Test")
 {
     ArgMaxEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMinSimpleTest)
+TEST_CASE("RefArgMinSimpleTest")
 {
     ArgMinEndToEndSimple<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMinSimpleUint8Test)
+TEST_CASE("RefArgMinSimpleUint8Test")
 {
     ArgMinEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMaxAxis0Test)
+TEST_CASE("RefArgMaxAxis0Test")
 {
     ArgMaxAxis0EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMaxAxis0Uint8Test)
+TEST_CASE("RefArgMaxAxis0Uint8Test")
 {
     ArgMaxAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMinAxis0Test)
+TEST_CASE("RefArgMinAxis0Test")
 {
     ArgMinAxis0EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMinAxis0Uint8Test)
+TEST_CASE("RefArgMinAxis0Uint8Test")
 {
 
     ArgMinAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMaxAxis1Test)
+TEST_CASE("RefArgMaxAxis1Test")
 {
     ArgMaxAxis1EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMaxAxis1Uint8Test)
+TEST_CASE("RefArgMaxAxis1Uint8Test")
 {
     ArgMaxAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMinAxis1Test)
+TEST_CASE("RefArgMinAxis1Test")
 {
     ArgMinAxis1EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMinAxis1Uint8Test)
+TEST_CASE("RefArgMinAxis1Uint8Test")
 {
 
     ArgMinAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMaxAxis2Test)
+TEST_CASE("RefArgMaxAxis2Test")
 {
     ArgMaxAxis2EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMaxAxis2Uint8Test)
+TEST_CASE("RefArgMaxAxis2Uint8Test")
 {
     ArgMaxAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMinAxis2Test)
+TEST_CASE("RefArgMinAxis2Test")
 {
     ArgMinAxis2EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMinAxis2Uint8Test)
+TEST_CASE("RefArgMinAxis2Uint8Test")
 {
 
     ArgMinAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMaxAxis3Test)
+TEST_CASE("RefArgMaxAxis3Test")
 {
     ArgMaxAxis3EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMaxAxis3Uint8Test)
+TEST_CASE("RefArgMaxAxis3Uint8Test")
 {
     ArgMaxAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMinAxis3Test)
+TEST_CASE("RefArgMinAxis3Test")
 {
     ArgMinAxis3EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMinAxis3Uint8Test)
+TEST_CASE("RefArgMinAxis3Uint8Test")
 {
 
     ArgMinAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefQLstmEndToEndTest)
+TEST_CASE("RefQLstmEndToEndTest")
 {
     QLstmEndToEnd(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefRankEndToEndTest)
+TEST_CASE("RefRankEndToEndTest")
 {
     RankEndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefRankEndToEndTestFloat16)
+TEST_CASE("RefRankEndToEndTestFloat16")
 {
     RankEndToEnd<armnn::DataType::Float16>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefRankEndToEndTestInt32)
+TEST_CASE("RefRankEndToEndTestInt32")
 {
     RankEndToEnd<armnn::DataType::Signed32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefRankEndToEndTestQAsymmS8)
+TEST_CASE("RefRankEndToEndTestQAsymmS8")
 {
     RankEndToEnd<armnn::DataType::QAsymmS8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefRankEndToEndTestQSymmS16)
+TEST_CASE("RefRankEndToEndTestQSymmS16")
 {
     RankEndToEnd<armnn::DataType::QSymmS16>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefRankEndToEndTestQSymmS8)
+TEST_CASE("RefRankEndToEndTestQSymmS8")
 {
     RankEndToEnd<armnn::DataType::QSymmS8>(defaultBackends);
 }
 
 #if !defined(__ANDROID__)
 // Only run these tests on non Android platforms
-BOOST_AUTO_TEST_CASE(RefImportNonAlignedPointerTest)
+TEST_CASE("RefImportNonAlignedPointerTest")
 {
     ImportNonAlignedInputPointerTest(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefExportNonAlignedPointerTest)
+TEST_CASE("RefExportNonAlignedPointerTest")
 {
     ExportNonAlignedOutputPointerTest(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefImportAlignedPointerTest)
+TEST_CASE("RefImportAlignedPointerTest")
 {
     ImportAlignedPointerTest(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefImportOnlyWorkload)
+TEST_CASE("RefImportOnlyWorkload")
 {
     ImportOnlyWorkload(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefExportOnlyWorkload)
+TEST_CASE("RefExportOnlyWorkload")
 {
     ExportOnlyWorkload(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefImportAndExportWorkload)
+TEST_CASE("RefImportAndExportWorkload")
 {
     ImportAndExportWorkload(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefExportOutputWithSeveralOutputSlotConnectionsTest)
+TEST_CASE("RefExportOutputWithSeveralOutputSlotConnectionsTest")
 {
     ExportOutputWithSeveralOutputSlotConnectionsTest(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefStridedSliceInvalidSliceEndToEndTest)
+TEST_CASE("RefStridedSliceInvalidSliceEndToEndTest")
 {
     StridedSliceInvalidSliceEndToEndTest(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefThreadSafeFP32StridedSlicedEndToEndTest)
+TEST_CASE("RefThreadSafeFP32StridedSlicedEndToEndTest")
 {
     armnn::experimental::StridedSlicedEndToEndTest<armnn::DataType::Float32>(defaultBackends, 1);
 }
 
-BOOST_AUTO_TEST_CASE(RefAsyncFP32StridedSlicedMultiThreadedEndToEndTest)
+TEST_CASE("RefAsyncFP32StridedSlicedMultiThreadedEndToEndTest")
 {
     armnn::experimental::StridedSlicedMultiThreadedEndToEndTest<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefAsyncFP32StridedSlicedScheduledMultiThreadedEndToEndTest)
+TEST_CASE("RefAsyncFP32StridedSlicedScheduledMultiThreadedEndToEndTest")
 {
     armnn::experimental::StridedSlicedEndToEndTest<armnn::DataType::Float32>(defaultBackends, 3);
 }
 #endif
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/reference/test/RefJsonPrinterTests.cpp b/src/backends/reference/test/RefJsonPrinterTests.cpp
index ff604a7..15b591a 100644
--- a/src/backends/reference/test/RefJsonPrinterTests.cpp
+++ b/src/backends/reference/test/RefJsonPrinterTests.cpp
@@ -7,16 +7,16 @@
 
 #include <backendsCommon/test/JsonPrinterTestImpl.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <vector>
 
-BOOST_AUTO_TEST_SUITE(RefJsonPrinter)
-
-BOOST_AUTO_TEST_CASE(SoftmaxProfilerJsonPrinterCpuRefTest)
+TEST_SUITE("RefJsonPrinter")
+{
+TEST_CASE("SoftmaxProfilerJsonPrinterCpuRefTest")
 {
     std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
     RunSoftmaxProfilerJsonPrinterTest(backends);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/backends/reference/test/RefLayerSupportTests.cpp b/src/backends/reference/test/RefLayerSupportTests.cpp
index a148706..1adc54e 100644
--- a/src/backends/reference/test/RefLayerSupportTests.cpp
+++ b/src/backends/reference/test/RefLayerSupportTests.cpp
@@ -13,7 +13,7 @@
 #include <backendsCommon/test/LayerTests.hpp>
 #include <backendsCommon/test/IsLayerSupportedTestImpl.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <string>
 
@@ -27,13 +27,14 @@
 
 } // anonymous namespace
 
-BOOST_AUTO_TEST_SUITE(RefLayerSupported)
-
-BOOST_AUTO_TEST_CASE(IsLayerSupportedLayerTypeMatches)
+TEST_SUITE("RefLayerSupported")
+{
+TEST_CASE("IsLayerSupportedLayerTypeMatches")
 {
     LayerTypeMatchesTest();
 }
-BOOST_AUTO_TEST_CASE(IsLayerSupportedReferenceAddition)
+
+TEST_CASE("IsLayerSupportedReferenceAddition")
 {
     armnn::TensorShape shape0 = {1,1,3,4};
     armnn::TensorShape shape1 = {4};
@@ -44,232 +45,232 @@
 
     armnn::RefLayerSupport supportChecker;
     std::string reasonNotSupported;
-    BOOST_CHECK(supportChecker.IsAdditionSupported(in0, in1, out, reasonNotSupported));
+    CHECK(supportChecker.IsAdditionSupported(in0, in1, out, reasonNotSupported));
 }
 
-BOOST_AUTO_TEST_CASE(IsLayerSupportedBFloat16Reference)
+TEST_CASE("IsLayerSupportedBFloat16Reference")
 {
     armnn::RefWorkloadFactory factory;
     IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::BFloat16>(&factory);
 }
 
-BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat16Reference)
+TEST_CASE("IsLayerSupportedFloat16Reference")
 {
     armnn::RefWorkloadFactory factory;
     IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::Float16>(&factory);
 }
 
-BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat32Reference)
+TEST_CASE("IsLayerSupportedFloat32Reference")
 {
     armnn::RefWorkloadFactory factory;
     IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::Float32>(&factory);
 }
 
-BOOST_AUTO_TEST_CASE(IsLayerSupportedUint8Reference)
+TEST_CASE("IsLayerSupportedUint8Reference")
 {
     armnn::RefWorkloadFactory factory;
     IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QAsymmU8>(&factory);
 }
 
-BOOST_AUTO_TEST_CASE(IsLayerSupportedInt8Reference)
+TEST_CASE("IsLayerSupportedInt8Reference")
 {
     armnn::RefWorkloadFactory factory;
     IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QSymmS8>(&factory);
 }
 
-BOOST_AUTO_TEST_CASE(IsLayerSupportedInt16Reference)
+TEST_CASE("IsLayerSupportedInt16Reference")
 {
     armnn::RefWorkloadFactory factory;
     IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QSymmS16>(&factory);
 }
 
-BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedReference)
+TEST_CASE("IsConvertFp16ToFp32SupportedReference")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp16ToFp32Layer,
       armnn::DataType::Float16, armnn::DataType::Float32>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedFp32InputReference)
+TEST_CASE("IsConvertFp16ToFp32SupportedFp32InputReference")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp16ToFp32Layer,
       armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float32 data type input");
+    CHECK(!result);
+    CHECK_EQ(reasonIfUnsupported, "Layer is not supported with float32 data type input");
 }
 
-BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedFp16OutputReference)
+TEST_CASE("IsConvertFp16ToFp32SupportedFp16OutputReference")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp16ToFp32Layer,
       armnn::DataType::Float16, armnn::DataType::Float16>(reasonIfUnsupported);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float16 data type output");
+    CHECK(!result);
+    CHECK_EQ(reasonIfUnsupported, "Layer is not supported with float16 data type output");
 }
 
-BOOST_AUTO_TEST_CASE(IsConvertBf16ToFp32SupportedReference)
+TEST_CASE("IsConvertBf16ToFp32SupportedReference")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertBf16ToFp32Layer,
       armnn::DataType::BFloat16, armnn::DataType::Float32>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_AUTO_TEST_CASE(IsConvertBf16ToFp32SupportedFp32InputReference)
+TEST_CASE("IsConvertBf16ToFp32SupportedFp32InputReference")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertBf16ToFp32Layer,
       armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Reference for ConvertBf16ToFp32 layer: input type not supported\n");
+    CHECK(!result);
+    CHECK_EQ(reasonIfUnsupported, "Reference for ConvertBf16ToFp32 layer: input type not supported\n");
 }
 
-BOOST_AUTO_TEST_CASE(IsConvertBf16ToFp32SupportedBf16OutputReference)
+TEST_CASE("IsConvertBf16ToFp32SupportedBf16OutputReference")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertBf16ToFp32Layer,
       armnn::DataType::BFloat16, armnn::DataType::BFloat16>(reasonIfUnsupported);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Reference for ConvertBf16ToFp32 layer: output type not supported\n");
+    CHECK(!result);
+    CHECK_EQ(reasonIfUnsupported, "Reference for ConvertBf16ToFp32 layer: output type not supported\n");
 }
 
-BOOST_AUTO_TEST_CASE(IsConvertFp32ToBf16SupportedReference)
+TEST_CASE("IsConvertFp32ToBf16SupportedReference")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToBf16Layer,
       armnn::DataType::Float32, armnn::DataType::BFloat16>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_AUTO_TEST_CASE(IsConvertFp32ToBf16SupportedBf16InputReference)
+TEST_CASE("IsConvertFp32ToBf16SupportedBf16InputReference")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToBf16Layer,
       armnn::DataType::BFloat16, armnn::DataType::BFloat16>(reasonIfUnsupported);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Reference for ConvertFp32ToBf16 layer: input type not supported\n");
+    CHECK(!result);
+    CHECK_EQ(reasonIfUnsupported, "Reference for ConvertFp32ToBf16 layer: input type not supported\n");
 }
 
-BOOST_AUTO_TEST_CASE(IsConvertFp32ToBf16SupportedFp32OutputReference)
+TEST_CASE("IsConvertFp32ToBf16SupportedFp32OutputReference")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToBf16Layer,
       armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Reference for ConvertFp32ToBf16 layer: output type not supported\n");
+    CHECK(!result);
+    CHECK_EQ(reasonIfUnsupported, "Reference for ConvertFp32ToBf16 layer: output type not supported\n");
 }
 
-BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedReference)
+TEST_CASE("IsConvertFp32ToFp16SupportedReference")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToFp16Layer,
       armnn::DataType::Float32, armnn::DataType::Float16>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedFp16InputReference)
+TEST_CASE("IsConvertFp32ToFp16SupportedFp16InputReference")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToFp16Layer,
       armnn::DataType::Float16, armnn::DataType::Float16>(reasonIfUnsupported);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float16 data type input");
+    CHECK(!result);
+    CHECK_EQ(reasonIfUnsupported, "Layer is not supported with float16 data type input");
 }
 
-BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedFp32OutputReference)
+TEST_CASE("IsConvertFp32ToFp16SupportedFp32OutputReference")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToFp16Layer,
       armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float32 data type output");
+    CHECK(!result);
+    CHECK_EQ(reasonIfUnsupported, "Layer is not supported with float32 data type output");
 }
 
-BOOST_AUTO_TEST_CASE(IsLayerSupportedMeanDimensionsReference)
+TEST_CASE("IsLayerSupportedMeanDimensionsReference")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsMeanLayerSupportedTests<armnn::RefWorkloadFactory,
             armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_AUTO_TEST_CASE(IsLayerNotSupportedMeanDimensionsReference)
+TEST_CASE("IsLayerNotSupportedMeanDimensionsReference")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsMeanLayerNotSupportedTests<armnn::RefWorkloadFactory,
             armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);
 
-    BOOST_CHECK(!result);
+    CHECK(!result);
 
-    BOOST_CHECK(reasonIfUnsupported.find(
+    CHECK(reasonIfUnsupported.find(
         "Reference Mean: Expected 4 dimensions but got 2 dimensions instead, for the 'output' tensor.")
         != std::string::npos);
 }
 
-BOOST_AUTO_TEST_CASE(IsConstantSupportedRef)
+TEST_CASE("IsConstantSupportedRef")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
             armnn::DataType::Float16>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
             armnn::DataType::Float32>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
             armnn::DataType::QAsymmU8>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
             armnn::DataType::Boolean>(reasonIfUnsupported);
-    BOOST_CHECK(!result);
+    CHECK(!result);
 
     result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
             armnn::DataType::QSymmS16>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
             armnn::DataType::QSymmS8>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
             armnn::DataType::QAsymmS8>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
             armnn::DataType::BFloat16>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+} // TEST_SUITE("RefLayerSupported")
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index df48877..6bc6f8a 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -11,9 +11,8 @@
 
 #include <test/UnitTests.hpp>
 
-#include <boost/test/unit_test.hpp>
-
-BOOST_AUTO_TEST_SUITE(Compute_Reference)
+TEST_SUITE("Compute_Reference")
+{
 
 using namespace armnn;
 
@@ -1437,15 +1436,15 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(PermuteQSymm16ValueSet3Test, PermuteValueSet3Test<DataType::QSymmS16>)
 
 // Lstm
-BOOST_AUTO_TEST_CASE(LstmUtilsZeroVector) {
+TEST_CASE("LstmUtilsZeroVector") {
                               LstmUtilsZeroVectorTest(); }
-BOOST_AUTO_TEST_CASE(LstmUtilsMeanStddevNormalization) {
+TEST_CASE("LstmUtilsMeanStddevNormalization") {
                               LstmUtilsMeanStddevNormalizationNoneZeroInputTest();
                               LstmUtilsMeanStddevNormalizationAllZeroInputTest();
                               LstmUtilsMeanStddevNormalizationMixedZeroInputTest(); }
-BOOST_AUTO_TEST_CASE(LstmUtilsVectorBatchVectorCwiseProduct) {
+TEST_CASE("LstmUtilsVectorBatchVectorCwiseProduct") {
                               LstmUtilsVectorBatchVectorCwiseProductTest(); }
-BOOST_AUTO_TEST_CASE(LstmUtilsVectorBatchVectorAdd) {
+TEST_CASE("LstmUtilsVectorBatchVectorAdd") {
                               LstmUtilsVectorBatchVectorAddTest(); }
 
 ARMNN_AUTO_TEST_CASE_WITH_THF(LstmLayerFloat32WithCifgWithPeepholeNoProjection,
@@ -1873,43 +1872,37 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(Abs3dQuantisedSymm16, Abs3dTest<DataType::QSymmS16>)
 
 // Detection PostProcess
-BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsFloat)
+TEST_CASE("DetectionPostProcessRegularNmsFloat")
 {
     DetectionPostProcessRegularNmsFloatTest<RefWorkloadFactory>();
 }
-BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsFloat)
+TEST_CASE("DetectionPostProcessFastNmsFloat")
 {
     DetectionPostProcessFastNmsFloatTest<RefWorkloadFactory>();
 }
-BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsInt8)
+TEST_CASE("DetectionPostProcessRegularNmsInt8")
 {
-    DetectionPostProcessRegularNmsQuantizedTest<
-        RefWorkloadFactory, DataType::QAsymmS8>();
+    DetectionPostProcessRegularNmsQuantizedTest<RefWorkloadFactory, DataType::QAsymmS8>();
 }
-BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsInt8)
+TEST_CASE("DetectionPostProcessFastNmsInt8")
 {
-    DetectionPostProcessRegularNmsQuantizedTest<
-        RefWorkloadFactory, DataType::QAsymmS8>();
+    DetectionPostProcessFastNmsQuantizedTest<RefWorkloadFactory, DataType::QAsymmS8>();
 }
-BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsUint8)
+TEST_CASE("DetectionPostProcessRegularNmsUint8")
 {
-    DetectionPostProcessRegularNmsQuantizedTest<
-        RefWorkloadFactory, DataType::QAsymmU8>();
+    DetectionPostProcessRegularNmsQuantizedTest<RefWorkloadFactory, DataType::QAsymmU8>();
 }
-BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsUint8)
+TEST_CASE("DetectionPostProcessFastNmsUint8")
 {
-    DetectionPostProcessRegularNmsQuantizedTest<
-        RefWorkloadFactory, DataType::QAsymmU8>();
+    DetectionPostProcessFastNmsQuantizedTest<RefWorkloadFactory, DataType::QAsymmU8>();
 }
-BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsInt16)
+TEST_CASE("DetectionPostProcessRegularNmsInt16")
 {
-    DetectionPostProcessRegularNmsQuantizedTest<
-        RefWorkloadFactory, DataType::QSymmS16>();
+    DetectionPostProcessRegularNmsQuantizedTest<RefWorkloadFactory, DataType::QSymmS16>();
 }
-BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsInt16)
+TEST_CASE("DetectionPostProcessFastNmsInt16")
 {
-    DetectionPostProcessFastNmsQuantizedTest<
-        RefWorkloadFactory, DataType::QSymmS16>();
+    DetectionPostProcessFastNmsQuantizedTest<RefWorkloadFactory, DataType::QSymmS16>();
 }
 
 // Dequantize
@@ -2271,4 +2264,4 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceMinFloat32, ReduceMinSimpleTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceMinNegativeAxisFloat32, ReduceMinNegativeAxisTest<DataType::Float32>)
 
-BOOST_AUTO_TEST_SUITE_END()
+} // TEST_SUITE("Compute_Reference")
\ No newline at end of file
diff --git a/src/backends/reference/test/RefMemoryManagerTests.cpp b/src/backends/reference/test/RefMemoryManagerTests.cpp
index 15b7c2a..960e7cc 100644
--- a/src/backends/reference/test/RefMemoryManagerTests.cpp
+++ b/src/backends/reference/test/RefMemoryManagerTests.cpp
@@ -5,47 +5,48 @@
 
 #include <reference/RefMemoryManager.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(RefMemoryManagerTests)
+TEST_SUITE("RefMemoryManagerTests")
+{
 using namespace armnn;
 using Pool = RefMemoryManager::Pool;
 
-BOOST_AUTO_TEST_CASE(ManageOneThing)
+TEST_CASE("ManageOneThing")
 {
     RefMemoryManager memoryManager;
 
     Pool* pool = memoryManager.Manage(10);
 
-    BOOST_CHECK(pool);
+    CHECK(pool);
 
     memoryManager.Acquire();
 
-    BOOST_CHECK(memoryManager.GetPointer(pool) != nullptr); // Yields a valid pointer
+    CHECK(memoryManager.GetPointer(pool) != nullptr); // Yields a valid pointer
 
     memoryManager.Release();
 }
 
-BOOST_AUTO_TEST_CASE(ManageTwoThings)
+TEST_CASE("ManageTwoThings")
 {
     RefMemoryManager memoryManager;
 
     Pool* pool1 = memoryManager.Manage(10);
     Pool* pool2 = memoryManager.Manage(5);
 
-    BOOST_CHECK(pool1);
-    BOOST_CHECK(pool2);
+    CHECK(pool1);
+    CHECK(pool2);
 
     memoryManager.Acquire();
 
     void *p1 = memoryManager.GetPointer(pool1);
     void *p2 = memoryManager.GetPointer(pool2);
 
-    BOOST_CHECK(p1);
-    BOOST_CHECK(p2);
-    BOOST_CHECK(p1 != p2);
+    CHECK(p1);
+    CHECK(p2);
+    CHECK(p1 != p2);
 
     memoryManager.Release();
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/reference/test/RefOptimizedNetworkTests.cpp b/src/backends/reference/test/RefOptimizedNetworkTests.cpp
index 2f25b6c..578d667 100644
--- a/src/backends/reference/test/RefOptimizedNetworkTests.cpp
+++ b/src/backends/reference/test/RefOptimizedNetworkTests.cpp
@@ -7,13 +7,13 @@
 #include <Network.hpp>
 
 #include <reference/RefWorkloadFactory.hpp>
-
-#include <boost/test/unit_test.hpp>
 #include <test/GraphUtils.hpp>
 
-BOOST_AUTO_TEST_SUITE(RefOptimizedNetwork)
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_CASE(OptimizeValidateCpuRefWorkloads)
+TEST_SUITE("RefOptimizedNetwork")
+{
+TEST_CASE("OptimizeValidateCpuRefWorkloads")
 {
     const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);
 
@@ -73,17 +73,17 @@
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
     armnn::Graph& graph = GetGraphForTesting(optNet.get());
     graph.AllocateDynamicBuffers();
-    BOOST_CHECK(optNet);
+    CHECK(optNet);
 
     // Validates workloads.
     armnn::RefWorkloadFactory fact;
     for (auto&& layer : graph)
     {
-        BOOST_CHECK_NO_THROW(layer->CreateWorkload(fact));
+        CHECK_NOTHROW(layer->CreateWorkload(fact));
     }
 }
 
-BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsCpuRefPermuteLayer)
+TEST_CASE("OptimizeValidateWorkloadsCpuRefPermuteLayer")
 {
     // Create runtime in which test will run
     armnn::IRuntime::CreationOptions options;
@@ -115,11 +115,11 @@
 
     for (auto&& layer : graph)
     {
-        BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
+        CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
     }
 }
 
-BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsCpuRefMeanLayer)
+TEST_CASE("OptimizeValidateWorkloadsCpuRefMeanLayer")
 {
     // Create runtime in which test will run
     armnn::IRuntime::CreationOptions options;
@@ -149,11 +149,11 @@
     graph.AllocateDynamicBuffers();
     for (auto&& layer : graph)
     {
-        BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
+        CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
     }
 }
 
-BOOST_AUTO_TEST_CASE(DebugTestOnCpuRef)
+TEST_CASE("DebugTestOnCpuRef")
 {
     // build up the structure of the network
     armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -192,14 +192,14 @@
     graph.AllocateDynamicBuffers();
 
     // Tests that all layers are present in the graph.
-    BOOST_TEST(graph.GetNumLayers() == 5);
+    CHECK(graph.GetNumLayers() == 5);
 
     // Tests that the vertices exist and have correct names.
-    BOOST_TEST(GraphHasNamedLayer(graph, "InputLayer"));
-    BOOST_TEST(GraphHasNamedLayer(graph, "DebugLayerAfterInputLayer_0"));
-    BOOST_TEST(GraphHasNamedLayer(graph, "ActivationLayer"));
-    BOOST_TEST(GraphHasNamedLayer(graph, "DebugLayerAfterActivationLayer_0"));
-    BOOST_TEST(GraphHasNamedLayer(graph, "OutputLayer"));
+    CHECK(GraphHasNamedLayer(graph, "InputLayer"));
+    CHECK(GraphHasNamedLayer(graph, "DebugLayerAfterInputLayer_0"));
+    CHECK(GraphHasNamedLayer(graph, "ActivationLayer"));
+    CHECK(GraphHasNamedLayer(graph, "DebugLayerAfterActivationLayer_0"));
+    CHECK(GraphHasNamedLayer(graph, "OutputLayer"));
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/reference/test/RefRuntimeTests.cpp b/src/backends/reference/test/RefRuntimeTests.cpp
index 17d5816..6fd4910 100644
--- a/src/backends/reference/test/RefRuntimeTests.cpp
+++ b/src/backends/reference/test/RefRuntimeTests.cpp
@@ -9,14 +9,15 @@
 
 #include <backendsCommon/test/RuntimeTestImpl.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(RefRuntime)
 
 #ifdef ARMNN_LEAK_CHECKING_ENABLED
-BOOST_AUTO_TEST_CASE(RuntimeMemoryLeaksCpuRef)
+TEST_SUITE("RefRuntime")
 {
-    BOOST_TEST(ARMNN_LEAK_CHECKER_IS_ACTIVE());
+TEST_CASE("RuntimeMemoryLeaksCpuRef")
+{
+    CHECK(ARMNN_LEAK_CHECKER_IS_ACTIVE());
 
     armnn::IRuntime::CreationOptions options;
     armnn::RuntimeImpl runtime(options);
@@ -31,16 +32,16 @@
 
     {
         ARMNN_SCOPED_LEAK_CHECKER("LoadAndUnloadNetworkCpuRef");
-        BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
+        CHECK(ARMNN_NO_LEAKS_IN_SCOPE());
         // In the second run we check for all remaining memory
         // in use after the network was unloaded. If there is any
         // then it will be treated as a memory leak.
         CreateAndDropDummyNetwork(backends, runtime);
-        BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
-        BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
-        BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
+        CHECK(ARMNN_NO_LEAKS_IN_SCOPE());
+        CHECK(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
+        CHECK(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
     }
 }
+}
 #endif
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/backends/reference/test/RefTensorHandleTests.cpp b/src/backends/reference/test/RefTensorHandleTests.cpp
index dadd1de..39f5a2a 100644
--- a/src/backends/reference/test/RefTensorHandleTests.cpp
+++ b/src/backends/reference/test/RefTensorHandleTests.cpp
@@ -5,12 +5,13 @@
 #include <reference/RefTensorHandle.hpp>
 #include <reference/RefTensorHandleFactory.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(RefTensorHandleTests)
+TEST_SUITE("RefTensorHandleTests")
+{
 using namespace armnn;
 
-BOOST_AUTO_TEST_CASE(AcquireAndRelease)
+TEST_CASE("AcquireAndRelease")
 {
     std::shared_ptr<RefMemoryManager> memoryManager = std::make_shared<RefMemoryManager>();
 
@@ -24,11 +25,11 @@
     {
         float* buffer = reinterpret_cast<float*>(handle.Map());
 
-        BOOST_CHECK(buffer != nullptr); // Yields a valid pointer
+        CHECK(buffer != nullptr); // Yields a valid pointer
 
         buffer[0] = 2.5f;
 
-        BOOST_CHECK(buffer[0] == 2.5f); // Memory is writable and readable
+        CHECK(buffer[0] == 2.5f); // Memory is writable and readable
 
     }
     memoryManager->Release();
@@ -37,16 +38,16 @@
     {
         float* buffer = reinterpret_cast<float*>(handle.Map());
 
-        BOOST_CHECK(buffer != nullptr); // Yields a valid pointer
+        CHECK(buffer != nullptr); // Yields a valid pointer
 
         buffer[0] = 3.5f;
 
-        BOOST_CHECK(buffer[0] == 3.5f); // Memory is writable and readable
+        CHECK(buffer[0] == 3.5f); // Memory is writable and readable
     }
     memoryManager->Release();
 }
 
-BOOST_AUTO_TEST_CASE(RefTensorHandleFactoryMemoryManaged)
+TEST_CASE("RefTensorHandleFactoryMemoryManaged")
 {
     std::shared_ptr<RefMemoryManager> memoryManager = std::make_shared<RefMemoryManager>();
     RefTensorHandleFactory handleFactory(memoryManager);
@@ -60,31 +61,31 @@
     memoryManager->Acquire();
     {
         float* buffer = reinterpret_cast<float*>(handle->Map());
-        BOOST_CHECK(buffer != nullptr); // Yields a valid pointer
+        CHECK(buffer != nullptr); // Yields a valid pointer
         buffer[0] = 1.5f;
         buffer[1] = 2.5f;
-        BOOST_CHECK(buffer[0] == 1.5f); // Memory is writable and readable
-        BOOST_CHECK(buffer[1] == 2.5f); // Memory is writable and readable
+        CHECK(buffer[0] == 1.5f); // Memory is writable and readable
+        CHECK(buffer[1] == 2.5f); // Memory is writable and readable
     }
     memoryManager->Release();
 
     memoryManager->Acquire();
     {
         float* buffer = reinterpret_cast<float*>(handle->Map());
-        BOOST_CHECK(buffer != nullptr); // Yields a valid pointer
+        CHECK(buffer != nullptr); // Yields a valid pointer
         buffer[0] = 3.5f;
         buffer[1] = 4.5f;
-        BOOST_CHECK(buffer[0] == 3.5f); // Memory is writable and readable
-        BOOST_CHECK(buffer[1] == 4.5f); // Memory is writable and readable
+        CHECK(buffer[0] == 3.5f); // Memory is writable and readable
+        CHECK(buffer[1] == 4.5f); // Memory is writable and readable
     }
     memoryManager->Release();
 
     float testPtr[2] = { 2.5f, 5.5f };
     // Cannot import as import is disabled
-    BOOST_CHECK(!handle->Import(static_cast<void*>(testPtr), MemorySource::Malloc));
+    CHECK(!handle->Import(static_cast<void*>(testPtr), MemorySource::Malloc));
 }
 
-BOOST_AUTO_TEST_CASE(RefTensorHandleFactoryImport)
+TEST_CASE("RefTensorHandleFactoryImport")
 {
     std::shared_ptr<RefMemoryManager> memoryManager = std::make_shared<RefMemoryManager>();
     RefTensorHandleFactory handleFactory(memoryManager);
@@ -97,25 +98,25 @@
     memoryManager->Acquire();
 
     // No buffer allocated when import is enabled
-    BOOST_CHECK_THROW(handle->Map(), armnn::NullPointerException);
+    CHECK_THROWS_AS(handle->Map(), armnn::NullPointerException);
 
     float testPtr[2] = { 2.5f, 5.5f };
     // Correctly import
-    BOOST_CHECK(handle->Import(static_cast<void*>(testPtr), MemorySource::Malloc));
+    CHECK(handle->Import(static_cast<void*>(testPtr), MemorySource::Malloc));
     float* buffer = reinterpret_cast<float*>(handle->Map());
-    BOOST_CHECK(buffer != nullptr); // Yields a valid pointer after import
-    BOOST_CHECK(buffer == testPtr); // buffer is pointing to testPtr
+    CHECK(buffer != nullptr); // Yields a valid pointer after import
+    CHECK(buffer == testPtr); // buffer is pointing to testPtr
     // Memory is writable and readable with correct value
-    BOOST_CHECK(buffer[0] == 2.5f);
-    BOOST_CHECK(buffer[1] == 5.5f);
+    CHECK(buffer[0] == 2.5f);
+    CHECK(buffer[1] == 5.5f);
     buffer[0] = 3.5f;
     buffer[1] = 10.0f;
-    BOOST_CHECK(buffer[0] == 3.5f);
-    BOOST_CHECK(buffer[1] == 10.0f);
+    CHECK(buffer[0] == 3.5f);
+    CHECK(buffer[1] == 10.0f);
     memoryManager->Release();
 }
 
-BOOST_AUTO_TEST_CASE(RefTensorHandleImport)
+TEST_CASE("RefTensorHandleImport")
 {
     TensorInfo info({ 1, 1, 2, 1 }, DataType::Float32);
     RefTensorHandle handle(info, static_cast<unsigned int>(MemorySource::Malloc));
@@ -124,24 +125,24 @@
     handle.Allocate();
 
     // No buffer allocated when import is enabled
-    BOOST_CHECK_THROW(handle.Map(), armnn::NullPointerException);
+    CHECK_THROWS_AS(handle.Map(), armnn::NullPointerException);
 
     float testPtr[2] = { 2.5f, 5.5f };
     // Correctly import
-    BOOST_CHECK(handle.Import(static_cast<void*>(testPtr), MemorySource::Malloc));
+    CHECK(handle.Import(static_cast<void*>(testPtr), MemorySource::Malloc));
     float* buffer = reinterpret_cast<float*>(handle.Map());
-    BOOST_CHECK(buffer != nullptr); // Yields a valid pointer after import
-    BOOST_CHECK(buffer == testPtr); // buffer is pointing to testPtr
+    CHECK(buffer != nullptr); // Yields a valid pointer after import
+    CHECK(buffer == testPtr); // buffer is pointing to testPtr
     // Memory is writable and readable with correct value
-    BOOST_CHECK(buffer[0] == 2.5f);
-    BOOST_CHECK(buffer[1] == 5.5f);
+    CHECK(buffer[0] == 2.5f);
+    CHECK(buffer[1] == 5.5f);
     buffer[0] = 3.5f;
     buffer[1] = 10.0f;
-    BOOST_CHECK(buffer[0] == 3.5f);
-    BOOST_CHECK(buffer[1] == 10.0f);
+    CHECK(buffer[0] == 3.5f);
+    CHECK(buffer[1] == 10.0f);
 }
 
-BOOST_AUTO_TEST_CASE(RefTensorHandleGetCapabilities)
+TEST_CASE("RefTensorHandleGetCapabilities")
 {
     std::shared_ptr<RefMemoryManager> memoryManager = std::make_shared<RefMemoryManager>();
     RefTensorHandleFactory handleFactory(memoryManager);
@@ -155,10 +156,10 @@
     std::vector<Capability> capabilities = handleFactory.GetCapabilities(input,
                                                                          output,
                                                                          CapabilityClass::PaddingRequired);
-    BOOST_CHECK(capabilities.empty());
+    CHECK(capabilities.empty());
 }
 
-BOOST_AUTO_TEST_CASE(RefTensorHandleSupportsInPlaceComputation)
+TEST_CASE("RefTensorHandleSupportsInPlaceComputation")
 {
     std::shared_ptr<RefMemoryManager> memoryManager = std::make_shared<RefMemoryManager>();
     RefTensorHandleFactory handleFactory(memoryManager);
@@ -167,7 +168,7 @@
     ARMNN_ASSERT(!(handleFactory.SupportsInPlaceComputation()));
 }
 
-BOOST_AUTO_TEST_CASE(TestManagedConstTensorHandle)
+TEST_CASE("TestManagedConstTensorHandle")
 {
     // Initialize arguments
     void* mem = nullptr;
@@ -178,31 +179,31 @@
 
     // Test managed handle is initialized with m_Mapped unset and once Map() called its set
     ManagedConstTensorHandle managedHandle(passThroughHandle);
-    BOOST_CHECK(!managedHandle.IsMapped());
+    CHECK(!managedHandle.IsMapped());
     managedHandle.Map();
-    BOOST_CHECK(managedHandle.IsMapped());
+    CHECK(managedHandle.IsMapped());
 
     // Test it can then be unmapped
     managedHandle.Unmap();
-    BOOST_CHECK(!managedHandle.IsMapped());
+    CHECK(!managedHandle.IsMapped());
 
     // Test member function
-    BOOST_CHECK(managedHandle.GetTensorInfo() == info);
+    CHECK(managedHandle.GetTensorInfo() == info);
 
     // Test that nullptr tensor handle doesn't get mapped
     ManagedConstTensorHandle managedHandleNull(nullptr);
-    BOOST_CHECK(!managedHandleNull.IsMapped());
-    BOOST_CHECK_THROW(managedHandleNull.Map(), armnn::Exception);
-    BOOST_CHECK(!managedHandleNull.IsMapped());
+    CHECK(!managedHandleNull.IsMapped());
+    CHECK_THROWS_AS(managedHandleNull.Map(), armnn::Exception);
+    CHECK(!managedHandleNull.IsMapped());
 
     // Check Unmap() when m_Mapped already false
     managedHandleNull.Unmap();
-    BOOST_CHECK(!managedHandleNull.IsMapped());
+    CHECK(!managedHandleNull.IsMapped());
 }
 
 #if !defined(__ANDROID__)
 // Only run these tests on non Android platforms
-BOOST_AUTO_TEST_CASE(CheckSourceType)
+TEST_CASE("CheckSourceType")
 {
     TensorInfo info({1}, DataType::Float32);
     RefTensorHandle handle(info, static_cast<unsigned int>(MemorySource::Malloc));
@@ -210,18 +211,18 @@
     int* testPtr = new int(4);
 
     // Not supported
-    BOOST_CHECK(!handle.Import(static_cast<void *>(testPtr), MemorySource::DmaBuf));
+    CHECK(!handle.Import(static_cast<void *>(testPtr), MemorySource::DmaBuf));
 
     // Not supported
-    BOOST_CHECK(!handle.Import(static_cast<void *>(testPtr), MemorySource::DmaBufProtected));
+    CHECK(!handle.Import(static_cast<void *>(testPtr), MemorySource::DmaBufProtected));
 
     // Supported
-    BOOST_CHECK(handle.Import(static_cast<void *>(testPtr), MemorySource::Malloc));
+    CHECK(handle.Import(static_cast<void *>(testPtr), MemorySource::Malloc));
 
     delete testPtr;
 }
 
-BOOST_AUTO_TEST_CASE(ReusePointer)
+TEST_CASE("ReusePointer")
 {
     TensorInfo info({1}, DataType::Float32);
     RefTensorHandle handle(info, static_cast<unsigned int>(MemorySource::Malloc));
@@ -231,12 +232,12 @@
     handle.Import(static_cast<void *>(testPtr), MemorySource::Malloc);
 
     // Reusing previously Imported pointer
-    BOOST_CHECK(handle.Import(static_cast<void *>(testPtr), MemorySource::Malloc));
+    CHECK(handle.Import(static_cast<void *>(testPtr), MemorySource::Malloc));
 
     delete testPtr;
 }
 
-BOOST_AUTO_TEST_CASE(MisalignedPointer)
+TEST_CASE("MisalignedPointer")
 {
     TensorInfo info({2}, DataType::Float32);
     RefTensorHandle handle(info, static_cast<unsigned int>(MemorySource::Malloc));
@@ -247,11 +248,11 @@
     // Increment pointer by 1 byte
     void* misalignedPtr = static_cast<void*>(reinterpret_cast<char*>(testPtr) + 1);
 
-    BOOST_CHECK(!handle.Import(misalignedPtr, MemorySource::Malloc));
+    CHECK(!handle.Import(misalignedPtr, MemorySource::Malloc));
 
     delete[] testPtr;
 }
 
 #endif
 
-BOOST_AUTO_TEST_SUITE_END()
+} // TEST_SUITE("RefTensorHandleTests")