IVGCVSW-5963 'Move unit tests to new framework'

* Replaced the Boost.Test macros with their doctest equivalents in the
  ArmNN unit tests (the mapping used throughout is sketched below)
* Replaced BOOST_ASSERT with ARMNN_ASSERT in CreateWorkload.hpp
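
For reference, the Boost.Test to doctest mapping applied throughout this
patch follows the pattern below. The sketch is illustrative only: the
suite, case, and variable names are examples rather than code from this
patch, and it assumes a standalone translation unit that supplies its own
main() via DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN; the ArmNN test binaries
wire doctest up through their own build setup instead.

    // Minimal, self-contained sketch of the doctest idioms this patch adopts.
    // Names below are illustrative and do not appear in the patch.
    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
    #include <doctest/doctest.h>

    #include <stdexcept>
    #include <vector>

    TEST_SUITE("ExampleSuite")            // was BOOST_AUTO_TEST_SUITE(ExampleSuite)
    {

    TEST_CASE("ExampleCase")              // was BOOST_AUTO_TEST_CASE(ExampleCase)
    {
        std::vector<int> values{1, 2, 3};

        // was BOOST_TEST / BOOST_CHECK
        CHECK(values.size() == 3u);
        // was BOOST_CHECK_MESSAGE
        CHECK_MESSAGE(!values.empty(), "values should not be empty");
        // was BOOST_CHECK_NO_THROW
        CHECK_NOTHROW(values.at(0));
        // was BOOST_CHECK_THROW; at(10) is out of range for a 3-element vector
        CHECK_THROWS_AS(values.at(10), std::out_of_range);
    }

    }                                     // was BOOST_AUTO_TEST_SUITE_END()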

Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Ia9cf5fc72775878885c5f864abf2c56b3a935f1a
diff --git a/CMakeLists.txt b/CMakeLists.txt
index 5bfc1b0..0f8ae2c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -619,6 +619,7 @@
         src/profiling/test/SendTimelinePacketTests.cpp
         src/profiling/test/TimelinePacketTests.cpp
         src/profiling/test/TimelineUtilityMethodsTests.cpp
+        third-party/doctest/doctest.h
         )
 
     if(ARMNNREF)
diff --git a/profiling/server/src/basePipeServer/tests/BasePipeServerTests.cpp b/profiling/server/src/basePipeServer/tests/BasePipeServerTests.cpp
index 0917927..f307d98 100644
--- a/profiling/server/src/basePipeServer/tests/BasePipeServerTests.cpp
+++ b/profiling/server/src/basePipeServer/tests/BasePipeServerTests.cpp
@@ -8,37 +8,35 @@
 #include <SocketProfilingConnection.hpp>
 #include <Processes.hpp>
 
-#include <boost/test/test_tools.hpp>
-#include <boost/test/unit_test_suite.hpp>
+#include <doctest/doctest.h>
 
-
-BOOST_AUTO_TEST_SUITE(BasePipeServerTests)
-
+TEST_SUITE("BasePipeServerTests")
+{
 using namespace armnn;
 using namespace arm::pipe;
 
-BOOST_AUTO_TEST_CASE(BasePipeServerTest)
+TEST_CASE("BasePipeServerTest")
 {
     // Setup the mock service to bind to the UDS.
     std::string udsNamespace = "gatord_namespace";
 
     // Try to initialize a listening socket through the ConnectionHandler
-    BOOST_CHECK_NO_THROW(ConnectionHandler connectionHandler(udsNamespace, true));
+    CHECK_NOTHROW(ConnectionHandler connectionHandler(udsNamespace, true));
 
-    // The socket should close once we leave the scope of BOOST_CHECK_NO_THROW
+    // The socket should close once we leave the scope of CHECK_NOTHROW
     // and socketProfilingConnection should fail to connect
-    BOOST_CHECK_THROW(profiling::SocketProfilingConnection socketProfilingConnection,
+    CHECK_THROWS_AS(profiling::SocketProfilingConnection socketProfilingConnection,
                       arm::pipe::SocketConnectionException);
 
     // Try to initialize a listening socket through the ConnectionHandler again
     ConnectionHandler connectionHandler(udsNamespace, true);
     // socketProfilingConnection should connect now
     profiling::SocketProfilingConnection socketProfilingConnection;
-    BOOST_TEST(socketProfilingConnection.IsOpen());
+    CHECK(socketProfilingConnection.IsOpen());
 
     auto basePipeServer = connectionHandler.GetNewBasePipeServer(false);
     // GetNewBasePipeServer will return null if it fails to create a socket
-    BOOST_TEST(basePipeServer.get());
+    CHECK(basePipeServer.get());
 
     profiling::BufferManager bufferManager;
     profiling::SendCounterPacket sendCounterPacket(bufferManager);
@@ -50,15 +48,15 @@
     const unsigned char* readBuffer = packetBuffer->GetReadableData();
     unsigned int readBufferSize = packetBuffer->GetSize();
 
-    BOOST_TEST(readBuffer);
-    BOOST_TEST(readBufferSize > 0u);
+    CHECK(readBuffer);
+    CHECK(readBufferSize > 0u);
 
     socketProfilingConnection.WritePacket(readBuffer,readBufferSize);
     bufferManager.MarkRead(packetBuffer);
 
-    BOOST_TEST(basePipeServer.get()->WaitForStreamMetaData());
-    BOOST_TEST(basePipeServer.get()->GetStreamMetadataPid() == armnnUtils::Processes::GetCurrentId());
-    BOOST_TEST(basePipeServer.get()->GetStreamMetadataMaxDataLen() == MAX_METADATA_PACKET_LENGTH);
+    CHECK(basePipeServer.get()->WaitForStreamMetaData());
+    CHECK(basePipeServer.get()->GetStreamMetadataPid() == armnnUtils::Processes::GetCurrentId());
+    CHECK(basePipeServer.get()->GetStreamMetadataMaxDataLen() == MAX_METADATA_PACKET_LENGTH);
 
     // Now try a simple PeriodicCounterSelectionPacket
     sendCounterPacket.SendPeriodicCounterSelectionPacket(50, {1,2,3,4,5});
@@ -67,18 +65,18 @@
     readBuffer = packetBuffer->GetReadableData();
     readBufferSize = packetBuffer->GetSize();
 
-    BOOST_TEST(readBuffer);
-    BOOST_TEST(readBufferSize > 0u);
+    CHECK(readBuffer);
+    CHECK(readBufferSize > 0u);
 
     socketProfilingConnection.WritePacket(readBuffer,readBufferSize);
     bufferManager.MarkRead(packetBuffer);
 
     auto packet1 = basePipeServer.get()->WaitForPacket(500);
 
-    BOOST_TEST(!packet1.IsEmpty());
-    BOOST_TEST(packet1.GetPacketFamily() == 0);
-    BOOST_TEST(packet1.GetPacketId() == 4);
-    BOOST_TEST(packet1.GetLength() == 14);
+    CHECK(!packet1.IsEmpty());
+    CHECK(packet1.GetPacketFamily() == 0);
+    CHECK(packet1.GetPacketId() == 4);
+    CHECK(packet1.GetLength() == 14);
 
     // Try and send the packet back to the client
     basePipeServer.get()->SendPacket(packet1.GetPacketFamily(),
@@ -88,12 +86,12 @@
 
     auto packet2 = socketProfilingConnection.ReadPacket(500);
 
-    BOOST_TEST(!packet2.IsEmpty());
-    BOOST_TEST(packet2.GetPacketFamily() == 0);
-    BOOST_TEST(packet2.GetPacketId() == 4);
-    BOOST_TEST(packet2.GetLength() == 14);
+    CHECK(!packet2.IsEmpty());
+    CHECK(packet2.GetPacketFamily() == 0);
+    CHECK(packet2.GetPacketId() == 4);
+    CHECK(packet2.GetLength() == 14);
 
     socketProfilingConnection.Close();
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/profiling/server/src/timelineDecoder/tests/TimelineTests.cpp b/profiling/server/src/timelineDecoder/tests/TimelineTests.cpp
index e779706..4fcd7a0 100644
--- a/profiling/server/src/timelineDecoder/tests/TimelineTests.cpp
+++ b/profiling/server/src/timelineDecoder/tests/TimelineTests.cpp
@@ -15,11 +15,10 @@
 #include <PacketBuffer.hpp>
 #include <TimelinePacketWriterFactory.hpp>
 
-#include <boost/test/test_tools.hpp>
-#include <boost/test/unit_test_suite.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(TimelineDecoderTests)
-
+TEST_SUITE("TimelineDecoderTests")
+{
 void SendTimelinePacketToCommandHandler(const unsigned char* packetBuffer,
                                         arm::pipe::CommandHandlerFunctor& CommandHandler)
 {
@@ -38,7 +37,7 @@
 
     arm::pipe::Packet packet(header[0], PacketDataLength, uniquePacketData);
 
-    BOOST_CHECK(std::memcmp(packetBuffer + offset, packet.GetData(), packet.GetLength()) == 0);
+    CHECK(std::memcmp(packetBuffer + offset, packet.GetData(), packet.GetLength()) == 0);
 
     CommandHandler(packet);
 }
@@ -69,7 +68,7 @@
     model.m_Relationships.emplace_back(relationship);
 }
 
-BOOST_AUTO_TEST_CASE(TimelineDirectoryTest)
+TEST_CASE("TimelineDirectoryTest")
 {
     uint32_t uint8_t_size  = sizeof(uint8_t);
     uint32_t uint32_t_size = sizeof(uint32_t);
@@ -101,13 +100,13 @@
     std::unique_ptr<armnn::profiling::IPacketBuffer> packetBuffer = bufferManager.GetReadableBuffer();
 
     uint8_t readStreamVersion = ReadUint8(packetBuffer, offset);
-    BOOST_CHECK(readStreamVersion == 4);
+    CHECK(readStreamVersion == 4);
     offset += uint8_t_size;
     uint8_t readPointerBytes = ReadUint8(packetBuffer, offset);
-    BOOST_CHECK(readPointerBytes == uint64_t_size);
+    CHECK(readPointerBytes == uint64_t_size);
     offset += uint8_t_size;
     uint8_t readThreadIdBytes = ReadUint8(packetBuffer, offset);
-    BOOST_CHECK(readThreadIdBytes == armnn::profiling::ThreadIdSize);
+    CHECK(readThreadIdBytes == armnn::profiling::ThreadIdSize);
     offset += uint8_t_size;
 
     uint32_t declarationSize = arm::pipe::ReadUint32(packetBuffer->GetReadableData(), offset);
@@ -126,25 +125,25 @@
         arm::pipe::SwTraceMessage& bufferMessage = swTraceBufferMessages[index];
         arm::pipe::SwTraceMessage& handlerMessage = timelineDirectoryCaptureCommandHandler.m_SwTraceMessages[index];
 
-        BOOST_CHECK(bufferMessage.m_Name == handlerMessage.m_Name);
-        BOOST_CHECK(bufferMessage.m_UiName == handlerMessage.m_UiName);
-        BOOST_CHECK(bufferMessage.m_Id == handlerMessage.m_Id);
+        CHECK(bufferMessage.m_Name == handlerMessage.m_Name);
+        CHECK(bufferMessage.m_UiName == handlerMessage.m_UiName);
+        CHECK(bufferMessage.m_Id == handlerMessage.m_Id);
 
-        BOOST_CHECK(bufferMessage.m_ArgTypes.size() == handlerMessage.m_ArgTypes.size());
+        CHECK(bufferMessage.m_ArgTypes.size() == handlerMessage.m_ArgTypes.size());
         for(uint32_t i = 0; i < bufferMessage.m_ArgTypes.size(); ++i)
         {
-            BOOST_CHECK(bufferMessage.m_ArgTypes[i] == handlerMessage.m_ArgTypes[i]);
+            CHECK(bufferMessage.m_ArgTypes[i] == handlerMessage.m_ArgTypes[i]);
         }
 
-        BOOST_CHECK(bufferMessage.m_ArgNames.size() == handlerMessage.m_ArgNames.size());
+        CHECK(bufferMessage.m_ArgNames.size() == handlerMessage.m_ArgNames.size());
         for(uint32_t i = 0; i < bufferMessage.m_ArgNames.size(); ++i)
         {
-            BOOST_CHECK(bufferMessage.m_ArgNames[i] == handlerMessage.m_ArgNames[i]);
+            CHECK(bufferMessage.m_ArgNames[i] == handlerMessage.m_ArgNames[i]);
         }
     }
 }
 
-BOOST_AUTO_TEST_CASE(TimelineCaptureTest)
+TEST_CASE("TimelineCaptureTest")
 {
     armnn::profiling::BufferManager bufferManager(50);
     armnn::profiling::TimelinePacketWriterFactory timelinePacketWriterFactory(bufferManager);
@@ -163,11 +162,11 @@
         armnn::profiling::ThreadIdSize);
 
     using Status = arm::pipe::ITimelineDecoder::TimelineStatus;
-    BOOST_CHECK(timelineDecoder.SetEntityCallback(PushEntity)             == Status::TimelineStatus_Success);
-    BOOST_CHECK(timelineDecoder.SetEventClassCallback(PushEventClass)     == Status::TimelineStatus_Success);
-    BOOST_CHECK(timelineDecoder.SetEventCallback(PushEvent)               == Status::TimelineStatus_Success);
-    BOOST_CHECK(timelineDecoder.SetLabelCallback(PushLabel)               == Status::TimelineStatus_Success);
-    BOOST_CHECK(timelineDecoder.SetRelationshipCallback(PushRelationship) == Status::TimelineStatus_Success);
+    CHECK(timelineDecoder.SetEntityCallback(PushEntity)             == Status::TimelineStatus_Success);
+    CHECK(timelineDecoder.SetEventClassCallback(PushEventClass)     == Status::TimelineStatus_Success);
+    CHECK(timelineDecoder.SetEventCallback(PushEvent)               == Status::TimelineStatus_Success);
+    CHECK(timelineDecoder.SetLabelCallback(PushLabel)               == Status::TimelineStatus_Success);
+    CHECK(timelineDecoder.SetRelationshipCallback(PushRelationship) == Status::TimelineStatus_Success);
 
     const uint64_t entityGuid = 111111u;
     const uint64_t eventClassGuid = 22222u;
@@ -241,26 +240,26 @@
 
     for (unsigned long i = 0; i < 10; ++i)
     {
-        BOOST_CHECK(model.m_Entities[i].m_Guid == entityGuid);
+        CHECK(model.m_Entities[i].m_Guid == entityGuid);
 
-        BOOST_CHECK(model.m_EventClasses[i].m_Guid == eventClassGuid);
+        CHECK(model.m_EventClasses[i].m_Guid == eventClassGuid);
 
-        BOOST_CHECK(model.m_Events[i].m_TimeStamp == timestamp);
-        BOOST_CHECK(model.m_Events[i].m_ThreadId == uint64ThreadId);
-        BOOST_CHECK(model.m_Events[i].m_Guid == eventGuid);
+        CHECK(model.m_Events[i].m_TimeStamp == timestamp);
+        CHECK(model.m_Events[i].m_ThreadId == uint64ThreadId);
+        CHECK(model.m_Events[i].m_Guid == eventGuid);
 
-        BOOST_CHECK(model.m_Labels[i].m_Guid == labelGuid);
-        BOOST_CHECK(model.m_Labels[i].m_Name == labelName);
+        CHECK(model.m_Labels[i].m_Guid == labelGuid);
+        CHECK(model.m_Labels[i].m_Name == labelName);
 
-        BOOST_CHECK(model.m_Relationships[i].m_RelationshipType ==
+        CHECK(model.m_Relationships[i].m_RelationshipType ==
             arm::pipe::ITimelineDecoder::RelationshipType::DataLink);
-        BOOST_CHECK(model.m_Relationships[i].m_Guid == relationshipGuid);
-        BOOST_CHECK(model.m_Relationships[i].m_HeadGuid == headGuid);
-        BOOST_CHECK(model.m_Relationships[i].m_TailGuid == tailGuid);
+        CHECK(model.m_Relationships[i].m_Guid == relationshipGuid);
+        CHECK(model.m_Relationships[i].m_HeadGuid == headGuid);
+        CHECK(model.m_Relationships[i].m_TailGuid == tailGuid);
     }
 }
 
-BOOST_AUTO_TEST_CASE(TimelineCaptureTestMultipleStringsInBuffer)
+TEST_CASE("TimelineCaptureTestMultipleStringsInBuffer")
 {
     armnn::profiling::BufferManager               bufferManager(50);
     armnn::profiling::TimelinePacketWriterFactory timelinePacketWriterFactory(bufferManager);
@@ -278,11 +277,11 @@
         armnn::profiling::ThreadIdSize);
 
     using Status = arm::pipe::TimelineDecoder::TimelineStatus;
-    BOOST_CHECK(timelineDecoder.SetEntityCallback(PushEntity) == Status::TimelineStatus_Success);
-    BOOST_CHECK(timelineDecoder.SetEventClassCallback(PushEventClass) == Status::TimelineStatus_Success);
-    BOOST_CHECK(timelineDecoder.SetEventCallback(PushEvent) == Status::TimelineStatus_Success);
-    BOOST_CHECK(timelineDecoder.SetLabelCallback(PushLabel) == Status::TimelineStatus_Success);
-    BOOST_CHECK(timelineDecoder.SetRelationshipCallback(PushRelationship) == Status::TimelineStatus_Success);
+    CHECK(timelineDecoder.SetEntityCallback(PushEntity) == Status::TimelineStatus_Success);
+    CHECK(timelineDecoder.SetEventClassCallback(PushEventClass) == Status::TimelineStatus_Success);
+    CHECK(timelineDecoder.SetEventCallback(PushEvent) == Status::TimelineStatus_Success);
+    CHECK(timelineDecoder.SetLabelCallback(PushLabel) == Status::TimelineStatus_Success);
+    CHECK(timelineDecoder.SetRelationshipCallback(PushRelationship) == Status::TimelineStatus_Success);
 
     const uint64_t entityGuid         = 111111u;
     const uint64_t eventClassGuid     = 22222u;
@@ -346,28 +345,28 @@
 
     for ( unsigned long i = 0; i < 9; ++i )
     {
-        BOOST_CHECK(model.m_Entities[i].m_Guid == entityGuid);
+        CHECK(model.m_Entities[i].m_Guid == entityGuid);
 
-        BOOST_CHECK(model.m_EventClasses[i].m_Guid == eventClassGuid);
+        CHECK(model.m_EventClasses[i].m_Guid == eventClassGuid);
 
-        BOOST_CHECK(model.m_Labels[i].m_Guid == labelGuid);
+        CHECK(model.m_Labels[i].m_Guid == labelGuid);
 
-        BOOST_CHECK(model.m_Events[i].m_TimeStamp == timestamp);
-        BOOST_CHECK(model.m_Events[i].m_ThreadId == uint64ThreadId);
-        BOOST_CHECK(model.m_Events[i].m_Guid == eventGuid);
+        CHECK(model.m_Events[i].m_TimeStamp == timestamp);
+        CHECK(model.m_Events[i].m_ThreadId == uint64ThreadId);
+        CHECK(model.m_Events[i].m_Guid == eventGuid);
 
-        BOOST_CHECK(model.m_Relationships[i].m_RelationshipType ==
+        CHECK(model.m_Relationships[i].m_RelationshipType ==
             arm::pipe::ITimelineDecoder::RelationshipType::DataLink);
-        BOOST_CHECK(model.m_Relationships[i].m_Guid == relationshipGuid);
-        BOOST_CHECK(model.m_Relationships[i].m_HeadGuid == headGuid);
-        BOOST_CHECK(model.m_Relationships[i].m_TailGuid == tailGuid);
+        CHECK(model.m_Relationships[i].m_Guid == relationshipGuid);
+        CHECK(model.m_Relationships[i].m_HeadGuid == headGuid);
+        CHECK(model.m_Relationships[i].m_TailGuid == tailGuid);
     }
     for ( unsigned long i = 0; i < 9; i += 3 )
     {
-        BOOST_CHECK(model.m_Labels[i].m_Name == labelName);
-        BOOST_CHECK(model.m_Labels[i+1].m_Name == labelName2);
-        BOOST_CHECK(model.m_Labels[i+2].m_Name == labelName3);
+        CHECK(model.m_Labels[i].m_Name == labelName);
+        CHECK(model.m_Labels[i+1].m_Name == labelName2);
+        CHECK(model.m_Labels[i+2].m_Name == labelName3);
     }
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/ConstTensorLayerVisitor.cpp b/src/armnn/test/ConstTensorLayerVisitor.cpp
index f3485c7..baafcf4 100644
--- a/src/armnn/test/ConstTensorLayerVisitor.cpp
+++ b/src/armnn/test/ConstTensorLayerVisitor.cpp
@@ -6,56 +6,56 @@
 #include "ConstTensorLayerVisitor.hpp"
 #include "Network.hpp"
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 namespace armnn
 {
 
 void TestConvolution2dLayerVisitor::CheckDescriptor(const Convolution2dDescriptor &convolution2dDescriptor)
 {
-    BOOST_CHECK(m_Descriptor.m_PadLeft == convolution2dDescriptor.m_PadLeft);
-    BOOST_CHECK(m_Descriptor.m_PadRight == convolution2dDescriptor.m_PadRight);
-    BOOST_CHECK(m_Descriptor.m_PadTop == convolution2dDescriptor.m_PadTop);
-    BOOST_CHECK(m_Descriptor.m_PadBottom == convolution2dDescriptor.m_PadBottom);
-    BOOST_CHECK(m_Descriptor.m_StrideX == convolution2dDescriptor.m_StrideX);
-    BOOST_CHECK(m_Descriptor.m_StrideY == convolution2dDescriptor.m_StrideY);
-    BOOST_CHECK(m_Descriptor.m_BiasEnabled == convolution2dDescriptor.m_BiasEnabled);
-    BOOST_CHECK(m_Descriptor.m_DataLayout == convolution2dDescriptor.m_DataLayout);
+    CHECK(m_Descriptor.m_PadLeft == convolution2dDescriptor.m_PadLeft);
+    CHECK(m_Descriptor.m_PadRight == convolution2dDescriptor.m_PadRight);
+    CHECK(m_Descriptor.m_PadTop == convolution2dDescriptor.m_PadTop);
+    CHECK(m_Descriptor.m_PadBottom == convolution2dDescriptor.m_PadBottom);
+    CHECK(m_Descriptor.m_StrideX == convolution2dDescriptor.m_StrideX);
+    CHECK(m_Descriptor.m_StrideY == convolution2dDescriptor.m_StrideY);
+    CHECK(m_Descriptor.m_BiasEnabled == convolution2dDescriptor.m_BiasEnabled);
+    CHECK(m_Descriptor.m_DataLayout == convolution2dDescriptor.m_DataLayout);
 }
 
 void TestDepthwiseConvolution2dLayerVisitor::CheckDescriptor(
         const DepthwiseConvolution2dDescriptor& convolution2dDescriptor)
 {
-    BOOST_CHECK(m_Descriptor.m_PadLeft == convolution2dDescriptor.m_PadLeft);
-    BOOST_CHECK(m_Descriptor.m_PadRight == convolution2dDescriptor.m_PadRight);
-    BOOST_CHECK(m_Descriptor.m_PadTop == convolution2dDescriptor.m_PadTop);
-    BOOST_CHECK(m_Descriptor.m_PadBottom == convolution2dDescriptor.m_PadBottom);
-    BOOST_CHECK(m_Descriptor.m_StrideX == convolution2dDescriptor.m_StrideX);
-    BOOST_CHECK(m_Descriptor.m_StrideY == convolution2dDescriptor.m_StrideY);
-    BOOST_CHECK(m_Descriptor.m_BiasEnabled == convolution2dDescriptor.m_BiasEnabled);
-    BOOST_CHECK(m_Descriptor.m_DataLayout == convolution2dDescriptor.m_DataLayout);
+    CHECK(m_Descriptor.m_PadLeft == convolution2dDescriptor.m_PadLeft);
+    CHECK(m_Descriptor.m_PadRight == convolution2dDescriptor.m_PadRight);
+    CHECK(m_Descriptor.m_PadTop == convolution2dDescriptor.m_PadTop);
+    CHECK(m_Descriptor.m_PadBottom == convolution2dDescriptor.m_PadBottom);
+    CHECK(m_Descriptor.m_StrideX == convolution2dDescriptor.m_StrideX);
+    CHECK(m_Descriptor.m_StrideY == convolution2dDescriptor.m_StrideY);
+    CHECK(m_Descriptor.m_BiasEnabled == convolution2dDescriptor.m_BiasEnabled);
+    CHECK(m_Descriptor.m_DataLayout == convolution2dDescriptor.m_DataLayout);
 }
 
 void TestFullyConnectedLayerVistor::CheckDescriptor(const FullyConnectedDescriptor& descriptor)
 {
-    BOOST_CHECK(m_Descriptor.m_BiasEnabled == descriptor.m_BiasEnabled);
-    BOOST_CHECK(m_Descriptor.m_TransposeWeightMatrix == descriptor.m_TransposeWeightMatrix);
+    CHECK(m_Descriptor.m_BiasEnabled == descriptor.m_BiasEnabled);
+    CHECK(m_Descriptor.m_TransposeWeightMatrix == descriptor.m_TransposeWeightMatrix);
 }
 
 void TestBatchNormalizationLayerVisitor::CheckDescriptor(const BatchNormalizationDescriptor& descriptor)
 {
-    BOOST_CHECK(m_Descriptor.m_Eps == descriptor.m_Eps);
-    BOOST_CHECK(m_Descriptor.m_DataLayout == descriptor.m_DataLayout);
+    CHECK(m_Descriptor.m_Eps == descriptor.m_Eps);
+    CHECK(m_Descriptor.m_DataLayout == descriptor.m_DataLayout);
 }
 
 void TestLstmLayerVisitor::CheckDescriptor(const LstmDescriptor& descriptor)
 {
-    BOOST_CHECK(m_Descriptor.m_ActivationFunc == descriptor.m_ActivationFunc);
-    BOOST_CHECK(m_Descriptor.m_ClippingThresCell == descriptor.m_ClippingThresCell);
-    BOOST_CHECK(m_Descriptor.m_ClippingThresProj == descriptor.m_ClippingThresProj);
-    BOOST_CHECK(m_Descriptor.m_CifgEnabled == descriptor.m_CifgEnabled);
-    BOOST_CHECK(m_Descriptor.m_PeepholeEnabled == descriptor.m_PeepholeEnabled);
-    BOOST_CHECK(m_Descriptor.m_ProjectionEnabled == descriptor.m_ProjectionEnabled);
+    CHECK(m_Descriptor.m_ActivationFunc == descriptor.m_ActivationFunc);
+    CHECK(m_Descriptor.m_ClippingThresCell == descriptor.m_ClippingThresCell);
+    CHECK(m_Descriptor.m_ClippingThresProj == descriptor.m_ClippingThresProj);
+    CHECK(m_Descriptor.m_CifgEnabled == descriptor.m_CifgEnabled);
+    CHECK(m_Descriptor.m_PeepholeEnabled == descriptor.m_PeepholeEnabled);
+    CHECK(m_Descriptor.m_ProjectionEnabled == descriptor.m_ProjectionEnabled);
 }
 
 void TestLstmLayerVisitor::CheckConstTensorPtrs(const std::string& name,
@@ -64,11 +64,11 @@
 {
     if (expected == nullptr)
     {
-        BOOST_CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
+        CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
     }
     else
     {
-        BOOST_CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
+        CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
         if (actual != nullptr)
         {
             CheckConstTensors(*expected, *actual);
@@ -113,11 +113,11 @@
 {
     if (expected == nullptr)
     {
-        BOOST_CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
+        CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
     }
     else
     {
-        BOOST_CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
+        CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
         if (actual != nullptr)
         {
             CheckConstTensors(*expected, *actual);
@@ -127,11 +127,11 @@
 
 void TestQLstmLayerVisitor::CheckDescriptor(const QLstmDescriptor& descriptor)
 {
-    BOOST_CHECK(m_Descriptor.m_CellClip == descriptor.m_CellClip);
-    BOOST_CHECK(m_Descriptor.m_ProjectionClip == descriptor.m_ProjectionClip);
-    BOOST_CHECK(m_Descriptor.m_CifgEnabled == descriptor.m_CifgEnabled);
-    BOOST_CHECK(m_Descriptor.m_PeepholeEnabled == descriptor.m_PeepholeEnabled);
-    BOOST_CHECK(m_Descriptor.m_ProjectionEnabled == descriptor.m_ProjectionEnabled);
+    CHECK(m_Descriptor.m_CellClip == descriptor.m_CellClip);
+    CHECK(m_Descriptor.m_ProjectionClip == descriptor.m_ProjectionClip);
+    CHECK(m_Descriptor.m_CifgEnabled == descriptor.m_CifgEnabled);
+    CHECK(m_Descriptor.m_PeepholeEnabled == descriptor.m_PeepholeEnabled);
+    CHECK(m_Descriptor.m_ProjectionEnabled == descriptor.m_ProjectionEnabled);
 }
 
 void TestQLstmLayerVisitor::CheckInputParameters(const LstmInputParams& inputParams)
@@ -211,11 +211,11 @@
 {
     if (expected == nullptr)
     {
-        BOOST_CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
+        CHECK_MESSAGE(actual == nullptr, name + " actual should have been a nullptr");
     }
     else
     {
-        BOOST_CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
+        CHECK_MESSAGE(actual != nullptr, name + " actual should have been set");
         if (actual != nullptr)
         {
             CheckConstTensors(*expected, *actual);
@@ -263,9 +263,9 @@
     CheckConstTensorPtrs("OutputGateBias", m_InputParams.m_OutputGateBias, inputParams.m_OutputGateBias);
 }
 
-BOOST_AUTO_TEST_SUITE(TestConstTensorLayerVisitor)
-
-BOOST_AUTO_TEST_CASE(CheckConvolution2dLayer)
+TEST_SUITE("TestConstTensorLayerVisitor")
+{
+TEST_CASE("CheckConvolution2dLayer")
 {
     Convolution2dDescriptor descriptor;
     descriptor.m_PadLeft = 2;
@@ -288,7 +288,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckNamedConvolution2dLayer)
+TEST_CASE("CheckNamedConvolution2dLayer")
 {
     const char* layerName = "Convolution2dLayer";
     Convolution2dDescriptor descriptor;
@@ -312,7 +312,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckConvolution2dLayerWithBiases)
+TEST_CASE("CheckConvolution2dLayerWithBiases")
 {
     Convolution2dDescriptor descriptor;
     descriptor.m_PadLeft = 2;
@@ -341,7 +341,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckNamedConvolution2dLayerWithBiases)
+TEST_CASE("CheckNamedConvolution2dLayerWithBiases")
 {
     const char* layerName = "Convolution2dLayer";
     Convolution2dDescriptor descriptor;
@@ -371,7 +371,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckDepthwiseConvolution2dLayer)
+TEST_CASE("CheckDepthwiseConvolution2dLayer")
 {
     DepthwiseConvolution2dDescriptor descriptor;
     descriptor.m_PadLeft = 2;
@@ -394,7 +394,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckNamedDepthwiseConvolution2dLayer)
+TEST_CASE("CheckNamedDepthwiseConvolution2dLayer")
 {
     const char* layerName = "DepthwiseConvolution2dLayer";
     DepthwiseConvolution2dDescriptor descriptor;
@@ -421,7 +421,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckDepthwiseConvolution2dLayerWithBiases)
+TEST_CASE("CheckDepthwiseConvolution2dLayerWithBiases")
 {
     DepthwiseConvolution2dDescriptor descriptor;
     descriptor.m_PadLeft = 2;
@@ -450,7 +450,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckNamedDepthwiseConvolution2dLayerWithBiases)
+TEST_CASE("CheckNamedDepthwiseConvolution2dLayerWithBiases")
 {
     const char* layerName = "DepthwiseConvolution2dLayer";
     DepthwiseConvolution2dDescriptor descriptor;
@@ -480,7 +480,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckFullyConnectedLayer)
+TEST_CASE("CheckFullyConnectedLayer")
 {
     FullyConnectedDescriptor descriptor;
     descriptor.m_TransposeWeightMatrix = true;
@@ -497,7 +497,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckNamedFullyConnectedLayer)
+TEST_CASE("CheckNamedFullyConnectedLayer")
 {
     const char* layerName = "FullyConnectedLayer";
     FullyConnectedDescriptor descriptor;
@@ -515,7 +515,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckFullyConnectedLayerWithBiases)
+TEST_CASE("CheckFullyConnectedLayerWithBiases")
 {
     FullyConnectedDescriptor descriptor;
     descriptor.m_TransposeWeightMatrix = true;
@@ -538,7 +538,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckNamedFullyConnectedLayerWithBiases)
+TEST_CASE("CheckNamedFullyConnectedLayerWithBiases")
 {
     const char* layerName = "FullyConnectedLayer";
     FullyConnectedDescriptor descriptor;
@@ -562,7 +562,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckBatchNormalizationLayer)
+TEST_CASE("CheckBatchNormalizationLayer")
 {
     BatchNormalizationDescriptor descriptor;
     descriptor.m_Eps = 0.0002f;
@@ -592,7 +592,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckNamedBatchNormalizationLayer)
+TEST_CASE("CheckNamedBatchNormalizationLayer")
 {
     const char* layerName = "BatchNormalizationLayer";
     BatchNormalizationDescriptor descriptor;
@@ -624,7 +624,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckConstLayer)
+TEST_CASE("CheckConstLayer")
 {
     std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
     std::vector<unsigned int> dimensions = {1, 1, 3, 3};
@@ -638,7 +638,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckNamedConstLayer)
+TEST_CASE("CheckNamedConstLayer")
 {
     const char* layerName = "ConstantLayer";
     std::vector<float> data = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0};
@@ -653,7 +653,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckLstmLayerBasic)
+TEST_CASE("CheckLstmLayerBasic")
 {
     LstmDescriptor descriptor;
     descriptor.m_ActivationFunc = 3;
@@ -725,7 +725,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerBasic)
+TEST_CASE("CheckNamedLstmLayerBasic")
 {
     const char* layerName = "LstmLayer";
     LstmDescriptor descriptor;
@@ -798,7 +798,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckLstmLayerCifgDisabled)
+TEST_CASE("CheckLstmLayerCifgDisabled")
 {
     LstmDescriptor descriptor;
     descriptor.m_ActivationFunc = 3;
@@ -889,7 +889,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerCifgDisabled)
+TEST_CASE("CheckNamedLstmLayerCifgDisabled")
 {
     const char* layerName = "LstmLayer";
     LstmDescriptor descriptor;
@@ -982,7 +982,7 @@
 }
 
 // TODO add one with peephole
-BOOST_AUTO_TEST_CASE(CheckLstmLayerPeephole)
+TEST_CASE("CheckLstmLayerPeephole")
 {
     LstmDescriptor descriptor;
     descriptor.m_ActivationFunc = 3;
@@ -1068,7 +1068,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckLstmLayerPeepholeCifgDisabled)
+TEST_CASE("CheckLstmLayerPeepholeCifgDisabled")
 {
     LstmDescriptor descriptor;
     descriptor.m_ActivationFunc = 3;
@@ -1182,7 +1182,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerPeephole)
+TEST_CASE("CheckNamedLstmLayerPeephole")
 {
     const char* layerName = "LstmLayer";
     LstmDescriptor descriptor;
@@ -1270,7 +1270,7 @@
 }
 
 // TODO add one with projection
-BOOST_AUTO_TEST_CASE(CheckLstmLayerProjection)
+TEST_CASE("CheckLstmLayerProjection")
 {
     LstmDescriptor descriptor;
     descriptor.m_ActivationFunc = 3;
@@ -1356,7 +1356,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckNamedLstmLayerProjection)
+TEST_CASE("CheckNamedLstmLayerProjection")
 {
     const char* layerName = "LstmLayer";
     LstmDescriptor descriptor;
@@ -1443,7 +1443,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckQLstmLayerBasic)
+TEST_CASE("CheckQLstmLayerBasic")
 {
     QLstmDescriptor descriptor;
     descriptor.m_ProjectionClip = 0.5f;
@@ -1515,7 +1515,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckNamedQLstmLayerBasic)
+TEST_CASE("CheckNamedQLstmLayerBasic")
 {
     const char* layerName = "QLstmLayer";
     QLstmDescriptor descriptor;
@@ -1588,7 +1588,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckQLstmLayerCifgDisabled)
+TEST_CASE("CheckQLstmLayerCifgDisabled")
 {
     QLstmDescriptor descriptor;
     descriptor.m_ProjectionClip = 0.5f;
@@ -1683,7 +1683,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckQLstmLayerCifgDisabledPeepholeEnabled)
+TEST_CASE("CheckQLstmLayerCifgDisabledPeepholeEnabled")
 {
     QLstmDescriptor descriptor;
     descriptor.m_ProjectionClip = 0.5f;
@@ -1800,7 +1800,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckQLstmLayerCifgEnabledPeepholeEnabled)
+TEST_CASE("CheckQLstmLayerCifgEnabledPeepholeEnabled")
 {
     QLstmDescriptor descriptor;
     descriptor.m_ProjectionClip = 0.5f;
@@ -1890,7 +1890,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckQLstmLayerProjectionEnabled)
+TEST_CASE("CheckQLstmLayerProjectionEnabled")
 {
     QLstmDescriptor descriptor;
     descriptor.m_ProjectionClip = 0.5f;
@@ -1980,7 +1980,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckQLstmLayerCifgDisabledLayerNormEnabled)
+TEST_CASE("CheckQLstmLayerCifgDisabledLayerNormEnabled")
 {
     QLstmDescriptor descriptor;
     descriptor.m_ProjectionClip = 0.5f;
@@ -2104,7 +2104,7 @@
 }
 
 
-BOOST_AUTO_TEST_CASE(CheckQuantizedLstmLayer)
+TEST_CASE("CheckQuantizedLstmLayer")
 {
     std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
     std::vector<unsigned int> inputToInputWeightsDimensions = {1, 1, 3, 3};
@@ -2193,7 +2193,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckNamedQuantizedLstmLayer)
+TEST_CASE("CheckNamedQuantizedLstmLayer")
 {
     const char* layerName = "LstmLayer";
     std::vector<uint8_t> inputToInputWeightsData = {1, 2, 3, 4, 5, 6, 7, 8, 9};
@@ -2283,6 +2283,6 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
 
 } // namespace armnn
diff --git a/src/armnn/test/CreateWorkload.hpp b/src/armnn/test/CreateWorkload.hpp
index 12623e6..581c621 100644
--- a/src/armnn/test/CreateWorkload.hpp
+++ b/src/armnn/test/CreateWorkload.hpp
@@ -11,6 +11,7 @@
 #include <ResolveType.hpp>
 
 #include <armnnUtils/DataLayoutIndexed.hpp>
+#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/utility/PolymorphicDowncast.hpp>
 
@@ -18,7 +19,7 @@
 #include <backendsCommon/WorkloadData.hpp>
 #include <backendsCommon/WorkloadFactory.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <utility>
 
@@ -36,11 +37,11 @@
                                                const ModelOptions& modelOptions = {})
 {
     std::unique_ptr<IWorkload> workload = layer.CreateWorkload(factory);
-    BOOST_TEST(workload.get() == PolymorphicDowncast<Workload*>(workload.get()),
+    CHECK_MESSAGE(workload.get() == PolymorphicDowncast<Workload*>(workload.get()),
                "Cannot convert to derived class");
     std::string reasonIfUnsupported;
     layer.SetBackendId(factory.GetBackendId());
-    BOOST_TEST(factory.IsLayerSupported(layer, layer.GetDataType(), reasonIfUnsupported, modelOptions));
+    CHECK(factory.IsLayerSupported(layer, layer.GetDataType(), reasonIfUnsupported, modelOptions));
     return std::unique_ptr<Workload>(static_cast<Workload*>(workload.release()));
 }
 
@@ -90,11 +91,11 @@
     auto workload = MakeAndCheckWorkload<ActivationWorkload>(*layer, factory);
 
     ActivationQueueDescriptor queueDescriptor = workload->GetData();
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_A == 3.5f);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_B == -10.0f);
-    BOOST_TEST((queueDescriptor.m_Parameters.m_Function == ActivationFunction::Abs));
+    CHECK(queueDescriptor.m_Inputs.size() == 1);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
+    CHECK(queueDescriptor.m_Parameters.m_A == 3.5f);
+    CHECK(queueDescriptor.m_Parameters.m_B == -10.0f);
+    CHECK((queueDescriptor.m_Parameters.m_Function == ActivationFunction::Abs));
 
     // Returns so we can do extra, backend-specific tests.
     return workload;
@@ -126,8 +127,8 @@
     auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, factory);
 
     DescriptorType queueDescriptor = workload->GetData();
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 2);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+    CHECK(queueDescriptor.m_Inputs.size() == 2);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
 
     // Returns so we can do extra, backend-specific tests.
     return workload;
@@ -165,9 +166,9 @@
     std::shared_ptr<ActivationDescriptor>
         activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();
 
-    BOOST_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
-    BOOST_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
-    BOOST_ASSERT(
+    ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
+    ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
+    ARMNN_ASSERT(
         static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
     );
 
@@ -179,14 +180,14 @@
     const ActivationDescriptor* queueDescBlobPtr =
         queueDescriptor.template GetAdditionalInformation<ActivationDescriptor>();
     IgnoreUnused(queueDescBlobPtr);
-    BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
-    BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
-    BOOST_ASSERT(
+    ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
+    ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
+    ARMNN_ASSERT(
         static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
     );
 
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 2);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+    CHECK(queueDescriptor.m_Inputs.size() == 2);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
 
     return workload;
 }
@@ -223,9 +224,9 @@
     std::shared_ptr<ActivationDescriptor>
         activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();
 
-    BOOST_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
-    BOOST_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
-    BOOST_ASSERT(
+    ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
+    ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
+    ARMNN_ASSERT(
         static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
     );
 
@@ -233,14 +234,14 @@
     auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, factory);
 
     DescriptorType queueDescriptor = workload->GetData();
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 2);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+    CHECK(queueDescriptor.m_Inputs.size() == 2);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
     const ActivationDescriptor* queueDescBlobPtr =
         queueDescriptor.template GetAdditionalInformation<ActivationDescriptor>();
     IgnoreUnused(queueDescBlobPtr);
-    BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
-    BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
-    BOOST_ASSERT(
+    ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
+    ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
+    ARMNN_ASSERT(
         static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
     );
 
@@ -279,9 +280,9 @@
     std::shared_ptr<ActivationDescriptor>
         activationDescPtr = layer->template GetAdditionalInformation<ActivationDescriptor>();
 
-    BOOST_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
-    BOOST_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
-    BOOST_ASSERT(
+    ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
+    ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
+    ARMNN_ASSERT(
         static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
     );
 
@@ -292,11 +293,11 @@
     const ActivationDescriptor* queueDescBlobPtr =
         queueDescriptor.template GetAdditionalInformation<ActivationDescriptor>();
     IgnoreUnused(queueDescBlobPtr);
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 2);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
-    BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
-    BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
-    BOOST_ASSERT(
+    CHECK(queueDescriptor.m_Inputs.size() == 2);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
+    ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
+    ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
+    ARMNN_ASSERT(
         static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
     );
 
@@ -324,8 +325,8 @@
     auto workload = MakeAndCheckWorkload<WorkloadType>(*layer, factory);
     DescriptorType queueDescriptor = workload->GetData();
 
-    BOOST_TEST(queueDescriptor.m_Inputs.size()  == 1);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+    CHECK(queueDescriptor.m_Inputs.size()  == 1);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
 
     return workload;
 }
@@ -375,14 +376,14 @@
     // Makes the workload and checks it.
     auto workload = MakeAndCheckWorkload<BatchNormalizationWorkloadType>(*layer, factory);
     BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData();
-    BOOST_TEST(queueDescriptor.m_Parameters.m_Eps == 0.05f);
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
-    BOOST_TEST((queueDescriptor.m_Mean->GetTensorInfo() == TensorInfo({3}, DataType)));
-    BOOST_TEST((queueDescriptor.m_Variance->GetTensorInfo() == TensorInfo({3}, DataType)));
-    BOOST_TEST((queueDescriptor.m_Gamma->GetTensorInfo() == TensorInfo({3}, DataType)));
-    BOOST_TEST((queueDescriptor.m_Beta->GetTensorInfo() == TensorInfo({3}, DataType)));
-    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+    CHECK(queueDescriptor.m_Parameters.m_Eps == 0.05f);
+    CHECK(queueDescriptor.m_Inputs.size() == 1);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
+    CHECK((queueDescriptor.m_Mean->GetTensorInfo() == TensorInfo({3}, DataType)));
+    CHECK((queueDescriptor.m_Variance->GetTensorInfo() == TensorInfo({3}, DataType)));
+    CHECK((queueDescriptor.m_Gamma->GetTensorInfo() == TensorInfo({3}, DataType)));
+    CHECK((queueDescriptor.m_Beta->GetTensorInfo() == TensorInfo({3}, DataType)));
+    CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
 
     // Returns so we can do extra, backend-specific tests.
     return workload;
@@ -429,9 +430,9 @@
 
     // Check that the additional information can be queried from the layer
     std::shared_ptr<ActivationDescriptor> activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();
-    BOOST_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
-    BOOST_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
-    BOOST_ASSERT(
+    ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
+    ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
+    ARMNN_ASSERT(
         static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
     );
 
@@ -450,20 +451,20 @@
     BatchNormalizationQueueDescriptor queueDescriptor = workload->GetData();
     const ActivationDescriptor* queueDescBlobPtr = queueDescriptor.GetAdditionalInformation<ActivationDescriptor>();
     IgnoreUnused(queueDescBlobPtr);
-    BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
-    BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
-    BOOST_ASSERT(
+    ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
+    ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
+    ARMNN_ASSERT(
         static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
     );
 
-    BOOST_TEST(queueDescriptor.m_Parameters.m_Eps == 0.05f);
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
-    BOOST_TEST((queueDescriptor.m_Mean->GetTensorInfo() == TensorInfo({3}, DataType)));
-    BOOST_TEST((queueDescriptor.m_Variance->GetTensorInfo() == TensorInfo({3}, DataType)));
-    BOOST_TEST((queueDescriptor.m_Gamma->GetTensorInfo() == TensorInfo({3}, DataType)));
-    BOOST_TEST((queueDescriptor.m_Beta->GetTensorInfo() == TensorInfo({3}, DataType)));
-    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+    CHECK(queueDescriptor.m_Parameters.m_Eps == 0.05f);
+    CHECK(queueDescriptor.m_Inputs.size() == 1);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
+    CHECK((queueDescriptor.m_Mean->GetTensorInfo() == TensorInfo({3}, DataType)));
+    CHECK((queueDescriptor.m_Variance->GetTensorInfo() == TensorInfo({3}, DataType)));
+    CHECK((queueDescriptor.m_Gamma->GetTensorInfo() == TensorInfo({3}, DataType)));
+    CHECK((queueDescriptor.m_Beta->GetTensorInfo() == TensorInfo({3}, DataType)));
+    CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
 
     // Returns so we can do extra, backend-specific tests.
     return workload;
@@ -511,19 +512,19 @@
     auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, factory, modelOptions);
 
     Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
-    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 2);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 4);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 3);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 3);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 1);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 1);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled);
-    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+    CHECK(queueDescriptor.m_Parameters.m_StrideX == 2);
+    CHECK(queueDescriptor.m_Parameters.m_StrideY == 4);
+    CHECK(queueDescriptor.m_Parameters.m_PadLeft == 3);
+    CHECK(queueDescriptor.m_Parameters.m_PadRight == 3);
+    CHECK(queueDescriptor.m_Parameters.m_PadTop == 1);
+    CHECK(queueDescriptor.m_Parameters.m_PadBottom == 1);
+    CHECK(queueDescriptor.m_Parameters.m_BiasEnabled);
+    CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
 
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
-    BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType)));
-    BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo() ==
+    CHECK(queueDescriptor.m_Inputs.size() == 1);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
+    CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType)));
+    CHECK((queueDescriptor.m_Bias->GetTensorInfo() ==
         TensorInfo({2}, GetBiasDataType(DataType))));
 
     // Returns so we can do extra, backend-specific tests.
@@ -571,9 +572,9 @@
     // Check that the additional information can be queried from the layer
     std::shared_ptr<ActivationDescriptor> activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();
 
-    BOOST_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
-    BOOST_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
-    BOOST_ASSERT(
+    ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
+    ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
+    ARMNN_ASSERT(
         static_cast<ActivationFunction>(activationDescPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
     );
 
@@ -592,25 +593,25 @@
     Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
     const ActivationDescriptor* queueDescBlobPtr = queueDescriptor.GetAdditionalInformation<ActivationDescriptor>();
     IgnoreUnused(queueDescBlobPtr);
-    BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
-    BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
-    BOOST_ASSERT(
+    ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
+    ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
+    ARMNN_ASSERT(
         static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
     );
 
-    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 2);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 4);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 3);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 3);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 1);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 1);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled);
-    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
-    BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType)));
-    BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo() ==
+    CHECK(queueDescriptor.m_Parameters.m_StrideX == 2);
+    CHECK(queueDescriptor.m_Parameters.m_StrideY == 4);
+    CHECK(queueDescriptor.m_Parameters.m_PadLeft == 3);
+    CHECK(queueDescriptor.m_Parameters.m_PadRight == 3);
+    CHECK(queueDescriptor.m_Parameters.m_PadTop == 1);
+    CHECK(queueDescriptor.m_Parameters.m_PadBottom == 1);
+    CHECK(queueDescriptor.m_Parameters.m_BiasEnabled);
+    CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
+    CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType)));
+    CHECK((queueDescriptor.m_Bias->GetTensorInfo() ==
         TensorInfo({2}, GetBiasDataType(DataType))));
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
+    CHECK(queueDescriptor.m_Inputs.size() == 1);
 
     // Returns so we can do extra, backend-specific tests.
     return workload;
@@ -658,17 +659,17 @@
     auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, factory, modelOptions);
 
     Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
-    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 1);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 1);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 0);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 0);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 0);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 0);
-    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+    CHECK(queueDescriptor.m_Parameters.m_StrideX == 1);
+    CHECK(queueDescriptor.m_Parameters.m_StrideY == 1);
+    CHECK(queueDescriptor.m_Parameters.m_PadLeft == 0);
+    CHECK(queueDescriptor.m_Parameters.m_PadRight == 0);
+    CHECK(queueDescriptor.m_Parameters.m_PadTop == 0);
+    CHECK(queueDescriptor.m_Parameters.m_PadBottom == 0);
+    CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
 
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
-    BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType)));
+    CHECK(queueDescriptor.m_Inputs.size() == 1);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
+    CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo(weightShape, DataType)));
 
     // Returns so we can do extra, backend-specific tests.
     return workload;
@@ -760,17 +761,17 @@
     // make the workload and check it
     auto workload = MakeAndCheckWorkload<LstmWorkload>(*layer, factory);
     LstmQueueDescriptor queueDescriptor = workload->GetData();
-    BOOST_TEST(queueDescriptor.m_Parameters.m_ActivationFunc == 4);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_ClippingThresCell == 0.0f);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_ClippingThresProj == 0.0f);
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 3);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 4);
+    CHECK(queueDescriptor.m_Parameters.m_ActivationFunc == 4);
+    CHECK(queueDescriptor.m_Parameters.m_ClippingThresCell == 0.0f);
+    CHECK(queueDescriptor.m_Parameters.m_ClippingThresProj == 0.0f);
+    CHECK(queueDescriptor.m_Inputs.size() == 3);
+    CHECK(queueDescriptor.m_Outputs.size() == 4);
 
-    BOOST_TEST((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == TensorInfo({ numUnits, inputSize },
+    CHECK((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == TensorInfo({ numUnits, inputSize },
                                                                                      DataType::Float32)));
-    BOOST_TEST((queueDescriptor.m_OutputGateBias->GetTensorInfo() == TensorInfo({ numUnits },
+    CHECK((queueDescriptor.m_OutputGateBias->GetTensorInfo() == TensorInfo({ numUnits },
                                                                                      DataType::Float32)));
-    BOOST_TEST((queueDescriptor.m_CellBias->GetTensorInfo() == TensorInfo({ numUnits }, DataType::Float32)));
+    CHECK((queueDescriptor.m_CellBias->GetTensorInfo() == TensorInfo({ numUnits }, DataType::Float32)));
     return workload;
 }
 
@@ -891,24 +892,24 @@
     QuantizedLstmQueueDescriptor queueDescriptor = workload->GetData();
 
     // Validate input/output sizes
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 3);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 2);
+    CHECK(queueDescriptor.m_Inputs.size() == 3);
+    CHECK(queueDescriptor.m_Outputs.size() == 2);
 
     // Validate weight tensor info
-    BOOST_TEST((queueDescriptor.m_InputToInputWeights->GetTensorInfo() == inputWeightsInfo));
-    BOOST_TEST((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == inputWeightsInfo));
-    BOOST_TEST((queueDescriptor.m_InputToCellWeights->GetTensorInfo() == inputWeightsInfo));
-    BOOST_TEST((queueDescriptor.m_InputToOutputWeights->GetTensorInfo() == inputWeightsInfo));
+    CHECK((queueDescriptor.m_InputToInputWeights->GetTensorInfo() == inputWeightsInfo));
+    CHECK((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == inputWeightsInfo));
+    CHECK((queueDescriptor.m_InputToCellWeights->GetTensorInfo() == inputWeightsInfo));
+    CHECK((queueDescriptor.m_InputToOutputWeights->GetTensorInfo() == inputWeightsInfo));
 
-    BOOST_TEST((queueDescriptor.m_RecurrentToInputWeights->GetTensorInfo() == recurrentWeightsInfo));
-    BOOST_TEST((queueDescriptor.m_RecurrentToForgetWeights->GetTensorInfo() == recurrentWeightsInfo));
-    BOOST_TEST((queueDescriptor.m_RecurrentToCellWeights->GetTensorInfo() == recurrentWeightsInfo));
-    BOOST_TEST((queueDescriptor.m_RecurrentToOutputWeights->GetTensorInfo() == recurrentWeightsInfo));
+    CHECK((queueDescriptor.m_RecurrentToInputWeights->GetTensorInfo() == recurrentWeightsInfo));
+    CHECK((queueDescriptor.m_RecurrentToForgetWeights->GetTensorInfo() == recurrentWeightsInfo));
+    CHECK((queueDescriptor.m_RecurrentToCellWeights->GetTensorInfo() == recurrentWeightsInfo));
+    CHECK((queueDescriptor.m_RecurrentToOutputWeights->GetTensorInfo() == recurrentWeightsInfo));
 
-    BOOST_TEST((queueDescriptor.m_InputGateBias->GetTensorInfo() == biasInfo));
-    BOOST_TEST((queueDescriptor.m_ForgetGateBias->GetTensorInfo() == biasInfo));
-    BOOST_TEST((queueDescriptor.m_CellBias->GetTensorInfo() == biasInfo));
-    BOOST_TEST((queueDescriptor.m_OutputGateBias->GetTensorInfo() == biasInfo));
+    CHECK((queueDescriptor.m_InputGateBias->GetTensorInfo() == biasInfo));
+    CHECK((queueDescriptor.m_ForgetGateBias->GetTensorInfo() == biasInfo));
+    CHECK((queueDescriptor.m_CellBias->GetTensorInfo() == biasInfo));
+    CHECK((queueDescriptor.m_OutputGateBias->GetTensorInfo() == biasInfo));
 
     return workload;
 }
@@ -1054,22 +1055,22 @@
     // Create and check workload
     auto workload = MakeAndCheckWorkload<QLstmWorkload>(*layer, factory);
     QLstmQueueDescriptor queueDescriptor = workload->GetData();
-    BOOST_TEST(queueDescriptor.m_Parameters.m_CellClip == 0.0f);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_ProjectionClip == 0.0f);
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 3);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 3);
+    CHECK(queueDescriptor.m_Parameters.m_CellClip == 0.0f);
+    CHECK(queueDescriptor.m_Parameters.m_ProjectionClip == 0.0f);
+    CHECK(queueDescriptor.m_Inputs.size() == 3);
+    CHECK(queueDescriptor.m_Outputs.size() == 3);
 
-    BOOST_TEST((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == inputWeightsInfo));
-    BOOST_TEST((queueDescriptor.m_InputToCellWeights->GetTensorInfo() == inputWeightsInfo));
-    BOOST_TEST((queueDescriptor.m_InputToOutputWeights->GetTensorInfo() == inputWeightsInfo));
+    CHECK((queueDescriptor.m_InputToForgetWeights->GetTensorInfo() == inputWeightsInfo));
+    CHECK((queueDescriptor.m_InputToCellWeights->GetTensorInfo() == inputWeightsInfo));
+    CHECK((queueDescriptor.m_InputToOutputWeights->GetTensorInfo() == inputWeightsInfo));
 
-    BOOST_TEST((queueDescriptor.m_RecurrentToForgetWeights->GetTensorInfo() == recurrentWeightsInfo));
-    BOOST_TEST((queueDescriptor.m_RecurrentToCellWeights->GetTensorInfo() == recurrentWeightsInfo));
-    BOOST_TEST((queueDescriptor.m_RecurrentToOutputWeights->GetTensorInfo() == recurrentWeightsInfo));
+    CHECK((queueDescriptor.m_RecurrentToForgetWeights->GetTensorInfo() == recurrentWeightsInfo));
+    CHECK((queueDescriptor.m_RecurrentToCellWeights->GetTensorInfo() == recurrentWeightsInfo));
+    CHECK((queueDescriptor.m_RecurrentToOutputWeights->GetTensorInfo() == recurrentWeightsInfo));
 
-    BOOST_TEST((queueDescriptor.m_ForgetGateBias->GetTensorInfo() == biasInfo));
-    BOOST_TEST((queueDescriptor.m_CellBias->GetTensorInfo() == biasInfo));
-    BOOST_TEST((queueDescriptor.m_OutputGateBias->GetTensorInfo() == biasInfo));
+    CHECK((queueDescriptor.m_ForgetGateBias->GetTensorInfo() == biasInfo));
+    CHECK((queueDescriptor.m_CellBias->GetTensorInfo() == biasInfo));
+    CHECK((queueDescriptor.m_OutputGateBias->GetTensorInfo() == biasInfo));
 
     return workload;
 }
@@ -1112,19 +1113,19 @@
     auto workload = MakeAndCheckWorkload<Convolution2dWorkload>(*layer, factory);
 
     Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
-    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 1);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 1);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 1);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 1);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 1);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 1);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled == true);
+    CHECK(queueDescriptor.m_Parameters.m_StrideX == 1);
+    CHECK(queueDescriptor.m_Parameters.m_StrideY == 1);
+    CHECK(queueDescriptor.m_Parameters.m_PadLeft == 1);
+    CHECK(queueDescriptor.m_Parameters.m_PadRight == 1);
+    CHECK(queueDescriptor.m_Parameters.m_PadTop == 1);
+    CHECK(queueDescriptor.m_Parameters.m_PadBottom == 1);
+    CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == true);
 
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
-    BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({2, 3, 3, 3},
+    CHECK(queueDescriptor.m_Inputs.size() == 1);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
+    CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({2, 3, 3, 3},
         DataType, inputsQScale)));
-    BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo()
+    CHECK((queueDescriptor.m_Bias->GetTensorInfo()
                 == TensorInfo({2},  GetBiasDataType(DataType), inputsQScale)));
 
     // Returns so we can do extra, backend-specific tests.
@@ -1169,18 +1170,18 @@
     auto workload = MakeAndCheckWorkload<DepthwiseConvolution2dFloat32Workload>(*layer, factory);
 
     DepthwiseConvolution2dQueueDescriptor queueDescriptor = workload->GetData();
-    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 1);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 1);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 1);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 2);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 1);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 2);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled == false);
-    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+    CHECK(queueDescriptor.m_Parameters.m_StrideX == 1);
+    CHECK(queueDescriptor.m_Parameters.m_StrideY == 1);
+    CHECK(queueDescriptor.m_Parameters.m_PadLeft == 1);
+    CHECK(queueDescriptor.m_Parameters.m_PadRight == 2);
+    CHECK(queueDescriptor.m_Parameters.m_PadTop == 1);
+    CHECK(queueDescriptor.m_Parameters.m_PadBottom == 2);
+    CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == false);
+    CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
 
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
-    BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({1, 2, 4, 4}, DataType)));
+    CHECK(queueDescriptor.m_Inputs.size() == 1);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
+    CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({1, 2, 4, 4}, DataType)));
 
     // Returns so we can do extra, backend-specific tests.
     return workload;
@@ -1218,13 +1219,13 @@
     auto workload = MakeAndCheckWorkload<FullyConnectedWorkload>(*layer, factory);
 
     FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
-    BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled == true);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true);
+    CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == true);
+    CHECK(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true);
 
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
-    BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({7, 20}, DataType, inputsQScale)));
-    BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo() == TensorInfo({7}, GetBiasDataType(DataType), inputsQScale)));
+    CHECK(queueDescriptor.m_Inputs.size() == 1);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
+    CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({7, 20}, DataType, inputsQScale)));
+    CHECK((queueDescriptor.m_Bias->GetTensorInfo() == TensorInfo({7}, GetBiasDataType(DataType), inputsQScale)));
 
     // Returns so we can do extra, backend-specific tests.
     return workload;
@@ -1259,9 +1260,9 @@
 
     // Check that the additional information can be queried from the layer
     std::shared_ptr<ActivationDescriptor> activationDescPtr = layer->GetAdditionalInformation<ActivationDescriptor>();
-    BOOST_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
-    BOOST_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
-    BOOST_ASSERT(static_cast<ActivationFunction>(activationDescPtr->m_Function) ==
+    ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_A) == 10.0f);
+    ARMNN_ASSERT(static_cast<float>(activationDescPtr->m_B) == 5.0f);
+    ARMNN_ASSERT(static_cast<ActivationFunction>(activationDescPtr->m_Function) ==
         armnn::ActivationFunction::BoundedReLu);
 
     // Creates extra layers.
@@ -1281,18 +1282,18 @@
     const ActivationDescriptor* queueDescBlobPtr = queueDescriptor.GetAdditionalInformation<ActivationDescriptor>();
     IgnoreUnused(queueDescBlobPtr);
 
-    BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
-    BOOST_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
-    BOOST_ASSERT(
+    ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_A) == 10.0f);
+    ARMNN_ASSERT(static_cast<float>(queueDescBlobPtr->m_B) == 5.0f);
+    ARMNN_ASSERT(
         static_cast<ActivationFunction>(queueDescBlobPtr->m_Function) == armnn::ActivationFunction::BoundedReLu
     );
 
-    BOOST_TEST(queueDescriptor.m_Parameters.m_BiasEnabled == true);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true);
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
-    BOOST_TEST((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({7, 20}, DataType, inputsQScale)));
-    BOOST_TEST((queueDescriptor.m_Bias->GetTensorInfo() == TensorInfo({7}, GetBiasDataType(DataType), inputsQScale)));
+    CHECK(queueDescriptor.m_Parameters.m_BiasEnabled == true);
+    CHECK(queueDescriptor.m_Parameters.m_TransposeWeightMatrix == true);
+    CHECK(queueDescriptor.m_Inputs.size() == 1);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
+    CHECK((queueDescriptor.m_Weight->GetTensorInfo() == TensorInfo({7, 20}, DataType, inputsQScale)));
+    CHECK((queueDescriptor.m_Bias->GetTensorInfo() == TensorInfo({7}, GetBiasDataType(DataType), inputsQScale)));
 
     // Returns so we can do extra, backend-specific tests.
     return workload;
@@ -1336,16 +1337,16 @@
     auto workload = MakeAndCheckWorkload<NormalizationWorkload>(*layer, factory);
 
     NormalizationQueueDescriptor queueDescriptor = workload->GetData();
-    BOOST_TEST((queueDescriptor.m_Parameters.m_NormChannelType == NormalizationAlgorithmChannel::Across));
-    BOOST_TEST((queueDescriptor.m_Parameters.m_NormMethodType == NormalizationAlgorithmMethod::LocalBrightness));
-    BOOST_TEST(queueDescriptor.m_Parameters.m_NormSize == 3);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_Alpha == 0.5f);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_Beta == -1.0f);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_K == 0.2f);
-    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+    CHECK((queueDescriptor.m_Parameters.m_NormChannelType == NormalizationAlgorithmChannel::Across));
+    CHECK((queueDescriptor.m_Parameters.m_NormMethodType == NormalizationAlgorithmMethod::LocalBrightness));
+    CHECK(queueDescriptor.m_Parameters.m_NormSize == 3);
+    CHECK(queueDescriptor.m_Parameters.m_Alpha == 0.5f);
+    CHECK(queueDescriptor.m_Parameters.m_Beta == -1.0f);
+    CHECK(queueDescriptor.m_Parameters.m_K == 0.2f);
+    CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
 
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+    CHECK(queueDescriptor.m_Inputs.size() == 1);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
 
     // Returns so we can do extra, backend-specific tests.
     return workload;
@@ -1388,20 +1389,20 @@
     auto workload = MakeAndCheckWorkload<Pooling2dWorkload>(*layer, factory);
 
     Pooling2dQueueDescriptor queueDescriptor = workload->GetData();
-    BOOST_TEST((queueDescriptor.m_Parameters.m_PoolType == PoolingAlgorithm::Average));
-    BOOST_TEST((queueDescriptor.m_Parameters.m_OutputShapeRounding == OutputShapeRounding::Floor));
-    BOOST_TEST(queueDescriptor.m_Parameters.m_PoolWidth == 3);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_PoolHeight == 3);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideX == 2);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_StrideY == 3);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_PadLeft == 2);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_PadRight == 2);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_PadTop == 1);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_PadBottom == 1);
-    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+    CHECK((queueDescriptor.m_Parameters.m_PoolType == PoolingAlgorithm::Average));
+    CHECK((queueDescriptor.m_Parameters.m_OutputShapeRounding == OutputShapeRounding::Floor));
+    CHECK(queueDescriptor.m_Parameters.m_PoolWidth == 3);
+    CHECK(queueDescriptor.m_Parameters.m_PoolHeight == 3);
+    CHECK(queueDescriptor.m_Parameters.m_StrideX == 2);
+    CHECK(queueDescriptor.m_Parameters.m_StrideY == 3);
+    CHECK(queueDescriptor.m_Parameters.m_PadLeft == 2);
+    CHECK(queueDescriptor.m_Parameters.m_PadRight == 2);
+    CHECK(queueDescriptor.m_Parameters.m_PadTop == 1);
+    CHECK(queueDescriptor.m_Parameters.m_PadBottom == 1);
+    CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
 
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+    CHECK(queueDescriptor.m_Inputs.size() == 1);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
 
     // Return so we can do extra, backend-specific tests
     return workload;
@@ -1445,8 +1446,8 @@
     auto workload = MakeAndCheckWorkload<SoftmaxWorkload>(*layer, factory);
 
     SoftmaxQueueDescriptor queueDescriptor = workload->GetData();
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+    CHECK(queueDescriptor.m_Inputs.size() == 1);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
 
     // Return so we can do extra, backend-specific tests.
     return workload;
@@ -1494,19 +1495,19 @@
     auto workload = MakeAndCheckWorkload<SplitterWorkload>(*layer, factory);
 
     SplitterQueueDescriptor queueDescriptor = workload->GetData();
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 3);
-    BOOST_TEST(queueDescriptor.m_ViewOrigins.size() == 3);
+    CHECK(queueDescriptor.m_Inputs.size() == 1);
+    CHECK(queueDescriptor.m_Outputs.size() == 3);
+    CHECK(queueDescriptor.m_ViewOrigins.size() == 3);
 
-    BOOST_TEST(queueDescriptor.m_ViewOrigins[0].m_Origin[0] == 0);
-    BOOST_TEST(queueDescriptor.m_ViewOrigins[1].m_Origin[0] == 1);
-    BOOST_TEST(queueDescriptor.m_ViewOrigins[2].m_Origin[0] == 3);
-    BOOST_TEST(queueDescriptor.m_ViewOrigins[0].m_Origin[1] == 0);
-    BOOST_TEST(queueDescriptor.m_ViewOrigins[1].m_Origin[1] == 0);
-    BOOST_TEST(queueDescriptor.m_ViewOrigins[2].m_Origin[1] == 0);
-    BOOST_TEST(queueDescriptor.m_ViewOrigins[0].m_Origin[2] == 0);
-    BOOST_TEST(queueDescriptor.m_ViewOrigins[1].m_Origin[2] == 0);
-    BOOST_TEST(queueDescriptor.m_ViewOrigins[2].m_Origin[2] == 0);
+    CHECK(queueDescriptor.m_ViewOrigins[0].m_Origin[0] == 0);
+    CHECK(queueDescriptor.m_ViewOrigins[1].m_Origin[0] == 1);
+    CHECK(queueDescriptor.m_ViewOrigins[2].m_Origin[0] == 3);
+    CHECK(queueDescriptor.m_ViewOrigins[0].m_Origin[1] == 0);
+    CHECK(queueDescriptor.m_ViewOrigins[1].m_Origin[1] == 0);
+    CHECK(queueDescriptor.m_ViewOrigins[2].m_Origin[1] == 0);
+    CHECK(queueDescriptor.m_ViewOrigins[0].m_Origin[2] == 0);
+    CHECK(queueDescriptor.m_ViewOrigins[1].m_Origin[2] == 0);
+    CHECK(queueDescriptor.m_ViewOrigins[2].m_Origin[2] == 0);
 
     // Returns so we can do extra, backend-specific tests.
     return workload;
@@ -1536,8 +1537,9 @@
     splitterViews.SetViewOriginCoord(1, 2, 0);
     splitterViews.SetViewOriginCoord(1, 3, 0);
 
+    // create splitter layer
     Layer* const splitter = graph.AddLayer<SplitterLayer>(splitterViews, "splitter");
-    BOOST_TEST_CHECKPOINT("created splitter layer");
+    CHECK(splitter);
 
     armnn::OriginsDescriptor concatViews(2);
     concatViews.SetViewOriginCoord(0, 0, 0);
@@ -1550,28 +1552,31 @@
     concatViews.SetViewOriginCoord(1, 2, 0);
     concatViews.SetViewOriginCoord(1, 3, 0);
 
+    // create concat layer
     Layer* const concat = graph.AddLayer<ConcatLayer>(concatViews, "concat");
-    BOOST_TEST_CHECKPOINT("created concat layer");
+    CHECK(concat);
 
     Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
 
     // Adds connections.
+    // connect input to splitter
     Connect(input, splitter, inputTensorInfo, 0, 0);
-    BOOST_TEST_CHECKPOINT("connect input to splitter");
+    // connect splitter[0] to concat[1]
     Connect(splitter, concat, splitTensorInfo1, 0, 1); // The splitter & concat are connected up.
-    BOOST_TEST_CHECKPOINT("connect splitter[0] to concat[1]");
+    // connect splitter[1] to concat[0]
     Connect(splitter, concat, splitTensorInfo2, 1, 0); // So that the outputs are flipped round.
-    BOOST_TEST_CHECKPOINT("connect splitter[1] to concat[0]");
+    // connect concat to output
     Connect(concat, output, inputTensorInfo, 0, 0);
-    BOOST_TEST_CHECKPOINT("connect concat to output");
 
+    // create tensor handles
     CreateTensorHandles(graph, factory);
-    BOOST_TEST_CHECKPOINT("created tensor handles");
 
+    // create splitter workload
     auto workloadSplitter = MakeAndCheckWorkload<SplitterWorkload>(*splitter, factory);
-    BOOST_TEST_CHECKPOINT("created splitter workload");
+    CHECK(workloadSplitter);
+    // create concat workload
     auto workloadConcat = MakeAndCheckWorkload<ConcatWorkload>(*concat, factory);
-    BOOST_TEST_CHECKPOINT("created concat workload");
+    CHECK(workloadConcat);
 
     return {std::move(workloadSplitter), std::move(workloadConcat)};
 }
@@ -1691,9 +1696,9 @@
     auto workload = MakeAndCheckWorkload<ResizeWorkload>(*layer, factory);
 
     auto queueDescriptor = workload->GetData();
-    BOOST_CHECK(queueDescriptor.m_Inputs.size()  == 1);
-    BOOST_CHECK(queueDescriptor.m_Outputs.size() == 1);
-    BOOST_CHECK(queueDescriptor.m_Parameters.m_DataLayout == dataLayout);
+    CHECK(queueDescriptor.m_Inputs.size()  == 1);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
+    CHECK(queueDescriptor.m_Parameters.m_DataLayout == dataLayout);
 
     // Returns so we can do extra, backend-specific tests.
     return workload;
@@ -1722,8 +1727,8 @@
     auto workload = MakeAndCheckWorkload<BatchToSpaceNdWorkload>(*layer, factory);
 
     BatchToSpaceNdQueueDescriptor queueDescriptor = workload->GetData();
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+    CHECK(queueDescriptor.m_Inputs.size() == 1);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
 
     return workload;
 }
@@ -1756,8 +1761,8 @@
     auto workload = MakeAndCheckWorkload<LogSoftmaxWorkload>(*layer, factory);
 
     LogSoftmaxQueueDescriptor queueDescriptor = workload->GetData();
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+    CHECK(queueDescriptor.m_Inputs.size() == 1);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
 
     // Return so we can do extra, backend-specific tests.
     return workload;
@@ -1793,9 +1798,9 @@
     auto workload = MakeAndCheckWorkload<L2NormalizationWorkload>(*layer, factory);
 
     L2NormalizationQueueDescriptor queueDescriptor = workload->GetData();
-    BOOST_TEST((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+    CHECK((queueDescriptor.m_Parameters.m_DataLayout == dataLayout));
+    CHECK(queueDescriptor.m_Inputs.size() == 1);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
 
     // Returns so we can do extra, backend-specific tests.
     return workload;
@@ -1826,8 +1831,8 @@
     auto workload = MakeAndCheckWorkload<ReshapeWorkload>(*layer, factory);
 
     ReshapeQueueDescriptor queueDescriptor = workload->GetData();
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+    CHECK(queueDescriptor.m_Inputs.size() == 1);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
 
     // Returns so we can do extra, backend-specific tests.
     return workload;
@@ -1855,8 +1860,8 @@
     auto workload = MakeAndCheckWorkload<ConvertFp16ToFp32Float32Workload>(*layer, factory);
 
     ConvertFp16ToFp32QueueDescriptor queueDescriptor = workload->GetData();
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+    CHECK(queueDescriptor.m_Inputs.size() == 1);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
 
     // Returns so we can do extra, backend-specific tests.
     return workload;
@@ -1884,8 +1889,8 @@
     auto workload = MakeAndCheckWorkload<ConvertFp32ToFp16Float16Workload>(*layer, factory);
 
     ConvertFp32ToFp16QueueDescriptor queueDescriptor = workload->GetData();
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+    CHECK(queueDescriptor.m_Inputs.size() == 1);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
 
     // Returns so we can do extra, backend-specific tests.
     return workload;
@@ -1915,10 +1920,10 @@
     auto workload = MakeAndCheckWorkload<MeanWorkload>(*layer, factory);
 
     MeanQueueDescriptor queueDescriptor = workload->GetData();
-    BOOST_TEST(queueDescriptor.m_Parameters.m_Axis == descriptor.m_Axis);
-    BOOST_TEST(queueDescriptor.m_Parameters.m_KeepDims == descriptor.m_KeepDims);
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+    CHECK(queueDescriptor.m_Parameters.m_Axis == descriptor.m_Axis);
+    CHECK(queueDescriptor.m_Parameters.m_KeepDims == descriptor.m_KeepDims);
+    CHECK(queueDescriptor.m_Inputs.size() == 1);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
 
     // Returns so we can do extra, backend-specific tests.
     return workload;
@@ -1944,24 +1949,26 @@
                                                   inputShapes.end(),
                                                   concatAxis);
 
+    // create concat layer
     Layer* const concat = graph.AddLayer<ConcatLayer>(descriptor, "concat");
-    BOOST_TEST_CHECKPOINT("created concat layer");
+    CHECK(concat);
 
     Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
 
     // Adds connections.
+    // connect input0 to concat
     Connect(input0, concat, inputTensorInfo, 0, 0);
-    BOOST_TEST_CHECKPOINT("connect input0 to concat");
+    // connect input1 to concat
     Connect(input1, concat, inputTensorInfo, 0, 1);
-    BOOST_TEST_CHECKPOINT("connect input1 to concat");
+    // connect concat to output
     Connect(concat, output, outputTensorInfo, 0, 0);
-    BOOST_TEST_CHECKPOINT("connect concat to output");
 
+    // create tensor handles
     CreateTensorHandles(graph, factory);
-    BOOST_TEST_CHECKPOINT("created tensor handles");
 
+    // create concat workload
     auto workloadConcat = MakeAndCheckWorkload<ConcatWorkload>(*concat, factory);
-    BOOST_TEST_CHECKPOINT("created concat workload");
+    CHECK(workloadConcat);
 
     return workloadConcat;
 }
@@ -1979,7 +1986,7 @@
 
     // Add an input layer
     armnn::IConnectableLayer* const inputLayer = net->AddInputLayer(0, "input layer");
-    BOOST_TEST(inputLayer);
+    CHECK(inputLayer);
 
     // ArmNN weights tensor shape is OIHW (out channels, in channels, height, width) for NCHW
     // ArmNN weights tensor shape is OHWI (out channels, height, width, in channels) for NHWC
@@ -2035,11 +2042,11 @@
                                               convLayerName.c_str());
     }
 
-    BOOST_TEST(convLayer);
+    CHECK(convLayer);
 
     // Add an output layer
     armnn::IConnectableLayer* const outputLayer = net->AddOutputLayer(0, "output layer");
-    BOOST_TEST(outputLayer);
+    CHECK(outputLayer);
 
     // set the tensors in the network (NHWC format)
     TensorInfo inputTensorInfo(TensorShape({ 1, 16, 16, 16 }), dataType);
@@ -2070,7 +2077,7 @@
     armnn::OptimizerOptions optimizerOptions;
     armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec(),
                                                                optimizerOptions);
-    BOOST_CHECK(optimizedNet != nullptr);
+    CHECK(optimizedNet != nullptr);
 
     // Find the PreCompiled layer in the optimised graph
     armnn::Graph& optimisedGraph = GetGraphForTesting(optimizedNet.get());
@@ -2082,7 +2089,7 @@
             preCompiledLayer = layer;
         }
     }
-    BOOST_CHECK(preCompiledLayer != nullptr);
+    CHECK(preCompiledLayer != nullptr);
 
     // Create the TensorHandles.
     CreateTensorHandles(optimisedGraph, factory);
@@ -2091,8 +2098,8 @@
     auto workload = MakeAndCheckWorkload<PreCompiledWorkload>(*preCompiledLayer, factory);
 
     PreCompiledQueueDescriptor queueDescriptor = workload->GetData();
-    BOOST_TEST(queueDescriptor.m_Inputs.size()  == 1);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+    CHECK(queueDescriptor.m_Inputs.size()  == 1);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
 
     // Returns the workload so we can do extra, backend-specific tests.
     // NOTE: We need to return the optimised network as well, otherwise it gets
@@ -2107,21 +2114,23 @@
 {
     armnn::TensorInfo outputTensorInfo(outputShape, DataType);
 
+    // create constant layer
     auto constant = graph.AddLayer<ConstantLayer>("constant");
+    CHECK(constant);
     constant->m_LayerOutput = std::make_unique<ScopedTensorHandle>(outputTensorInfo);
-    BOOST_TEST_CHECKPOINT("created constant layer");
 
     Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
 
     // Adds connections.
+    // connect constant to output
     Connect(constant, output, outputTensorInfo, 0, 0);
-    BOOST_TEST_CHECKPOINT("connect constant to output");
 
+    // create tensor handles
     CreateTensorHandles(graph, factory);
-    BOOST_TEST_CHECKPOINT("created tensor handles");
 
+    // create Constant workload
     auto workloadConstant = MakeAndCheckWorkload<ConstantWorkload>(*constant, factory);
-    BOOST_TEST_CHECKPOINT("created Constant workload");
+    CHECK(workloadConstant);
 
     return workloadConstant;
 }
@@ -2136,15 +2145,15 @@
 {
     // Creates the PReLU layer
     Layer* const layer = graph.AddLayer<PreluLayer>("prelu");
-    BOOST_CHECK(layer != nullptr);
+    CHECK(layer != nullptr);
 
     // Creates extra layers
     Layer* const input  = graph.AddLayer<InputLayer> (0, "input");
     Layer* const alpha  = graph.AddLayer<InputLayer> (1, "alpha");
     Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-    BOOST_CHECK(input  != nullptr);
-    BOOST_CHECK(alpha  != nullptr);
-    BOOST_CHECK(output != nullptr);
+    CHECK(input  != nullptr);
+    CHECK(alpha  != nullptr);
+    CHECK(output != nullptr);
 
     // Connects up
     armnn::TensorInfo inputTensorInfo (inputShape,  dataType);
@@ -2159,8 +2168,8 @@
     auto workload = MakeAndCheckWorkload<PreluWorkload>(*layer, factory);
 
     PreluQueueDescriptor queueDescriptor = workload->GetData();
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 2);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+    CHECK(queueDescriptor.m_Inputs.size() == 2);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
 
     // Returns so we can do extra, backend-specific tests.
     return workload;
@@ -2191,8 +2200,8 @@
     auto workload = MakeAndCheckWorkload<SpaceToDepthWorkload>(*layer, factory);
 
     SpaceToDepthQueueDescriptor queueDescriptor = workload->GetData();
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == 1);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+    CHECK(queueDescriptor.m_Inputs.size() == 1);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
 
     return workload;
 }
@@ -2211,7 +2220,7 @@
     // Constructs the Stack layer.
     armnn::StackDescriptor descriptor(axis, numInputs, inputShape);
     Layer* const stackLayer = graph.AddLayer<StackLayer>(descriptor, "stack");
-    BOOST_CHECK(stackLayer != nullptr);
+    CHECK(stackLayer != nullptr);
 
     // Constructs layer inputs and output.
     std::vector<Layer*> inputs;
@@ -2221,10 +2230,10 @@
             static_cast<int>(i),
             ("input" + std::to_string(i)).c_str()
         ));
-        BOOST_CHECK(inputs[i] != nullptr);
+        CHECK(inputs[i] != nullptr);
     }
     Layer* const output = graph.AddLayer<OutputLayer>(0, "output");
-    BOOST_CHECK(output != nullptr);
+    CHECK(output != nullptr);
 
     // Adds connections.
     for (unsigned int i=0; i<numInputs; ++i)
@@ -2237,8 +2246,8 @@
 
     auto stackWorkload = MakeAndCheckWorkload<StackWorkload>(*stackLayer, factory);
     StackQueueDescriptor queueDescriptor = stackWorkload->GetData();
-    BOOST_TEST(queueDescriptor.m_Inputs.size() == numInputs);
-    BOOST_TEST(queueDescriptor.m_Outputs.size() == 1);
+    CHECK(queueDescriptor.m_Inputs.size() == numInputs);
+    CHECK(queueDescriptor.m_Outputs.size() == 1);
 
     return stackWorkload;
 }
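
The conversions above all follow the same mapping from Boost.Test to doctest. As a rough, standalone sketch of those idioms (illustrative only, not part of the patch; the suite, case, and variable names below are made up):

    // Standalone illustration of the doctest idioms used in this migration.
    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
    #include <doctest/doctest.h>

    #include <stdexcept>
    #include <vector>

    // TEST_SUITE("Name") { ... } replaces the BOOST_AUTO_TEST_SUITE /
    // BOOST_AUTO_TEST_SUITE_END pair, which is why each converted file now
    // ends with a plain closing brace.
    TEST_SUITE("MigrationIdioms")
    {

    TEST_CASE("BasicChecks")  // replaces BOOST_AUTO_TEST_CASE(BasicChecks)
    {
        std::vector<int> values{1, 2, 3};

        CHECK(values.size() == 3u);                        // was BOOST_TEST / BOOST_CHECK
        CHECK_EQ(values.front(), 1);                       // was BOOST_CHECK_EQUAL
        CHECK_NOTHROW(values.at(2));                       // was BOOST_CHECK_NO_THROW
        CHECK_THROWS_AS(values.at(9), std::out_of_range);  // was BOOST_CHECK_THROW
    }

    }
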
diff --git a/src/armnn/test/DebugCallbackTest.cpp b/src/armnn/test/DebugCallbackTest.cpp
index 2ae228b..48e2c15 100644
--- a/src/armnn/test/DebugCallbackTest.cpp
+++ b/src/armnn/test/DebugCallbackTest.cpp
@@ -9,10 +9,10 @@
 #include <armnn/Types.hpp>
 #include <Runtime.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(DebugCallback)
-
+TEST_SUITE("DebugCallback")
+{
 namespace
 {
 
@@ -39,7 +39,7 @@
     return net;
 }
 
-BOOST_AUTO_TEST_CASE(RuntimeRegisterDebugCallback)
+TEST_CASE("RuntimeRegisterDebugCallback")
 {
     INetworkPtr net = CreateSimpleNetwork();
 
@@ -52,7 +52,7 @@
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optimizerOptions);
 
     NetworkId netId;
-    BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
+    CHECK(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
 
     // Set up callback function
     int callCount = 0;
@@ -83,17 +83,17 @@
     runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
 
     // Check that the callback was called twice
-    BOOST_TEST(callCount == 2);
+    CHECK(callCount == 2);
 
     // Check that tensor handles passed to callback have correct shapes
     const std::vector<TensorShape> expectedShapes({TensorShape({1, 1, 1, 5}), TensorShape({1, 1, 1, 5})});
-    BOOST_TEST(tensorShapes == expectedShapes);
+    CHECK(tensorShapes == expectedShapes);
 
     // Check that slot indexes passed to callback are correct
     const std::vector<unsigned int> expectedSlotIndexes({0, 0});
-    BOOST_TEST(slotIndexes == expectedSlotIndexes);
+    CHECK(slotIndexes == expectedSlotIndexes);
 }
 
 } // anonymous namespace
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/EndToEndTest.cpp b/src/armnn/test/EndToEndTest.cpp
index 56ff454..705258e 100644
--- a/src/armnn/test/EndToEndTest.cpp
+++ b/src/armnn/test/EndToEndTest.cpp
@@ -8,13 +8,13 @@
 #include <armnn/INetwork.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <set>
 
-BOOST_AUTO_TEST_SUITE(EndToEnd)
-
-BOOST_AUTO_TEST_CASE(ErrorOnLoadNetwork)
+TEST_SUITE("EndToEnd")
+{
+TEST_CASE("ErrorOnLoadNetwork")
 {
     using namespace armnn;
 
@@ -47,13 +47,13 @@
     try
     {
         Optimize(*net, backends, runtime->GetDeviceSpec(), OptimizerOptions(), errMessages);
-        BOOST_FAIL("Should have thrown an exception.");
+        FAIL("Should have thrown an exception.");
     }
     catch (const InvalidArgumentException& e)
     {
         // Different exceptions are thrown on different backends
     }
-    BOOST_CHECK(errMessages.size() > 0);
+    CHECK(errMessages.size() > 0);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/ExecutionFrameTest.cpp b/src/armnn/test/ExecutionFrameTest.cpp
index c348021..59accb4 100644
--- a/src/armnn/test/ExecutionFrameTest.cpp
+++ b/src/armnn/test/ExecutionFrameTest.cpp
@@ -3,7 +3,7 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <ExecutionFrame.hpp>
 
@@ -14,7 +14,9 @@
 // B
 // |
 // C
-BOOST_AUTO_TEST_CASE(NextExecutionFrameTest)
+TEST_SUITE("NextExecutionFrameTestSuite")
+{
+TEST_CASE("NextExecutionFrameTest")
 {
     armnn::ExecutionFrame executionFrameA;
     armnn::ExecutionFrame executionFrameB;
@@ -28,11 +30,12 @@
     auto nextExecutionFrameB = executionFrameB.ExecuteWorkloads(&executionFrameA);
     auto nextExecutionFrameC = executionFrameC.ExecuteWorkloads(&executionFrameB);
 
-    BOOST_CHECK_EQUAL(nextExecutionFrameA, &executionFrameB);
-    BOOST_CHECK_EQUAL(nextExecutionFrameB, &executionFrameC);
+    CHECK_EQ(nextExecutionFrameA, &executionFrameB);
+    CHECK_EQ(nextExecutionFrameB, &executionFrameC);
 
-    BOOST_CHECK(!nextExecutionFrameC);
+    CHECK(!nextExecutionFrameC);
 
-    BOOST_CHECK_NE(nextExecutionFrameA, &executionFrameC);
+    CHECK_NE(nextExecutionFrameA, &executionFrameC);
+}
 }
 
diff --git a/src/armnn/test/FloatingPointConverterTest.cpp b/src/armnn/test/FloatingPointConverterTest.cpp
index d3474de..21a16a3 100644
--- a/src/armnn/test/FloatingPointConverterTest.cpp
+++ b/src/armnn/test/FloatingPointConverterTest.cpp
@@ -8,11 +8,13 @@
 #include <BFloat16.hpp>
 #include <Half.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <vector>
 
-BOOST_AUTO_TEST_SUITE(TestFPConversion)
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_CASE(TestConvertFp32ToFp16)
+TEST_SUITE("TestFPConversion")
+{
+TEST_CASE("TestConvertFp32ToFp16")
 {
     using namespace half_float::literal;
 
@@ -27,14 +29,14 @@
     {
         armnn::Half expected(floatArray[i]);
         armnn::Half actual = convertedBuffer[i];
-        BOOST_CHECK_EQUAL(expected, actual);
+        CHECK_EQ(expected, actual);
 
         float convertedHalf = actual;
-        BOOST_CHECK_CLOSE(floatArray[i], convertedHalf, 0.07);
+        CHECK_EQ(floatArray[i], doctest::Approx(convertedHalf).epsilon(0.07));
     }
 }
 
-BOOST_AUTO_TEST_CASE(TestConvertFp16ToFp32)
+TEST_CASE("TestConvertFp16ToFp32")
 {
     using namespace half_float::literal;
 
@@ -49,11 +51,11 @@
     {
         float expected(halfArray[i]);
         float actual = convertedBuffer[i];
-        BOOST_CHECK_EQUAL(expected, actual);
+        CHECK_EQ(expected, actual);
     }
 }
 
-BOOST_AUTO_TEST_CASE(TestConvertFloat32ToBFloat16)
+TEST_CASE("TestConvertFloat32ToBFloat16")
 {
     float floatArray[] = { 1.704735E38f,   // 0x7F004000 round down
                            0.0f,           // 0x00000000 round down
@@ -102,11 +104,11 @@
     for (size_t i = 0; i < numFloats; i++)
     {
         armnn::BFloat16 actual = convertedBuffer[i];
-        BOOST_CHECK_EQUAL(expectedResult[i], actual.Val());
+        CHECK_EQ(expectedResult[i], actual.Val());
     }
 }
 
-BOOST_AUTO_TEST_CASE(TestConvertBFloat16ToFloat32)
+TEST_CASE("TestConvertBFloat16ToFloat32")
 {
     uint16_t bf16Array[] = { 16256, 16320, 38699, 16384, 49156, 32639 };
     size_t numFloats = sizeof(bf16Array) / sizeof(bf16Array[0]);
@@ -118,8 +120,8 @@
     for (size_t i = 0; i < numFloats; i++)
     {
         float actual = convertedBuffer[i];
-        BOOST_CHECK_EQUAL(expectedResult[i], actual);
+        CHECK_EQ(expectedResult[i], actual);
     }
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
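
Note that BOOST_CHECK_CLOSE expresses its tolerance as a percentage, whereas doctest::Approx::epsilon() takes a relative fraction, so epsilon(0.07) permits a 7% relative difference. A minimal standalone sketch of the Approx semantics (illustrative values only, not part of the patch):

    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
    #include <doctest/doctest.h>

    TEST_CASE("ApproxTolerance")
    {
        const float expected = 100.0f;
        const float actual   = 100.05f;

        // epsilon(0.07) allows a 7% relative difference between the two values.
        CHECK_EQ(expected, doctest::Approx(actual).epsilon(0.07));

        // A closer equivalent of Boost's 0.07 (i.e. 0.07%) tolerance would be:
        CHECK_EQ(expected, doctest::Approx(actual).epsilon(0.0007));
    }
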
diff --git a/src/armnn/test/FlowControl.cpp b/src/armnn/test/FlowControl.cpp
index 0259d80..de53060 100644
--- a/src/armnn/test/FlowControl.cpp
+++ b/src/armnn/test/FlowControl.cpp
@@ -7,13 +7,13 @@
 #include <armnn/IRuntime.hpp>
 #include <armnn/INetwork.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <set>
 
-BOOST_AUTO_TEST_SUITE(FlowControl)
-
-BOOST_AUTO_TEST_CASE(ErrorOnLoadNetwork)
+TEST_SUITE("FlowControl")
+{
+TEST_CASE("ErrorOnLoadNetwork")
 {
     using namespace armnn;
 
@@ -56,13 +56,13 @@
     try
     {
         Optimize(*net, backends, runtime->GetDeviceSpec(), OptimizerOptions(), errMessages);
-        BOOST_FAIL("Should have thrown an exception.");
+        FAIL("Should have thrown an exception.");
     }
     catch (const InvalidArgumentException& e)
     {
         // Different exceptions are thrown on different backends
     }
-    BOOST_TEST(errMessages.size() > 0);
+    CHECK(errMessages.size() > 0);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/GraphTests.cpp b/src/armnn/test/GraphTests.cpp
index 69f96d4..0dc2619 100644
--- a/src/armnn/test/GraphTests.cpp
+++ b/src/armnn/test/GraphTests.cpp
@@ -17,29 +17,29 @@
 #include <backendsCommon/TensorHandle.hpp>
 #include <backendsCommon/TensorHandleFactoryRegistry.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(Graph)
-
-BOOST_AUTO_TEST_CASE(ClassGraph)
+TEST_SUITE("Graph")
+{
+TEST_CASE("ClassGraph")
 {
     armnn::Graph graph;
-    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
-    BOOST_TEST(GraphHasNamedLayer(graph, "layerA"));
+    CHECK_NOTHROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
+    CHECK(GraphHasNamedLayer(graph, "layerA"));
 }
 
-BOOST_AUTO_TEST_CASE(TopologicalSort)
+TEST_CASE("TopologicalSort")
 {
     armnn::Graph graph;
 
     armnn::ActivationDescriptor activationDefaults;
 
-    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
-    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
-    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::AdditionLayer>("layerC"));
-    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
-    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerD"));
-    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerE"));
+    CHECK_NOTHROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
+    CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
+    CHECK_NOTHROW(graph.AddLayer<armnn::AdditionLayer>("layerC"));
+    CHECK_NOTHROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
+    CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerD"));
+    CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerE"));
 
     armnn::Layer* const layerA = GetFirstLayerWithName(graph, "layerA");
     armnn::Layer* const layerB = GetFirstLayerWithName(graph, "layerB");
@@ -64,14 +64,14 @@
     layerC->GetOutputSlot(0).Connect(layerO->GetInputSlot(0));
 
     // check order is valid
-    BOOST_TEST(CheckOrder(graph, layerA, layerD));
-    BOOST_TEST(CheckOrder(graph, layerA, layerE));
-    BOOST_TEST(CheckOrder(graph, layerD, layerC));
-    BOOST_TEST(CheckOrder(graph, layerE, layerB));
-    BOOST_TEST(CheckOrder(graph, layerB, layerC));
+    CHECK(CheckOrder(graph, layerA, layerD));
+    CHECK(CheckOrder(graph, layerA, layerE));
+    CHECK(CheckOrder(graph, layerD, layerC));
+    CHECK(CheckOrder(graph, layerE, layerB));
+    CHECK(CheckOrder(graph, layerB, layerC));
 }
 
-BOOST_AUTO_TEST_CASE(InsertNewLayerBefore)
+TEST_CASE("InsertNewLayerBefore")
 {
     armnn::Graph graph;
     armnn::TensorInfo tensorInfo({ 1, 1, 1, 1 }, armnn::DataType::Float32);
@@ -79,11 +79,11 @@
     std::vector<armnn::Layer*> order;
 
     armnn::ActivationDescriptor activationDefaults;
-    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
-    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
-    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerC"));
-    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::AdditionLayer>("layerD"));
-    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
+    CHECK_NOTHROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
+    CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
+    CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerC"));
+    CHECK_NOTHROW(graph.AddLayer<armnn::AdditionLayer>("layerD"));
+    CHECK_NOTHROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
 
     armnn::Layer* const layerA = GetFirstLayerWithName(graph, "layerA");
     armnn::Layer* const layerB = GetFirstLayerWithName(graph, "layerB");
@@ -108,10 +108,10 @@
     layerD->GetOutputSlot(0).Connect(layerO->GetInputSlot(0));
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layerA, layerB));
-    BOOST_TEST(CheckOrder(graph, layerA, layerC));
-    BOOST_TEST(CheckOrder(graph, layerB, layerD));
-    BOOST_TEST(CheckOrder(graph, layerC, layerD));
+    CHECK(CheckOrder(graph, layerA, layerB));
+    CHECK(CheckOrder(graph, layerA, layerC));
+    CHECK(CheckOrder(graph, layerB, layerD));
+    CHECK(CheckOrder(graph, layerC, layerD));
 
     //    A
     //   / \'
@@ -120,18 +120,18 @@
     //    \ E
     //     \|
     //      D
-    BOOST_CHECK_NO_THROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerD->GetInputSlot(1),
+    CHECK_NOTHROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerD->GetInputSlot(1),
                                                                       activationDefaults,
                                                                       "layerE"));
 
     armnn::Layer* const layerE = GetFirstLayerWithName(graph, "layerE");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layerA, layerB));
-    BOOST_TEST(CheckOrder(graph, layerA, layerC));
-    BOOST_TEST(CheckOrder(graph, layerB, layerD));
-    BOOST_TEST(CheckOrder(graph, layerC, layerE));
-    BOOST_TEST(CheckOrder(graph, layerE, layerD));
+    CHECK(CheckOrder(graph, layerA, layerB));
+    CHECK(CheckOrder(graph, layerA, layerC));
+    CHECK(CheckOrder(graph, layerB, layerD));
+    CHECK(CheckOrder(graph, layerC, layerE));
+    CHECK(CheckOrder(graph, layerE, layerD));
 
     //      A
     //     /|
@@ -142,22 +142,22 @@
     //    \ E
     //     \|
     //      D
-    BOOST_CHECK_NO_THROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerC->GetInputSlot(0),
+    CHECK_NOTHROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerC->GetInputSlot(0),
                                                                       activationDefaults,
                                                                       "layerF"));
 
     armnn::Layer* const layerF = GetFirstLayerWithName(graph, "layerF");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layerA, layerB));
-    BOOST_TEST(CheckOrder(graph, layerA, layerF));
-    BOOST_TEST(CheckOrder(graph, layerF, layerC));
-    BOOST_TEST(CheckOrder(graph, layerB, layerD));
-    BOOST_TEST(CheckOrder(graph, layerC, layerE));
-    BOOST_TEST(CheckOrder(graph, layerE, layerD));
+    CHECK(CheckOrder(graph, layerA, layerB));
+    CHECK(CheckOrder(graph, layerA, layerF));
+    CHECK(CheckOrder(graph, layerF, layerC));
+    CHECK(CheckOrder(graph, layerB, layerD));
+    CHECK(CheckOrder(graph, layerC, layerE));
+    CHECK(CheckOrder(graph, layerE, layerD));
 }
 
-BOOST_AUTO_TEST_CASE(InsertNewLayerAfter)
+TEST_CASE("InsertNewLayerAfter")
 {
     armnn::Graph graph;
     armnn::TensorInfo tensorInfo({ 1, 1, 1, 1 }, armnn::DataType::Float32);
@@ -165,11 +165,11 @@
     std::vector<armnn::Layer*> order;
 
     armnn::ActivationDescriptor activationDefaults;
-    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
-    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
-    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerC"));
-    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::AdditionLayer>("layerD"));
-    BOOST_CHECK_NO_THROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
+    CHECK_NOTHROW(graph.AddLayer<armnn::InputLayer>(0, "layerA"));
+    CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerB"));
+    CHECK_NOTHROW(graph.AddLayer<armnn::ActivationLayer>(activationDefaults, "layerC"));
+    CHECK_NOTHROW(graph.AddLayer<armnn::AdditionLayer>("layerD"));
+    CHECK_NOTHROW(graph.AddLayer<armnn::OutputLayer>(0, "output"));
 
     armnn::Layer* const layerA = GetFirstLayerWithName(graph, "layerA");
     armnn::Layer* const layerB = GetFirstLayerWithName(graph, "layerB");
@@ -194,10 +194,10 @@
     layerD->GetOutputSlot(0).Connect(layerO->GetInputSlot(0));
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layerA, layerB));
-    BOOST_TEST(CheckOrder(graph, layerA, layerC));
-    BOOST_TEST(CheckOrder(graph, layerB, layerD));
-    BOOST_TEST(CheckOrder(graph, layerC, layerD));
+    CHECK(CheckOrder(graph, layerA, layerB));
+    CHECK(CheckOrder(graph, layerA, layerC));
+    CHECK(CheckOrder(graph, layerB, layerD));
+    CHECK(CheckOrder(graph, layerC, layerD));
 
     //    A
     //   / \'
@@ -206,18 +206,18 @@
     //    \ E
     //     \|
     //      D
-    BOOST_CHECK_NO_THROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerC->GetOutputSlot(),
+    CHECK_NOTHROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerC->GetOutputSlot(),
                                                                       activationDefaults,
                                                                       "layerE"));
 
     armnn::Layer* const layerE = GetFirstLayerWithName(graph, "layerE");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layerA, layerB));
-    BOOST_TEST(CheckOrder(graph, layerA, layerC));
-    BOOST_TEST(CheckOrder(graph, layerB, layerD));
-    BOOST_TEST(CheckOrder(graph, layerC, layerE));
-    BOOST_TEST(CheckOrder(graph, layerE, layerD));
+    CHECK(CheckOrder(graph, layerA, layerB));
+    CHECK(CheckOrder(graph, layerA, layerC));
+    CHECK(CheckOrder(graph, layerB, layerD));
+    CHECK(CheckOrder(graph, layerC, layerE));
+    CHECK(CheckOrder(graph, layerE, layerD));
 
 
     //    A
@@ -229,19 +229,19 @@
     //   \  E
     //    \ /
     //     D
-    BOOST_CHECK_NO_THROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerA->GetOutputSlot(),
+    CHECK_NOTHROW(graph.InsertNewLayer<armnn::ActivationLayer>(layerA->GetOutputSlot(),
                                                                       activationDefaults,
                                                                       "layerF"));
 
     armnn::Layer* const layerF = GetFirstLayerWithName(graph, "layerF");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layerA, layerF));
-    BOOST_TEST(CheckOrder(graph, layerF, layerB));
-    BOOST_TEST(CheckOrder(graph, layerF, layerC));
-    BOOST_TEST(CheckOrder(graph, layerB, layerD));
-    BOOST_TEST(CheckOrder(graph, layerC, layerE));
-    BOOST_TEST(CheckOrder(graph, layerE, layerD));
+    CHECK(CheckOrder(graph, layerA, layerF));
+    CHECK(CheckOrder(graph, layerF, layerB));
+    CHECK(CheckOrder(graph, layerF, layerC));
+    CHECK(CheckOrder(graph, layerB, layerD));
+    CHECK(CheckOrder(graph, layerC, layerE));
+    CHECK(CheckOrder(graph, layerE, layerD));
 }
 
 namespace
@@ -282,7 +282,7 @@
         std::sort(sortedNewEdges.begin(), sortedNewEdges.end());
 
         auto last = std::unique(sortedNewEdges.begin(), sortedNewEdges.end());
-        BOOST_CHECK_MESSAGE(last == sortedNewEdges.end(), "New graph contains duplicate edges!");
+        CHECK_MESSAGE(last == sortedNewEdges.end(), "New graph contains duplicate edges!");
     }
 
     // Each new edge must be tested.
@@ -308,13 +308,13 @@
             // Each vertex should correspond to a layer.
             const armnn::Layer* srcLayer = edge.first;
             const armnn::Layer* dstLayer = edge.second;
-            BOOST_TEST(srcLayer);
-            BOOST_TEST(dstLayer);
+            CHECK(srcLayer);
+            CHECK(dstLayer);
 
             // Both layers must have the same compute device.
             if (srcLayer && dstLayer)
             {
-                BOOST_TEST((srcLayer->GetBackendId() == dstLayer->GetBackendId()));
+                CHECK((srcLayer->GetBackendId() == dstLayer->GetBackendId()));
             }
 
             // Marks edge in original graph as observed (by deleting it).
@@ -329,9 +329,9 @@
 
             if (srcLayer == nullptr || dstLayer == nullptr)
             {
-                BOOST_ERROR("At least one of the two ends of a new edge (" << edge.first << ", " << edge.second << ") "
-                            "introduced after adding copy layers to a graph "
-                            "correspond to a layer not known to the graph");
+                FAIL("At least one of the two ends of a new edge (" << edge.first << ", " << edge.second
+                                    << ") introduced after adding copy layers to a graph "
+                                       "corresponds to a layer not known to the graph");
                 continue;
             }
 
@@ -341,14 +341,14 @@
 
             if (srcLayerInOrigGraph == dstLayerInOrigGraph)
             {
-                BOOST_ERROR("A new edge ("
-                            << edge.first->GetName()
-                            << ", "
-                            << edge.second->GetName()
-                            << ") introduced after adding copy "
-                               "layers to a graph is invalid. One of the ends should be present in the original "
-                               "graph and the other should not, but "
-                            << (srcLayerInOrigGraph ? "both are" : "none are"));
+                FAIL("A new edge ("
+                                << edge.first->GetName()
+                                << ", "
+                                << edge.second->GetName()
+                                << ") introduced after adding copy "
+                                   "layers to a graph is invalid. One of the ends should be present in the original "
+                                   "graph and the other should not, but "
+                                << (srcLayerInOrigGraph ? "both are" : "none are"));
                 continue;
             }
 
@@ -376,7 +376,7 @@
 
             if (adjEdges.empty())
             {
-                BOOST_ERROR("An edge connecting a layer and a copy layer exists, (" << edge.first << ", " <<
+                FAIL("An edge connecting a layer and a copy layer exists, (" << edge.first << ", " <<
                             edge.second << "),  but no other edges connecting the copy layer '" << copyLayer->GetName()
                             << "' to other layers could be found");
                 continue;
@@ -390,14 +390,14 @@
 
                 if (!adjLayer)
                 {
-                    BOOST_ERROR("An edge (" << adjEdge.first << ", " << adjEdge.second <<") is adjacent to an edge "
-                                "connecting a layer and a copy layer, (" << edge.first << ", " << edge.second << "), "
-                                "but the non-copy layer in the former does not correspond to a layer");
+                    FAIL("An edge (" << adjEdge.first << ", " << adjEdge.second <<") is adjacent to an "
+                                "edge connecting a layer and a copy layer, (" << edge.first << ", " << edge.second <<
+                                "), but the non-copy layer in the former does not correspond to a layer");
                     continue;
                 }
 
                 // Both layers must have different compute devices.
-                BOOST_TEST((nonCopyLayer->GetBackendId() != adjLayer->GetBackendId()));
+                CHECK((nonCopyLayer->GetBackendId() != adjLayer->GetBackendId()));
 
                 // There must exist an edge connecting both layers directly in the original graph.
                 {
@@ -420,8 +420,8 @@
                     }
                     else
                     {
-                        BOOST_ERROR("An edge (" << adjEdge.first << ", " << adjEdge.second << ") is adjacent to an "
-                            "edge connecting a layer and a copy layer, (" << edge.first << ", " << edge.second <<
+                        FAIL("An edge (" << adjEdge.first << ", " << adjEdge.second << ") is adjacent to "
+                            "an edge connecting a layer and a copy layer, (" << edge.first << ", " << edge.second <<
                             "), but there is no edge connecting the layers in the original graph");
                     }
                 }
@@ -429,7 +429,7 @@
         }
     }
 
-    BOOST_TEST(origEdges.empty(), "Not all of the edges in the original graph correspond to paths in the new graph");
+    CHECK_MESSAGE(origEdges.empty(), "Not all of the edges in the original graph correspond to paths in the new graph");
 }
 
 struct CopyLayersFixture
@@ -513,7 +513,7 @@
     };
 };
 
-BOOST_FIXTURE_TEST_CASE(AddCopyLayers, CopyLayersFixture)
+TEST_CASE_FIXTURE(CopyLayersFixture, "AddCopyLayers")
 {
     InitialiseTestGraph();
     const armnn::Graph origGraph(m_Graph);
@@ -522,7 +522,7 @@
     TestGraphAfterAddingCopyLayers(m_Graph, origGraph);
 }
 
-BOOST_FIXTURE_TEST_CASE(AddCopyLayersSeveralTimes, CopyLayersFixture)
+TEST_CASE_FIXTURE(CopyLayersFixture, "AddCopyLayersSeveralTimes")
 {
     InitialiseTestGraph();
     m_Graph.AddCompatibilityLayers(m_Backends, m_FactoryRegistry);
@@ -533,11 +533,11 @@
     {
         m_Graph.AddCompatibilityLayers(m_Backends, m_FactoryRegistry);
         const std::vector<Edge> otherEdges = GetEdgeList(m_Graph);
-        BOOST_TEST((edges == otherEdges));
+        CHECK((edges == otherEdges));
     }
 }
 
-BOOST_FIXTURE_TEST_CASE(CopyLayersAddedBetweenSameLayersHaveDifferentNames, CopyLayersFixture)
+TEST_CASE_FIXTURE(CopyLayersFixture, "CopyLayersAddedBetweenSameLayersHaveDifferentNames")
 {
     armnn::Graph graph;
 
@@ -567,13 +567,13 @@
     graph.AddCompatibilityLayers(m_Backends, m_FactoryRegistry);
 
     std::vector<Edge> edges = GetEdgeList(graph);
-    BOOST_CHECK(edges.size() == 6u);
+    CHECK(edges.size() == 6u);
     std::sort(edges.begin(), edges.end());
     auto last = std::unique(edges.begin(), edges.end());
-    BOOST_CHECK_MESSAGE(last == edges.end(), "Found duplicated edges after AddCompatibilityLayers()");
+    CHECK_MESSAGE(last == edges.end(), "Found duplicated edges after AddCompatibilityLayers()");
 }
 
-BOOST_AUTO_TEST_CASE(DuplicateLayerNames)
+TEST_CASE("DuplicateLayerNames")
 {
     armnn::Graph graph;
 
@@ -586,11 +586,11 @@
     inputLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
 
     auto it = graph.TopologicalSort().begin();
-    BOOST_TEST(((*it)->GetType() == armnn::LayerType::Input));
-    BOOST_TEST(((*std::next(it))->GetType() == armnn::LayerType::Output));
+    CHECK(((*it)->GetType() == armnn::LayerType::Input));
+    CHECK(((*std::next(it))->GetType() == armnn::LayerType::Output));
 }
 
-BOOST_AUTO_TEST_CASE(CheckGraphConstTensorSharing)
+TEST_CASE("CheckGraphConstTensorSharing")
 {
     armnn::Graph graph0;
     const float* sharedWeightPtr;
@@ -611,7 +611,7 @@
         // graph1 goes out of scope
     }
 
-    BOOST_TEST(*sharedWeightPtr == 1);
+    CHECK(*sharedWeightPtr == 1);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
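
For the fixture-based cases above, BOOST_FIXTURE_TEST_CASE(Name, Fixture) maps to TEST_CASE_FIXTURE(Fixture, "Name"); doctest constructs a fresh fixture for each case and exposes its members directly in the test body. A minimal standalone sketch (the fixture and names below are made up):

    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
    #include <doctest/doctest.h>

    #include <vector>

    // A fresh instance of the fixture is constructed for every test case that uses it.
    struct GraphLikeFixture
    {
        std::vector<int> m_Edges{1, 2, 3};
    };

    TEST_SUITE("FixtureIdioms")
    {

    TEST_CASE_FIXTURE(GraphLikeFixture, "MembersAreAccessibleDirectly")
    {
        m_Edges.push_back(4);
        CHECK(m_Edges.size() == 4u);

        // CHECK_MESSAGE pairs a condition with an explanatory message, replacing
        // BOOST_CHECK_MESSAGE and the message form of BOOST_TEST.
        CHECK_MESSAGE(!m_Edges.empty(), "fixture members should be visible in the test body");
    }

    }
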
diff --git a/src/armnn/test/InferOutputTests.cpp b/src/armnn/test/InferOutputTests.cpp
index 015ab67..81ad7b2 100644
--- a/src/armnn/test/InferOutputTests.cpp
+++ b/src/armnn/test/InferOutputTests.cpp
@@ -7,10 +7,8 @@
 
 #include <test/UnitTests.hpp>
 
-#include <boost/test/unit_test.hpp>
-
-BOOST_AUTO_TEST_SUITE(LayerValidateOutput)
-
+TEST_SUITE("LayerValidateOutput")
+{
 // ArgMinMax
 ARMNN_SIMPLE_TEST_CASE(ArgMinMaxInferOutputShape4d, ArgMinMaxInferOutputShape4dTest)
 ARMNN_SIMPLE_TEST_CASE(ArgMinMaxInferOutputShape3d, ArgMinMaxInferOutputShape3dTest)
@@ -52,4 +50,4 @@
 // QuantizedLstm
 ARMNN_SIMPLE_TEST_CASE(QuantizedLstmInferOutputShape, QuantizedLstmInferOutputShapeTest)
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/InferOutputTests.hpp b/src/armnn/test/InferOutputTests.hpp
index 0413682..b8276de 100644
--- a/src/armnn/test/InferOutputTests.hpp
+++ b/src/armnn/test/InferOutputTests.hpp
@@ -14,7 +14,7 @@
 #include <layers/PreluLayer.hpp>
 #include <layers/StackLayer.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 void ArgMinMaxInferOutputShapeImpl(const armnn::ArgMinMaxDescriptor       descriptor,
                                    const std::vector<armnn::TensorShape>& inputShapes,
@@ -37,11 +37,11 @@
     };
 
     std::vector<armnn::TensorShape> outputShapes;
-    BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+    CHECK_NOTHROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
 
     armnn::TensorShape expectedOutputShape( { 1, 3, 4 } );
-    BOOST_CHECK(outputShapes.size() == 1);
-    BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+    CHECK(outputShapes.size() == 1);
+    CHECK(outputShapes[0] == expectedOutputShape);
 }
 
 void ArgMinMaxInferOutputShape3dTest()
@@ -56,11 +56,11 @@
     };
 
     std::vector<armnn::TensorShape> outputShapes;
-    BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+    CHECK_NOTHROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
 
     armnn::TensorShape expectedOutputShape( { 3, 2 } );
-    BOOST_CHECK(outputShapes.size() == 1);
-    BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+    CHECK(outputShapes.size() == 1);
+    CHECK(outputShapes[0] == expectedOutputShape);
 }
 
 void ArgMinMaxInferOutputShape2dTest()
@@ -75,11 +75,11 @@
     };
 
     std::vector<armnn::TensorShape> outputShapes;
-    BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+    CHECK_NOTHROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
 
     armnn::TensorShape expectedOutputShape( { 3 } );
-    BOOST_CHECK(outputShapes.size() == 1);
-    BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+    CHECK(outputShapes.size() == 1);
+    CHECK(outputShapes[0] == expectedOutputShape);
 }
 
 void ArgMinMaxInferOutputShape1dTest()
@@ -94,11 +94,11 @@
     };
 
     std::vector<armnn::TensorShape> outputShapes;
-    BOOST_CHECK_NO_THROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+    CHECK_NOTHROW(ArgMinMaxInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
 
     armnn::TensorShape expectedOutputShape( { 1 } );
-    BOOST_CHECK(outputShapes.size() == 1);
-    BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+    CHECK(outputShapes.size() == 1);
+    CHECK(outputShapes[0] == expectedOutputShape);
 }
 
 void BatchToSpaceInferOutputShapeTest()
@@ -121,7 +121,7 @@
     const std::vector<unsigned int> expectedDimSizes = {2, 2, 4, 1};
     armnn::TensorShape expectedShape(4, expectedDimSizes.data());
 
-    BOOST_CHECK(expectedShape == batchToSpaceLayer->InferOutputShapes(shapes).at(0));
+    CHECK(expectedShape == batchToSpaceLayer->InferOutputShapes(shapes).at(0));
 }
 
 void SpaceToDepthInferOutputShapeTest()
@@ -143,7 +143,7 @@
     const std::vector<unsigned int> expectedDimSizes{ 1, 8, 4, 12 };
     armnn::TensorShape expectedShape(4, expectedDimSizes.data());
 
-    BOOST_CHECK(expectedShape == spaceToDepthLayer->InferOutputShapes(shapes).at(0));
+    CHECK(expectedShape == spaceToDepthLayer->InferOutputShapes(shapes).at(0));
 }
 
 void PreluInferOutputShapeImpl(const std::vector<armnn::TensorShape>& inputShapes,
@@ -168,10 +168,10 @@
     };
 
     std::vector<armnn::TensorShape> outputShapes;
-    BOOST_CHECK_NO_THROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
+    CHECK_NOTHROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
 
-    BOOST_CHECK(outputShapes.size() == 1);
-    BOOST_CHECK(outputShapes[0] == expectedOutputShapes[0]);
+    CHECK(outputShapes.size() == 1);
+    CHECK(outputShapes[0] == expectedOutputShapes[0]);
 }
 
 void PreluInferOutputShapeInputBiggerTest()
@@ -188,10 +188,10 @@
     };
 
     std::vector<armnn::TensorShape> outputShapes;
-    BOOST_CHECK_NO_THROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
+    CHECK_NOTHROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
 
-    BOOST_CHECK(outputShapes.size() == 1);
-    BOOST_CHECK(outputShapes[0] == expectedOutputShapes[0]);
+    CHECK(outputShapes.size() == 1);
+    CHECK(outputShapes[0] == expectedOutputShapes[0]);
 }
 
 void PreluInferOutputShapeAlphaBiggerTest()
@@ -208,10 +208,10 @@
     };
 
     std::vector<armnn::TensorShape> outputShapes;
-    BOOST_CHECK_NO_THROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
+    CHECK_NOTHROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
 
-    BOOST_CHECK(outputShapes.size() == 1);
-    BOOST_CHECK(outputShapes[0] == expectedOutputShapes[0]);
+    CHECK(outputShapes.size() == 1);
+    CHECK(outputShapes[0] == expectedOutputShapes[0]);
 }
 
 void PreluInferOutputShapeNoMatchTest()
@@ -228,10 +228,10 @@
     };
 
     std::vector<armnn::TensorShape> outputShapes;
-    BOOST_CHECK_NO_THROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
+    CHECK_NOTHROW(PreluInferOutputShapeImpl(inputShapes, outputShapes));
 
-    BOOST_CHECK(outputShapes.size() == 1);
-    BOOST_CHECK(outputShapes[0] != expectedOutputShapes[0]);
+    CHECK(outputShapes.size() == 1);
+    CHECK(outputShapes[0] != expectedOutputShapes[0]);
 }
 
 void CreatePreluLayerHelper(armnn::Graph& graph,
@@ -264,7 +264,7 @@
     CreatePreluLayerHelper(graph, { 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 });
 
     // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
-    BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+    CHECK_NOTHROW(graph.InferTensorInfos());
 }
 
 void PreluValidateTensorShapesFromInputsNoMatchTest()
@@ -275,7 +275,7 @@
     CreatePreluLayerHelper(graph, { 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 7, 3, 2 });
 
     // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
-    BOOST_CHECK_THROW(graph.InferTensorInfos(), armnn::LayerValidationException);
+    CHECK_THROWS_AS(graph.InferTensorInfos(), armnn::LayerValidationException);
 }
 
 void StackInferOutputShapeImpl(const armnn::StackDescriptor           descriptor,
@@ -307,14 +307,14 @@
     };
 
     std::vector<armnn::TensorShape> outputShapes;
-    BOOST_CHECK_NO_THROW(StackInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+    CHECK_NOTHROW(StackInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
 
     armnn::TensorShape expectedOutputShape
     (
         { 4, 3, 2 }
     );
-    BOOST_CHECK(outputShapes.size() == 1);
-    BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+    CHECK(outputShapes.size() == 1);
+    CHECK(outputShapes[0] == expectedOutputShape);
 }
 
 void StackInferOutputShapeFromInputsNoMatchTest()
@@ -338,14 +338,14 @@
 
     // Output shape is inferred from the descriptor, so should still be correct despite mismatching input shapes
     std::vector<armnn::TensorShape> outputShapes;
-    BOOST_CHECK_NO_THROW(StackInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
+    CHECK_NOTHROW(StackInferOutputShapeImpl(descriptor, inputShapes, outputShapes));
 
     armnn::TensorShape expectedOutputShape
     (
         { 4, 3, 2 }
     );
-    BOOST_CHECK(outputShapes.size() == 1);
-    BOOST_CHECK(outputShapes[0] == expectedOutputShape);
+    CHECK(outputShapes.size() == 1);
+    CHECK(outputShapes[0] == expectedOutputShape);
 }
 
 void CreateStackLayerHelper(armnn::Graph& graph,
@@ -402,7 +402,7 @@
     CreateStackLayerHelper(graph, descriptor, inputShapes, { 3, 2, 5 });
 
     // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
-    BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+    CHECK_NOTHROW(graph.InferTensorInfos());
 }
 
 void StackValidateTensorShapesFromInputsNoMatchTest()
@@ -428,7 +428,7 @@
     CreateStackLayerHelper(graph, descriptor, inputShapes, { 3, 2, 5 });
 
     // Graph::InferTensorInfos calls Layer::ValidateTensorShapesFromInputs
-    BOOST_CHECK_THROW(graph.InferTensorInfos(), armnn::LayerValidationException);
+    CHECK_THROWS_AS(graph.InferTensorInfos(), armnn::LayerValidationException);
 }
 
 void Convolution2dInferOutputShapeTest()
@@ -461,7 +461,7 @@
     const std::vector<unsigned int> expectedOutputSizes = {1, 1, 4, 4};
     armnn::TensorShape expectedOutputShape(4, expectedOutputSizes.data());
 
-    BOOST_CHECK(expectedOutputShape == convolution2dLayer->InferOutputShapes(shapes).at(0));
+    CHECK(expectedOutputShape == convolution2dLayer->InferOutputShapes(shapes).at(0));
 }
 
 void TransposeConvolution2dInferOutputShapeTest()
@@ -492,7 +492,7 @@
     const std::vector<unsigned int> expectedOutputSizes = {1, 1, 6, 6};
     armnn::TensorShape expectedOutputShape(4, expectedOutputSizes.data());
 
-    BOOST_CHECK(expectedOutputShape == transposeConvolution2dLayer->InferOutputShapes(shapes).at(0));
+    CHECK(expectedOutputShape == transposeConvolution2dLayer->InferOutputShapes(shapes).at(0));
 }
 
 void DepthwiseConvolution2dInferOutputShapeTest()
@@ -525,7 +525,7 @@
     const std::vector<unsigned int> expectedOutputSizes = {1, 2, 4, 4};
     armnn::TensorShape expectedOutputShape(4, expectedOutputSizes.data());
 
-    BOOST_CHECK(expectedOutputShape == depthwiseConvolution2dLayer->InferOutputShapes(shapes).at(0));
+    CHECK(expectedOutputShape == depthwiseConvolution2dLayer->InferOutputShapes(shapes).at(0));
 }
 
 // QLstm
@@ -577,12 +577,12 @@
     };
 
     std::vector<armnn::TensorShape> actualOutShapes;
-    BOOST_CHECK_NO_THROW(QLstmInferOutputShapeImpl(descriptor, inShapes, actualOutShapes));
+    CHECK_NOTHROW(QLstmInferOutputShapeImpl(descriptor, inShapes, actualOutShapes));
 
-    BOOST_CHECK(actualOutShapes.size() == 3);
-    BOOST_CHECK(expectedOutShapes[0] == actualOutShapes[0]);
-    BOOST_CHECK(expectedOutShapes[1] == actualOutShapes[1]);
-    BOOST_CHECK(expectedOutShapes[2] == actualOutShapes[2]);
+    CHECK(actualOutShapes.size() == 3);
+    CHECK(expectedOutShapes[0] == actualOutShapes[0]);
+    CHECK(expectedOutShapes[1] == actualOutShapes[1]);
+    CHECK(expectedOutShapes[2] == actualOutShapes[2]);
 }
 
 // QuantizedLstm
@@ -624,9 +624,9 @@
     };
 
     std::vector<armnn::TensorShape> actualOutShapes;
-    BOOST_CHECK_NO_THROW(QuantizedLstmInferOutputShapeImpl(inShapes, actualOutShapes));
+    CHECK_NOTHROW(QuantizedLstmInferOutputShapeImpl(inShapes, actualOutShapes));
 
-    BOOST_CHECK(actualOutShapes.size() == 2);
-    BOOST_CHECK(expectedOutShapes[0] == actualOutShapes[0]);
-    BOOST_CHECK(expectedOutShapes[1] == actualOutShapes[1]);
+    CHECK(actualOutShapes.size() == 2);
+    CHECK(expectedOutShapes[0] == actualOutShapes[0]);
+    CHECK(expectedOutShapes[1] == actualOutShapes[1]);
 }
diff --git a/src/armnn/test/InstrumentTests.cpp b/src/armnn/test/InstrumentTests.cpp
index e0d0d94..447a4c9 100644
--- a/src/armnn/test/InstrumentTests.cpp
+++ b/src/armnn/test/InstrumentTests.cpp
@@ -2,7 +2,7 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include "WallClockTimer.hpp"
 
@@ -11,13 +11,13 @@
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(Instruments)
-
-BOOST_AUTO_TEST_CASE(WallClockTimerInMicroseconds)
+TEST_SUITE("Instruments")
+{
+TEST_CASE("WallClockTimerInMicroseconds")
 {
     WallClockTimer wallClockTimer;
 
-    BOOST_CHECK_EQUAL(wallClockTimer.GetName(), "WallClockTimer");
+    CHECK(std::string(wallClockTimer.GetName()) == "WallClockTimer");
 
     // start the timer
     wallClockTimer.Start();
@@ -28,17 +28,17 @@
    // stop the timer
     wallClockTimer.Stop();
 
-    BOOST_CHECK_EQUAL(wallClockTimer.GetMeasurements().front().m_Name, WallClockTimer::WALL_CLOCK_TIME);
+    CHECK((wallClockTimer.GetMeasurements().front().m_Name == WallClockTimer::WALL_CLOCK_TIME));
 
     // check that WallClockTimer measurement should be >= 10 microseconds
-    BOOST_CHECK_GE(wallClockTimer.GetMeasurements().front().m_Value, std::chrono::microseconds(10).count());
+    CHECK_GE(wallClockTimer.GetMeasurements().front().m_Value, std::chrono::microseconds(10).count());
 }
 
-BOOST_AUTO_TEST_CASE(WallClockTimerInNanoseconds)
+TEST_CASE("WallClockTimerInNanoseconds")
 {
     WallClockTimer wallClockTimer;
 
-    BOOST_CHECK_EQUAL(wallClockTimer.GetName(), "WallClockTimer");
+    CHECK(std::string(wallClockTimer.GetName()) == "WallClockTimer");
 
     // start the timer
     wallClockTimer.Start();
@@ -49,14 +49,14 @@
     // stop the timer
     wallClockTimer.Stop();
 
-    BOOST_CHECK_EQUAL(wallClockTimer.GetMeasurements().front().m_Name, WallClockTimer::WALL_CLOCK_TIME);
+    CHECK((wallClockTimer.GetMeasurements().front().m_Name == WallClockTimer::WALL_CLOCK_TIME));
 
     // delta is 0.5 microseconds
     const auto delta =
         std::chrono::duration_cast<std::chrono::duration<double, std::micro>>(std::chrono::nanoseconds(500));
 
     // check that WallClockTimer measurement should be >= 0.5 microseconds
-    BOOST_CHECK_GE(wallClockTimer.GetMeasurements().front().m_Value, delta.count());
+    CHECK_GE(wallClockTimer.GetMeasurements().front().m_Value, delta.count());
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/ModelAccuracyCheckerTest.cpp b/src/armnn/test/ModelAccuracyCheckerTest.cpp
index 93dba7e..47f112e 100644
--- a/src/armnn/test/ModelAccuracyCheckerTest.cpp
+++ b/src/armnn/test/ModelAccuracyCheckerTest.cpp
@@ -4,7 +4,7 @@
 //
 #include "ModelAccuracyChecker.hpp"
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <iostream>
 #include <string>
@@ -50,12 +50,12 @@
 };
 }
 
-BOOST_AUTO_TEST_SUITE(ModelAccuracyCheckerTest)
-
+TEST_SUITE("ModelAccuracyCheckerTest")
+{
 using TContainer =
         mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;
 
-BOOST_FIXTURE_TEST_CASE(TestFloat32OutputTensorAccuracy, TestHelper)
+TEST_CASE_FIXTURE(TestHelper, "TestFloat32OutputTensorAccuracy")
 {
     ModelAccuracyChecker checker(GetValidationLabelSet(), GetModelOutputLabels());
 
@@ -70,7 +70,7 @@
 
     // Top 1 Accuracy
     float totalAccuracy = checker.GetAccuracy(1);
-    BOOST_CHECK(totalAccuracy == 100.0f);
+    CHECK(totalAccuracy == 100.0f);
 
     // Add image 2 and check accuracy
     std::vector<float> inferenceOutputVector2 = {0.10f, 0.0f, 0.0f, 0.0f, 0.05f, 0.70f, 0.0f, 0.0f, 0.0f, 0.15f};
@@ -83,11 +83,11 @@
 
     // Top 1 Accuracy
     totalAccuracy = checker.GetAccuracy(1);
-    BOOST_CHECK(totalAccuracy == 50.0f);
+    CHECK(totalAccuracy == 50.0f);
 
     // Top 2 Accuracy
     totalAccuracy = checker.GetAccuracy(2);
-    BOOST_CHECK(totalAccuracy == 100.0f);
+    CHECK(totalAccuracy == 100.0f);
 
     // Add image 3 and check accuracy
     std::vector<float> inferenceOutputVector3 = {0.0f, 0.10f, 0.0f, 0.0f, 0.05f, 0.70f, 0.0f, 0.0f, 0.0f, 0.15f};
@@ -100,15 +100,15 @@
 
     // Top 1 Accuracy
     totalAccuracy = checker.GetAccuracy(1);
-    BOOST_CHECK(totalAccuracy == 33.3333321f);
+    CHECK(totalAccuracy == 33.3333321f);
 
     // Top 2 Accuracy
     totalAccuracy = checker.GetAccuracy(2);
-    BOOST_CHECK(totalAccuracy == 66.6666641f);
+    CHECK(totalAccuracy == 66.6666641f);
 
     // Top 3 Accuracy
     totalAccuracy = checker.GetAccuracy(3);
-    BOOST_CHECK(totalAccuracy == 100.0f);
+    CHECK(totalAccuracy == 100.0f);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/NetworkTests.cpp b/src/armnn/test/NetworkTests.cpp
index 692d64e..d763a85 100644
--- a/src/armnn/test/NetworkTests.cpp
+++ b/src/armnn/test/NetworkTests.cpp
@@ -9,7 +9,7 @@
 
 #include <Network.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 namespace
 {
@@ -27,27 +27,27 @@
 
 }
 
-BOOST_AUTO_TEST_SUITE(Network)
-
-BOOST_AUTO_TEST_CASE(LayerGuids)
+TEST_SUITE("Network")
+{
+TEST_CASE("LayerGuids")
 {
     armnn::NetworkImpl net;
     armnn::LayerGuid inputId = net.AddInputLayer(0)->GetGuid();
     armnn::LayerGuid addId = net.AddAdditionLayer()->GetGuid();
     armnn::LayerGuid outputId = net.AddOutputLayer(0)->GetGuid();
 
-    BOOST_TEST(inputId != addId);
-    BOOST_TEST(addId != outputId);
-    BOOST_TEST(inputId != outputId);
+    CHECK(inputId != addId);
+    CHECK(addId != outputId);
+    CHECK(inputId != outputId);
 }
 
-BOOST_AUTO_TEST_CASE(NetworkBasic)
+TEST_CASE("NetworkBasic")
 {
     armnn::NetworkImpl net;
-    BOOST_TEST(net.PrintGraph() == armnn::Status::Success);
+    CHECK(net.PrintGraph() == armnn::Status::Success);
 }
 
-BOOST_AUTO_TEST_CASE(LayerNamesAreOptionalForINetwork)
+TEST_CASE("LayerNamesAreOptionalForINetwork")
 {
     armnn::INetworkPtr inet(armnn::INetwork::Create());
     inet->AddInputLayer(0);
@@ -56,7 +56,7 @@
     inet->AddOutputLayer(0);
 }
 
-BOOST_AUTO_TEST_CASE(LayerNamesAreOptionalForNetwork)
+TEST_CASE("LayerNamesAreOptionalForNetwork")
 {
     armnn::NetworkImpl net;
     net.AddInputLayer(0);
@@ -65,12 +65,12 @@
     net.AddOutputLayer(0);
 }
 
-BOOST_AUTO_TEST_CASE(NetworkModification)
+TEST_CASE("NetworkModification")
 {
     armnn::NetworkImpl net;
 
     armnn::IConnectableLayer* const inputLayer = net.AddInputLayer(0, "input layer");
-    BOOST_TEST(inputLayer);
+    CHECK(inputLayer);
 
     unsigned int dims[] = { 10,1,1,1 };
     std::vector<float> convWeightsData(10);
@@ -81,7 +81,7 @@
                                                                           weights,
                                                                           armnn::EmptyOptional(),
                                                                           "conv layer");
-    BOOST_TEST(convLayer);
+    CHECK(convLayer);
 
     inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
 
@@ -90,31 +90,31 @@
                                                                                      weights,
                                                                                      armnn::EmptyOptional(),
                                                                                      "fully connected");
-    BOOST_TEST(fullyConnectedLayer);
+    CHECK(fullyConnectedLayer);
 
     convLayer->GetOutputSlot(0).Connect(fullyConnectedLayer->GetInputSlot(0));
 
     armnn::Pooling2dDescriptor pooling2dDesc;
     armnn::IConnectableLayer* const poolingLayer = net.AddPooling2dLayer(pooling2dDesc, "pooling2d");
-    BOOST_TEST(poolingLayer);
+    CHECK(poolingLayer);
 
     fullyConnectedLayer->GetOutputSlot(0).Connect(poolingLayer->GetInputSlot(0));
 
     armnn::ActivationDescriptor activationDesc;
     armnn::IConnectableLayer* const activationLayer = net.AddActivationLayer(activationDesc, "activation");
-    BOOST_TEST(activationLayer);
+    CHECK(activationLayer);
 
     poolingLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
 
     armnn::NormalizationDescriptor normalizationDesc;
     armnn::IConnectableLayer* const normalizationLayer = net.AddNormalizationLayer(normalizationDesc, "normalization");
-    BOOST_TEST(normalizationLayer);
+    CHECK(normalizationLayer);
 
     activationLayer->GetOutputSlot(0).Connect(normalizationLayer->GetInputSlot(0));
 
     armnn::SoftmaxDescriptor softmaxDesc;
     armnn::IConnectableLayer* const softmaxLayer = net.AddSoftmaxLayer(softmaxDesc, "softmax");
-    BOOST_TEST(softmaxLayer);
+    CHECK(softmaxLayer);
 
     normalizationLayer->GetOutputSlot(0).Connect(softmaxLayer->GetInputSlot(0));
 
@@ -130,42 +130,42 @@
         invalidTensor,
         invalidTensor,
         "batch norm");
-    BOOST_TEST(batchNormalizationLayer);
+    CHECK(batchNormalizationLayer);
 
     softmaxLayer->GetOutputSlot(0).Connect(batchNormalizationLayer->GetInputSlot(0));
 
     armnn::IConnectableLayer* const additionLayer = net.AddAdditionLayer("addition");
-    BOOST_TEST(additionLayer);
+    CHECK(additionLayer);
 
     batchNormalizationLayer->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(0));
     batchNormalizationLayer->GetOutputSlot(0).Connect(additionLayer->GetInputSlot(1));
 
     armnn::IConnectableLayer* const multiplicationLayer = net.AddMultiplicationLayer("multiplication");
-    BOOST_TEST(multiplicationLayer);
+    CHECK(multiplicationLayer);
 
     additionLayer->GetOutputSlot(0).Connect(multiplicationLayer->GetInputSlot(0));
     additionLayer->GetOutputSlot(0).Connect(multiplicationLayer->GetInputSlot(1));
 
     armnn::IConnectableLayer* const outputLayer = net.AddOutputLayer(0, "output layer");
-    BOOST_TEST(outputLayer);
+    CHECK(outputLayer);
 
     multiplicationLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
 
     //Tests that all layers are present in the graph.
-    BOOST_TEST(net.GetGraph().GetNumLayers() == 11);
+    CHECK(net.GetGraph().GetNumLayers() == 11);
 
     //Tests that the vertices exist and have correct names.
-    BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "input layer"));
-    BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "conv layer"));
-    BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "fully connected"));
-    BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "pooling2d"));
-    BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "activation"));
-    BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "normalization"));
-    BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "softmax"));
-    BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "batch norm"));
-    BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "addition"));
-    BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "multiplication"));
-    BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "output layer"));
+    CHECK(GraphHasNamedLayer(net.GetGraph(), "input layer"));
+    CHECK(GraphHasNamedLayer(net.GetGraph(), "conv layer"));
+    CHECK(GraphHasNamedLayer(net.GetGraph(), "fully connected"));
+    CHECK(GraphHasNamedLayer(net.GetGraph(), "pooling2d"));
+    CHECK(GraphHasNamedLayer(net.GetGraph(), "activation"));
+    CHECK(GraphHasNamedLayer(net.GetGraph(), "normalization"));
+    CHECK(GraphHasNamedLayer(net.GetGraph(), "softmax"));
+    CHECK(GraphHasNamedLayer(net.GetGraph(), "batch norm"));
+    CHECK(GraphHasNamedLayer(net.GetGraph(), "addition"));
+    CHECK(GraphHasNamedLayer(net.GetGraph(), "multiplication"));
+    CHECK(GraphHasNamedLayer(net.GetGraph(), "output layer"));
 
     auto checkOneOutputToOneInputConnection = []
         (const armnn::IConnectableLayer* const srcLayer,
@@ -173,14 +173,14 @@
          int expectedSrcNumInputs = 1,
          int expectedDstNumOutputs = 1)
         {
-            BOOST_TEST(srcLayer->GetNumInputSlots() == expectedSrcNumInputs);
-            BOOST_TEST(srcLayer->GetNumOutputSlots() == 1);
-            BOOST_TEST(tgtLayer->GetNumInputSlots() == 1);
-            BOOST_TEST(tgtLayer->GetNumOutputSlots() == expectedDstNumOutputs);
+            CHECK(srcLayer->GetNumInputSlots() == expectedSrcNumInputs);
+            CHECK(srcLayer->GetNumOutputSlots() == 1);
+            CHECK(tgtLayer->GetNumInputSlots() == 1);
+            CHECK(tgtLayer->GetNumOutputSlots() == expectedDstNumOutputs);
 
-            BOOST_TEST(srcLayer->GetOutputSlot(0).GetNumConnections() == 1);
-            BOOST_TEST(srcLayer->GetOutputSlot(0).GetConnection(0) == &tgtLayer->GetInputSlot(0));
-            BOOST_TEST(&srcLayer->GetOutputSlot(0) == tgtLayer->GetInputSlot(0).GetConnection());
+            CHECK(srcLayer->GetOutputSlot(0).GetNumConnections() == 1);
+            CHECK(srcLayer->GetOutputSlot(0).GetConnection(0) == &tgtLayer->GetInputSlot(0));
+            CHECK(&srcLayer->GetOutputSlot(0) == tgtLayer->GetInputSlot(0).GetConnection());
         };
     auto checkOneOutputToTwoInputsConnections = []
         (const armnn::IConnectableLayer* const srcLayer,
@@ -188,29 +188,29 @@
          int expectedSrcNumInputs,
          int expectedDstNumOutputs = 1)
         {
-            BOOST_TEST(srcLayer->GetNumInputSlots() == expectedSrcNumInputs);
-            BOOST_TEST(srcLayer->GetNumOutputSlots() == 1);
-            BOOST_TEST(tgtLayer->GetNumInputSlots() == 2);
-            BOOST_TEST(tgtLayer->GetNumOutputSlots() == expectedDstNumOutputs);
+            CHECK(srcLayer->GetNumInputSlots() == expectedSrcNumInputs);
+            CHECK(srcLayer->GetNumOutputSlots() == 1);
+            CHECK(tgtLayer->GetNumInputSlots() == 2);
+            CHECK(tgtLayer->GetNumOutputSlots() == expectedDstNumOutputs);
 
-            BOOST_TEST(srcLayer->GetOutputSlot(0).GetNumConnections() == 2);
+            CHECK(srcLayer->GetOutputSlot(0).GetNumConnections() == 2);
             for (unsigned int i = 0; i < srcLayer->GetOutputSlot(0).GetNumConnections(); ++i)
             {
-                BOOST_TEST(srcLayer->GetOutputSlot(0).GetConnection(i) == &tgtLayer->GetInputSlot(i));
-                BOOST_TEST(&srcLayer->GetOutputSlot(0) == tgtLayer->GetInputSlot(i).GetConnection());
+                CHECK(srcLayer->GetOutputSlot(0).GetConnection(i) == &tgtLayer->GetInputSlot(i));
+                CHECK(&srcLayer->GetOutputSlot(0) == tgtLayer->GetInputSlot(i).GetConnection());
             }
         };
 
-    BOOST_TEST(AreAllLayerInputSlotsConnected(*convLayer));
-    BOOST_TEST(AreAllLayerInputSlotsConnected(*fullyConnectedLayer));
-    BOOST_TEST(AreAllLayerInputSlotsConnected(*poolingLayer));
-    BOOST_TEST(AreAllLayerInputSlotsConnected(*activationLayer));
-    BOOST_TEST(AreAllLayerInputSlotsConnected(*normalizationLayer));
-    BOOST_TEST(AreAllLayerInputSlotsConnected(*softmaxLayer));
-    BOOST_TEST(AreAllLayerInputSlotsConnected(*batchNormalizationLayer));
-    BOOST_TEST(AreAllLayerInputSlotsConnected(*additionLayer));
-    BOOST_TEST(AreAllLayerInputSlotsConnected(*multiplicationLayer));
-    BOOST_TEST(AreAllLayerInputSlotsConnected(*outputLayer));
+    CHECK(AreAllLayerInputSlotsConnected(*convLayer));
+    CHECK(AreAllLayerInputSlotsConnected(*fullyConnectedLayer));
+    CHECK(AreAllLayerInputSlotsConnected(*poolingLayer));
+    CHECK(AreAllLayerInputSlotsConnected(*activationLayer));
+    CHECK(AreAllLayerInputSlotsConnected(*normalizationLayer));
+    CHECK(AreAllLayerInputSlotsConnected(*softmaxLayer));
+    CHECK(AreAllLayerInputSlotsConnected(*batchNormalizationLayer));
+    CHECK(AreAllLayerInputSlotsConnected(*additionLayer));
+    CHECK(AreAllLayerInputSlotsConnected(*multiplicationLayer));
+    CHECK(AreAllLayerInputSlotsConnected(*outputLayer));
 
     // Checks connectivity.
     checkOneOutputToOneInputConnection(inputLayer, convLayer, 0);
@@ -225,32 +225,32 @@
     checkOneOutputToOneInputConnection(multiplicationLayer, outputLayer, 2, 0);
 }
 
-BOOST_AUTO_TEST_CASE(NetworkModification_SplitterConcat)
+TEST_CASE("NetworkModification_SplitterConcat")
 {
     armnn::NetworkImpl net;
 
     // Adds an input layer and an input tensor descriptor.
     armnn::IConnectableLayer* inputLayer = net.AddInputLayer(0, "input layer");
-    BOOST_TEST(inputLayer);
+    CHECK(inputLayer);
 
     // Adds a splitter layer.
     armnn::ViewsDescriptor splitterDesc(2,4);
 
     armnn::IConnectableLayer* splitterLayer = net.AddSplitterLayer(splitterDesc, "splitter layer");
-    BOOST_TEST(splitterLayer);
+    CHECK(splitterLayer);
 
     inputLayer->GetOutputSlot(0).Connect(splitterLayer->GetInputSlot(0));
 
     // Adds a softmax layer 1.
     armnn::SoftmaxDescriptor softmaxDescriptor;
     armnn::IConnectableLayer* softmaxLayer1 = net.AddSoftmaxLayer(softmaxDescriptor, "softmax_1");
-    BOOST_TEST(softmaxLayer1);
+    CHECK(softmaxLayer1);
 
     splitterLayer->GetOutputSlot(0).Connect(softmaxLayer1->GetInputSlot(0));
 
     // Adds a softmax layer 2.
     armnn::IConnectableLayer* softmaxLayer2 = net.AddSoftmaxLayer(softmaxDescriptor, "softmax_2");
-    BOOST_TEST(softmaxLayer2);
+    CHECK(softmaxLayer2);
 
     splitterLayer->GetOutputSlot(1).Connect(softmaxLayer2->GetInputSlot(0));
 
@@ -258,62 +258,62 @@
     armnn::OriginsDescriptor concatDesc(2, 4);
 
     armnn::IConnectableLayer* concatLayer = net.AddConcatLayer(concatDesc, "concat layer");
-    BOOST_TEST(concatLayer);
+    CHECK(concatLayer);
 
     softmaxLayer1->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(0));
     softmaxLayer2->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(1));
 
     // Adds an output layer.
     armnn::IConnectableLayer* outputLayer = net.AddOutputLayer(0, "output layer");
-    BOOST_TEST(outputLayer);
+    CHECK(outputLayer);
 
     concatLayer->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
 
-    BOOST_TEST(splitterLayer->GetNumOutputSlots() == 2);
-    BOOST_TEST(splitterLayer->GetOutputSlot(0).GetConnection(0) == &softmaxLayer1->GetInputSlot(0));
-    BOOST_TEST(&splitterLayer->GetOutputSlot(0) == softmaxLayer1->GetInputSlot(0).GetConnection());
-    BOOST_TEST(splitterLayer->GetOutputSlot(1).GetConnection(0) == &softmaxLayer2->GetInputSlot(0));
-    BOOST_TEST(&splitterLayer->GetOutputSlot(1) == softmaxLayer2->GetInputSlot(0).GetConnection());
+    CHECK(splitterLayer->GetNumOutputSlots() == 2);
+    CHECK(splitterLayer->GetOutputSlot(0).GetConnection(0) == &softmaxLayer1->GetInputSlot(0));
+    CHECK(&splitterLayer->GetOutputSlot(0) == softmaxLayer1->GetInputSlot(0).GetConnection());
+    CHECK(splitterLayer->GetOutputSlot(1).GetConnection(0) == &softmaxLayer2->GetInputSlot(0));
+    CHECK(&splitterLayer->GetOutputSlot(1) == softmaxLayer2->GetInputSlot(0).GetConnection());
 
-    BOOST_TEST(concatLayer->GetNumInputSlots() == 2);
-    BOOST_TEST(softmaxLayer1->GetOutputSlot(0).GetConnection(0) == &concatLayer->GetInputSlot(0));
-    BOOST_TEST(&softmaxLayer1->GetOutputSlot(0) == concatLayer->GetInputSlot(0).GetConnection());
-    BOOST_TEST(softmaxLayer2->GetOutputSlot(0).GetConnection(0) == &concatLayer->GetInputSlot(1));
-    BOOST_TEST(&softmaxLayer2->GetOutputSlot(0) == concatLayer->GetInputSlot(1).GetConnection());
+    CHECK(concatLayer->GetNumInputSlots() == 2);
+    CHECK(softmaxLayer1->GetOutputSlot(0).GetConnection(0) == &concatLayer->GetInputSlot(0));
+    CHECK(&softmaxLayer1->GetOutputSlot(0) == concatLayer->GetInputSlot(0).GetConnection());
+    CHECK(softmaxLayer2->GetOutputSlot(0).GetConnection(0) == &concatLayer->GetInputSlot(1));
+    CHECK(&softmaxLayer2->GetOutputSlot(0) == concatLayer->GetInputSlot(1).GetConnection());
 }
 
-BOOST_AUTO_TEST_CASE(NetworkModification_SplitterAddition)
+TEST_CASE("NetworkModification_SplitterAddition")
 {
     armnn::NetworkImpl net;
 
     // Adds an input layer and an input tensor descriptor.
     armnn::IConnectableLayer* layer = net.AddInputLayer(0, "input layer");
-    BOOST_TEST(layer);
+    CHECK(layer);
 
     // Adds a splitter layer.
     armnn::ViewsDescriptor splitterDesc(2,4);
 
     armnn::IConnectableLayer* const splitterLayer = net.AddSplitterLayer(splitterDesc, "splitter layer");
-    BOOST_TEST(splitterLayer);
+    CHECK(splitterLayer);
 
     layer->GetOutputSlot(0).Connect(splitterLayer->GetInputSlot(0));
 
     // Adds a softmax layer 1.
     armnn::SoftmaxDescriptor softmaxDescriptor;
     armnn::IConnectableLayer* const softmax1Layer = net.AddSoftmaxLayer(softmaxDescriptor, "softmax_1");
-    BOOST_TEST(softmax1Layer);
+    CHECK(softmax1Layer);
 
     splitterLayer->GetOutputSlot(0).Connect(softmax1Layer->GetInputSlot(0));
 
     // Adds a softmax layer 2.
     armnn::IConnectableLayer* const softmax2Layer = net.AddSoftmaxLayer(softmaxDescriptor, "softmax_2");
-    BOOST_TEST(softmax2Layer);
+    CHECK(softmax2Layer);
 
     splitterLayer->GetOutputSlot(1).Connect(softmax2Layer->GetInputSlot(0));
 
     // Adds addition layer.
     layer = net.AddAdditionLayer("add layer");
-    BOOST_TEST(layer);
+    CHECK(layer);
 
     softmax1Layer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
     softmax2Layer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
@@ -324,40 +324,40 @@
 
     prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
 
-    BOOST_TEST(layer);
+    CHECK(layer);
 }
 
-BOOST_AUTO_TEST_CASE(NetworkModification_SplitterMultiplication)
+TEST_CASE("NetworkModification_SplitterMultiplication")
 {
     armnn::NetworkImpl net;
 
     // Adds an input layer and an input tensor descriptor.
     armnn::IConnectableLayer* layer = net.AddInputLayer(0, "input layer");
-    BOOST_TEST(layer);
+    CHECK(layer);
 
     // Adds a splitter layer.
     armnn::ViewsDescriptor splitterDesc(2,4);
     armnn::IConnectableLayer* const splitterLayer = net.AddSplitterLayer(splitterDesc, "splitter layer");
-    BOOST_TEST(splitterLayer);
+    CHECK(splitterLayer);
 
     layer->GetOutputSlot(0).Connect(splitterLayer->GetInputSlot(0));
 
     // Adds a softmax layer 1.
     armnn::SoftmaxDescriptor softmaxDescriptor;
     armnn::IConnectableLayer* const softmax1Layer = net.AddSoftmaxLayer(softmaxDescriptor, "softmax_1");
-    BOOST_TEST(softmax1Layer);
+    CHECK(softmax1Layer);
 
     splitterLayer->GetOutputSlot(0).Connect(softmax1Layer->GetInputSlot(0));
 
     // Adds a softmax layer 2.
     armnn::IConnectableLayer* const softmax2Layer = net.AddSoftmaxLayer(softmaxDescriptor, "softmax_2");
-    BOOST_TEST(softmax2Layer);
+    CHECK(softmax2Layer);
 
     splitterLayer->GetOutputSlot(1).Connect(softmax2Layer->GetInputSlot(0));
 
     // Adds multiplication layer.
     layer = net.AddMultiplicationLayer("multiplication layer");
-    BOOST_TEST(layer);
+    CHECK(layer);
 
     softmax1Layer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
     softmax2Layer->GetOutputSlot(0).Connect(layer->GetInputSlot(1));
@@ -365,12 +365,12 @@
     // Adds an output layer.
     armnn::IConnectableLayer* prevLayer = layer;
     layer = net.AddOutputLayer(0, "output layer");
-    BOOST_TEST(layer);
+    CHECK(layer);
 
     prevLayer->GetOutputSlot(0).Connect(layer->GetInputSlot(0));
 }
 
-BOOST_AUTO_TEST_CASE(Network_AddQuantize)
+TEST_CASE("Network_AddQuantize")
 {
     struct Test : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
     {
@@ -378,20 +378,20 @@
         {
             m_Visited = true;
 
-            BOOST_TEST(layer);
+            CHECK(layer);
 
             std::string expectedName = std::string("quantize");
-            BOOST_TEST(std::string(layer->GetName()) == expectedName);
-            BOOST_TEST(std::string(name) == expectedName);
+            CHECK(std::string(layer->GetName()) == expectedName);
+            CHECK(std::string(name) == expectedName);
 
-            BOOST_TEST(layer->GetNumInputSlots() == 1);
-            BOOST_TEST(layer->GetNumOutputSlots() == 1);
+            CHECK(layer->GetNumInputSlots() == 1);
+            CHECK(layer->GetNumOutputSlots() == 1);
 
             const armnn::TensorInfo& infoIn = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
-            BOOST_TEST((infoIn.GetDataType() == armnn::DataType::Float32));
+            CHECK((infoIn.GetDataType() == armnn::DataType::Float32));
 
             const armnn::TensorInfo& infoOut = layer->GetOutputSlot(0).GetTensorInfo();
-            BOOST_TEST((infoOut.GetDataType() == armnn::DataType::QAsymmU8));
+            CHECK((infoOut.GetDataType() == armnn::DataType::QAsymmU8));
         }
 
         bool m_Visited = false;
@@ -416,11 +416,11 @@
     Test testQuantize;
     graph->Accept(testQuantize);
 
-    BOOST_TEST(testQuantize.m_Visited == true);
+    CHECK(testQuantize.m_Visited == true);
 
 }
 
-BOOST_AUTO_TEST_CASE(Network_AddMerge)
+TEST_CASE("Network_AddMerge")
 {
     struct Test : public armnn::LayerVisitorBase<armnn::VisitorNoThrowPolicy>
     {
@@ -428,23 +428,23 @@
         {
             m_Visited = true;
 
-            BOOST_TEST(layer);
+            CHECK(layer);
 
             std::string expectedName = std::string("merge");
-            BOOST_TEST(std::string(layer->GetName()) == expectedName);
-            BOOST_TEST(std::string(name) == expectedName);
+            CHECK(std::string(layer->GetName()) == expectedName);
+            CHECK(std::string(name) == expectedName);
 
-            BOOST_TEST(layer->GetNumInputSlots() == 2);
-            BOOST_TEST(layer->GetNumOutputSlots() == 1);
+            CHECK(layer->GetNumInputSlots() == 2);
+            CHECK(layer->GetNumOutputSlots() == 1);
 
             const armnn::TensorInfo& infoIn0 = layer->GetInputSlot(0).GetConnection()->GetTensorInfo();
-            BOOST_TEST((infoIn0.GetDataType() == armnn::DataType::Float32));
+            CHECK((infoIn0.GetDataType() == armnn::DataType::Float32));
 
             const armnn::TensorInfo& infoIn1 = layer->GetInputSlot(1).GetConnection()->GetTensorInfo();
-            BOOST_TEST((infoIn1.GetDataType() == armnn::DataType::Float32));
+            CHECK((infoIn1.GetDataType() == armnn::DataType::Float32));
 
             const armnn::TensorInfo& infoOut = layer->GetOutputSlot(0).GetTensorInfo();
-            BOOST_TEST((infoOut.GetDataType() == armnn::DataType::Float32));
+            CHECK((infoOut.GetDataType() == armnn::DataType::Float32));
         }
 
         bool m_Visited = false;
@@ -469,10 +469,10 @@
     Test testMerge;
     network->Accept(testMerge);
 
-    BOOST_TEST(testMerge.m_Visited == true);
+    CHECK(testMerge.m_Visited == true);
 }
 
-BOOST_AUTO_TEST_CASE(StandInLayerNetworkTest)
+TEST_CASE("StandInLayerNetworkTest")
 {
     // Create a simple network with a StandIn some place in it.
     armnn::NetworkImpl net;
@@ -498,14 +498,14 @@
     standIn->GetOutputSlot(0).Connect(output->GetInputSlot(0));
 
     // Check that the layer is there.
-    BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "StandIn"));
+    CHECK(GraphHasNamedLayer(net.GetGraph(), "StandIn"));
     // Check that it is connected as expected.
-    BOOST_TEST(input->GetOutputSlot(0).GetConnection(0) == &floor->GetInputSlot(0));
-    BOOST_TEST(floor->GetOutputSlot(0).GetConnection(0) == &standIn->GetInputSlot(0));
-    BOOST_TEST(standIn->GetOutputSlot(0).GetConnection(0) == &output->GetInputSlot(0));
+    CHECK(input->GetOutputSlot(0).GetConnection(0) == &floor->GetInputSlot(0));
+    CHECK(floor->GetOutputSlot(0).GetConnection(0) == &standIn->GetInputSlot(0));
+    CHECK(standIn->GetOutputSlot(0).GetConnection(0) == &output->GetInputSlot(0));
 }
 
-BOOST_AUTO_TEST_CASE(StandInLayerSingleInputMultipleOutputsNetworkTest)
+TEST_CASE("StandInLayerSingleInputMultipleOutputsNetworkTest")
 {
     // Another test with one input and two outputs on the StandIn layer.
     armnn::NetworkImpl net;
@@ -531,11 +531,11 @@
     standIn->GetOutputSlot(1).Connect(output1->GetInputSlot(0));
 
     // Check that the layer is there.
-    BOOST_TEST(GraphHasNamedLayer(net.GetGraph(), "StandIn"));
+    CHECK(GraphHasNamedLayer(net.GetGraph(), "StandIn"));
     // Check that it is connected as expected.
-    BOOST_TEST(input->GetOutputSlot(0).GetConnection(0) == &standIn->GetInputSlot(0));
-    BOOST_TEST(standIn->GetOutputSlot(0).GetConnection(0) == &output0->GetInputSlot(0));
-    BOOST_TEST(standIn->GetOutputSlot(1).GetConnection(0) == &output1->GetInputSlot(0));
+    CHECK(input->GetOutputSlot(0).GetConnection(0) == &standIn->GetInputSlot(0));
+    CHECK(standIn->GetOutputSlot(0).GetConnection(0) == &output0->GetInputSlot(0));
+    CHECK(standIn->GetOutputSlot(1).GetConnection(0) == &output1->GetInputSlot(0));
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/ObservableTest.cpp b/src/armnn/test/ObservableTest.cpp
index 3bb78a8..02d93f1 100644
--- a/src/armnn/test/ObservableTest.cpp
+++ b/src/armnn/test/ObservableTest.cpp
@@ -3,14 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include "Graph.hpp"
 #include "Observable.hpp"
 
-BOOST_AUTO_TEST_SUITE(Observable)
-
-BOOST_AUTO_TEST_CASE(AddedLayerObservableTest)
+TEST_SUITE("Observable")
+{
+TEST_CASE("AddedLayerObservableTest")
 {
     armnn::Graph graph;
 
@@ -24,11 +24,11 @@
     // Check the observable has observed the changes
     std::list<armnn::Layer*> testLayers({ output, input });
 
-    BOOST_CHECK_EQUAL_COLLECTIONS(layerObservable.begin(), layerObservable.end(),
-                                  testLayers.begin(), testLayers.end());
+    CHECK(std::equal(layerObservable.begin(), layerObservable.end(),
+                     testLayers.begin(), testLayers.end()));
 }
 
-BOOST_AUTO_TEST_CASE(ClearAddedLayerObservableTest)
+TEST_CASE("ClearAddedLayerObservableTest")
 {
     armnn::Graph graph;
 
@@ -44,11 +44,11 @@
     // Check the observable has observed the changes
     std::list<armnn::Layer*> emptyList({});
 
-    BOOST_CHECK_EQUAL_COLLECTIONS(addedLayerObservable.begin(), addedLayerObservable.end(),
-                                  emptyList.begin(), emptyList.end());
+    CHECK(std::equal(addedLayerObservable.begin(), addedLayerObservable.end(),
+                     emptyList.begin(), emptyList.end()));
 }
 
-BOOST_AUTO_TEST_CASE(ErasedLayerNamesObservableTest)
+TEST_CASE("ErasedLayerNamesObservableTest")
 {
     armnn::Graph graph;
 
@@ -64,11 +64,11 @@
     // Check the observable has observed the changes
     std::list<std::string> testList({"output"});
 
-    BOOST_CHECK_EQUAL_COLLECTIONS(erasedLayerNamesObservable.begin(), erasedLayerNamesObservable.end(),
-                                  testList.begin(), testList.end());
+    CHECK(std::equal(erasedLayerNamesObservable.begin(), erasedLayerNamesObservable.end(),
+                     testList.begin(), testList.end()));
 }
 
-BOOST_AUTO_TEST_CASE(ClearErasedLayerNamesObservableTest)
+TEST_CASE("ClearErasedLayerNamesObservableTest")
 {
     armnn::Graph graph;
 
@@ -86,9 +86,9 @@
     // Check the observable has observed the changes
     std::list<std::string> emptyList({});
 
-    BOOST_CHECK_EQUAL_COLLECTIONS(erasedLayerNamesObservable.begin(), erasedLayerNamesObservable.end(),
-                                  emptyList.begin(), emptyList.end());
+    CHECK(std::equal(erasedLayerNamesObservable.begin(), erasedLayerNamesObservable.end(),
+                     emptyList.begin(), emptyList.end()));
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
 
diff --git a/src/armnn/test/OptimizerTests.cpp b/src/armnn/test/OptimizerTests.cpp
index 7fe69a9..e68546c 100644
--- a/src/armnn/test/OptimizerTests.cpp
+++ b/src/armnn/test/OptimizerTests.cpp
@@ -15,6 +15,7 @@
 #include <armnn/INetwork.hpp>
 #include <armnn/LayerVisitorBase.hpp>
 
+#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/PolymorphicDowncast.hpp>
 #include <armnnUtils/FloatingPointConverter.hpp>
 
@@ -22,7 +23,7 @@
 #include <backendsCommon/LayerSupportBase.hpp>
 #include <backendsCommon/TensorHandle.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 using namespace armnn;
 
@@ -140,10 +141,11 @@
 
 }    // namespace
 
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
 using namespace armnn::optimizations;
 
-BOOST_AUTO_TEST_CASE(LSTMValidateTensorShapesFromInputsCIFGDisabledTest)
+TEST_CASE("LSTMValidateTensorShapesFromInputsCIFGDisabledTest")
 {
     Graph graph;
 
@@ -151,10 +153,10 @@
     CreateLSTMLayerHelper(graph, false);
 
     //This function used to call ValidateShapesFromInputs();
-    BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+    CHECK_NOTHROW(graph.InferTensorInfos());
 }
 
-BOOST_AUTO_TEST_CASE(LSTMValidateTensorShapesFromInputsCIFGEnabledTest)
+TEST_CASE("LSTMValidateTensorShapesFromInputsCIFGEnabledTest")
 {
     Graph graph;
 
@@ -162,10 +164,10 @@
     CreateLSTMLayerHelper(graph, true);
 
     //This function used to call ValidateShapesFromInputs();
-    BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+    CHECK_NOTHROW(graph.InferTensorInfos());
 }
 
-BOOST_AUTO_TEST_CASE(InsertConvertersTest)
+TEST_CASE("InsertConvertersTest")
 {
     const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float16);
 
@@ -191,7 +193,7 @@
         ->GetOutputHandler().SetTensorInfo(info);
 
     // Check graph layer sequence before inserting convert layers
-    BOOST_TEST(CheckSequence(graph.cbegin(),
+    CHECK(CheckSequence(graph.cbegin(),
                              graph.cend(),
                              &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::InputLayer>,
@@ -241,7 +243,7 @@
     }
 
     // Check sequence of layers after inserting convert layers
-    BOOST_TEST(CheckSequence(graph.cbegin(),
+    CHECK(CheckSequence(graph.cbegin(),
                              graph.cend(),
                              &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::InputLayer>,
@@ -284,7 +286,7 @@
     layer->GetOutputSlot().Connect(output->GetInputSlot(0));
 }
 
-BOOST_AUTO_TEST_CASE(Conv2dValidateTensorShapesFromInputs)
+TEST_CASE("Conv2dValidateTensorShapesFromInputs")
 {
     Graph graph;
     const unsigned int inputShape[] = { 1, 3, 8, 16 };
@@ -292,10 +294,10 @@
     const unsigned int outputShape[] = { 1, 2, 4, 14 };
     CreateConvolution2dGraph(graph, inputShape, weightsShape, outputShape);
 
-    BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+    CHECK_NOTHROW(graph.InferTensorInfos());
 }
 
-BOOST_AUTO_TEST_CASE(Conv2dValidateTensorShapesFromInputsNhwc)
+TEST_CASE("Conv2dValidateTensorShapesFromInputsNhwc")
 {
     Graph graph;
     const unsigned int inputShape[] = { 1, 8, 16, 3 };
@@ -303,7 +305,7 @@
     const unsigned int outputShape[] = { 1, 4, 14, 2 };
     CreateConvolution2dGraph(graph, inputShape, weightsShape, outputShape, DataLayout::NHWC);
 
-    BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+    CHECK_NOTHROW(graph.InferTensorInfos());
 }
 
 void CreateDepthwiseConvolution2dGraph(Graph &graph, const unsigned int* inputShape,
@@ -334,7 +336,7 @@
     layer->GetOutputSlot().Connect(output->GetInputSlot(0));
 }
 
-BOOST_AUTO_TEST_CASE(DepthwiseConv2dValidateTensorShapesFromInputs)
+TEST_CASE("DepthwiseConv2dValidateTensorShapesFromInputs")
 {
     Graph graph;
     const unsigned int inputShape[] = { 1, 2, 3, 3 };
@@ -342,10 +344,10 @@
     const unsigned int outputShape[] = { 1, 2, 1, 1 };
     CreateDepthwiseConvolution2dGraph(graph, inputShape, weightsShape, outputShape);
 
-    BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+    CHECK_NOTHROW(graph.InferTensorInfos());
 }
 
-BOOST_AUTO_TEST_CASE(DepthwiseConv2dValidateTensorShapesFromInputsNhwc)
+TEST_CASE("DepthwiseConv2dValidateTensorShapesFromInputsNhwc")
 {
     Graph graph;
     const unsigned int inputShape[] = { 1, 3, 3, 2 };
@@ -353,7 +355,7 @@
     const unsigned int outputShape[] = { 1, 1, 1, 2 };
     CreateDepthwiseConvolution2dGraph(graph, inputShape, weightsShape, outputShape, DataLayout::NHWC);
 
-    BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+    CHECK_NOTHROW(graph.InferTensorInfos());
 }
 
 void CreatePooling2dGraph(Graph& graph, const unsigned int* inputShape,  const unsigned int* outputShape,
@@ -384,24 +386,24 @@
     layer->GetOutputSlot().Connect(output->GetInputSlot(0));
 }
 
-BOOST_AUTO_TEST_CASE(Pooling2dValidateTensorShapesFromInputs)
+TEST_CASE("Pooling2dValidateTensorShapesFromInputs")
 {
     Graph graph;
     const unsigned int inputShape[]  = { 5, 3, 52, 60 };
     const unsigned int outputShape[] = { 5, 3, 11, 13 };
     CreatePooling2dGraph(graph, inputShape, outputShape, DataLayout::NCHW);
 
-    BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+    CHECK_NOTHROW(graph.InferTensorInfos());
 }
 
-BOOST_AUTO_TEST_CASE(Pooling2dValidateTensorShapesFromInputsNhwc)
+TEST_CASE("Pooling2dValidateTensorShapesFromInputsNhwc")
 {
     Graph graph;
     const unsigned int inputShape[]  = { 5, 52, 60, 3 };
     const unsigned int outputShape[] = { 5, 11, 13, 3 };
     CreatePooling2dGraph(graph, inputShape, outputShape, DataLayout::NHWC);
 
-    BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+    CHECK_NOTHROW(graph.InferTensorInfos());
 }
 
 void CreateResizeBilinearGraph(Graph& graph,
@@ -429,24 +431,24 @@
     layer->GetOutputSlot().Connect(output->GetInputSlot(0));
 }
 
-BOOST_AUTO_TEST_CASE(ResizeBilinearValidateTensorShapesFromInputs)
+TEST_CASE("ResizeBilinearValidateTensorShapesFromInputs")
 {
     Graph graph;
     const unsigned int inputShape[]  = { 1, 2, 4, 5 };
     const unsigned int outputShape[] = { 1, 2, 3, 4 };
     CreateResizeBilinearGraph(graph, inputShape, outputShape);
 
-    BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+    CHECK_NOTHROW(graph.InferTensorInfos());
 }
 
-BOOST_AUTO_TEST_CASE(ResizeBilinearValidateTensorShapesFromInputsNhwc)
+TEST_CASE("ResizeBilinearValidateTensorShapesFromInputsNhwc")
 {
     Graph graph;
     const unsigned int inputShape[]  = { 1, 4, 5, 2 };
     const unsigned int outputShape[] = { 1, 3, 4, 2 };
     CreateResizeBilinearGraph(graph, inputShape, outputShape, DataLayout::NHWC);
 
-    BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+    CHECK_NOTHROW(graph.InferTensorInfos());
 }
 
 void CreateGatherGraph(Graph& graph,
@@ -470,7 +472,7 @@
     layer->GetOutputSlot().Connect(output->GetInputSlot(0));
 }
 
-BOOST_AUTO_TEST_CASE(GatherValidateTensorShapesFromInputs)
+TEST_CASE("GatherValidateTensorShapesFromInputs")
 {
     Graph graph;
     armnn::TensorInfo paramsInfo({10, 5}, DataType::Float32);
@@ -479,10 +481,10 @@
 
     CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
 
-    BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+    CHECK_NOTHROW(graph.InferTensorInfos());
 }
 
-BOOST_AUTO_TEST_CASE(GatherValidateTensorShapesFromInputs1DParams)
+TEST_CASE("GatherValidateTensorShapesFromInputs1DParams")
 {
     Graph graph;
     armnn::TensorInfo paramsInfo({8}, DataType::Float32);
@@ -491,10 +493,10 @@
 
     CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
 
-    BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+    CHECK_NOTHROW(graph.InferTensorInfos());
 }
 
-BOOST_AUTO_TEST_CASE(GatherValidateTensorShapesFromInputsMultiDimIndices)
+TEST_CASE("GatherValidateTensorShapesFromInputsMultiDimIndices")
 {
     Graph graph;
     armnn::TensorInfo paramsInfo({3, 2, 5}, DataType::Float32);
@@ -503,10 +505,10 @@
 
     CreateGatherGraph(graph, paramsInfo, indicesInfo, outputInfo);
 
-    BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+    CHECK_NOTHROW(graph.InferTensorInfos());
 }
 
-BOOST_AUTO_TEST_CASE(DetectionPostProcessValidateTensorShapes)
+TEST_CASE("DetectionPostProcessValidateTensorShapes")
 {
     Graph graph;
     armnn::TensorInfo boxEncodingsInfo({1, 10, 4}, DataType::QAsymmU8);
@@ -538,7 +540,7 @@
     input0->GetOutputSlot().Connect(layer->GetInputSlot(0));
     input1->GetOutputSlot().Connect(layer->GetInputSlot(1));
 
-    BOOST_CHECK_NO_THROW(graph.InferTensorInfos());
+    CHECK_NOTHROW(graph.InferTensorInfos());
 }
 
 class MockLayerSupport : public LayerSupportBase
@@ -612,21 +614,21 @@
     };
 };
 
-BOOST_AUTO_TEST_CASE(BackendCapabilityTest)
+TEST_CASE("BackendCapabilityTest")
 {
     BackendId backendId = "MockBackend";
 
     armnn::BackendOptions::BackendOption nonConstWeights{"NonConstWeights", true};
 
     // MockBackend does not support the NonConstWeights capability
-    BOOST_CHECK(!armnn::HasCapability(nonConstWeights, backendId));
-    BOOST_CHECK(!armnn::HasCapability("NonConstWeights", backendId));
+    CHECK(!armnn::HasCapability(nonConstWeights, backendId));
+    CHECK(!armnn::HasCapability("NonConstWeights", backendId));
 
     // MockBackend does not support the AsyncExecution capability
-    BOOST_CHECK(!armnn::GetCapability("AsyncExecution", backendId).has_value());
+    CHECK(!armnn::GetCapability("AsyncExecution", backendId).has_value());
 }
 
-BOOST_AUTO_TEST_CASE(BackendHintTest)
+TEST_CASE("BackendHintTest")
 {
     class TestBackendAssignment : public LayerVisitorBase<VisitorNoThrowPolicy>
     {
@@ -635,14 +637,14 @@
         {
             IgnoreUnused(id, name);
             auto inputLayer = PolymorphicDowncast<const InputLayer*>(layer);
-            BOOST_TEST((inputLayer->GetBackendId() == "MockBackend"));
+            CHECK((inputLayer->GetBackendId() == "MockBackend"));
         }
 
         void VisitOutputLayer(const IConnectableLayer* layer, LayerBindingId id, const char* name = nullptr) override
         {
             IgnoreUnused(id, name);
             auto outputLayer = PolymorphicDowncast<const OutputLayer*>(layer);
-            BOOST_TEST((outputLayer->GetBackendId() == "MockBackend"));
+            CHECK((outputLayer->GetBackendId() == "MockBackend"));
         }
 
         void VisitActivationLayer(const IConnectableLayer* layer,
@@ -651,7 +653,7 @@
         {
             IgnoreUnused(activationDescriptor, name);
             auto activation = PolymorphicDowncast<const ActivationLayer*>(layer);
-            BOOST_TEST((activation->GetBackendId() == "CustomBackend"));
+            CHECK((activation->GetBackendId() == "CustomBackend"));
         }
     };
 
@@ -718,7 +720,7 @@
                                             lastLayer,
                                             EmptyOptional());
 
-    BOOST_TEST(res.IsOk());
+    CHECK(res.IsOk());
 
     TestBackendAssignment visitor;
     for (auto it = firstLayer; it != lastLayer; ++it)
@@ -728,7 +730,7 @@
 }
 
 // Tests that OptimizeForExclusiveConnections works, fusing when needed, using BatchNorm fusing as example
-BOOST_AUTO_TEST_CASE(OptimizeForExclusiveConnectionsFuseTest)
+TEST_CASE("OptimizeForExclusiveConnectionsFuseTest")
 {
     using namespace armnn;
     // Define layers information
@@ -786,8 +788,8 @@
     conv->GetOutputSlot(0).Connect(batchNorm->GetInputSlot(0));
     batchNorm->GetOutputSlot(0).Connect(output->GetInputSlot(0));
 
-    BOOST_CHECK(4 == graph.GetNumLayers());
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(4 == graph.GetNumLayers());
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<Convolution2dLayer>,
                              &IsLayerOfType<BatchNormalizationLayer>,
@@ -801,15 +803,15 @@
                (layer->GetNameStr() == "fused-batchNorm-into-convolution");
     };
 
-    BOOST_CHECK(3 == graph.GetNumLayers());
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(3 == graph.GetNumLayers());
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              checkFusedConv2d,
                              &IsLayerOfType<OutputLayer>));
 }
 
 // Tests that OptimizeForExclusiveConnections works, not fusing when not needed, using BatchNorm fusing as example
-BOOST_AUTO_TEST_CASE(OptimizeForExclusiveConnectionsWithoutFuseTest)
+TEST_CASE("OptimizeForExclusiveConnectionsWithoutFuseTest")
 {
     // Define the network
     Graph graph;
@@ -828,8 +830,8 @@
     batchNorm->GetOutputSlot(0).Connect(output->GetInputSlot(0));
     conv->GetOutputSlot(0).Connect(output2->GetInputSlot(0));
 
-    BOOST_CHECK(5 == graph.GetNumLayers());
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(5 == graph.GetNumLayers());
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::Convolution2dLayer>,
                              &IsLayerOfType<armnn::BatchNormalizationLayer>,
@@ -838,12 +840,12 @@
     // Optimize graph
     armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(FuseBatchNormIntoConvolution2DFloat32()));
 
-    BOOST_CHECK(5 == graph.GetNumLayers());
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(5 == graph.GetNumLayers());
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::Convolution2dLayer>,
                              &IsLayerOfType<armnn::BatchNormalizationLayer>,
                              &IsLayerOfType<armnn::OutputLayer>,
                              &IsLayerOfType<armnn::OutputLayer>));
 }
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/OptionalTest.cpp b/src/armnn/test/OptionalTest.cpp
index 73c9643..17e978c 100644
--- a/src/armnn/test/OptionalTest.cpp
+++ b/src/armnn/test/OptionalTest.cpp
@@ -2,7 +2,7 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <armnn/Optional.hpp>
 #include <string>
@@ -24,46 +24,46 @@
 
 } // namespace <anonymous>
 
-BOOST_AUTO_TEST_SUITE(OptionalTests)
-
-BOOST_AUTO_TEST_CASE(SimpleStringTests)
+TEST_SUITE("OptionalTests")
+{
+TEST_CASE("SimpleStringTests")
 {
     armnn::Optional<std::string> optionalString;
-    BOOST_TEST(static_cast<bool>(optionalString) == false);
-    BOOST_TEST(optionalString.has_value() == false);
-    BOOST_TEST((optionalString == armnn::Optional<std::string>()));
+    CHECK(static_cast<bool>(optionalString) == false);
+    CHECK(optionalString.has_value() == false);
+    CHECK((optionalString == armnn::Optional<std::string>()));
 
     optionalString = std::string("Hello World");
-    BOOST_TEST(static_cast<bool>(optionalString) == true);
-    BOOST_TEST(optionalString.has_value() == true);
-    BOOST_TEST(optionalString.value() == "Hello World");
-    BOOST_TEST((optionalString == armnn::Optional<std::string>("Hello World")));
+    CHECK(static_cast<bool>(optionalString) == true);
+    CHECK(optionalString.has_value() == true);
+    CHECK(optionalString.value() == "Hello World");
+    CHECK((optionalString == armnn::Optional<std::string>("Hello World")));
 
     armnn::Optional<std::string> otherString;
     otherString = optionalString;
-    BOOST_TEST(static_cast<bool>(otherString) == true);
-    BOOST_TEST(otherString.value() == "Hello World");
+    CHECK(static_cast<bool>(otherString) == true);
+    CHECK(otherString.value() == "Hello World");
 
     optionalString.reset();
-    BOOST_TEST(static_cast<bool>(optionalString) == false);
-    BOOST_TEST(optionalString.has_value() == false);
+    CHECK(static_cast<bool>(optionalString) == false);
+    CHECK(optionalString.has_value() == false);
 
     const std::string stringValue("Hello World");
     armnn::Optional<std::string> optionalString2(stringValue);
-    BOOST_TEST(static_cast<bool>(optionalString2) == true);
-    BOOST_TEST(optionalString2.has_value() == true);
-    BOOST_TEST(optionalString2.value() == "Hello World");
+    CHECK(static_cast<bool>(optionalString2) == true);
+    CHECK(optionalString2.has_value() == true);
+    CHECK(optionalString2.value() == "Hello World");
 
     armnn::Optional<std::string> optionalString3(std::move(optionalString2));
-    BOOST_TEST(static_cast<bool>(optionalString3) == true);
-    BOOST_TEST(optionalString3.has_value() == true);
-    BOOST_TEST(optionalString3.value() == "Hello World");
+    CHECK(static_cast<bool>(optionalString3) == true);
+    CHECK(optionalString3.has_value() == true);
+    CHECK(optionalString3.value() == "Hello World");
 }
 
-BOOST_AUTO_TEST_CASE(StringRefTests)
+TEST_CASE("StringRefTests")
 {
     armnn::Optional<std::string&> optionalStringRef{armnn::EmptyOptional()};
-    BOOST_TEST(optionalStringRef.has_value() == false);
+    CHECK(optionalStringRef.has_value() == false);
 
     PassStringRef(optionalStringRef);
     PassStringRefWithDefault();
@@ -74,51 +74,51 @@
 
     std::string& helloWorldRef = helloWorld;
     armnn::Optional<std::string&> optionalHelloRef = helloWorldRef;
-    BOOST_TEST(optionalHelloRef.has_value() == true);
-    BOOST_TEST(optionalHelloRef.value() == "Hello World");
+    CHECK(optionalHelloRef.has_value() == true);
+    CHECK(optionalHelloRef.value() == "Hello World");
 
     armnn::Optional<std::string&> optionalHelloRef2 = helloWorld;
-    BOOST_TEST(optionalHelloRef2.has_value() == true);
-    BOOST_TEST(optionalHelloRef2.value() == "Hello World");
+    CHECK(optionalHelloRef2.has_value() == true);
+    CHECK(optionalHelloRef2.value() == "Hello World");
 
     armnn::Optional<std::string&> optionalHelloRef3{helloWorldRef};
-    BOOST_TEST(optionalHelloRef3.has_value() == true);
-    BOOST_TEST(optionalHelloRef3.value() == "Hello World");
+    CHECK(optionalHelloRef3.has_value() == true);
+    CHECK(optionalHelloRef3.value() == "Hello World");
 
     armnn::Optional<std::string&> optionalHelloRef4{helloWorld};
-    BOOST_TEST(optionalHelloRef4.has_value() == true);
-    BOOST_TEST(optionalHelloRef4.value() == "Hello World");
+    CHECK(optionalHelloRef4.has_value() == true);
+    CHECK(optionalHelloRef4.value() == "Hello World");
 
     // modify through the optional reference
     optionalHelloRef4.value().assign("Long Other String");
-    BOOST_TEST(helloWorld == "Long Other String");
-    BOOST_TEST(optionalHelloRef.value() == "Long Other String");
-    BOOST_TEST(optionalHelloRef2.value() == "Long Other String");
-    BOOST_TEST(optionalHelloRef3.value() == "Long Other String");
+    CHECK(helloWorld == "Long Other String");
+    CHECK(optionalHelloRef.value() == "Long Other String");
+    CHECK(optionalHelloRef2.value() == "Long Other String");
+    CHECK(optionalHelloRef3.value() == "Long Other String");
 }
 
-BOOST_AUTO_TEST_CASE(SimpleIntTests)
+TEST_CASE("SimpleIntTests")
 {
     const int intValue = 123;
 
     armnn::Optional<int> optionalInt;
-    BOOST_TEST(static_cast<bool>(optionalInt) == false);
-    BOOST_TEST(optionalInt.has_value() == false);
-    BOOST_TEST((optionalInt == armnn::Optional<int>()));
+    CHECK(static_cast<bool>(optionalInt) == false);
+    CHECK(optionalInt.has_value() == false);
+    CHECK((optionalInt == armnn::Optional<int>()));
 
     optionalInt = intValue;
-    BOOST_TEST(static_cast<bool>(optionalInt) == true);
-    BOOST_TEST(optionalInt.has_value() == true);
-    BOOST_TEST(optionalInt.value() == intValue);
-    BOOST_TEST((optionalInt == armnn::Optional<int>(intValue)));
+    CHECK(static_cast<bool>(optionalInt) == true);
+    CHECK(optionalInt.has_value() == true);
+    CHECK(optionalInt.value() == intValue);
+    CHECK((optionalInt == armnn::Optional<int>(intValue)));
 
     armnn::Optional<int> otherOptionalInt;
     otherOptionalInt = optionalInt;
-    BOOST_TEST(static_cast<bool>(otherOptionalInt) == true);
-    BOOST_TEST(otherOptionalInt.value() == intValue);
+    CHECK(static_cast<bool>(otherOptionalInt) == true);
+    CHECK(otherOptionalInt.value() == intValue);
 }
 
-BOOST_AUTO_TEST_CASE(ObjectConstructedInPlaceTests)
+TEST_CASE("ObjectConstructedInPlaceTests")
 {
     struct SimpleObject
     {
@@ -145,15 +145,15 @@
 
     // Use MakeOptional
     armnn::Optional<SimpleObject> optionalObject1 = armnn::MakeOptional<SimpleObject>(objectName, objectValue);
-    BOOST_CHECK(static_cast<bool>(optionalObject1) == true);
-    BOOST_CHECK(optionalObject1.has_value() == true);
-    BOOST_CHECK(optionalObject1.value() == referenceObject);
+    CHECK(static_cast<bool>(optionalObject1) == true);
+    CHECK(optionalObject1.has_value() == true);
+    CHECK((optionalObject1.value() == referenceObject));
 
     // Call in-place constructor directly
     armnn::Optional<SimpleObject> optionalObject2(CONSTRUCT_IN_PLACE, objectName, objectValue);
-    BOOST_CHECK(static_cast<bool>(optionalObject1) == true);
-    BOOST_CHECK(optionalObject1.has_value() == true);
-    BOOST_CHECK(optionalObject1.value() == referenceObject);
+    CHECK(static_cast<bool>(optionalObject2) == true);
+    CHECK(optionalObject2.has_value() == true);
+    CHECK((optionalObject2.value() == referenceObject));
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
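
For reference, the Boost.Test-to-doctest macro mapping applied file by file in this change can be exercised in a small standalone sketch. This is illustrative only; the DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN setup is an assumption made so the sketch builds on its own and is not how the ArmNN test binaries are configured:

    // Minimal standalone illustration of the Boost.Test -> doctest mapping used in this change.
    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN   // assumption: only so this sketch provides its own main()
    #include <doctest/doctest.h>

    #include <stdexcept>
    #include <string>

    TEST_SUITE("MappingExample")                            // was BOOST_AUTO_TEST_SUITE(MappingExample)
    {

    TEST_CASE("BasicChecks")                                // was BOOST_AUTO_TEST_CASE(BasicChecks)
    {
        std::string value("Hello World");
        CHECK(value == "Hello World");                      // was BOOST_TEST / BOOST_CHECK
        CHECK_EQ(value.size(), 11u);                        // was BOOST_CHECK_EQUAL
        CHECK_NOTHROW(value.at(0));                         // was BOOST_CHECK_NO_THROW
        CHECK_THROWS_AS(value.at(100), std::out_of_range);  // was BOOST_CHECK_THROW
        MESSAGE("value checked OK");                        // was BOOST_TEST_MESSAGE
    }

    }                                                       // was BOOST_AUTO_TEST_SUITE_END()
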
diff --git a/src/armnn/test/ProfilerTests.cpp b/src/armnn/test/ProfilerTests.cpp
index 21900ff..b27ad5a 100644
--- a/src/armnn/test/ProfilerTests.cpp
+++ b/src/armnn/test/ProfilerTests.cpp
@@ -7,7 +7,7 @@
 #include <armnn/TypesUtils.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <memory>
 #include <thread>
@@ -34,7 +34,7 @@
 
 void RegisterUnregisterProfilerSingleThreadImpl(bool &res)
 {
-    // Important! Don't use BOOST_TEST macros in this function as they
+    // Important! Don't use CHECK macros in this function as they
     // seem to have problems when used in threads
 
     // Get a reference to the profiler manager.
@@ -61,36 +61,36 @@
 
 } // namespace
 
-BOOST_AUTO_TEST_SUITE(Profiler)
-
-BOOST_AUTO_TEST_CASE(EnableDisableProfiling)
+TEST_SUITE("Profiler")
+{
+TEST_CASE("EnableDisableProfiling")
 {
     std::unique_ptr<armnn::IProfiler> profiler = std::make_unique<armnn::IProfiler>();
 
     // Check that profiling is disabled by default.
-    BOOST_TEST(!profiler->IsProfilingEnabled());
+    CHECK(!profiler->IsProfilingEnabled());
 
     // Enable profiling.
     profiler->EnableProfiling(true);
 
     // Check that profiling is enabled.
-    BOOST_TEST(profiler->IsProfilingEnabled());
+    CHECK(profiler->IsProfilingEnabled());
 
     // Disable profiling.
     profiler->EnableProfiling(false);
 
     // Check that profiling is disabled.
-    BOOST_TEST(!profiler->IsProfilingEnabled());
+    CHECK(!profiler->IsProfilingEnabled());
 }
 
-BOOST_AUTO_TEST_CASE(RegisterUnregisterProfilerSingleThread)
+TEST_CASE("RegisterUnregisterProfilerSingleThread")
 {
     bool res = false;
     RegisterUnregisterProfilerSingleThreadImpl(res);
-    BOOST_TEST(res);
+    CHECK(res);
 }
 
-BOOST_AUTO_TEST_CASE(RegisterUnregisterProfilerMultipleThreads)
+TEST_CASE("RegisterUnregisterProfilerMultipleThreads")
 {
     bool res[3] = {false, false, false};
     std::thread thread1([&res]() { RegisterUnregisterProfilerSingleThreadImpl(res[0]); });
@@ -103,11 +103,11 @@
 
     for (int i = 0 ; i < 3 ; i++)
     {
-        BOOST_TEST(res[i]);
+        CHECK(res[i]);
     }
 }
 
-BOOST_AUTO_TEST_CASE(ProfilingMacros)
+TEST_CASE("ProfilingMacros")
 {
     // Get a reference to the profiler manager.
     armnn::ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
@@ -115,13 +115,13 @@
     { // --- No profiler ---
 
         // Check that there's no profiler registered for this thread.
-        BOOST_TEST(!profilerManager.GetProfiler());
+        CHECK(!profilerManager.GetProfiler());
 
         // Test scoped event.
         { ARMNN_SCOPED_PROFILING_EVENT(armnn::Compute::CpuAcc, "test"); }
 
         // Check that we still cannot get a profiler for this thread.
-        BOOST_TEST(!profilerManager.GetProfiler());
+        CHECK(!profilerManager.GetProfiler());
     }
 
     // Create and register a profiler for this thread.
@@ -138,7 +138,7 @@
 
         // Check that no profiling event has been added to the sequence.
         size_t eventSequenceSizeAfter = armnn::GetProfilerEventSequenceSize(profiler.get());
-        BOOST_TEST(eventSequenceSizeBefore == eventSequenceSizeAfter);
+        CHECK(eventSequenceSizeBefore == eventSequenceSizeAfter);
     }
 
     // Enable profiling.
@@ -154,7 +154,7 @@
 
         // Check that a profiling event has been added to the sequence.
         size_t eventSequenceSizeAfter = armnn::GetProfilerEventSequenceSize(profiler.get());
-        BOOST_TEST(eventSequenceSizeAfter == eventSequenceSizeBefore + 1);
+        CHECK(eventSequenceSizeAfter == eventSequenceSizeBefore + 1);
     }
 
     // Disable profiling here to not print out anything on stdout.
@@ -165,13 +165,13 @@
 
 // This test unit needs the reference backend; it's not available if the reference backend is not built
 
-BOOST_AUTO_TEST_CASE(RuntimeLoadNetwork)
+TEST_CASE("RuntimeLoadNetwork")
 {
     // Get a reference to the profiler manager.
     armnn::ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
 
     // Check that there's no profiler registered for this thread.
-    BOOST_TEST(!profilerManager.GetProfiler());
+    CHECK(!profilerManager.GetProfiler());
 
     // Build a mock-network and load it into the runtime.
     armnn::IRuntime::CreationOptions options;
@@ -183,18 +183,18 @@
     runtime->LoadNetwork(networkIdentifier, armnn::Optimize(*mockNetwork, backends, runtime->GetDeviceSpec()));
 
     // Check that now there's a profiler registered for this thread (created and registered by loading the network).
-    BOOST_TEST(profilerManager.GetProfiler());
+    CHECK(profilerManager.GetProfiler());
 
     // Unload the network.
     runtime->UnloadNetwork(networkIdentifier);
 
     // Check that the profiler has been un-registered for this thread.
-    BOOST_TEST(!profilerManager.GetProfiler());
+    CHECK(!profilerManager.GetProfiler());
 }
 
 #endif
 
-BOOST_AUTO_TEST_CASE(WriteEventResults)
+TEST_CASE("WriteEventResults")
 {
     // Get a reference to the profiler manager.
     armnn::ProfilerManager& profileManager = armnn::ProfilerManager::GetInstance();
@@ -222,35 +222,35 @@
 
         // Check that a profiling event has been added to the sequence.
         size_t eventSequenceSizeAfter = armnn::GetProfilerEventSequenceSize(profiler.get());
-        BOOST_TEST(eventSequenceSizeAfter == eventSequenceSizeBefore + 1);
+        CHECK(eventSequenceSizeAfter == eventSequenceSizeBefore + 1);
 
         std::ostringstream output;
         profiler->AnalyzeEventsAndWriteResults(output);
-        BOOST_TEST(!output.str().empty());
+        CHECK(!output.str().empty());
 
         // output should contain event name 'test'
-        BOOST_CHECK(output.str().find("test") != std::string::npos);
+        CHECK(output.str().find("test") != std::string::npos);
 
         // output should contain headers
-        BOOST_CHECK(output.str().find("Event Sequence - Name") != std::string::npos);
-        BOOST_CHECK(output.str().find("Event Stats - Name") != std::string::npos);
-        BOOST_CHECK(output.str().find("Total") != std::string::npos);
-        BOOST_CHECK(output.str().find("Device") != std::string::npos);
+        CHECK(output.str().find("Event Sequence - Name") != std::string::npos);
+        CHECK(output.str().find("Event Stats - Name") != std::string::npos);
+        CHECK(output.str().find("Total") != std::string::npos);
+        CHECK(output.str().find("Device") != std::string::npos);
         // output should contain compute device 'CpuAcc'
-        BOOST_CHECK(output.str().find("CpuAcc") != std::string::npos);
+        CHECK(output.str().find("CpuAcc") != std::string::npos);
         // output should not contain un-readable numbers
-        BOOST_CHECK(output.str().find("e+") == std::string::npos);
+        CHECK(output.str().find("e+") == std::string::npos);
         // output should not contain un-readable numbers
-        BOOST_CHECK(output.str().find("+") == std::string::npos);
+        CHECK(output.str().find("+") == std::string::npos);
         // output should not contain zero value
-        BOOST_CHECK(output.str().find(" 0 ") == std::string::npos);
+        CHECK(output.str().find(" 0 ") == std::string::npos);
     }
 
     // Disable profiling here to not print out anything on stdout.
     profiler->EnableProfiling(false);
 }
 
-BOOST_AUTO_TEST_CASE(ProfilerJsonPrinter)
+TEST_CASE("ProfilerJsonPrinter")
 {
     class TestInstrument : public armnn::Instrument
     {
@@ -350,8 +350,8 @@
                               "2.000000\n\t\t\t\t\t\t\t],\n\t\t\t\t\t\t\t\""
                               "unit\": \"us\"\n\t\t\t\t\t\t}\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t}\n\t}\n}\n");
 
-    BOOST_CHECK(output == blessedOutput);
+    CHECK(output == blessedOutput);
     armnn::ProfilerManager::GetInstance().RegisterProfiler(nullptr);
 }
 
-BOOST_AUTO_TEST_SUITE_END();
+}
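
The comment in RegisterUnregisterProfilerSingleThreadImpl above keeps the assertion macros out of worker threads: results are written to plain bools and checked from the main test body instead. A minimal sketch of that pattern, assuming it is compiled into a test binary that already provides the doctest main (the Work() helper is hypothetical):

    #include <doctest/doctest.h>

    #include <thread>

    namespace
    {
    // Hypothetical worker used only for this sketch: report success through an out-parameter
    // instead of calling CHECK from inside the thread.
    void Work(bool& result)
    {
        result = true; // real code would do the actual work and set the flag accordingly
    }
    } // anonymous namespace

    TEST_CASE("CheckResultsOnMainThreadOnly")
    {
        bool results[2] = { false, false };
        std::thread thread1([&results]() { Work(results[0]); });
        std::thread thread2([&results]() { Work(results[1]); });
        thread1.join();
        thread2.join();

        // Assertions run on the main thread only, mirroring RegisterUnregisterProfilerMultipleThreads.
        for (bool res : results)
        {
            CHECK(res);
        }
    }
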
diff --git a/src/armnn/test/ProfilingEventTest.cpp b/src/armnn/test/ProfilingEventTest.cpp
index 0add836..1e3d1ea 100644
--- a/src/armnn/test/ProfilingEventTest.cpp
+++ b/src/armnn/test/ProfilingEventTest.cpp
@@ -3,7 +3,7 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include "ProfilingEvent.hpp"
 #include "Profiling.hpp"
@@ -12,9 +12,9 @@
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(ProfilingEvent)
-
-BOOST_AUTO_TEST_CASE(ProfilingEventTest)
+TEST_SUITE("ProfilingEvent")
+{
+TEST_CASE("ProfilingEventTest")
 {
     // Get a reference to the profiler manager.
     armnn::ProfilerManager& profileManager = armnn::ProfilerManager::GetInstance();
@@ -29,7 +29,7 @@
                     BackendId(),
                     std::move(insts1));
 
-    BOOST_CHECK_EQUAL(testEvent.GetName(), "EventName");
+    CHECK_EQ(testEvent.GetName(), "EventName");
 
     // start the timer - outer
     testEvent.Start();
@@ -40,7 +40,7 @@
     // stop the timer - outer
     testEvent.Stop();
 
-    BOOST_CHECK_GE(testEvent.GetMeasurements().front().m_Value, 10.0);
+    CHECK_GE(testEvent.GetMeasurements().front().m_Value, 10.0);
 
     // create a sub event with CpuAcc
     BackendId cpuAccBackendId(Compute::CpuAcc);
@@ -52,12 +52,12 @@
                      cpuAccBackendId,
                      std::move(insts2));
 
-    BOOST_CHECK_EQUAL(&testEvent, testEvent2.GetParentEvent());
-    BOOST_CHECK_EQUAL(profileManager.GetProfiler(), testEvent2.GetProfiler());
-    BOOST_CHECK(cpuAccBackendId == testEvent2.GetBackendId());
+    CHECK_EQ(&testEvent, testEvent2.GetParentEvent());
+    CHECK_EQ(profileManager.GetProfiler(), testEvent2.GetProfiler());
+    CHECK(cpuAccBackendId == testEvent2.GetBackendId());
 }
 
-BOOST_AUTO_TEST_CASE(ProfilingEventTestOnGpuAcc)
+TEST_CASE("ProfilingEventTestOnGpuAcc")
 {
     // Get a reference to the profiler manager.
     armnn::ProfilerManager& profileManager = armnn::ProfilerManager::GetInstance();
@@ -72,7 +72,7 @@
                     BackendId(),
                     std::move(insts1));
 
-    BOOST_CHECK_EQUAL(testEvent.GetName(), "GPUEvent");
+    CHECK_EQ(testEvent.GetName(), "GPUEvent");
 
     // start the timer - outer
     testEvent.Start();
@@ -83,7 +83,7 @@
     // stop the timer - outer
     testEvent.Stop();
 
-    BOOST_CHECK_GE(testEvent.GetMeasurements().front().m_Value, 10.0);
+    CHECK_GE(testEvent.GetMeasurements().front().m_Value, 10.0);
 
     // create a sub event
     BackendId gpuAccBackendId(Compute::GpuAcc);
@@ -95,9 +95,9 @@
                      gpuAccBackendId,
                      std::move(insts2));
 
-    BOOST_CHECK_EQUAL(&testEvent, testEvent2.GetParentEvent());
-    BOOST_CHECK_EQUAL(profileManager.GetProfiler(), testEvent2.GetProfiler());
-    BOOST_CHECK(gpuAccBackendId == testEvent2.GetBackendId());
+    CHECK_EQ(&testEvent, testEvent2.GetParentEvent());
+    CHECK_EQ(profileManager.GetProfiler(), testEvent2.GetProfiler());
+    CHECK(gpuAccBackendId == testEvent2.GetBackendId());
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/RuntimeTests.cpp b/src/armnn/test/RuntimeTests.cpp
index c5457d0..3db9544 100644
--- a/src/armnn/test/RuntimeTests.cpp
+++ b/src/armnn/test/RuntimeTests.cpp
@@ -20,7 +20,7 @@
 #include <valgrind/memcheck.h>
 #endif
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 #include "RuntimeTests.hpp"
 #include "TestUtils.hpp"
 
@@ -34,9 +34,9 @@
 
 }
 
-BOOST_AUTO_TEST_SUITE(Runtime)
-
-BOOST_AUTO_TEST_CASE(RuntimeUnloadNetwork)
+TEST_SUITE("Runtime")
+{
+TEST_CASE("RuntimeUnloadNetwork")
 {
     // build 2 mock-networks and load them into the runtime
     armnn::IRuntime::CreationOptions options;
@@ -56,9 +56,9 @@
     runtime->LoadNetwork(networkIdentifier2, Optimize(*mockNetwork2, backends, runtime->GetDeviceSpec()));
 
     // Unloads one by its networkID.
-    BOOST_TEST(runtime->UnloadNetwork(networkIdentifier1) == armnn::Status::Success);
+    CHECK(runtime->UnloadNetwork(networkIdentifier1) == armnn::Status::Success);
 
-    BOOST_TEST(runtime->UnloadNetwork(networkIdentifier1) == armnn::Status::Failure);
+    CHECK(runtime->UnloadNetwork(networkIdentifier1) == armnn::Status::Failure);
 }
 
 // Note: the current builds we don't do valgrind and gperftools based leak checking at the same
@@ -76,26 +76,24 @@
     }
 };
 
-BOOST_GLOBAL_FIXTURE(DisableGlobalLeakChecking);
-
-BOOST_AUTO_TEST_CASE(RuntimeHeapMemoryUsageSanityChecks)
+TEST_CASE_FIXTURE(DisableGlobalLeakChecking, "RuntimeHeapMemoryUsageSanityChecks")
 {
-    BOOST_TEST(ARMNN_LEAK_CHECKER_IS_ACTIVE());
+    CHECK(ARMNN_LEAK_CHECKER_IS_ACTIVE());
     {
         ARMNN_SCOPED_LEAK_CHECKER("Sanity_Check_Outer");
         {
             ARMNN_SCOPED_LEAK_CHECKER("Sanity_Check_Inner");
-            BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE() == true);
+            CHECK(ARMNN_NO_LEAKS_IN_SCOPE() == true);
             std::unique_ptr<char[]> dummyAllocation(new char[1000]);
-            BOOST_CHECK_MESSAGE(ARMNN_NO_LEAKS_IN_SCOPE() == false,
-                "A leak of 1000 bytes is expected here. "
-                "Please make sure environment variable: HEAPCHECK=draconian is set!");
-            BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 1000);
-            BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 1);
+            // A leak of 1000 bytes is expected here.
+            // Please make sure the environment variable HEAPCHECK=draconian is set!
+            CHECK((ARMNN_NO_LEAKS_IN_SCOPE() == false));
+            CHECK(ARMNN_BYTES_LEAKED_IN_SCOPE() == 1000);
+            CHECK(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 1);
         }
-        BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
-        BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
-        BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
+        CHECK(ARMNN_NO_LEAKS_IN_SCOPE());
+        CHECK(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
+        CHECK(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
     }
 }
 
@@ -105,8 +103,9 @@
 #ifdef WITH_VALGRIND
 // Run with the following command to get all the amazing output (in the devenv/build folder) :)
 // valgrind --leak-check=full --show-leak-kinds=all --log-file=Valgrind_Memcheck_Leak_Report.txt armnn/test/UnitTests
-BOOST_AUTO_TEST_CASE(RuntimeMemoryLeak)
+TEST_CASE("RuntimeMemoryLeak")
 {
+    MESSAGE("RuntimeMemoryLeak");
     // From documentation:
 
     // This means that no pointer to the block can be found. The block is classified as "lost",
@@ -155,8 +154,8 @@
     }
 
     // If we're not running under Valgrind, these vars will have been initialised to 0, so this will always pass.
-    BOOST_TEST(leakedBefore == leakedAfter);
-    BOOST_TEST(reachableBefore == reachableAfter);
+    CHECK(leakedBefore == leakedAfter);
+    CHECK(reachableBefore == reachableAfter);
 
     // These are needed because VALGRIND_COUNT_LEAKS is a macro that assigns to the parameters
     // so they are assigned to, but still considered unused, causing a warning.
@@ -165,7 +164,7 @@
 }
 #endif // WITH_VALGRIND
 
-BOOST_AUTO_TEST_CASE(RuntimeCpuRef)
+TEST_CASE("RuntimeCpuRef")
 {
     using namespace armnn;
 
@@ -196,10 +195,10 @@
 
     // Load it into the runtime. It should succeed.
     armnn::NetworkId netId;
-    BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
+    CHECK(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
 }
 
-BOOST_AUTO_TEST_CASE(RuntimeFallbackToCpuRef)
+TEST_CASE("RuntimeFallbackToCpuRef")
 {
     using namespace armnn;
 
@@ -231,10 +230,10 @@
 
     // Load it into the runtime. It should succeed.
     armnn::NetworkId netId;
-    BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
+    CHECK(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
 }
 
-BOOST_AUTO_TEST_CASE(IVGCVSW_1929_QuantizedSoftmaxIssue)
+TEST_CASE("IVGCVSW_1929_QuantizedSoftmaxIssue")
 {
     // Test for issue reported by Chris Nix in https://jira.arm.com/browse/IVGCVSW-1929
     using namespace armnn;
@@ -270,16 +269,16 @@
                                                       runtime->GetDeviceSpec(),
                                                       OptimizerOptions(),
                                                       errMessages);
-        BOOST_FAIL("An exception should have been thrown");
+        FAIL("An exception should have been thrown");
     }
     catch (const InvalidArgumentException& e)
     {
         // Different exceptions are thrown on different backends
     }
-    BOOST_CHECK(errMessages.size() > 0);
+    CHECK(errMessages.size() > 0);
 }
 
-BOOST_AUTO_TEST_CASE(RuntimeBackendOptions)
+TEST_CASE("RuntimeBackendOptions")
 {
     using namespace armnn;
 
@@ -307,27 +306,27 @@
 
 
     // First group
-    BOOST_TEST(backendOptions[0].GetBackendId().Get() == "FakeBackend1");
-    BOOST_TEST(backendOptions[0].GetOption(0).GetName() == "Option1");
-    BOOST_TEST(backendOptions[0].GetOption(0).GetValue().IsFloat() == true);
-    BOOST_TEST(backendOptions[0].GetOption(0).GetValue().AsFloat() == 1.3f);
+    CHECK(backendOptions[0].GetBackendId().Get() == "FakeBackend1");
+    CHECK(backendOptions[0].GetOption(0).GetName() == "Option1");
+    CHECK(backendOptions[0].GetOption(0).GetValue().IsFloat() == true);
+    CHECK(backendOptions[0].GetOption(0).GetValue().AsFloat() == 1.3f);
 
-    BOOST_TEST(backendOptions[0].GetOption(1).GetName() == "Option2");
-    BOOST_TEST(backendOptions[0].GetOption(1).GetValue().IsBool() == true);
-    BOOST_TEST(backendOptions[0].GetOption(1).GetValue().AsBool() == true);
+    CHECK(backendOptions[0].GetOption(1).GetName() == "Option2");
+    CHECK(backendOptions[0].GetOption(1).GetValue().IsBool() == true);
+    CHECK(backendOptions[0].GetOption(1).GetValue().AsBool() == true);
 
-    BOOST_TEST(backendOptions[0].GetOption(2).GetName() == "Option3");
-    BOOST_TEST(backendOptions[0].GetOption(2).GetValue().IsString() == true);
-    BOOST_TEST(backendOptions[0].GetOption(2).GetValue().AsString() == "some_value");
+    CHECK(backendOptions[0].GetOption(2).GetName() == "Option3");
+    CHECK(backendOptions[0].GetOption(2).GetValue().IsString() == true);
+    CHECK(backendOptions[0].GetOption(2).GetValue().AsString() == "some_value");
 
     // Second group
-    BOOST_TEST(backendOptions[1].GetBackendId().Get() == "FakeBackend1");
-    BOOST_TEST(backendOptions[1].GetOption(0).GetName() == "Option4");
-    BOOST_TEST(backendOptions[1].GetOption(0).GetValue().IsInt() == true);
-    BOOST_TEST(backendOptions[1].GetOption(0).GetValue().AsInt() == 42);
+    CHECK(backendOptions[1].GetBackendId().Get() == "FakeBackend1");
+    CHECK(backendOptions[1].GetOption(0).GetName() == "Option4");
+    CHECK(backendOptions[1].GetOption(0).GetValue().IsInt() == true);
+    CHECK(backendOptions[1].GetOption(0).GetValue().AsInt() == 42);
 }
 
-BOOST_AUTO_TEST_CASE(ProfilingDisable)
+TEST_CASE("ProfilingDisable")
 {
     using namespace armnn;
 
@@ -358,17 +357,17 @@
 
     // Load it into the runtime. It should succeed.
     armnn::NetworkId netId;
-    BOOST_TEST(runtime.LoadNetwork(netId, std::move(optNet)) == Status::Success);
+    CHECK(runtime.LoadNetwork(netId, std::move(optNet)) == Status::Success);
 
     profiling::ProfilingServiceRuntimeHelper profilingServiceHelper(GetProfilingService(&runtime));
     profiling::BufferManager& bufferManager = profilingServiceHelper.GetProfilingBufferManager();
     auto readableBuffer = bufferManager.GetReadableBuffer();
 
     // Profiling is not enabled, the post-optimisation structure should not be created
-    BOOST_TEST(!readableBuffer);
+    CHECK(!readableBuffer);
 }
 
-BOOST_AUTO_TEST_CASE(ProfilingEnableCpuRef)
+TEST_CASE("ProfilingEnableCpuRef")
 {
     using namespace armnn;
     using namespace armnn::profiling;
@@ -410,29 +409,29 @@
 
     // Load it into the runtime. It should succeed.
     armnn::NetworkId netId;
-    BOOST_TEST(runtime.LoadNetwork(netId, std::move(optNet)) == Status::Success);
+    CHECK(runtime.LoadNetwork(netId, std::move(optNet)) == Status::Success);
 
     profiling::BufferManager& bufferManager = profilingServiceHelper.GetProfilingBufferManager();
     auto readableBuffer = bufferManager.GetReadableBuffer();
 
     // Profiling is enabled, the post-optimisation structure should be created
-    BOOST_CHECK(readableBuffer != nullptr);
+    CHECK(readableBuffer != nullptr);
 
     unsigned int size = readableBuffer->GetSize();
 
     const unsigned char* readableData = readableBuffer->GetReadableData();
-    BOOST_CHECK(readableData != nullptr);
+    CHECK(readableData != nullptr);
 
     unsigned int offset = 0;
 
     // Verify Header
     VerifyTimelineHeaderBinary(readableData, offset, size - 8);
-    BOOST_TEST_MESSAGE("HEADER OK");
+    MESSAGE("HEADER OK");
 
     // Post-optimisation network
     // Network entity
     VerifyTimelineEntityBinaryPacketData(optNetGuid, readableData, offset);
-    BOOST_TEST_MESSAGE("NETWORK ENTITY OK");
+    MESSAGE("NETWORK ENTITY OK");
 
     // Entity - Type relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -442,7 +441,7 @@
                                                LabelsAndEventClasses::TYPE_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("NETWORK TYPE RELATIONSHIP OK");
+    MESSAGE("NETWORK TYPE RELATIONSHIP OK");
 
     // Network - START OF LIFE
     ProfilingGuid networkSolEventGuid = VerifyTimelineEventBinaryPacket(EmptyOptional(),
@@ -450,7 +449,7 @@
                                                                         EmptyOptional(),
                                                                         readableData,
                                                                         offset);
-    BOOST_TEST_MESSAGE("NETWORK START OF LIFE EVENT OK");
+    MESSAGE("NETWORK START OF LIFE EVENT OK");
 
     // Network - START OF LIFE event relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
@@ -460,7 +459,7 @@
                                                LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("NETWORK START OF LIFE RELATIONSHIP OK");
+    MESSAGE("NETWORK START OF LIFE RELATIONSHIP OK");
 
     // Process ID Label
     int processID = armnnUtils::Processes::GetCurrentId();
@@ -468,7 +467,7 @@
     ss << processID;
     std::string processIdLabel = ss.str();
     VerifyTimelineLabelBinaryPacketData(EmptyOptional(), processIdLabel, readableData, offset);
-    BOOST_TEST_MESSAGE("PROCESS ID LABEL OK");
+    MESSAGE("PROCESS ID LABEL OK");
 
     // Entity - Process ID relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -478,16 +477,16 @@
                                                LabelsAndEventClasses::PROCESS_ID_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("NETWORK PROCESS ID RELATIONSHIP OK");
+    MESSAGE("NETWORK PROCESS ID RELATIONSHIP OK");
 
     // Input layer
     // Input layer entity
     VerifyTimelineEntityBinaryPacketData(input->GetGuid(), readableData, offset);
-    BOOST_TEST_MESSAGE("INPUT ENTITY OK");
+    MESSAGE("INPUT ENTITY OK");
 
     // Name Entity
     ProfilingGuid inputLabelGuid = VerifyTimelineLabelBinaryPacketData(EmptyOptional(), "input", readableData, offset);
-    BOOST_TEST_MESSAGE("INPUT NAME LABEL OK");
+    MESSAGE("INPUT NAME LABEL OK");
 
     // Entity - Name relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -497,7 +496,7 @@
                                                LabelsAndEventClasses::NAME_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INPUT NAME RELATIONSHIP OK");
+    MESSAGE("INPUT NAME RELATIONSHIP OK");
 
     // Entity - Type relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -507,7 +506,7 @@
                                                LabelsAndEventClasses::TYPE_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INPUT TYPE RELATIONSHIP OK");
+    MESSAGE("INPUT TYPE RELATIONSHIP OK");
 
     // Network - Input layer relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -517,17 +516,17 @@
                                                LabelsAndEventClasses::CHILD_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("NETWORK - INPUT CHILD RELATIONSHIP OK");
+    MESSAGE("NETWORK - INPUT CHILD RELATIONSHIP OK");
 
     // Normalization layer
     // Normalization layer entity
     VerifyTimelineEntityBinaryPacketData(normalize->GetGuid(), readableData, offset);
-    BOOST_TEST_MESSAGE("NORMALIZATION LAYER ENTITY OK");
+    MESSAGE("NORMALIZATION LAYER ENTITY OK");
 
     // Name entity
     ProfilingGuid normalizationLayerNameGuid = VerifyTimelineLabelBinaryPacketData(
         EmptyOptional(), "normalization", readableData, offset);
-    BOOST_TEST_MESSAGE("NORMALIZATION LAYER NAME LABEL OK");
+    MESSAGE("NORMALIZATION LAYER NAME LABEL OK");
 
     // Entity - Name relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -537,7 +536,7 @@
                                                LabelsAndEventClasses::NAME_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("NORMALIZATION LAYER NAME RELATIONSHIP OK");
+    MESSAGE("NORMALIZATION LAYER NAME RELATIONSHIP OK");
 
     // Entity - Type relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -547,7 +546,7 @@
                                                LabelsAndEventClasses::TYPE_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("NORMALIZATION LAYER TYPE RELATIONSHIP OK");
+    MESSAGE("NORMALIZATION LAYER TYPE RELATIONSHIP OK");
 
     // Network - Normalize layer relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -557,7 +556,7 @@
                                                LabelsAndEventClasses::CHILD_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("NETWORK - NORMALIZATION LAYER CHILD RELATIONSHIP OK");
+    MESSAGE("NETWORK - NORMALIZATION LAYER CHILD RELATIONSHIP OK");
 
     // Input layer - Normalize layer relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -567,13 +566,13 @@
                                                LabelsAndEventClasses::CONNECTION_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INPUT - NORMALIZATION LAYER CONNECTION OK");
+    MESSAGE("INPUT - NORMALIZATION LAYER CONNECTION OK");
 
     // Normalization workload
     // Normalization workload entity
     ProfilingGuid normalizationWorkloadGuid = VerifyTimelineEntityBinaryPacketData(
         EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("NORMALIZATION WORKLOAD ENTITY OK");
+    MESSAGE("NORMALIZATION WORKLOAD ENTITY OK");
 
     // Entity - Type relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -583,12 +582,12 @@
                                                LabelsAndEventClasses::TYPE_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("NORMALIZATION WORKLOAD TYPE RELATIONSHIP OK");
+    MESSAGE("NORMALIZATION WORKLOAD TYPE RELATIONSHIP OK");
 
     // BackendId entity
     ProfilingGuid cpuRefLabelGuid = VerifyTimelineLabelBinaryPacketData(
         EmptyOptional(), "CpuRef", readableData, offset);
-    BOOST_TEST_MESSAGE("CPUREF LABEL OK");
+    MESSAGE("CPUREF LABEL OK");
 
     // Entity - BackendId relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -598,7 +597,7 @@
                                                LabelsAndEventClasses::BACKENDID_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("NORMALIZATION WORKLOAD BACKEND ID RELATIONSHIP OK");
+    MESSAGE("NORMALIZATION WORKLOAD BACKEND ID RELATIONSHIP OK");
 
     // Normalize layer - Normalize workload relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -608,17 +607,17 @@
                                                LabelsAndEventClasses::CHILD_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("NORMALIZATION LAYER - WORKLOAD CHILD RELATIONSHIP OK");
+    MESSAGE("NORMALIZATION LAYER - WORKLOAD CHILD RELATIONSHIP OK");
 
     // Output layer
     // Output layer entity
     VerifyTimelineEntityBinaryPacketData(output->GetGuid(), readableData, offset);
-    BOOST_TEST_MESSAGE("OUTPUT LAYER ENTITY OK");
+    MESSAGE("OUTPUT LAYER ENTITY OK");
 
     // Name entity
     ProfilingGuid outputLabelGuid = VerifyTimelineLabelBinaryPacketData(
         EmptyOptional(), "output", readableData, offset);
-    BOOST_TEST_MESSAGE("OUTPUT LAYER NAME LABEL OK");
+    MESSAGE("OUTPUT LAYER NAME LABEL OK");
 
     // Entity - Name relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -628,7 +627,7 @@
                                                LabelsAndEventClasses::NAME_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("OUTPUT LAYER NAME RELATIONSHIP OK");
+    MESSAGE("OUTPUT LAYER NAME RELATIONSHIP OK");
 
     // Entity - Type relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -638,7 +637,7 @@
                                                LabelsAndEventClasses::TYPE_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("OUTPUT LAYER TYPE RELATIONSHIP OK");
+    MESSAGE("OUTPUT LAYER TYPE RELATIONSHIP OK");
 
     // Network - Output layer relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -648,7 +647,7 @@
                                                LabelsAndEventClasses::CHILD_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("NETWORK - OUTPUT LAYER CHILD RELATIONSHIP OK");
+    MESSAGE("NETWORK - OUTPUT LAYER CHILD RELATIONSHIP OK");
 
     // Normalize layer - Output layer relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -658,7 +657,7 @@
                                                LabelsAndEventClasses::CONNECTION_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("NORMALIZE LAYER - OUTPUT LAYER CONNECTION OK");
+    MESSAGE("NORMALIZE LAYER - OUTPUT LAYER CONNECTION OK");
 
     bufferManager.MarkRead(readableBuffer);
 
@@ -680,33 +679,33 @@
 
     // Get readable buffer for input workload
     auto  inputReadableBuffer = bufferManager.GetReadableBuffer();
-    BOOST_CHECK(inputReadableBuffer != nullptr);
+    CHECK(inputReadableBuffer != nullptr);
 
     // Get readable buffer for output workload
     auto outputReadableBuffer = bufferManager.GetReadableBuffer();
-    BOOST_CHECK(outputReadableBuffer != nullptr);
+    CHECK(outputReadableBuffer != nullptr);
 
     // Get readable buffer for inference timeline
     auto inferenceReadableBuffer = bufferManager.GetReadableBuffer();
-    BOOST_CHECK(inferenceReadableBuffer != nullptr);
+    CHECK(inferenceReadableBuffer != nullptr);
 
     // Validate input workload data
     size = inputReadableBuffer->GetSize();
-    BOOST_CHECK(size == 164);
+    CHECK(size == 164);
 
     readableData = inputReadableBuffer->GetReadableData();
-    BOOST_CHECK(readableData != nullptr);
+    CHECK(readableData != nullptr);
 
     offset = 0;
 
     // Verify Header
     VerifyTimelineHeaderBinary(readableData, offset, 156);
-    BOOST_TEST_MESSAGE("INPUT WORKLOAD HEADER OK");
+    MESSAGE("INPUT WORKLOAD HEADER OK");
 
     // Input workload
     // Input workload entity
     ProfilingGuid inputWorkloadGuid = VerifyTimelineEntityBinaryPacketData(EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("INPUT WORKLOAD ENTITY OK");
+    MESSAGE("INPUT WORKLOAD ENTITY OK");
 
     // Entity - Type relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -716,12 +715,12 @@
                                                LabelsAndEventClasses::TYPE_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INPUT WORKLOAD TYPE RELATIONSHIP OK");
+    MESSAGE("INPUT WORKLOAD TYPE RELATIONSHIP OK");
 
     // BackendId entity
     ProfilingGuid CpuRefLabelGuid = VerifyTimelineLabelBinaryPacketData(
         EmptyOptional(), "CpuRef", readableData, offset);
-    BOOST_TEST_MESSAGE("CPUREF LABEL OK (INPUT WORKLOAD)");
+    MESSAGE("CPUREF LABEL OK (INPUT WORKLOAD)");
 
     // Entity - BackendId relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -731,7 +730,7 @@
                                                LabelsAndEventClasses::BACKENDID_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INPUT WORKLOAD BACKEND ID RELATIONSHIP OK");
+    MESSAGE("INPUT WORKLOAD BACKEND ID RELATIONSHIP OK");
 
     // Input layer - Input workload relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -741,27 +740,27 @@
                                                LabelsAndEventClasses::CHILD_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INPUT LAYER - INPUT WORKLOAD CHILD RELATIONSHIP OK");
+    MESSAGE("INPUT LAYER - INPUT WORKLOAD CHILD RELATIONSHIP OK");
 
     bufferManager.MarkRead(inputReadableBuffer);
 
     // Validate output workload data
     size = outputReadableBuffer->GetSize();
-    BOOST_CHECK(size == 164);
+    CHECK(size == 164);
 
     readableData = outputReadableBuffer->GetReadableData();
-    BOOST_CHECK(readableData != nullptr);
+    CHECK(readableData != nullptr);
 
     offset = 0;
 
     // Verify Header
     VerifyTimelineHeaderBinary(readableData, offset, 156);
-    BOOST_TEST_MESSAGE("OUTPUT WORKLOAD HEADER OK");
+    MESSAGE("OUTPUT WORKLOAD HEADER OK");
 
     // Output workload
     // Output workload entity
     ProfilingGuid outputWorkloadGuid = VerifyTimelineEntityBinaryPacketData(EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("OUTPUT WORKLOAD ENTITY OK");
+    MESSAGE("OUTPUT WORKLOAD ENTITY OK");
 
     // Entity - Type relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -771,11 +770,11 @@
                                                LabelsAndEventClasses::TYPE_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("OUTPUT WORKLOAD TYPE RELATIONSHIP OK");
+    MESSAGE("OUTPUT WORKLOAD TYPE RELATIONSHIP OK");
 
     // BackendId entity
     VerifyTimelineLabelBinaryPacketData(EmptyOptional(), "CpuRef", readableData, offset);
-    BOOST_TEST_MESSAGE("OUTPUT WORKLOAD CPU REF LABEL OK");
+    MESSAGE("OUTPUT WORKLOAD CPU REF LABEL OK");
 
     // Entity - BackendId relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -785,7 +784,7 @@
                                                LabelsAndEventClasses::BACKENDID_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("OUTPUT WORKLOAD BACKEND ID RELATIONSHIP OK");
+    MESSAGE("OUTPUT WORKLOAD BACKEND ID RELATIONSHIP OK");
 
     // Output layer - Output workload relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -795,27 +794,27 @@
                                                LabelsAndEventClasses::CHILD_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("OUTPUT LAYER - OUTPUT WORKLOAD CHILD RELATIONSHIP OK");
+    MESSAGE("OUTPUT LAYER - OUTPUT WORKLOAD CHILD RELATIONSHIP OK");
 
     bufferManager.MarkRead(outputReadableBuffer);
 
     // Validate inference data
     size = inferenceReadableBuffer->GetSize();
-    BOOST_CHECK(size == 976 + 8 * ThreadIdSize);
+    CHECK(size == 976 + 8 * ThreadIdSize);
 
     readableData = inferenceReadableBuffer->GetReadableData();
-    BOOST_CHECK(readableData != nullptr);
+    CHECK(readableData != nullptr);
 
     offset = 0;
 
     // Verify Header
     VerifyTimelineHeaderBinary(readableData, offset, 968 + 8 * ThreadIdSize);
-    BOOST_TEST_MESSAGE("INFERENCE HEADER OK");
+    MESSAGE("INFERENCE HEADER OK");
 
     // Inference timeline trace
     // Inference entity
     ProfilingGuid inferenceGuid = VerifyTimelineEntityBinaryPacketData(EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("INFERENCE ENTITY OK");
+    MESSAGE("INFERENCE ENTITY OK");
 
     // Entity - Type relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -825,7 +824,7 @@
                                                LabelsAndEventClasses::TYPE_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INFERENCE TYPE RELATIONSHIP OK");
+    MESSAGE("INFERENCE TYPE RELATIONSHIP OK");
 
     // Network - Inference relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -835,13 +834,13 @@
                                                LabelsAndEventClasses::EXECUTION_OF_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("NETWORK - INFERENCE EXECUTION_OF RELATIONSHIP OK");
+    MESSAGE("NETWORK - INFERENCE EXECUTION_OF RELATIONSHIP OK");
 
     // Start Inference life
     // Event packet - timeline, threadId, eventGuid
     ProfilingGuid inferenceEventGuid = VerifyTimelineEventBinaryPacket(
         EmptyOptional(), EmptyOptional(), EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("INFERENCE START OF LIFE EVENT OK");
+    MESSAGE("INFERENCE START OF LIFE EVENT OK");
 
     // Inference - event relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
@@ -851,14 +850,14 @@
                                                LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INFERENCE START OF LIFE RELATIONSHIP OK");
+    MESSAGE("INFERENCE START OF LIFE RELATIONSHIP OK");
 
     // Execution
     // Input workload execution
     // Input workload execution entity
     ProfilingGuid inputWorkloadExecutionGuid = VerifyTimelineEntityBinaryPacketData(
         EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("INPUT WORKLOAD EXECUTION ENTITY OK");
+    MESSAGE("INPUT WORKLOAD EXECUTION ENTITY OK");
 
     // Entity - Type relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -868,7 +867,7 @@
                                                LabelsAndEventClasses::TYPE_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INPUT WORKLOAD EXECUTION TYPE RELATIONSHIP OK");
+    MESSAGE("INPUT WORKLOAD EXECUTION TYPE RELATIONSHIP OK");
 
     // Inference - Workload execution relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -878,7 +877,7 @@
                                                LabelsAndEventClasses::CHILD_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INFERENCE - INPUT WORKLOAD EXECUTION CHILD RELATIONSHIP OK");
+    MESSAGE("INFERENCE - INPUT WORKLOAD EXECUTION CHILD RELATIONSHIP OK");
 
     // Workload - Workload execution relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -888,7 +887,7 @@
                                                LabelsAndEventClasses::EXECUTION_OF_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INPUT WORKLOAD - INPUT WORKLOAD EXECUTION RELATIONSHIP OK");
+    MESSAGE("INPUT WORKLOAD - INPUT WORKLOAD EXECUTION RELATIONSHIP OK");
 
     // Start Input workload execution life
     // Event packet - timeline, threadId, eventGuid
@@ -903,7 +902,7 @@
                                                LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INPUT WORKLOAD EXECUTION - START OF LIFE EVENT RELATIONSHIP OK");
+    MESSAGE("INPUT WORKLOAD EXECUTION - START OF LIFE EVENT RELATIONSHIP OK");
 
     // End of Input workload execution life
     // Event packet - timeline, threadId, eventGuid
@@ -918,13 +917,13 @@
                                                LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INPUT WORKLOAD EXECUTION - END OF LIFE EVENT RELATIONSHIP OK");
+    MESSAGE("INPUT WORKLOAD EXECUTION - END OF LIFE EVENT RELATIONSHIP OK");
 
     // Normalize workload execution
     // Normalize workload execution entity
     ProfilingGuid normalizeWorkloadExecutionGuid = VerifyTimelineEntityBinaryPacketData(
         EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("NORMALIZE WORKLOAD EXECUTION ENTITY OK");
+    MESSAGE("NORMALIZE WORKLOAD EXECUTION ENTITY OK");
 
     // Entity - Type relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -934,7 +933,7 @@
                                                LabelsAndEventClasses::TYPE_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("NORMALIZE WORKLOAD EXECUTION TYPE RELATIONSHIP OK");
+    MESSAGE("NORMALIZE WORKLOAD EXECUTION TYPE RELATIONSHIP OK");
 
     // Inference - Workload execution relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -944,7 +943,7 @@
                                                LabelsAndEventClasses::CHILD_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INFERENCE - NORMALIZE WORKLOAD EXECUTION CHILD RELATIONSHIP OK");
+    MESSAGE("INFERENCE - NORMALIZE WORKLOAD EXECUTION CHILD RELATIONSHIP OK");
 
     // Workload - Workload execution relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -954,13 +953,13 @@
                                                LabelsAndEventClasses::EXECUTION_OF_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("NORMALIZATION WORKLOAD - NORMALIZATION WORKLOAD EXECUTION RELATIONSHIP OK");
+    MESSAGE("NORMALIZATION WORKLOAD - NORMALIZATION WORKLOAD EXECUTION RELATIONSHIP OK");
 
     // Start Normalize workload execution life
     // Event packet - timeline, threadId, eventGuid
     ProfilingGuid normalizationWorkloadExecutionSOLEventGuid = VerifyTimelineEventBinaryPacket(
         EmptyOptional(), EmptyOptional(), EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("NORMALIZATION WORKLOAD EXECUTION START OF LIFE EVENT OK");
+    MESSAGE("NORMALIZATION WORKLOAD EXECUTION START OF LIFE EVENT OK");
 
     // Normalize workload execution - event relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
@@ -970,13 +969,13 @@
                                                LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("NORMALIZATION WORKLOAD EXECUTION START OF LIFE RELATIONSHIP OK");
+    MESSAGE("NORMALIZATION WORKLOAD EXECUTION START OF LIFE RELATIONSHIP OK");
 
     // End of Normalize workload execution life
     // Event packet - timeline, threadId, eventGuid
     ProfilingGuid normalizationWorkloadExecutionEOLEventGuid = VerifyTimelineEventBinaryPacket(
         EmptyOptional(), EmptyOptional(), EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("NORMALIZATION WORKLOAD EXECUTION END OF LIFE EVENT OK");
+    MESSAGE("NORMALIZATION WORKLOAD EXECUTION END OF LIFE EVENT OK");
 
     // Normalize workload execution - event relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
@@ -986,13 +985,13 @@
                                                LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("NORMALIZATION WORKLOAD EXECUTION END OF LIFE RELATIONSHIP OK");
+    MESSAGE("NORMALIZATION WORKLOAD EXECUTION END OF LIFE RELATIONSHIP OK");
 
     // Output workload execution
     // Output workload execution entity
     ProfilingGuid outputWorkloadExecutionGuid = VerifyTimelineEntityBinaryPacketData(
         EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("OUTPUT WORKLOAD EXECUTION ENTITY OK");
+    MESSAGE("OUTPUT WORKLOAD EXECUTION ENTITY OK");
 
     // Entity - Type relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -1002,7 +1001,7 @@
                                                LabelsAndEventClasses::TYPE_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("OUTPUT WORKLOAD EXECUTION TYPE RELATIONSHIP OK");
+    MESSAGE("OUTPUT WORKLOAD EXECUTION TYPE RELATIONSHIP OK");
 
     // Inference - Workload execution relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -1012,7 +1011,7 @@
                                                LabelsAndEventClasses::CHILD_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INFERENCE - OUTPUT WORKLOAD EXECUTION CHILD RELATIONSHIP OK");
+    MESSAGE("INFERENCE - OUTPUT WORKLOAD EXECUTION CHILD RELATIONSHIP OK");
 
     // Workload - Workload execution relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -1022,13 +1021,13 @@
                                                LabelsAndEventClasses::EXECUTION_OF_GUID,
                                                readableData,
                                                offset);
-     BOOST_TEST_MESSAGE("OUTPUT WORKLOAD - OUTPUT WORKLOAD EXECUTION EXECUTION_OF RELATIONSHIP OK");
+    MESSAGE("OUTPUT WORKLOAD - OUTPUT WORKLOAD EXECUTION EXECUTION_OF RELATIONSHIP OK");
 
     // Start Output workload execution life
     // Event packet - timeline, threadId, eventGuid
     ProfilingGuid outputWorkloadExecutionSOLEventGuid = VerifyTimelineEventBinaryPacket(
         EmptyOptional(), EmptyOptional(), EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("OUTPUT WORKLOAD EXECUTION START OF LIFE EVENT OK");
+    MESSAGE("OUTPUT WORKLOAD EXECUTION START OF LIFE EVENT OK");
 
     // Output workload execution - event relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
@@ -1038,13 +1037,13 @@
                                                LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("OUTPUT WORKLOAD EXECUTION - START OF LIFE EVENT RELATIONSHIP OK");
+    MESSAGE("OUTPUT WORKLOAD EXECUTION - START OF LIFE EVENT RELATIONSHIP OK");
 
     // End of Normalize workload execution life
     // Event packet - timeline, threadId, eventGuid
     ProfilingGuid outputWorkloadExecutionEOLEventGuid = VerifyTimelineEventBinaryPacket(
         EmptyOptional(), EmptyOptional(), EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("OUTPUT WORKLOAD EXECUTION END OF LIFE EVENT OK");
+    MESSAGE("OUTPUT WORKLOAD EXECUTION END OF LIFE EVENT OK");
 
     // Output workload execution - event relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
@@ -1054,13 +1053,13 @@
                                                LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("OUTPUT WORKLOAD EXECUTION - END OF LIFE EVENT RELATIONSHIP OK");
+    MESSAGE("OUTPUT WORKLOAD EXECUTION - END OF LIFE EVENT RELATIONSHIP OK");
 
     // End of Inference life
     // Event packet - timeline, threadId, eventGuid
     ProfilingGuid inferenceEOLEventGuid = VerifyTimelineEventBinaryPacket(
         EmptyOptional(), EmptyOptional(), EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("INFERENCE END OF LIFE EVENT OK");
+    MESSAGE("INFERENCE END OF LIFE EVENT OK");
 
     // Inference - event relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
@@ -1070,14 +1069,14 @@
                                                LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INFERENCE - END OF LIFE EVENT RELATIONSHIP OK");
+    MESSAGE("INFERENCE - END OF LIFE EVENT RELATIONSHIP OK");
 
     bufferManager.MarkRead(inferenceReadableBuffer);
 }
 
-BOOST_AUTO_TEST_CASE(ProfilingPostOptimisationStructureCpuRef)
+TEST_CASE("ProfilingPostOptimisationStructureCpuRef")
 {
     VerifyPostOptimisationStructureTestImpl(armnn::Compute::CpuRef);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
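
On the fixture conversion in RuntimeTests.cpp: doctest's TEST_CASE_FIXTURE constructs the named fixture freshly for each test case and runs the test body as a member function, whereas BOOST_GLOBAL_FIXTURE installed its fixture once for the whole test run. A minimal sketch of the doctest mechanism, compiled into a binary that already provides the doctest main (ExampleFixture is hypothetical):

    #include <doctest/doctest.h>

    // Hypothetical fixture: its constructor and destructor bracket every test case that names it.
    struct ExampleFixture
    {
        ExampleFixture() : m_Counter(0) {} // per-test-case set-up
        int m_Counter;
    };

    TEST_CASE_FIXTURE(ExampleFixture, "FixtureMembersAreVisibleInTheTestBody")
    {
        // The test body behaves like a member function, so fixture members are in scope.
        m_Counter++;
        CHECK(m_Counter == 1);
    }

    TEST_CASE_FIXTURE(ExampleFixture, "EachTestCaseGetsAFreshFixture")
    {
        // A new ExampleFixture is constructed for this test case, so the counter starts at zero again.
        CHECK(m_Counter == 0);
    }
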
diff --git a/src/armnn/test/ShapeInferenceTests.cpp b/src/armnn/test/ShapeInferenceTests.cpp
index fa3f400..8abcfd7 100644
--- a/src/armnn/test/ShapeInferenceTests.cpp
+++ b/src/armnn/test/ShapeInferenceTests.cpp
@@ -3,7 +3,7 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <armnn/Tensor.hpp>
 #include <Graph.hpp>
@@ -14,7 +14,8 @@
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(ShapeInferenceTests)
+TEST_SUITE("ShapeInferenceTests")
+{
 using namespace armnn;
 namespace
 {
@@ -72,7 +73,7 @@
 
         for (unsigned int i = 0; i < outputSize; ++i)
         {
-            BOOST_CHECK(layer->GetOutputSlot(i).GetTensorInfo().GetShape() == expectedOutputShapes[i]);
+            CHECK(layer->GetOutputSlot(i).GetTensorInfo().GetShape() == expectedOutputShapes[i]);
         }
     };
 
@@ -84,14 +85,14 @@
 
     layer->SetShapeInferenceMethod(ShapeInferenceMethod::ValidateOnly);
 
-    BOOST_CHECK_THROW(layer->ValidateTensorShapesFromInputs(), LayerValidationException);
+    CHECK_THROWS_AS(layer->ValidateTensorShapesFromInputs(), LayerValidationException);
 
     layer->SetShapeInferenceMethod(ShapeInferenceMethod::InferAndValidate);
     layer->ValidateTensorShapesFromInputs();
 
     for (unsigned int i = 0; i < outputSize; ++i)
     {
-        BOOST_CHECK(layer->GetOutputSlot(i).GetTensorInfo().GetShape() == expectedOutputShapes[i]);
+        CHECK(layer->GetOutputSlot(i).GetTensorInfo().GetShape() == expectedOutputShapes[i]);
     }
 
     // Test inference with Dimensionality::Specified and various combinations of dimensions of unknown size
@@ -116,7 +117,7 @@
     RunShapeInferenceTest<LayerT>(layer, dimensionSizeLists);
 }
 
-BOOST_AUTO_TEST_CASE(NetworkOptionsTest)
+TEST_CASE("NetworkOptionsTest")
 {
      BackendOptions ShapeInferenceMethodOption("ShapeInferenceMethod",
      {
@@ -136,9 +137,9 @@
     inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
     activationLayer->GetOutputSlot(0).SetTensorInfo({TensorShape{Dimensionality::NotSpecified}, DataType::Float32});
 
-    BOOST_CHECK_NO_THROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
+    CHECK_NOTHROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
 
-    BOOST_CHECK(activationLayer->GetOutputSlot(0).GetTensorInfo() == tensorInfo);
+    CHECK(activationLayer->GetOutputSlot(0).GetTensorInfo() == tensorInfo);
 
 
     ShapeInferenceMethodOption = BackendOptions("ShapeInferenceMethod",
@@ -156,7 +157,7 @@
     inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
     activationLayer->GetOutputSlot(0).SetTensorInfo({TensorShape{Dimensionality::NotSpecified}, DataType::Float32});
 
-    BOOST_CHECK_NO_THROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
+    CHECK_NOTHROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
 
     network = INetwork::Create();
 
@@ -168,22 +169,22 @@
     inputLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
     activationLayer->GetOutputSlot(0).SetTensorInfo({TensorShape{Dimensionality::NotSpecified}, DataType::Float32});
 
-    BOOST_CHECK_NO_THROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
+    CHECK_NOTHROW(activationLayer->GetOutputSlot(0).IsTensorInfoSet());
 }
 
-BOOST_AUTO_TEST_CASE(AbsTest)
+TEST_CASE("AbsTest")
 {
     ActivationDescriptor descriptor;
     descriptor.m_Function = ActivationFunction::Abs;
     CreateGraphAndRunTest<ActivationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, descriptor, "activation");
 }
 
-BOOST_AUTO_TEST_CASE(AdditionTest)
+TEST_CASE("AdditionTest")
 {
     CreateGraphAndRunTest<AdditionLayer>({{ 5, 7, 6, 2 }, { 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "add");
 }
 
-BOOST_AUTO_TEST_CASE(ArgMinMaxTest)
+TEST_CASE("ArgMinMaxTest")
 {
     armnn::ArgMinMaxDescriptor descriptor;
     descriptor.m_Function = ArgMinMaxFunction::Min;
@@ -192,13 +193,13 @@
     CreateGraphAndRunTest<ArgMinMaxLayer>({{ 1, 3, 2, 4 }}, {{ 1, 2, 4 }}, descriptor, "argMinMax");
 }
 
-BOOST_AUTO_TEST_CASE(BatchNormalizationTest)
+TEST_CASE("BatchNormalizationTest")
 {
     BatchNormalizationDescriptor descriptor;
     CreateGraphAndRunTest<BatchNormalizationLayer>({{ 1, 2, 3, 2 }}, {{ 1, 2, 3, 2 }}, descriptor, "batchNorm");
 }
 
-BOOST_AUTO_TEST_CASE(BatchToSpaceNdTest)
+TEST_CASE("BatchToSpaceNdTest")
 {
     BatchToSpaceNdDescriptor descriptor;
 
@@ -212,7 +213,7 @@
     CreateGraphAndRunTest<BatchToSpaceNdLayer>({{ 4, 2, 2, 1 }}, {{ 1, 4, 4, 1 }}, descriptor, "batchtospacend");
 }
 
-BOOST_AUTO_TEST_CASE(ComparisionTest)
+TEST_CASE("ComparisionTest")
 {
     ComparisonDescriptor descriptor;
     descriptor.m_Operation = ComparisonOperation::Equal;
@@ -222,7 +223,7 @@
                                            "comparision");
 }
 
-BOOST_AUTO_TEST_CASE(ConcatTest)
+TEST_CASE("ConcatTest")
 {
     ConcatDescriptor descriptor(2, 3);
 
@@ -232,7 +233,7 @@
     CreateGraphAndRunTest<ConcatLayer>({{ 1, 2, 1 }, { 1, 2, 1 }}, {{ 2, 2, 1 }}, descriptor, "concat");
 }
 
-BOOST_AUTO_TEST_CASE(ConstantTesst)
+TEST_CASE("ConstantTesst")
 {
     Graph graph;
     TensorShape outputShape{ 1, 1, 3, 3 };
@@ -246,31 +247,31 @@
 
     layer->ValidateTensorShapesFromInputs();
 
-    BOOST_CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == outputShape);
+    CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == outputShape);
 }
 
-BOOST_AUTO_TEST_CASE(ConvertBf16ToFp32Test)
+TEST_CASE("ConvertBf16ToFp32Test")
 {
     CreateGraphAndRunTest<ConvertBf16ToFp32Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
 }
 
-BOOST_AUTO_TEST_CASE(ConvertFp16ToBf16Test)
+TEST_CASE("ConvertFp16ToBf16Test")
 {
     const TensorShape tensorShape{5, 7, 6, 2};
     CreateGraphAndRunTest<ConvertFp32ToBf16Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
 }
 
-BOOST_AUTO_TEST_CASE(ConvertFp16ToFp32Test)
+TEST_CASE("ConvertFp16ToFp32Test")
 {
     CreateGraphAndRunTest<ConvertFp16ToFp32Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
 }
 
-BOOST_AUTO_TEST_CASE(ConvertFp32ToFp16Test)
+TEST_CASE("ConvertFp32ToFp16Test")
 {
     CreateGraphAndRunTest<ConvertFp32ToFp16Layer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
 }
 
-BOOST_AUTO_TEST_CASE(Convolution2dTest)
+TEST_CASE("Convolution2dTest")
 {
     const TensorShape inputShape{1, 1, 10, 10};
 
@@ -299,13 +300,13 @@
     RunShapeInferenceTest<Convolution2dLayer>(layer, {{ 1, 1, 4, 4 }});
 }
 
-BOOST_AUTO_TEST_CASE(DebugLayerTest)
+TEST_CASE("DebugLayerTest")
 {
     const TensorShape tensorShape;
     CreateGraphAndRunTest<DebugLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "debug");
 }
 
-BOOST_AUTO_TEST_CASE(DepthToSpaceTest)
+TEST_CASE("DepthToSpaceTest")
 {
     DepthToSpaceDescriptor descriptor;
 
@@ -315,7 +316,7 @@
     CreateGraphAndRunTest<DepthToSpaceLayer>({{ 1, 1, 1, 8}}, {{ 1, 2, 2, 2 }}, descriptor, "depthtospace");
 }
 
-BOOST_AUTO_TEST_CASE(DepthwiseConvolutionTest)
+TEST_CASE("DepthwiseConvolutionTest")
 {
     DepthwiseConvolution2dDescriptor descriptor;
 
@@ -344,13 +345,13 @@
     RunShapeInferenceTest<DepthwiseConvolution2dLayer>(layer, {{ 8, 18, 1, 2 }});
 }
 
-BOOST_AUTO_TEST_CASE(DequantizeTest)
+TEST_CASE("DequantizeTest")
 {
     const TensorShape tensorShape{5, 7, 6, 2};
     CreateGraphAndRunTest<DequantizeLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "dequantize");
 }
 
-BOOST_AUTO_TEST_CASE(DetectionPostProcessTest)
+TEST_CASE("DetectionPostProcessTest")
 {
     const TensorShape detectionBoxesInfo{ 1, 3, 4 };
     const TensorShape detectionScoresInfo{ 1, 3, 4 };
@@ -384,7 +385,7 @@
     RunShapeInferenceTest<DetectionPostProcessLayer>(layer, {{ 1, 3, 4 }, { 1, 3  }, { 1, 3 }, { 1 }});
 }
 
-BOOST_AUTO_TEST_CASE(FakeQuantizationTest)
+TEST_CASE("FakeQuantizationTest")
 {
     FakeQuantizationDescriptor descriptor;
     descriptor.m_Max = 1;
@@ -392,13 +393,13 @@
     CreateGraphAndRunTest<FakeQuantizationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, descriptor, "fakequantization");
 }
 
-BOOST_AUTO_TEST_CASE(FloorTest)
+TEST_CASE("FloorTest")
 {
     const TensorShape tensorShape{5, 7, 6, 2};
     CreateGraphAndRunTest<FloorLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "floor");
 }
 
-BOOST_AUTO_TEST_CASE(FullyConnectedTest)
+TEST_CASE("FullyConnectedTest")
 {
     Graph graph;
 
@@ -420,12 +421,12 @@
     RunShapeInferenceTest<FullyConnectedLayer>(layer, {{ 1, outputChannels }});
 }
 
-BOOST_AUTO_TEST_CASE(GatherTest)
+TEST_CASE("GatherTest")
 {
     CreateGraphAndRunTest<GatherLayer>({{ 7, 6, 2}, {2,3}}, {{ 2, 3, 6, 2 }}, GatherDescriptor(), "gather");
 }
 
-BOOST_AUTO_TEST_CASE(InstanceNormalizationTest)
+TEST_CASE("InstanceNormalizationTest")
 {
     const TensorShape tensorShape{5, 7, 6, 2};
 
@@ -434,7 +435,7 @@
                                                       "instancenorm");
 }
 
-BOOST_AUTO_TEST_CASE(L2NormalizationTest)
+TEST_CASE("L2NormalizationTest")
 {
     const TensorShape tensorShape{5, 7, 6, 2};
 
@@ -443,14 +444,14 @@
                                                 "l2norm");
 }
 
-BOOST_AUTO_TEST_CASE(LogSoftMaxTest)
+TEST_CASE("LogSoftMaxTest")
 {
     const TensorShape tensorShape{5, 7, 6, 2};
 
     CreateGraphAndRunTest<LogSoftmaxLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, LogSoftmaxDescriptor(), "logsoftmax");
 }
 
-BOOST_AUTO_TEST_CASE(LstmTest)
+TEST_CASE("LstmTest")
 {
     const TensorShape inputShape{2, 5};
     const TensorShape inputCellState{2, 20};
@@ -485,7 +486,7 @@
     RunShapeInferenceTest<LstmLayer>(layer, {{2, 80}, {2, 20}, {2, 20}, {2, 20}});
 }
 
-BOOST_AUTO_TEST_CASE(MeanLayerTest)
+TEST_CASE("MeanLayerTest")
 {
     MeanDescriptor descriptor;
     descriptor.m_Axis = {0};
@@ -493,30 +494,30 @@
     CreateGraphAndRunTest<MeanLayer>({{ 5, 7, 6, 2 }}, {{ 7, 6, 2 }}, descriptor, "mean");
 }
 
-BOOST_AUTO_TEST_CASE(MemCopyTest)
+TEST_CASE("MemCopyTest")
 {
     CreateGraphAndRunTest<MemCopyLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "memcopy");
 }
 
-BOOST_AUTO_TEST_CASE(MemImportTest)
+TEST_CASE("MemImportTest")
 {
     CreateGraphAndRunTest<MemImportLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "memomport");
 }
 
-BOOST_AUTO_TEST_CASE(MergeTest)
+TEST_CASE("MergeTest")
 {
     const TensorShape tensorShape{ 5, 7, 6, 2 };
     CreateGraphAndRunTest<MergeLayer>({ { 5, 7, 6, 2 }, { 5, 7, 6, 2 } }, {{ 5, 7, 6, 2 }}, "merge");
 }
 
-BOOST_AUTO_TEST_CASE(NormalizationTest)
+TEST_CASE("NormalizationTest")
 {
     const TensorShape tensorShape{5, 7, 6, 2};
 
     CreateGraphAndRunTest<NormalizationLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, NormalizationDescriptor(), "l2norm");
 }
 
-BOOST_AUTO_TEST_CASE(PermuteTest)
+TEST_CASE("PermuteTest")
 {
     PermuteDescriptor descriptor;
     descriptor.m_DimMappings = {0U, 2U, 3U, 1U};
@@ -524,7 +525,7 @@
     CreateGraphAndRunTest<PermuteLayer>({{ 1, 2, 2, 3 }}, {{ 1, 3, 2, 2 }}, descriptor, "permute");
 }
 
-BOOST_AUTO_TEST_CASE(Pooling2dTest)
+TEST_CASE("Pooling2dTest")
 {
     armnn::Pooling2dDescriptor descriptor;
     descriptor.m_PoolType = armnn::PoolingAlgorithm::Max;
@@ -539,7 +540,7 @@
     CreateGraphAndRunTest<Pooling2dLayer>({{ 1, 2, 8, 13 }}, {{ 1, 2, 2, 8 }}, descriptor, "pooling2d");
 }
 
-BOOST_AUTO_TEST_CASE(QLstmTest)
+TEST_CASE("QLstmTest")
 {
     const TensorShape inputShape{2, 5};
     const TensorShape inputCellState{2, 20};
@@ -573,7 +574,7 @@
     RunShapeInferenceTest<QLstmLayer>(layer, {{2, 20}, {2, 20}, {2, 20}});
 }
 
-BOOST_AUTO_TEST_CASE(QuantizedLstmTest)
+TEST_CASE("QuantizedLstmTest")
 {
     const TensorShape inputShape{2, 5};
     const TensorShape inputCellState{2, 20};
@@ -601,13 +602,13 @@
     RunShapeInferenceTest<QuantizedLstmLayer>(layer, {{2, 20}, {2, 20}, {2, 20}});
 }
 
-BOOST_AUTO_TEST_CASE(QuantizeTest)
+TEST_CASE("QuantizeTest")
 {
     const TensorShape tensorShape { 5, 4, 7, 6 };
     CreateGraphAndRunTest<QuantizeLayer>({{ 5, 7, 6, 2 }}, {{ 5, 7, 6, 2 }}, "mean");
 }
 
-BOOST_AUTO_TEST_CASE(RankTest)
+TEST_CASE("RankTest")
 {
    // due to rank having a scalar output we need a custom test
    const TensorShape expectedOutputs(Dimensionality::Scalar);
@@ -617,23 +618,23 @@
 
    layer->GetOutputSlot(0).SetTensorInfo({TensorShape(Dimensionality::NotSpecified), DataType::Float32});
 
-   BOOST_CHECK_THROW(
+   CHECK_THROWS_AS(
            layer->ValidateTensorShapesFromInputs(), LayerValidationException);
 
    layer->SetShapeInferenceMethod(ShapeInferenceMethod::InferAndValidate);
 
     layer->ValidateTensorShapesFromInputs();
 
-   BOOST_CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == expectedOutputs);
+   CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == expectedOutputs);
 
    layer->GetOutputSlot(0).SetTensorInfo({TensorShape(Dimensionality::Scalar), DataType::Float32});
 
     layer->ValidateTensorShapesFromInputs();
 
-   BOOST_CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == expectedOutputs);
+   CHECK(layer->GetOutputSlot(0).GetTensorInfo().GetShape() == expectedOutputs);
 }
 
-BOOST_AUTO_TEST_CASE(ReshapeTest)
+TEST_CASE("ReshapeTest")
 {
     ReshapeDescriptor descriptor;
 
@@ -642,7 +643,7 @@
     CreateGraphAndRunTest<ReshapeLayer>({{ 2, 2, 2, 2 }}, {{ 1, 1, 1, 8 }}, descriptor, "reshape");
 }
 
-BOOST_AUTO_TEST_CASE(ResizeTest)
+TEST_CASE("ResizeTest")
 {
     ResizeDescriptor descriptor;
 
@@ -652,7 +653,7 @@
     CreateGraphAndRunTest<ResizeLayer>({{ 1, 7, 6, 2 }}, {{ 1, 7, 6, 2 }}, descriptor, "resize");
 }
 
-BOOST_AUTO_TEST_CASE(SliceTest)
+TEST_CASE("SliceTest")
 {
     SliceDescriptor descriptor;
     descriptor.m_Begin  = { 1, 0, 1, 2 };
@@ -661,7 +662,7 @@
     CreateGraphAndRunTest<SliceLayer>({{ 3, 2, 3, 5 }}, {{ 2, 1, 2, 3 }}, descriptor, "mean");
 }
 
-BOOST_AUTO_TEST_CASE(SpaceToBatchNdTest)
+TEST_CASE("SpaceToBatchNdTest")
 {
     SpaceToBatchNdDescriptor descriptor;
 
@@ -675,7 +676,7 @@
     CreateGraphAndRunTest<SpaceToBatchNdLayer>({{ 1, 4, 4, 1 }}, {{ 4, 2, 2, 1 }}, descriptor, "spacetobatchnd");
 }
 
-BOOST_AUTO_TEST_CASE(SpaceToDepth)
+TEST_CASE("SpaceToDepth")
 {
     SpaceToDepthDescriptor descriptor;
 
@@ -685,7 +686,7 @@
     CreateGraphAndRunTest<SpaceToDepthLayer>({{ 1, 2, 2, 2 }}, {{ 1, 1, 1, 8}}, descriptor, "spacetodepth");
 }
 
-BOOST_AUTO_TEST_CASE(SplitterTest)
+TEST_CASE("SplitterTest")
 {
     SplitterDescriptor descriptor(2, 3);
 
@@ -700,7 +701,7 @@
     CreateGraphAndRunTest<SplitterLayer>({{ 2, 2, 2 }}, {{ 1, 2, 2 }, { 1, 2, 2 }}, descriptor, "splitter");
 }
 
-BOOST_AUTO_TEST_CASE(StackTest)
+TEST_CASE("StackTest")
 {
     StackDescriptor descriptor;
 
@@ -711,7 +712,7 @@
     CreateGraphAndRunTest<StackLayer>({{ 3, 2, 3 }, { 3, 2, 3 }}, {{ 2, 3, 2, 3 }}, descriptor, "stack");
 }
 
-BOOST_AUTO_TEST_CASE(StridedSliceTest)
+TEST_CASE("StridedSliceTest")
 {
     StridedSliceDescriptor descriptor;
 
@@ -722,12 +723,12 @@
     CreateGraphAndRunTest<StridedSliceLayer>({{ 3, 2, 3, 1 }}, {{ 2, 1, 2, 1 }}, descriptor, "stridedslice");
 }
 
-BOOST_AUTO_TEST_CASE(Switchtest)
+TEST_CASE("Switchtest")
 {
     CreateGraphAndRunTest<SwitchLayer>({{ 3, 2, 3, 1 }, { 3, 2, 3, 1 }}, {{ 3, 2, 3, 1 }, { 3, 2, 3, 1 }}, "switch");
 }
 
-BOOST_AUTO_TEST_CASE(TransposeConvolution2dTest)
+TEST_CASE("TransposeConvolution2dTest")
 {
     StridedSliceDescriptor descriptor;
 
@@ -738,7 +739,7 @@
     CreateGraphAndRunTest<StridedSliceLayer>({{ 3, 2, 3, 1 }}, {{ 2, 1, 2, 1 }}, descriptor, "t");
 }
 
-BOOST_AUTO_TEST_CASE(TransposeTest)
+TEST_CASE("TransposeTest")
 {
     armnn::TransposeDescriptor descriptor;
     descriptor.m_DimMappings = {0U, 3U, 1U, 2U};
@@ -746,5 +747,5 @@
     CreateGraphAndRunTest<TransposeLayer>({{ 1, 2, 2, 3 }}, {{ 1, 3, 2, 2 }}, descriptor, "stridedslice");
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
 }
\ No newline at end of file
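
For reference, the conversion pattern applied in this file (and across the rest of the patch) maps the Boost.Test macros onto doctest roughly as in the self-contained sketch below. It is illustrative only; the suite, case and exception used here are hypothetical examples, not ArmNN code.

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>

#include <stdexcept>

TEST_SUITE("ExampleSuite")                 // was BOOST_AUTO_TEST_SUITE(ExampleSuite)
{

TEST_CASE("ExampleCase")                   // was BOOST_AUTO_TEST_CASE(ExampleCase)
{
    CHECK(1 + 1 == 2);                     // was BOOST_TEST / BOOST_CHECK
    CHECK_EQ(2 + 2, 4);                    // was BOOST_CHECK_EQUAL
    CHECK_NOTHROW([]{}());                 // was BOOST_CHECK_NO_THROW
    CHECK_THROWS_AS(throw std::runtime_error("boom"),
                    std::runtime_error);   // was BOOST_CHECK_THROW
}

}                                          // was BOOST_AUTO_TEST_SUITE_END()
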
diff --git a/src/armnn/test/SubgraphViewTests.cpp b/src/armnn/test/SubgraphViewTests.cpp
index ecb876d..d270787 100644
--- a/src/armnn/test/SubgraphViewTests.cpp
+++ b/src/armnn/test/SubgraphViewTests.cpp
@@ -2,7 +2,8 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
-#include <boost/test/unit_test.hpp>
+
+#include <doctest/doctest.h>
 
 #include <Graph.hpp>
 #include <SubgraphView.hpp>
@@ -89,24 +90,21 @@
 template <typename T>
 void CompareVectors(const std::vector<T>& result, const std::vector<T>& expected)
 {
-    BOOST_CHECK_EQUAL_COLLECTIONS(result.begin(), result.end(), expected.begin(), expected.end());
+    CHECK(std::equal(result.begin(), result.end(), expected.begin(), expected.end()));
 }
 
 void CompareSubgraphViews(SubgraphViewSelector::SubgraphViewPtr& result,
                           SubgraphViewSelector::SubgraphViewPtr& expected)
 {
     // expect both to be valid subgraphs
-    BOOST_TEST((result.get() != nullptr));
-    BOOST_TEST((expected.get() != nullptr));
+    CHECK((result.get() != nullptr));
+    CHECK((expected.get() != nullptr));
 
     if (result.get() != nullptr && expected.get() != nullptr)
     {
-        // try to detect all other obvious errors too, mainly because here
-        // we can get a nicer error message from boost, the collection test
-        // also report error for these
-        BOOST_TEST(result->GetInputSlots().size() == expected->GetInputSlots().size());
-        BOOST_TEST(result->GetOutputSlots().size() == expected->GetOutputSlots().size());
-        BOOST_TEST(result->GetLayers().size() == expected->GetLayers().size());
+        CHECK(result->GetInputSlots().size() == expected->GetInputSlots().size());
+        CHECK(result->GetOutputSlots().size() == expected->GetOutputSlots().size());
+        CHECK(result->GetLayers().size() == expected->GetLayers().size());
 
         auto resultLayers = ToSortedArray<Layer *>(result->GetLayers().begin(),
                                                    result->GetLayers().end());
@@ -130,9 +128,9 @@
 
 } // namespace <anonymous>
 
-BOOST_AUTO_TEST_SUITE(SubgraphSubstitution)
-
-BOOST_AUTO_TEST_CASE(SingleInputSingleOutput)
+TEST_SUITE("SubgraphSubstitution")
+{
+TEST_CASE("SingleInputSingleOutput")
 {
     // Construct graph
     Graph graph;
@@ -166,11 +164,11 @@
     graph.SubstituteSubgraph(*subgraph, preCompiledLayer);
 
     // Check that connections are correct after substitution
-    BOOST_CHECK_EQUAL(preCompiledLayer->GetInputSlot(0).GetConnection(), subgraphInputConn);
-    BOOST_CHECK_EQUAL(preCompiledLayer->GetOutputSlot(0).GetConnection(0), subgraphOutputConn);
+    CHECK_EQ(preCompiledLayer->GetInputSlot(0).GetConnection(), subgraphInputConn);
+    CHECK_EQ(preCompiledLayer->GetOutputSlot(0).GetConnection(0), subgraphOutputConn);
 }
 
-BOOST_AUTO_TEST_CASE(SingleInputSingleOutputSubstituteGraph)
+TEST_CASE("SingleInputSingleOutputSubstituteGraph")
 {
     // Construct graph
     Graph graph;
@@ -209,11 +207,11 @@
     graph.SubstituteSubgraph(*subgraph, *substituteSubgraph);
 
     // Check that connections are correct after substitution
-    BOOST_CHECK_EQUAL(preCompiledLayer->GetInputSlot(0).GetConnection(), subgraphInputConn);
-    BOOST_CHECK_EQUAL(preCompiledLayer->GetOutputSlot(0).GetConnection(0), subgraphOutputConn);
+    CHECK_EQ(preCompiledLayer->GetInputSlot(0).GetConnection(), subgraphInputConn);
+    CHECK_EQ(preCompiledLayer->GetOutputSlot(0).GetConnection(0), subgraphOutputConn);
 }
 
-BOOST_AUTO_TEST_CASE(MultiInputSingleOutput)
+TEST_CASE("MultiInputSingleOutput")
 {
     // Construct graph
     Graph graph;
@@ -258,13 +256,13 @@
     graph.SubstituteSubgraph(*subgraph, preCompiledLayer);
 
     // Check that connections are correct after substitution
-    BOOST_CHECK_EQUAL(preCompiledLayer->GetInputSlot(0).GetConnection(), subgraphInputConn1);
-    BOOST_CHECK_EQUAL(preCompiledLayer->GetInputSlot(1).GetConnection(), subgraphInputConn2);
+    CHECK_EQ(preCompiledLayer->GetInputSlot(0).GetConnection(), subgraphInputConn1);
+    CHECK_EQ(preCompiledLayer->GetInputSlot(1).GetConnection(), subgraphInputConn2);
 
-    BOOST_CHECK_EQUAL(preCompiledLayer->GetOutputSlot(0).GetConnection(0), subgraphOutputConn);
+    CHECK_EQ(preCompiledLayer->GetOutputSlot(0).GetConnection(0), subgraphOutputConn);
 }
 
-BOOST_AUTO_TEST_CASE(SingleInputMultiOutput)
+TEST_CASE("SingleInputMultiOutput")
 {
     // Construct graph
     Graph graph;
@@ -307,13 +305,13 @@
     graph.SubstituteSubgraph(*subgraph, preCompiledLayer);
 
     // Check that connections are correct after substitution
-    BOOST_CHECK_EQUAL(preCompiledLayer->GetInputSlot(0).GetConnection(), subgraphInputConn1);
+    CHECK_EQ(preCompiledLayer->GetInputSlot(0).GetConnection(), subgraphInputConn1);
 
-    BOOST_CHECK_EQUAL(preCompiledLayer->GetOutputSlot(0).GetConnection(0), subgraphOutputConn1);
-    BOOST_CHECK_EQUAL(preCompiledLayer->GetOutputSlot(1).GetConnection(0), subgraphOutputConn2);
+    CHECK_EQ(preCompiledLayer->GetOutputSlot(0).GetConnection(0), subgraphOutputConn1);
+    CHECK_EQ(preCompiledLayer->GetOutputSlot(1).GetConnection(0), subgraphOutputConn2);
 }
 
-BOOST_AUTO_TEST_CASE(MultiInputMultiOutput)
+TEST_CASE("MultiInputMultiOutput")
 {
     // Construct graph
     Graph graph;
@@ -359,14 +357,14 @@
     graph.SubstituteSubgraph(*subgraph, preCompiledLayer);
 
     // Check that connections are correct after substitution
-    BOOST_CHECK_EQUAL(preCompiledLayer->GetInputSlot(0).GetConnection(), subgraphInputConn1);
-    BOOST_CHECK_EQUAL(preCompiledLayer->GetInputSlot(1).GetConnection(), subgraphInputConn2);
+    CHECK_EQ(preCompiledLayer->GetInputSlot(0).GetConnection(), subgraphInputConn1);
+    CHECK_EQ(preCompiledLayer->GetInputSlot(1).GetConnection(), subgraphInputConn2);
 
-    BOOST_CHECK_EQUAL(preCompiledLayer->GetOutputSlot(0).GetConnection(0), subgraphOutputConn1);
-    BOOST_CHECK_EQUAL(preCompiledLayer->GetOutputSlot(1).GetConnection(0), subgraphOutputConn2);
+    CHECK_EQ(preCompiledLayer->GetOutputSlot(0).GetConnection(0), subgraphOutputConn1);
+    CHECK_EQ(preCompiledLayer->GetOutputSlot(1).GetConnection(0), subgraphOutputConn2);
 }
 
-BOOST_AUTO_TEST_CASE(EraseReplacedLayers)
+TEST_CASE("EraseReplacedLayers")
 {
     // Construct graph
     Graph graph;
@@ -404,24 +402,24 @@
     graph.SubstituteSubgraph(*subgraph, preCompiledLayer);
 
     // Check that the layers belonging to the sub-graph have been erased from the graph after substitution
-    BOOST_CHECK(!AreAnySubgraphLayersPresentInGraph(subgraphLayers, graph));
+    CHECK(!AreAnySubgraphLayersPresentInGraph(subgraphLayers, graph));
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
 
-BOOST_AUTO_TEST_SUITE(SubgraphSelection)
-
-BOOST_AUTO_TEST_CASE(SubgraphForEmptyGraph)
+TEST_SUITE("SubgraphSelection")
+{
+TEST_CASE("SubgraphForEmptyGraph")
 {
     Graph graph;
     SubgraphView subgraph(graph);
 
-    BOOST_TEST(subgraph.GetInputSlots().empty());
-    BOOST_TEST(subgraph.GetOutputSlots().empty());
-    BOOST_TEST(subgraph.GetLayers().empty());
+    CHECK(subgraph.GetInputSlots().empty());
+    CHECK(subgraph.GetOutputSlots().empty());
+    CHECK(subgraph.GetLayers().empty());
 }
 
-BOOST_AUTO_TEST_CASE(SubgraphForEntireGraph)
+TEST_CASE("SubgraphForEntireGraph")
 {
     Graph graph;
 
@@ -436,12 +434,12 @@
 
     SubgraphView subgraph(graph);
 
-    BOOST_TEST(subgraph.GetInputSlots().empty());
-    BOOST_TEST(subgraph.GetOutputSlots().empty());
-    BOOST_TEST(subgraph.GetLayers().size() == graph.GetNumLayers());
+    CHECK(subgraph.GetInputSlots().empty());
+    CHECK(subgraph.GetOutputSlots().empty());
+    CHECK(subgraph.GetLayers().size() == graph.GetNumLayers());
 }
 
-BOOST_AUTO_TEST_CASE(NoSubgraphsForNoMatch)
+TEST_CASE("NoSubgraphsForNoMatch")
 {
     Graph graph;
 
@@ -451,10 +449,10 @@
     SubgraphViewSelector::Subgraphs subgraphs =
         SubgraphViewSelector::SelectSubgraphs(graph, [](const Layer &) { return false; });
 
-    BOOST_TEST(subgraphs.empty());
+    CHECK(subgraphs.empty());
 }
 
-BOOST_AUTO_TEST_CASE(OneSubgraphsSelectedASingleMatch)
+TEST_CASE("OneSubgraphsSelectedASingleMatch")
 {
     Graph graph;
 
@@ -471,7 +469,7 @@
                 return isOutput;
             });
 
-    BOOST_TEST(subgraphs.size() == 1);
+    CHECK(subgraphs.size() == 1);
     if (subgraphs.size() == 1)
     {
         auto expected = CreateSubgraphViewFrom(CreateInputsFrom({output}),
@@ -483,7 +481,7 @@
     }
 }
 
-BOOST_AUTO_TEST_CASE(MultipleLayersSelectedInTheMiddle)
+TEST_CASE("MultipleLayersSelectedInTheMiddle")
 {
     Graph graph;
 
@@ -506,7 +504,7 @@
                 return toSelect;
             });
 
-    BOOST_TEST(subgraphs.size() == 1);
+    CHECK(subgraphs.size() == 1);
     if (subgraphs.size() == 1)
     {
         auto expected = CreateSubgraphViewFrom(CreateInputsFrom({mid1}),
@@ -517,7 +515,7 @@
     }
 }
 
-BOOST_AUTO_TEST_CASE(DisjointGraphs)
+TEST_CASE("DisjointGraphs")
 {
     // The input graph has two disjoint sections and all layers are selected.
     // This should result in two subgraphs being produced.
@@ -542,11 +540,11 @@
     // expected results to test against
     auto expected1 = CreateSubgraphViewFrom({}, {}, { o0, n0, i0 });
     auto expected2 = CreateSubgraphViewFrom({}, {}, { o1, n1, i1 });
-    BOOST_TEST(subgraphs.size() == 2);
+    CHECK(subgraphs.size() == 2);
     if (subgraphs.size() == 2)
     {
-        BOOST_TEST((subgraphs[0] != nullptr));
-        BOOST_TEST((subgraphs[1] != nullptr));
+        CHECK((subgraphs[0] != nullptr));
+        CHECK((subgraphs[1] != nullptr));
         if (subgraphs[0].get() != nullptr && subgraphs[1].get() != nullptr)
         {
             if (std::find(subgraphs[0]->GetLayers().begin(), subgraphs[0]->GetLayers().end(), i0) !=
@@ -564,7 +562,7 @@
     }
 }
 
-BOOST_AUTO_TEST_CASE(IslandInTheMiddle)
+TEST_CASE("IslandInTheMiddle")
 {
     // This case represent the scenario when a non-selected X1 node placed in the middle
     // of the selected M* nodes.
@@ -629,12 +627,12 @@
             std::vector<OutputSlot*>{},
             { m5, m6 });
 
-    BOOST_TEST(subgraphs.size() == 2);
+    CHECK(subgraphs.size() == 2);
     if (subgraphs.size() == 2)
     {
         // we need to have valid subgraph pointers here
-        BOOST_TEST((subgraphs[0] != nullptr));
-        BOOST_TEST((subgraphs[1] != nullptr));
+        CHECK((subgraphs[0] != nullptr));
+        CHECK((subgraphs[1] != nullptr));
 
         if (subgraphs[0].get() != nullptr && subgraphs[1].get() != nullptr)
         {
@@ -646,8 +644,8 @@
             }
             );
 
-            BOOST_TEST(subgraphs[0]->GetLayers().size() == 2);
-            BOOST_TEST(subgraphs[1]->GetLayers().size() == 5);
+            CHECK(subgraphs[0]->GetLayers().size() == 2);
+            CHECK(subgraphs[1]->GetLayers().size() == 5);
 
             CompareSubgraphViews(subgraphs[0], smallerSubgraph);
             CompareSubgraphViews(subgraphs[1], largerSubgraph);
@@ -655,7 +653,7 @@
     }
 }
 
-BOOST_AUTO_TEST_CASE(MultipleSimpleSubgraphs)
+TEST_CASE("MultipleSimpleSubgraphs")
 {
     // This test case represents the scenario when we have two distinct subgraphs
     // in a simple linear network. The selected nodes are the M* and the
@@ -704,12 +702,12 @@
                                                   CreateOutputsFrom({m3}),
                                                   {m3});
 
-    BOOST_TEST(subgraphs.size() == 2);
+    CHECK(subgraphs.size() == 2);
     if (subgraphs.size() == 2)
     {
         // we need to have valid subgraph pointers here
-        BOOST_TEST((subgraphs[0] != nullptr));
-        BOOST_TEST((subgraphs[1] != nullptr));
+        CHECK((subgraphs[0] != nullptr));
+        CHECK((subgraphs[1] != nullptr));
 
         if (subgraphs[0].get() != nullptr && subgraphs[1].get() != nullptr)
         {
@@ -721,8 +719,8 @@
                 }
             );
 
-            BOOST_TEST(subgraphs[0]->GetLayers().size() == 1);
-            BOOST_TEST(subgraphs[1]->GetLayers().size() == 2);
+            CHECK(subgraphs[0]->GetLayers().size() == 1);
+            CHECK(subgraphs[1]->GetLayers().size() == 2);
 
             CompareSubgraphViews(subgraphs[0], smallerSubgraph);
             CompareSubgraphViews(subgraphs[1], largerSubgraph);
@@ -730,7 +728,7 @@
     }
 }
 
-BOOST_AUTO_TEST_CASE(SimpleLinearTest)
+TEST_CASE("SimpleLinearTest")
 {
     //X1 -> M1 -> M2 -> X2
     //Where the input slots of M1 and the output slots of M2 are to be the sub graph boundaries.
@@ -765,7 +763,7 @@
                         return toSelect;
                     });
 
-    BOOST_CHECK(subgraphs.size() == 1);
+    CHECK(subgraphs.size() == 1);
     if(subgraphs.size() == 1)
     {
         auto expected = CreateSubgraphViewFrom(CreateInputsFrom({layerM1}),
@@ -776,7 +774,7 @@
     }
 }
 
-BOOST_AUTO_TEST_CASE(MultiInputSingleOutput)
+TEST_CASE("MultiInputSingleOutput")
 {
     //X1 -> M1 -> M3 -> X3
     //X2 -> M2 -> M3 -> X3
@@ -820,7 +818,7 @@
                         return toSelect;
                     });
 
-    BOOST_CHECK(subgraphs.size() == 1);
+    CHECK(subgraphs.size() == 1);
     if (subgraphs.size() == 1)
     {
         auto expected = CreateSubgraphViewFrom(CreateInputsFrom({layerM1, layerM2}),
@@ -831,7 +829,7 @@
     }
 }
 
-BOOST_AUTO_TEST_CASE(SingleInputMultiOutput)
+TEST_CASE("SingleInputMultiOutput")
 {
     //X1 -> M1 -> M2 -> X2
     //X1 -> M1 -> M3 -> X3
@@ -876,7 +874,7 @@
                         return toSelect;
                     });
 
-    BOOST_CHECK(subgraphs.size() == 1);
+    CHECK(subgraphs.size() == 1);
     if(subgraphs.size() == 1)
     {
         auto expected = CreateSubgraphViewFrom(CreateInputsFrom({layerM1}),
@@ -887,7 +885,7 @@
     }
 }
 
-BOOST_AUTO_TEST_CASE(MultiInputMultiOutput)
+TEST_CASE("MultiInputMultiOutput")
 {
     // This case represents the scenario with multiple inputs and multiple outputs
     //
@@ -940,7 +938,7 @@
             });
 
 
-    BOOST_CHECK(subgraphs.size() == 1);
+    CHECK(subgraphs.size() == 1);
     if (subgraphs.size() == 1)
     {
         auto expected = CreateSubgraphViewFrom(CreateInputsFrom({m1, m2}),
@@ -951,7 +949,7 @@
     }
 }
 
-BOOST_AUTO_TEST_CASE(ValidMerge)
+TEST_CASE("ValidMerge")
 {
     // Checks that a node that has multiple choices for merge candidates (M3 in this case) correctly merges with the
     // one that it can (M0), and doesn't merge with the ones it can't (X2 and M2).
@@ -1001,12 +999,12 @@
         CreateOutputsFrom({ }),
         { m0, m3 });
 
-    BOOST_TEST(subgraphs.size() == 2);
+    CHECK(subgraphs.size() == 2);
     if (subgraphs.size() == 2)
     {
         // we need to have valid subgraph pointers here
-        BOOST_TEST((subgraphs[0] != nullptr));
-        BOOST_TEST((subgraphs[1] != nullptr));
+        CHECK((subgraphs[0] != nullptr));
+        CHECK((subgraphs[1] != nullptr));
 
         if (subgraphs[0].get() != nullptr && subgraphs[1].get() != nullptr)
         {
@@ -1024,7 +1022,7 @@
     }
 }
 
-BOOST_AUTO_TEST_CASE(PropagatedDependencies)
+TEST_CASE("PropagatedDependencies")
 {
     // Version of IslandInTheMiddle with longer chain
     // to make sure antecedents are propagated.
@@ -1095,13 +1093,13 @@
     auto smallerSubgraph =
         CreateSubgraphViewFrom(CreateInputsFrom({ m10 }), CreateOutputsFrom({ m10 }), { m10 });
 
-    BOOST_TEST(subgraphs.size() == 3);
+    CHECK(subgraphs.size() == 3);
     if (subgraphs.size() == 3)
     {
         // we need to have valid subgraph pointers here
-        BOOST_TEST((subgraphs[0] != nullptr));
-        BOOST_TEST((subgraphs[1] != nullptr));
-        BOOST_TEST((subgraphs[2] != nullptr));
+        CHECK((subgraphs[0] != nullptr));
+        CHECK((subgraphs[1] != nullptr));
+        CHECK((subgraphs[2] != nullptr));
 
         if (subgraphs[0].get() != nullptr && subgraphs[1].get() != nullptr && subgraphs[2].get() != nullptr)
         {
@@ -1120,7 +1118,7 @@
     }
 }
 
-BOOST_AUTO_TEST_CASE(Random)
+TEST_CASE("Random")
 {
     // Creates random networks, splits them into subgraphs and checks the resulting subgraphs obey the required
     // dependency rules. We can easily generate very large networks which helps cover corner cases the other
@@ -1319,7 +1317,7 @@
                     Layer* l = toProcess.front();
                     toProcess.pop();
 
-                    BOOST_CHECK(layerToSubgraph[l] != subgraph.get());
+                    CHECK(layerToSubgraph[l] != subgraph.get());
 
                     for (const InputSlot& is : l->GetInputSlots())
                     {
@@ -1331,11 +1329,11 @@
     }
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
 
-BOOST_AUTO_TEST_SUITE(IntegrationTests)
-
-BOOST_AUTO_TEST_CASE(SingleSubgraph)
+TEST_SUITE("IntegrationTests")
+{
+TEST_CASE("SingleSubgraph")
 {
     // This test case represents the scenario when we have one subgraph
     // in which two layers have GpuAcc backend assigned
@@ -1368,18 +1366,18 @@
                 return toSelect;
             });
 
-    BOOST_TEST(subgraphs.size() == 1);
+    CHECK(subgraphs.size() == 1);
     if(subgraphs.size() == 1)
     {
-        BOOST_TEST((subgraphs[0] != nullptr));
+        CHECK((subgraphs[0] != nullptr));
 
         if (subgraphs[0].get() != nullptr)
         {
             unsigned int numInputSlots = armnn::numeric_cast<unsigned int>(subgraphs[0]->GetInputSlots().size());
             unsigned int numOutputSlots = armnn::numeric_cast<unsigned int>(subgraphs[0]->GetOutputSlots().size());
 
-            BOOST_TEST((numInputSlots == 1));
-            BOOST_TEST((numOutputSlots == 1));
+            CHECK((numInputSlots == 1));
+            CHECK((numOutputSlots == 1));
 
             // Save sub-graph connections for comparison after substitution
             IOutputSlot* subgraphInputConn1 = subgraphs[0]->GetInputSlot(0)->GetConnection();
@@ -1393,14 +1391,14 @@
             graph.SubstituteSubgraph(*subgraphs[0], preCompiledLayer);
 
             // Check that connections are correct after substitution
-            BOOST_CHECK_EQUAL(preCompiledLayer->GetInputSlot(0).GetConnection(), subgraphInputConn1);
+            CHECK_EQ(preCompiledLayer->GetInputSlot(0).GetConnection(), subgraphInputConn1);
 
-            BOOST_CHECK_EQUAL(preCompiledLayer->GetOutputSlot(0).GetConnection(0), subgraphOutputConn1);
+            CHECK_EQ(preCompiledLayer->GetOutputSlot(0).GetConnection(0), subgraphOutputConn1);
         }
     }
 }
 
-BOOST_AUTO_TEST_CASE(MultipleSubgraphs)
+TEST_CASE("MultipleSubgraphs")
 {
     // This test case represents the scenario when we have two subgraphs
     // in which two layers have CpuAcc backend assigned
@@ -1441,11 +1439,11 @@
                 return toSelect;
             });
 
-    BOOST_TEST(subgraphs.size() == 2);
+    CHECK(subgraphs.size() == 2);
     if(subgraphs.size() == 2)
     {
-        BOOST_TEST((subgraphs[0] != nullptr));
-        BOOST_TEST((subgraphs[1] != nullptr));
+        CHECK((subgraphs[0] != nullptr));
+        CHECK((subgraphs[1] != nullptr));
 
         if (subgraphs[0].get() != nullptr && subgraphs[1].get() != nullptr)
         {
@@ -1484,18 +1482,18 @@
             graph.SubstituteSubgraph(*subgraphs[1], preCompiledLayer2);
 
             // Check that connections are correct after substitution
-            BOOST_CHECK_EQUAL(preCompiledLayer1->GetInputSlot(0).GetConnection(), subgraph1InputConn);
-            BOOST_CHECK_EQUAL(preCompiledLayer1->GetOutputSlot(0).GetConnection(0), subgraph1OutputConn1);
-            BOOST_CHECK_EQUAL(preCompiledLayer1->GetOutputSlot(1).GetConnection(0), subgraph1OutputConn2);
+            CHECK_EQ(preCompiledLayer1->GetInputSlot(0).GetConnection(), subgraph1InputConn);
+            CHECK_EQ(preCompiledLayer1->GetOutputSlot(0).GetConnection(0), subgraph1OutputConn1);
+            CHECK_EQ(preCompiledLayer1->GetOutputSlot(1).GetConnection(0), subgraph1OutputConn2);
 
-            BOOST_CHECK_EQUAL(preCompiledLayer2->GetInputSlot(0).GetConnection(), subgraph2InputConn1);
-            BOOST_CHECK_EQUAL(preCompiledLayer2->GetInputSlot(1).GetConnection(), subgraph2InputConn2);
-            BOOST_CHECK_EQUAL(preCompiledLayer2->GetOutputSlot(0).GetConnection(0), subgraph2OutputConn);
+            CHECK_EQ(preCompiledLayer2->GetInputSlot(0).GetConnection(), subgraph2InputConn1);
+            CHECK_EQ(preCompiledLayer2->GetInputSlot(1).GetConnection(), subgraph2InputConn2);
+            CHECK_EQ(preCompiledLayer2->GetOutputSlot(0).GetConnection(0), subgraph2OutputConn);
         }
     }
 }
 
-BOOST_AUTO_TEST_CASE(SubgraphCycles)
+TEST_CASE("SubgraphCycles")
 {
     // This case represent the scenario when a naive split could lead to a cyclic dependency between two subgraphs
     //
@@ -1555,12 +1553,12 @@
                                                  CreateOutputsFrom({m2}),
                                                  {m2});
 
-    BOOST_TEST(subgraphs.size() == 2);
+    CHECK(subgraphs.size() == 2);
     if (subgraphs.size() == 2)
     {
         // we need to have valid subgraph pointers here
-        BOOST_TEST((subgraphs[0] != nullptr));
-        BOOST_TEST((subgraphs[1] != nullptr));
+        CHECK((subgraphs[0] != nullptr));
+        CHECK((subgraphs[1] != nullptr));
 
         if (subgraphs[0].get() != nullptr && subgraphs[1].get() != nullptr)
         {
@@ -1573,8 +1571,8 @@
             );
 
             // one subgraph needs to be size=1 and the other one is 4
-            BOOST_TEST(subgraphs[0]->GetLayers().size() == 1);
-            BOOST_TEST(subgraphs[1]->GetLayers().size() == 2);
+            CHECK(subgraphs[0]->GetLayers().size() == 1);
+            CHECK(subgraphs[1]->GetLayers().size() == 2);
 
             CompareSubgraphViews(subgraphs[0], outputSubgraph);
             CompareSubgraphViews(subgraphs[1], inputSubgraph);
@@ -1582,7 +1580,7 @@
     }
 }
 
-BOOST_AUTO_TEST_CASE(SubgraphOrder)
+TEST_CASE("SubgraphOrder")
 {
     Graph graph;
 
@@ -1603,10 +1601,10 @@
     LayerType expectedSorted[] = {LayerType::Input, LayerType::Activation, LayerType::Output};
     view->ForEachLayer([&idx, &expectedSorted](const Layer* l)
         {
-            BOOST_TEST((expectedSorted[idx] == l->GetType()));
+            CHECK((expectedSorted[idx] == l->GetType()));
             idx++;
         }
     );
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
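
doctest has no direct counterpart to BOOST_CHECK_EQUAL_COLLECTIONS, which is why CompareVectors above now wraps std::equal in a single CHECK. A slightly stricter variant would assert the sizes first, since std::equal on its own assumes the second range is at least as long as the first. A minimal sketch, using a hypothetical CheckVectorsEqual helper that is not part of this patch:

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>

#include <algorithm>
#include <vector>

// Hypothetical helper: check the sizes first, then compare element-wise.
template <typename T>
void CheckVectorsEqual(const std::vector<T>& result, const std::vector<T>& expected)
{
    REQUIRE(result.size() == expected.size());
    CHECK(std::equal(result.begin(), result.end(), expected.begin()));
}

TEST_CASE("CheckVectorsEqualExample")
{
    CheckVectorsEqual<int>({ 1, 2, 3 }, { 1, 2, 3 });
}
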
diff --git a/src/armnn/test/TensorHandleStrategyTest.cpp b/src/armnn/test/TensorHandleStrategyTest.cpp
index 47d0666..fb26880 100644
--- a/src/armnn/test/TensorHandleStrategyTest.cpp
+++ b/src/armnn/test/TensorHandleStrategyTest.cpp
@@ -2,7 +2,8 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
-#include <boost/test/unit_test.hpp>
+
+#include <doctest/doctest.h>
 
 #include <armnn/LayerVisitorBase.hpp>
 
@@ -270,29 +271,29 @@
 };
 
 
-BOOST_AUTO_TEST_SUITE(TensorHandle)
-
-BOOST_AUTO_TEST_CASE(RegisterFactories)
+TEST_SUITE("TensorHandle")
+{
+TEST_CASE("RegisterFactories")
 {
     TestBackendA backendA;
     TestBackendB backendB;
 
-    BOOST_TEST(backendA.GetHandleFactoryPreferences()[0] == "TestHandleFactoryA1");
-    BOOST_TEST(backendA.GetHandleFactoryPreferences()[1] == "TestHandleFactoryA2");
-    BOOST_TEST(backendA.GetHandleFactoryPreferences()[2] == "TestHandleFactoryB1");
-    BOOST_TEST(backendA.GetHandleFactoryPreferences()[3] == "TestHandleFactoryD1");
+    CHECK(backendA.GetHandleFactoryPreferences()[0] == "TestHandleFactoryA1");
+    CHECK(backendA.GetHandleFactoryPreferences()[1] == "TestHandleFactoryA2");
+    CHECK(backendA.GetHandleFactoryPreferences()[2] == "TestHandleFactoryB1");
+    CHECK(backendA.GetHandleFactoryPreferences()[3] == "TestHandleFactoryD1");
 
     TensorHandleFactoryRegistry registry;
     backendA.RegisterTensorHandleFactories(registry);
     backendB.RegisterTensorHandleFactories(registry);
 
-    BOOST_TEST((registry.GetFactory("Non-existing Backend") == nullptr));
-    BOOST_TEST((registry.GetFactory("TestHandleFactoryA1") != nullptr));
-    BOOST_TEST((registry.GetFactory("TestHandleFactoryA2") != nullptr));
-    BOOST_TEST((registry.GetFactory("TestHandleFactoryB1") != nullptr));
+    CHECK((registry.GetFactory("Non-existing Backend") == nullptr));
+    CHECK((registry.GetFactory("TestHandleFactoryA1") != nullptr));
+    CHECK((registry.GetFactory("TestHandleFactoryA2") != nullptr));
+    CHECK((registry.GetFactory("TestHandleFactoryB1") != nullptr));
 }
 
-BOOST_AUTO_TEST_CASE(TensorHandleSelectionStrategy)
+TEST_CASE("TensorHandleSelectionStrategy")
 {
     auto backendA = std::make_unique<TestBackendA>();
     auto backendB = std::make_unique<TestBackendB>();
@@ -343,8 +344,8 @@
     std::vector<std::string> errors;
     auto result = SelectTensorHandleStrategy(graph, backends, registry, true, errors);
 
-    BOOST_TEST(result.m_Error == false);
-    BOOST_TEST(result.m_Warning == false);
+    CHECK(result.m_Error == false);
+    CHECK(result.m_Warning == false);
 
     OutputSlot& inputLayerOut = inputLayer->GetOutputSlot(0);
     OutputSlot& softmaxLayer1Out = softmaxLayer1->GetOutputSlot(0);
@@ -353,18 +354,18 @@
     OutputSlot& softmaxLayer4Out = softmaxLayer4->GetOutputSlot(0);
 
     // Check that the correct factory was selected
-    BOOST_TEST(inputLayerOut.GetTensorHandleFactoryId() == "TestHandleFactoryD1");
-    BOOST_TEST(softmaxLayer1Out.GetTensorHandleFactoryId() == "TestHandleFactoryB1");
-    BOOST_TEST(softmaxLayer2Out.GetTensorHandleFactoryId() == "TestHandleFactoryB1");
-    BOOST_TEST(softmaxLayer3Out.GetTensorHandleFactoryId() == "TestHandleFactoryC1");
-    BOOST_TEST(softmaxLayer4Out.GetTensorHandleFactoryId() == "TestHandleFactoryD1");
+    CHECK(inputLayerOut.GetTensorHandleFactoryId() == "TestHandleFactoryD1");
+    CHECK(softmaxLayer1Out.GetTensorHandleFactoryId() == "TestHandleFactoryB1");
+    CHECK(softmaxLayer2Out.GetTensorHandleFactoryId() == "TestHandleFactoryB1");
+    CHECK(softmaxLayer3Out.GetTensorHandleFactoryId() == "TestHandleFactoryC1");
+    CHECK(softmaxLayer4Out.GetTensorHandleFactoryId() == "TestHandleFactoryD1");
 
     // Check that the correct strategy was selected
-    BOOST_TEST((inputLayerOut.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
-    BOOST_TEST((softmaxLayer1Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
-    BOOST_TEST((softmaxLayer2Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::CopyToTarget));
-    BOOST_TEST((softmaxLayer3Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::ExportToTarget));
-    BOOST_TEST((softmaxLayer4Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
+    CHECK((inputLayerOut.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
+    CHECK((softmaxLayer1Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
+    CHECK((softmaxLayer2Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::CopyToTarget));
+    CHECK((softmaxLayer3Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::ExportToTarget));
+    CHECK((softmaxLayer4Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
 
     graph.AddCompatibilityLayers(backends, registry);
 
@@ -377,7 +378,7 @@
             copyCount++;
         }
     });
-    BOOST_TEST(copyCount == 1);
+    CHECK(copyCount == 1);
 
     // Test for import layers
     int importCount= 0;
@@ -388,7 +389,7 @@
             importCount++;
         }
     });
-    BOOST_TEST(importCount == 1);
+    CHECK(importCount == 1);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/TensorHelpers.hpp b/src/armnn/test/TensorHelpers.hpp
index b8788e7..95cea58 100644
--- a/src/armnn/test/TensorHelpers.hpp
+++ b/src/armnn/test/TensorHelpers.hpp
@@ -12,7 +12,7 @@
 
 #include <QuantizeHelper.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <array>
 #include <cmath>
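
The TensorTest.cpp changes below convert BOOST_FIXTURE_TEST_CASE(Name, Fixture) into TEST_CASE_FIXTURE(Fixture, "Name"); note the swapped argument order. doctest derives the test case from the fixture struct, so its members are accessed directly in the test body. A minimal self-contained sketch with a hypothetical fixture (not ArmNN code):

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>

// Hypothetical fixture used only to illustrate member visibility.
struct CounterFixture
{
    int m_Count = 41;
};

TEST_CASE_FIXTURE(CounterFixture, "FixtureMembersAreVisible")
{
    m_Count++;
    CHECK(m_Count == 42);
}
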
diff --git a/src/armnn/test/TensorTest.cpp b/src/armnn/test/TensorTest.cpp
index a0b68ac..fd2d784 100644
--- a/src/armnn/test/TensorTest.cpp
+++ b/src/armnn/test/TensorTest.cpp
@@ -6,40 +6,12 @@
 #include <armnn/Tensor.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
+#include <doctest/doctest.h>
 
-namespace armnn
-{
-
-// Adds unit test framework for interpreting TensorInfo type.
-std::ostream& boost_test_print_type(std::ostream& ostr, const TensorInfo& right)
-{
-    ostr << "TensorInfo[ "
-    << right.GetNumDimensions() << ","
-    << right.GetShape()[0] << ","
-    << right.GetShape()[1] << ","
-    << right.GetShape()[2] << ","
-    << right.GetShape()[3]
-    << " ]" << std::endl;
-    return ostr;
-}
-
-std::ostream& boost_test_print_type(std::ostream& ostr, const TensorShape& shape)
-{
-    ostr << "TensorShape[ "
-        << shape.GetNumDimensions() << ","
-        << shape[0] << ","
-        << shape[1] << ","
-        << shape[2] << ","
-        << shape[3]
-        << " ]" << std::endl;
-    return ostr;
-}
-
-} //namespace armnn
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(Tensor)
-
+TEST_SUITE("Tensor")
+{
 struct TensorInfoFixture
 {
     TensorInfoFixture()
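
The hunk above drops the boost_test_print_type helpers that Boost.Test used to format TensorInfo and TensorShape in failure messages. doctest stringifies user types through an existing ostream operator<< or a doctest::StringMaker specialisation instead; if readable failure output is still wanted, something along the following lines could sit alongside the existing tests. This is an illustrative sketch only, not part of this patch:

#include <sstream>

#include <armnn/Tensor.hpp>
#include <doctest/doctest.h>

namespace doctest
{
// Hypothetical specialisation: gives doctest a human-readable representation
// of TensorInfo when an assertion involving it fails.
template <>
struct StringMaker<armnn::TensorInfo>
{
    static String convert(const armnn::TensorInfo& info)
    {
        std::ostringstream oss;
        oss << "TensorInfo[ " << info.GetNumDimensions() << " dimensions ]";
        return oss.str().c_str();
    }
};
} // namespace doctest
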
@@ -52,54 +24,54 @@
     TensorInfo m_TensorInfo;
 };
 
-BOOST_FIXTURE_TEST_CASE(ConstructShapeUsingListInitialization, TensorInfoFixture)
+TEST_CASE_FIXTURE(TensorInfoFixture, "ConstructShapeUsingListInitialization")
 {
     TensorShape listInitializedShape{ 6, 7, 8, 9 };
-    BOOST_TEST(listInitializedShape == m_TensorInfo.GetShape());
+    CHECK(listInitializedShape == m_TensorInfo.GetShape());
 }
 
-BOOST_FIXTURE_TEST_CASE(ConstructTensorInfo, TensorInfoFixture)
+TEST_CASE_FIXTURE(TensorInfoFixture, "ConstructTensorInfo")
 {
-    BOOST_TEST(m_TensorInfo.GetNumDimensions() == 4);
-    BOOST_TEST(m_TensorInfo.GetShape()[0] == 6); // <= Outer most
-    BOOST_TEST(m_TensorInfo.GetShape()[1] == 7);
-    BOOST_TEST(m_TensorInfo.GetShape()[2] == 8);
-    BOOST_TEST(m_TensorInfo.GetShape()[3] == 9);     // <= Inner most
+    CHECK(m_TensorInfo.GetNumDimensions() == 4);
+    CHECK(m_TensorInfo.GetShape()[0] == 6); // <= Outer most
+    CHECK(m_TensorInfo.GetShape()[1] == 7);
+    CHECK(m_TensorInfo.GetShape()[2] == 8);
+    CHECK(m_TensorInfo.GetShape()[3] == 9);     // <= Inner most
 }
 
-BOOST_FIXTURE_TEST_CASE(CopyConstructTensorInfo, TensorInfoFixture)
+TEST_CASE_FIXTURE(TensorInfoFixture, "CopyConstructTensorInfo")
 {
     TensorInfo copyConstructed(m_TensorInfo);
-    BOOST_TEST(copyConstructed.GetNumDimensions() == 4);
-    BOOST_TEST(copyConstructed.GetShape()[0] == 6);
-    BOOST_TEST(copyConstructed.GetShape()[1] == 7);
-    BOOST_TEST(copyConstructed.GetShape()[2] == 8);
-    BOOST_TEST(copyConstructed.GetShape()[3] == 9);
+    CHECK(copyConstructed.GetNumDimensions() == 4);
+    CHECK(copyConstructed.GetShape()[0] == 6);
+    CHECK(copyConstructed.GetShape()[1] == 7);
+    CHECK(copyConstructed.GetShape()[2] == 8);
+    CHECK(copyConstructed.GetShape()[3] == 9);
 }
 
-BOOST_FIXTURE_TEST_CASE(TensorInfoEquality, TensorInfoFixture)
+TEST_CASE_FIXTURE(TensorInfoFixture, "TensorInfoEquality")
 {
     TensorInfo copyConstructed(m_TensorInfo);
-    BOOST_TEST(copyConstructed == m_TensorInfo);
+    CHECK(copyConstructed == m_TensorInfo);
 }
 
-BOOST_FIXTURE_TEST_CASE(TensorInfoInequality, TensorInfoFixture)
+TEST_CASE_FIXTURE(TensorInfoFixture, "TensorInfoInequality")
 {
     TensorInfo other;
     unsigned int sizes[] = {2,3,4,5};
     other = TensorInfo(4, sizes, DataType::Float32);
 
-    BOOST_TEST(other != m_TensorInfo);
+    CHECK(other != m_TensorInfo);
 }
 
-BOOST_FIXTURE_TEST_CASE(TensorInfoAssignmentOperator, TensorInfoFixture)
+TEST_CASE_FIXTURE(TensorInfoFixture, "TensorInfoAssignmentOperator")
 {
     TensorInfo copy;
     copy = m_TensorInfo;
-    BOOST_TEST(copy == m_TensorInfo);
+    CHECK(copy == m_TensorInfo);
 }
 
-BOOST_AUTO_TEST_CASE(CopyNoQuantizationTensorInfo)
+TEST_CASE("CopyNoQuantizationTensorInfo")
 {
     TensorInfo infoA;
     infoA.SetShape({ 5, 6, 7, 8 });
@@ -112,24 +84,24 @@
     infoB.SetQuantizationOffset(5);
     infoB.SetQuantizationDim(Optional<unsigned int>(1));
 
-    BOOST_TEST((infoA.GetShape() == TensorShape({ 5, 6, 7, 8 })));
-    BOOST_TEST((infoA.GetDataType() == DataType::QAsymmU8));
-    BOOST_TEST(infoA.GetQuantizationScale() == 1);
-    BOOST_TEST(infoA.GetQuantizationOffset() == 0);
-    BOOST_CHECK(!infoA.GetQuantizationDim().has_value());
+    CHECK((infoA.GetShape() == TensorShape({ 5, 6, 7, 8 })));
+    CHECK((infoA.GetDataType() == DataType::QAsymmU8));
+    CHECK(infoA.GetQuantizationScale() == 1);
+    CHECK(infoA.GetQuantizationOffset() == 0);
+    CHECK(!infoA.GetQuantizationDim().has_value());
 
-    BOOST_TEST(infoA != infoB);
+    CHECK(infoA != infoB);
     infoA = infoB;
-    BOOST_TEST(infoA == infoB);
+    CHECK(infoA == infoB);
 
-    BOOST_TEST((infoA.GetShape() == TensorShape({ 5, 6, 7, 8 })));
-    BOOST_TEST((infoA.GetDataType() == DataType::QAsymmU8));
-    BOOST_TEST(infoA.GetQuantizationScale() == 10.0f);
-    BOOST_TEST(infoA.GetQuantizationOffset() == 5);
-    BOOST_CHECK(infoA.GetQuantizationDim().value() == 1);
+    CHECK((infoA.GetShape() == TensorShape({ 5, 6, 7, 8 })));
+    CHECK((infoA.GetDataType() == DataType::QAsymmU8));
+    CHECK(infoA.GetQuantizationScale() == 10.0f);
+    CHECK(infoA.GetQuantizationOffset() == 5);
+    CHECK(infoA.GetQuantizationDim().value() == 1);
 }
 
-BOOST_AUTO_TEST_CASE(CopyDifferentQuantizationTensorInfo)
+TEST_CASE("CopyDifferentQuantizationTensorInfo")
 {
     TensorInfo infoA;
     infoA.SetShape({ 5, 6, 7, 8 });
@@ -145,21 +117,21 @@
     infoB.SetQuantizationOffset(6);
     infoB.SetQuantizationDim(Optional<unsigned int>(2));
 
-    BOOST_TEST((infoA.GetShape() == TensorShape({ 5, 6, 7, 8 })));
-    BOOST_TEST((infoA.GetDataType() == DataType::QAsymmU8));
-    BOOST_TEST(infoA.GetQuantizationScale() == 10.0f);
-    BOOST_TEST(infoA.GetQuantizationOffset() == 5);
-    BOOST_CHECK(infoA.GetQuantizationDim().value() == 1);
+    CHECK((infoA.GetShape() == TensorShape({ 5, 6, 7, 8 })));
+    CHECK((infoA.GetDataType() == DataType::QAsymmU8));
+    CHECK(infoA.GetQuantizationScale() == 10.0f);
+    CHECK(infoA.GetQuantizationOffset() == 5);
+    CHECK(infoA.GetQuantizationDim().value() == 1);
 
-    BOOST_TEST(infoA != infoB);
+    CHECK(infoA != infoB);
     infoA = infoB;
-    BOOST_TEST(infoA == infoB);
+    CHECK(infoA == infoB);
 
-    BOOST_TEST((infoA.GetShape() == TensorShape({ 5, 6, 7, 8 })));
-    BOOST_TEST((infoA.GetDataType() == DataType::QAsymmU8));
-    BOOST_TEST(infoA.GetQuantizationScale() == 11.0f);
-    BOOST_TEST(infoA.GetQuantizationOffset() == 6);
-    BOOST_CHECK(infoA.GetQuantizationDim().value() == 2);
+    CHECK((infoA.GetShape() == TensorShape({ 5, 6, 7, 8 })));
+    CHECK((infoA.GetDataType() == DataType::QAsymmU8));
+    CHECK(infoA.GetQuantizationScale() == 11.0f);
+    CHECK(infoA.GetQuantizationOffset() == 6);
+    CHECK(infoA.GetQuantizationDim().value() == 2);
 }
 
 void CheckTensor(const ConstTensor& t)
@@ -167,7 +139,7 @@
     t.GetInfo();
 }
 
-BOOST_AUTO_TEST_CASE(TensorVsConstTensor)
+TEST_CASE("TensorVsConstTensor")
 {
     int mutableDatum = 2;
     const int immutableDatum = 3;
@@ -185,68 +157,68 @@
     CheckTensor(ct);
 }
 
-BOOST_AUTO_TEST_CASE(ModifyTensorInfo)
+TEST_CASE("ModifyTensorInfo")
 {
     TensorInfo info;
     info.SetShape({ 5, 6, 7, 8 });
-    BOOST_TEST((info.GetShape() == TensorShape({ 5, 6, 7, 8 })));
+    CHECK((info.GetShape() == TensorShape({ 5, 6, 7, 8 })));
     info.SetDataType(DataType::QAsymmU8);
-    BOOST_TEST((info.GetDataType() == DataType::QAsymmU8));
+    CHECK((info.GetDataType() == DataType::QAsymmU8));
     info.SetQuantizationScale(10.0f);
-    BOOST_TEST(info.GetQuantizationScale() == 10.0f);
+    CHECK(info.GetQuantizationScale() == 10.0f);
     info.SetQuantizationOffset(5);
-    BOOST_TEST(info.GetQuantizationOffset() == 5);
+    CHECK(info.GetQuantizationOffset() == 5);
 }
 
-BOOST_AUTO_TEST_CASE(TensorShapeOperatorBrackets)
+TEST_CASE("TensorShapeOperatorBrackets")
 {
     const TensorShape constShape({0,1,2,3});
     TensorShape shape({0,1,2,3});
 
     // Checks version of operator[] which returns an unsigned int.
-    BOOST_TEST(shape[2] == 2);
+    CHECK(shape[2] == 2);
     shape[2] = 20;
-    BOOST_TEST(shape[2] == 20);
+    CHECK(shape[2] == 20);
 
     // Checks the version of operator[] which returns a reference.
-    BOOST_TEST(constShape[2] == 2);
+    CHECK(constShape[2] == 2);
 }
 
-BOOST_AUTO_TEST_CASE(TensorInfoPerAxisQuantization)
+TEST_CASE("TensorInfoPerAxisQuantization")
 {
     // Old constructor
     TensorInfo tensorInfo0({ 1, 1 }, DataType::Float32, 2.0f, 1);
-    BOOST_CHECK(!tensorInfo0.HasMultipleQuantizationScales());
-    BOOST_CHECK(tensorInfo0.GetQuantizationScale() == 2.0f);
-    BOOST_CHECK(tensorInfo0.GetQuantizationOffset() == 1);
-    BOOST_CHECK(tensorInfo0.GetQuantizationScales()[0] == 2.0f);
-    BOOST_CHECK(!tensorInfo0.GetQuantizationDim().has_value());
+    CHECK(!tensorInfo0.HasMultipleQuantizationScales());
+    CHECK(tensorInfo0.GetQuantizationScale() == 2.0f);
+    CHECK(tensorInfo0.GetQuantizationOffset() == 1);
+    CHECK(tensorInfo0.GetQuantizationScales()[0] == 2.0f);
+    CHECK(!tensorInfo0.GetQuantizationDim().has_value());
 
     // Set per-axis quantization scales
     std::vector<float> perAxisScales{ 3.0f, 4.0f };
     tensorInfo0.SetQuantizationScales(perAxisScales);
-    BOOST_CHECK(tensorInfo0.HasMultipleQuantizationScales());
-    BOOST_CHECK(tensorInfo0.GetQuantizationScales() == perAxisScales);
+    CHECK(tensorInfo0.HasMultipleQuantizationScales());
+    CHECK(tensorInfo0.GetQuantizationScales() == perAxisScales);
 
     // Set per-tensor quantization scale
     tensorInfo0.SetQuantizationScale(5.0f);
-    BOOST_CHECK(!tensorInfo0.HasMultipleQuantizationScales());
-    BOOST_CHECK(tensorInfo0.GetQuantizationScales()[0] == 5.0f);
+    CHECK(!tensorInfo0.HasMultipleQuantizationScales());
+    CHECK(tensorInfo0.GetQuantizationScales()[0] == 5.0f);
 
     // Set quantization offset
     tensorInfo0.SetQuantizationDim(Optional<unsigned int>(1));
-    BOOST_CHECK(tensorInfo0.GetQuantizationDim().value() == 1);
+    CHECK(tensorInfo0.GetQuantizationDim().value() == 1);
 
     // New constructor
     perAxisScales = { 6.0f, 7.0f };
     TensorInfo tensorInfo1({ 1, 1 }, DataType::Float32, perAxisScales, 1);
-    BOOST_CHECK(tensorInfo1.HasMultipleQuantizationScales());
-    BOOST_CHECK(tensorInfo1.GetQuantizationOffset() == 0);
-    BOOST_CHECK(tensorInfo1.GetQuantizationScales() == perAxisScales);
-    BOOST_CHECK(tensorInfo1.GetQuantizationDim().value() == 1);
+    CHECK(tensorInfo1.HasMultipleQuantizationScales());
+    CHECK(tensorInfo1.GetQuantizationOffset() == 0);
+    CHECK(tensorInfo1.GetQuantizationScales() == perAxisScales);
+    CHECK(tensorInfo1.GetQuantizationDim().value() == 1);
 }
 
-BOOST_AUTO_TEST_CASE(TensorShape_scalar)
+TEST_CASE("TensorShape_scalar")
 {
     float mutableDatum = 3.1416f;
 
@@ -254,33 +226,33 @@
     armnn::TensorInfo        info   ( shape, DataType::Float32 );
     const armnn::Tensor      tensor ( info, &mutableDatum );
 
-    BOOST_CHECK(armnn::Dimensionality::Scalar == shape.GetDimensionality());
+    CHECK(armnn::Dimensionality::Scalar == shape.GetDimensionality());
     float scalarValue = *reinterpret_cast<float*>(tensor.GetMemoryArea());
-    BOOST_CHECK_MESSAGE(mutableDatum == scalarValue, "Scalar value is " << scalarValue);
+    CHECK_MESSAGE(mutableDatum == scalarValue, "Scalar value is " << scalarValue);
 
     armnn::TensorShape shape_equal;
     armnn::TensorShape shape_different;
     shape_equal = shape;
-    BOOST_TEST(shape_equal == shape);
-    BOOST_TEST(shape_different != shape);
-    BOOST_CHECK_MESSAGE(1 == shape.GetNumElements(), "Number of elements is " << shape.GetNumElements());
-    BOOST_CHECK_MESSAGE(1 == shape.GetNumDimensions(), "Number of dimensions is " << shape.GetNumDimensions());
-    BOOST_CHECK(true == shape.GetDimensionSpecificity(0));
-    BOOST_CHECK(shape.AreAllDimensionsSpecified());
-    BOOST_CHECK(shape.IsAtLeastOneDimensionSpecified());
+    CHECK(shape_equal == shape);
+    CHECK(shape_different != shape);
+    CHECK_MESSAGE(1 == shape.GetNumElements(), "Number of elements is " << shape.GetNumElements());
+    CHECK_MESSAGE(1 == shape.GetNumDimensions(), "Number of dimensions is " << shape.GetNumDimensions());
+    CHECK(true == shape.GetDimensionSpecificity(0));
+    CHECK(shape.AreAllDimensionsSpecified());
+    CHECK(shape.IsAtLeastOneDimensionSpecified());
 
-    BOOST_TEST(1 == shape[0]);
-    BOOST_TEST(1 == tensor.GetShape()[0]);
-    BOOST_TEST(1 == tensor.GetInfo().GetShape()[0]);
-    BOOST_CHECK_THROW( shape[1], InvalidArgumentException );
+    CHECK(1 == shape[0]);
+    CHECK(1 == tensor.GetShape()[0]);
+    CHECK(1 == tensor.GetInfo().GetShape()[0]);
+    CHECK_THROWS_AS( shape[1], InvalidArgumentException );
 
     float newMutableDatum  = 42.f;
     std::memcpy(tensor.GetMemoryArea(), &newMutableDatum, sizeof(float));
     scalarValue = *reinterpret_cast<float*>(tensor.GetMemoryArea());
-    BOOST_CHECK_MESSAGE(newMutableDatum == scalarValue, "Scalar value is " << scalarValue);
+    CHECK_MESSAGE(newMutableDatum == scalarValue, "Scalar value is " << scalarValue);
 }
 
-BOOST_AUTO_TEST_CASE(TensorShape_DynamicTensorType1_unknownNumberDimensions)
+TEST_CASE("TensorShape_DynamicTensorType1_unknownNumberDimensions")
 {
     float       mutableDatum  = 3.1416f;
 
@@ -288,19 +260,19 @@
     armnn::TensorInfo  info   ( shape, DataType::Float32 );
     armnn::Tensor      tensor ( info, &mutableDatum );
 
-    BOOST_CHECK(armnn::Dimensionality::NotSpecified == shape.GetDimensionality());
-    BOOST_CHECK_THROW( shape[0], InvalidArgumentException );
-    BOOST_CHECK_THROW( shape.GetNumElements(), InvalidArgumentException );
-    BOOST_CHECK_THROW( shape.GetNumDimensions(), InvalidArgumentException );
+    CHECK(armnn::Dimensionality::NotSpecified == shape.GetDimensionality());
+    CHECK_THROWS_AS( shape[0], InvalidArgumentException );
+    CHECK_THROWS_AS( shape.GetNumElements(), InvalidArgumentException );
+    CHECK_THROWS_AS( shape.GetNumDimensions(), InvalidArgumentException );
 
     armnn::TensorShape shape_equal;
     armnn::TensorShape shape_different;
     shape_equal = shape;
-    BOOST_TEST(shape_equal == shape);
-    BOOST_TEST(shape_different != shape);
+    CHECK(shape_equal == shape);
+    CHECK(shape_different != shape);
 }
 
-BOOST_AUTO_TEST_CASE(TensorShape_DynamicTensorType1_unknownAllDimensionsSizes)
+TEST_CASE("TensorShape_DynamicTensorType1_unknownAllDimensionsSizes")
 {
     float       mutableDatum  = 3.1416f;
 
@@ -308,23 +280,23 @@
     armnn::TensorInfo  info   ( shape, DataType::Float32 );
     armnn::Tensor      tensor ( info, &mutableDatum );
 
-    BOOST_CHECK(armnn::Dimensionality::Specified == shape.GetDimensionality());
-    BOOST_CHECK_MESSAGE(0 == shape.GetNumElements(), "Number of elements is " << shape.GetNumElements());
-    BOOST_CHECK_MESSAGE(3 == shape.GetNumDimensions(), "Number of dimensions is " << shape.GetNumDimensions());
-    BOOST_CHECK(false == shape.GetDimensionSpecificity(0));
-    BOOST_CHECK(false == shape.GetDimensionSpecificity(1));
-    BOOST_CHECK(false == shape.GetDimensionSpecificity(2));
-    BOOST_CHECK(!shape.AreAllDimensionsSpecified());
-    BOOST_CHECK(!shape.IsAtLeastOneDimensionSpecified());
+    CHECK(armnn::Dimensionality::Specified == shape.GetDimensionality());
+    CHECK_MESSAGE(0 == shape.GetNumElements(), "Number of elements is " << shape.GetNumElements());
+    CHECK_MESSAGE(3 == shape.GetNumDimensions(), "Number of dimensions is " << shape.GetNumDimensions());
+    CHECK(false == shape.GetDimensionSpecificity(0));
+    CHECK(false == shape.GetDimensionSpecificity(1));
+    CHECK(false == shape.GetDimensionSpecificity(2));
+    CHECK(!shape.AreAllDimensionsSpecified());
+    CHECK(!shape.IsAtLeastOneDimensionSpecified());
 
     armnn::TensorShape shape_equal;
     armnn::TensorShape shape_different;
     shape_equal = shape;
-    BOOST_TEST(shape_equal == shape);
-    BOOST_TEST(shape_different != shape);
+    CHECK(shape_equal == shape);
+    CHECK(shape_different != shape);
 }
 
-BOOST_AUTO_TEST_CASE(TensorShape_DynamicTensorType1_unknownSomeDimensionsSizes)
+TEST_CASE("TensorShape_DynamicTensorType1_unknownSomeDimensionsSizes")
 {
     std::vector<float> mutableDatum  { 42.f, 42.f, 42.f,
                                        0.0f, 0.1f, 0.2f };
@@ -333,36 +305,36 @@
     armnn::TensorInfo  info          ( shape, DataType::Float32 );
     armnn::Tensor      tensor        ( info, &mutableDatum );
 
-    BOOST_CHECK(armnn::Dimensionality::Specified == shape.GetDimensionality());
-    BOOST_CHECK_MESSAGE(6 == shape.GetNumElements(), "Number of elements is " << shape.GetNumElements());
-    BOOST_CHECK_MESSAGE(3 == shape.GetNumDimensions(), "Number of dimensions is " << shape.GetNumDimensions());
-    BOOST_CHECK(true  == shape.GetDimensionSpecificity(0));
-    BOOST_CHECK(false == shape.GetDimensionSpecificity(1));
-    BOOST_CHECK(true  == shape.GetDimensionSpecificity(2));
-    BOOST_CHECK(!shape.AreAllDimensionsSpecified());
-    BOOST_CHECK(shape.IsAtLeastOneDimensionSpecified());
+    CHECK(armnn::Dimensionality::Specified == shape.GetDimensionality());
+    CHECK_MESSAGE(6 == shape.GetNumElements(), "Number of elements is " << shape.GetNumElements());
+    CHECK_MESSAGE(3 == shape.GetNumDimensions(), "Number of dimensions is " << shape.GetNumDimensions());
+    CHECK(true  == shape.GetDimensionSpecificity(0));
+    CHECK(false == shape.GetDimensionSpecificity(1));
+    CHECK(true  == shape.GetDimensionSpecificity(2));
+    CHECK(!shape.AreAllDimensionsSpecified());
+    CHECK(shape.IsAtLeastOneDimensionSpecified());
 
-    BOOST_CHECK_THROW(shape[1], InvalidArgumentException);
-    BOOST_CHECK_THROW(tensor.GetShape()[1], InvalidArgumentException);
-    BOOST_CHECK_THROW(tensor.GetInfo().GetShape()[1], InvalidArgumentException);
+    CHECK_THROWS_AS(shape[1], InvalidArgumentException);
+    CHECK_THROWS_AS(tensor.GetShape()[1], InvalidArgumentException);
+    CHECK_THROWS_AS(tensor.GetInfo().GetShape()[1], InvalidArgumentException);
 
-    BOOST_TEST(2 == shape[0]);
-    BOOST_TEST(2 == tensor.GetShape()[0]);
-    BOOST_TEST(2 == tensor.GetInfo().GetShape()[0]);
-    BOOST_CHECK_THROW( shape[1], InvalidArgumentException );
+    CHECK(2 == shape[0]);
+    CHECK(2 == tensor.GetShape()[0]);
+    CHECK(2 == tensor.GetInfo().GetShape()[0]);
+    CHECK_THROWS_AS( shape[1], InvalidArgumentException );
 
-    BOOST_TEST(3 == shape[2]);
-    BOOST_TEST(3 == tensor.GetShape()[2]);
-    BOOST_TEST(3 == tensor.GetInfo().GetShape()[2]);
+    CHECK(3 == shape[2]);
+    CHECK(3 == tensor.GetShape()[2]);
+    CHECK(3 == tensor.GetInfo().GetShape()[2]);
 
     armnn::TensorShape shape_equal;
     armnn::TensorShape shape_different;
     shape_equal = shape;
-    BOOST_TEST(shape_equal == shape);
-    BOOST_TEST(shape_different != shape);
+    CHECK(shape_equal == shape);
+    CHECK(shape_different != shape);
 }
 
-BOOST_AUTO_TEST_CASE(TensorShape_DynamicTensorType1_transitionFromUnknownToKnownDimensionsSizes)
+TEST_CASE("TensorShape_DynamicTensorType1_transitionFromUnknownToKnownDimensionsSizes")
 {
     std::vector<float> mutableDatum  { 42.f, 42.f, 42.f,
                                        0.0f, 0.1f, 0.2f };
@@ -373,74 +345,74 @@
 
     // Specify the number of dimensions
     shape.SetNumDimensions(3);
-    BOOST_CHECK(armnn::Dimensionality::Specified == shape.GetDimensionality());
-    BOOST_CHECK_MESSAGE(3 == shape.GetNumDimensions(), "Number of dimensions is " << shape.GetNumDimensions());
-    BOOST_CHECK(false == shape.GetDimensionSpecificity(0));
-    BOOST_CHECK(false == shape.GetDimensionSpecificity(1));
-    BOOST_CHECK(false == shape.GetDimensionSpecificity(2));
-    BOOST_CHECK(!shape.AreAllDimensionsSpecified());
-    BOOST_CHECK(!shape.IsAtLeastOneDimensionSpecified());
+    CHECK(armnn::Dimensionality::Specified == shape.GetDimensionality());
+    CHECK_MESSAGE(3 == shape.GetNumDimensions(), "Number of dimensions is " << shape.GetNumDimensions());
+    CHECK(false == shape.GetDimensionSpecificity(0));
+    CHECK(false == shape.GetDimensionSpecificity(1));
+    CHECK(false == shape.GetDimensionSpecificity(2));
+    CHECK(!shape.AreAllDimensionsSpecified());
+    CHECK(!shape.IsAtLeastOneDimensionSpecified());
 
     // Specify dimension 0 and 2.
     shape.SetDimensionSize(0, 2);
     shape.SetDimensionSize(2, 3);
-    BOOST_CHECK_MESSAGE(3 == shape.GetNumDimensions(), "Number of dimensions is " << shape.GetNumDimensions());
-    BOOST_CHECK_MESSAGE(6 == shape.GetNumElements(), "Number of elements is " << shape.GetNumElements());
-    BOOST_CHECK(true  == shape.GetDimensionSpecificity(0));
-    BOOST_CHECK(false == shape.GetDimensionSpecificity(1));
-    BOOST_CHECK(true  == shape.GetDimensionSpecificity(2));
-    BOOST_CHECK(!shape.AreAllDimensionsSpecified());
-    BOOST_CHECK(shape.IsAtLeastOneDimensionSpecified());
+    CHECK_MESSAGE(3 == shape.GetNumDimensions(), "Number of dimensions is " << shape.GetNumDimensions());
+    CHECK_MESSAGE(6 == shape.GetNumElements(), "Number of elements is " << shape.GetNumElements());
+    CHECK(true  == shape.GetDimensionSpecificity(0));
+    CHECK(false == shape.GetDimensionSpecificity(1));
+    CHECK(true  == shape.GetDimensionSpecificity(2));
+    CHECK(!shape.AreAllDimensionsSpecified());
+    CHECK(shape.IsAtLeastOneDimensionSpecified());
 
     info.SetShape(shape);
     armnn::Tensor tensor2( info, &mutableDatum );
-    BOOST_TEST(2 == shape[0]);
-    BOOST_TEST(2 == tensor2.GetShape()[0]);
-    BOOST_TEST(2 == tensor2.GetInfo().GetShape()[0]);
+    CHECK(2 == shape[0]);
+    CHECK(2 == tensor2.GetShape()[0]);
+    CHECK(2 == tensor2.GetInfo().GetShape()[0]);
 
-    BOOST_CHECK_THROW(shape[1], InvalidArgumentException);
-    BOOST_CHECK_THROW(tensor.GetShape()[1], InvalidArgumentException);
-    BOOST_CHECK_THROW(tensor.GetInfo().GetShape()[1], InvalidArgumentException);
+    CHECK_THROWS_AS(shape[1], InvalidArgumentException);
+    CHECK_THROWS_AS(tensor.GetShape()[1], InvalidArgumentException);
+    CHECK_THROWS_AS(tensor.GetInfo().GetShape()[1], InvalidArgumentException);
 
-    BOOST_TEST(3 == shape[2]);
-    BOOST_TEST(3 == tensor2.GetShape()[2]);
-    BOOST_TEST(3 == tensor2.GetInfo().GetShape()[2]);
+    CHECK(3 == shape[2]);
+    CHECK(3 == tensor2.GetShape()[2]);
+    CHECK(3 == tensor2.GetInfo().GetShape()[2]);
 
     armnn::TensorShape shape_equal;
     armnn::TensorShape shape_different;
     shape_equal = shape;
-    BOOST_TEST(shape_equal == shape);
-    BOOST_TEST(shape_different != shape);
+    CHECK(shape_equal == shape);
+    CHECK(shape_different != shape);
 
     // Specify dimension 1.
     shape.SetDimensionSize(1, 5);
-    BOOST_CHECK_MESSAGE(3 == shape.GetNumDimensions(), "Number of dimensions is " << shape.GetNumDimensions());
-    BOOST_CHECK_MESSAGE(30 == shape.GetNumElements(), "Number of elements is " << shape.GetNumElements());
-    BOOST_CHECK(true  == shape.GetDimensionSpecificity(0));
-    BOOST_CHECK(true  == shape.GetDimensionSpecificity(1));
-    BOOST_CHECK(true  == shape.GetDimensionSpecificity(2));
-    BOOST_CHECK(shape.AreAllDimensionsSpecified());
-    BOOST_CHECK(shape.IsAtLeastOneDimensionSpecified());
+    CHECK_MESSAGE(3 == shape.GetNumDimensions(), "Number of dimensions is " << shape.GetNumDimensions());
+    CHECK_MESSAGE(30 == shape.GetNumElements(), "Number of elements is " << shape.GetNumElements());
+    CHECK(true  == shape.GetDimensionSpecificity(0));
+    CHECK(true  == shape.GetDimensionSpecificity(1));
+    CHECK(true  == shape.GetDimensionSpecificity(2));
+    CHECK(shape.AreAllDimensionsSpecified());
+    CHECK(shape.IsAtLeastOneDimensionSpecified());
 }
 
-BOOST_AUTO_TEST_CASE(Tensor_emptyConstructors)
+TEST_CASE("Tensor_emptyConstructors")
 {
     auto shape = armnn::TensorShape();
-    BOOST_CHECK_MESSAGE( 0 == shape.GetNumDimensions(), "Number of dimensions is " << shape.GetNumDimensions());
-    BOOST_CHECK_MESSAGE( 0 == shape.GetNumElements(), "Number of elements is " << shape.GetNumElements());
-    BOOST_CHECK( armnn::Dimensionality::Specified == shape.GetDimensionality());
-    BOOST_CHECK( shape.AreAllDimensionsSpecified());
-    BOOST_CHECK_THROW( shape[0], InvalidArgumentException );
+    CHECK_MESSAGE( 0 == shape.GetNumDimensions(), "Number of dimensions is " << shape.GetNumDimensions());
+    CHECK_MESSAGE( 0 == shape.GetNumElements(), "Number of elements is " << shape.GetNumElements());
+    CHECK( armnn::Dimensionality::Specified == shape.GetDimensionality());
+    CHECK( shape.AreAllDimensionsSpecified());
+    CHECK_THROWS_AS( shape[0], InvalidArgumentException );
 
     auto tensor = armnn::Tensor();
-    BOOST_CHECK_MESSAGE( 0 == tensor.GetNumDimensions(), "Number of dimensions is " << tensor.GetNumDimensions());
-    BOOST_CHECK_MESSAGE( 0 == tensor.GetNumElements(), "Number of elements is " << tensor.GetNumElements());
-    BOOST_CHECK_MESSAGE( 0 == tensor.GetShape().GetNumDimensions(), "Number of dimensions is " <<
+    CHECK_MESSAGE( 0 == tensor.GetNumDimensions(), "Number of dimensions is " << tensor.GetNumDimensions());
+    CHECK_MESSAGE( 0 == tensor.GetNumElements(), "Number of elements is " << tensor.GetNumElements());
+    CHECK_MESSAGE( 0 == tensor.GetShape().GetNumDimensions(), "Number of dimensions is " <<
                         tensor.GetShape().GetNumDimensions());
-    BOOST_CHECK_MESSAGE( 0 == tensor.GetShape().GetNumElements(), "Number of dimensions is " <<
+    CHECK_MESSAGE( 0 == tensor.GetShape().GetNumElements(), "Number of elements is " <<
                         tensor.GetShape().GetNumElements());
-    BOOST_CHECK( armnn::Dimensionality::Specified == tensor.GetShape().GetDimensionality());
-    BOOST_CHECK( tensor.GetShape().AreAllDimensionsSpecified());
-    BOOST_CHECK_THROW( tensor.GetShape()[0], InvalidArgumentException );
+    CHECK( armnn::Dimensionality::Specified == tensor.GetShape().GetDimensionality());
+    CHECK( tensor.GetShape().AreAllDimensionsSpecified());
+    CHECK_THROWS_AS( tensor.GetShape()[0], InvalidArgumentException );
 }
-BOOST_AUTO_TEST_SUITE_END()
+}
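For reference, the assertion and suite mapping applied throughout this patch can be summarised in one minimal, self-contained doctest file. This is an illustrative sketch only; the suite, case, and variable names are hypothetical and not part of the patch.

    #include <doctest/doctest.h>

    #include <stdexcept>
    #include <vector>

    TEST_SUITE("MappingExample")                          // was BOOST_AUTO_TEST_SUITE(MappingExample)
    {
    TEST_CASE("MappingExampleCase")                       // was BOOST_AUTO_TEST_CASE(MappingExampleCase)
    {
        std::vector<int> values { 1, 2, 3 };
        CHECK(values.size() == 3u);                       // was BOOST_TEST / BOOST_CHECK
        CHECK_EQ(values.front(), 1);                      // was BOOST_CHECK_EQUAL
        CHECK_MESSAGE(values.back() == 3,
                      "Back is " << values.back());       // was BOOST_CHECK_MESSAGE
        CHECK_NOTHROW(values.at(2));                      // was BOOST_CHECK_NO_THROW
        CHECK_THROWS_AS(values.at(3), std::out_of_range); // was BOOST_CHECK_THROW
    }
    }                                                     // was BOOST_AUTO_TEST_SUITE_END()

Fixture-based cases follow the same pattern, with TEST_CASE_FIXTURE(Fixture, "Name") replacing BOOST_FIXTURE_TEST_CASE(Name, Fixture).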
diff --git a/src/armnn/test/TestInputOutputLayerVisitor.cpp b/src/armnn/test/TestInputOutputLayerVisitor.cpp
index 6563517..8462290 100644
--- a/src/armnn/test/TestInputOutputLayerVisitor.cpp
+++ b/src/armnn/test/TestInputOutputLayerVisitor.cpp
@@ -5,12 +5,14 @@
 #include "TestInputOutputLayerVisitor.hpp"
 #include "Network.hpp"
 
+#include <doctest/doctest.h>
+
 namespace armnn
 {
 
-BOOST_AUTO_TEST_SUITE(TestInputOutputLayerVisitor)
-
-BOOST_AUTO_TEST_CASE(CheckInputLayerVisitorBindingIdAndName)
+TEST_SUITE("TestInputOutputLayerVisitor")
+{
+TEST_CASE("CheckInputLayerVisitorBindingIdAndName")
 {
     const char* layerName = "InputLayer";
     TestInputLayerVisitor visitor(1, layerName);
@@ -20,7 +22,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckInputLayerVisitorBindingIdAndNameNull)
+TEST_CASE("CheckInputLayerVisitorBindingIdAndNameNull")
 {
     TestInputLayerVisitor visitor(1);
     NetworkImpl net;
@@ -29,7 +31,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckOutputLayerVisitorBindingIdAndName)
+TEST_CASE("CheckOutputLayerVisitorBindingIdAndName")
 {
     const char* layerName = "OutputLayer";
     TestOutputLayerVisitor visitor(1, layerName);
@@ -39,7 +41,7 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_CASE(CheckOutputLayerVisitorBindingIdAndNameNull)
+TEST_CASE("CheckOutputLayerVisitorBindingIdAndNameNull")
 {
     TestOutputLayerVisitor visitor(1);
     NetworkImpl net;
@@ -48,6 +50,6 @@
     layer->Accept(visitor);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
 
 } //namespace armnn
\ No newline at end of file
diff --git a/src/armnn/test/TestInputOutputLayerVisitor.hpp b/src/armnn/test/TestInputOutputLayerVisitor.hpp
index f67a65c..b890895 100644
--- a/src/armnn/test/TestInputOutputLayerVisitor.hpp
+++ b/src/armnn/test/TestInputOutputLayerVisitor.hpp
@@ -5,14 +5,14 @@
 #pragma once
 
 #include "TestLayerVisitor.hpp"
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 namespace armnn
 {
 
 void CheckLayerBindingId(LayerBindingId visitorId, LayerBindingId id)
 {
-    BOOST_CHECK_EQUAL(visitorId, id);
+    CHECK_EQ(visitorId, id);
 }
 
 // Concrete TestLayerVisitor subclasses for layers taking LayerBindingId argument with overridden VisitLayer methods
diff --git a/src/armnn/test/TestLayerVisitor.cpp b/src/armnn/test/TestLayerVisitor.cpp
index ba30dbc..ec40511 100644
--- a/src/armnn/test/TestLayerVisitor.cpp
+++ b/src/armnn/test/TestLayerVisitor.cpp
@@ -3,9 +3,10 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "TestLayerVisitor.hpp"
 
+#include <doctest/doctest.h>
+
 namespace armnn
 {
 
@@ -13,29 +14,29 @@
 {
     if (name == nullptr)
     {
-        BOOST_CHECK(m_LayerName == nullptr);
+        CHECK(m_LayerName == nullptr);
     }
     else if (m_LayerName == nullptr)
     {
-        BOOST_CHECK(name == nullptr);
+        CHECK(name == nullptr);
     }
     else
     {
-        BOOST_CHECK_EQUAL(m_LayerName, name);
+        CHECK_EQ(std::string(m_LayerName), std::string(name));
     }
 }
 
 void TestLayerVisitor::CheckLayerPointer(const IConnectableLayer* layer)
 {
-    BOOST_CHECK(layer != nullptr);
+    CHECK(layer != nullptr);
 }
 
 void TestLayerVisitor::CheckConstTensors(const ConstTensor& expected, const ConstTensor& actual)
 {
-    BOOST_CHECK(expected.GetInfo() == actual.GetInfo());
-    BOOST_CHECK(expected.GetNumDimensions() == actual.GetNumDimensions());
-    BOOST_CHECK(expected.GetNumElements() == actual.GetNumElements());
-    BOOST_CHECK(expected.GetNumBytes() == actual.GetNumBytes());
+    CHECK(expected.GetInfo() == actual.GetInfo());
+    CHECK(expected.GetNumDimensions() == actual.GetNumDimensions());
+    CHECK(expected.GetNumElements() == actual.GetNumElements());
+    CHECK(expected.GetNumBytes() == actual.GetNumBytes());
     if (expected.GetNumBytes() == actual.GetNumBytes())
     {
         //check data is the same byte by byte
@@ -43,7 +44,7 @@
         const unsigned char* actualPtr = static_cast<const unsigned char*>(actual.GetMemoryArea());
         for (unsigned int i = 0; i < expected.GetNumBytes(); i++)
         {
-            BOOST_CHECK(*(expectedPtr + i) == *(actualPtr + i));
+            CHECK(*(expectedPtr + i) == *(actualPtr + i));
         }
     }
 }
@@ -51,7 +52,7 @@
 void TestLayerVisitor::CheckOptionalConstTensors(const Optional<ConstTensor>& expected,
                                                  const Optional<ConstTensor>& actual)
 {
-    BOOST_CHECK(expected.has_value() == actual.has_value());
+    CHECK(expected.has_value() == actual.has_value());
     if (expected.has_value() && actual.has_value())
     {
         CheckConstTensors(expected.value(), actual.value());
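The std::string wrapping in CheckLayerName above preserves the old behaviour: BOOST_CHECK_EQUAL treats C strings specially and compares their contents, whereas doctest's CHECK_EQ applied to two const char* values compares the pointers. A minimal sketch of the distinction, using hypothetical buffers:

    #include <doctest/doctest.h>

    #include <cstring>
    #include <string>

    TEST_CASE("CStringComparisonSketch")
    {
        char bufferA[] = "layer";
        char bufferB[] = "layer";                   // same contents, distinct storage
        // Value comparison, as done in CheckLayerName:
        CHECK_EQ(std::string(bufferA), std::string(bufferB));
        // Equivalent C-style content check:
        CHECK(std::strcmp(bufferA, bufferB) == 0);
        // CHECK_EQ(bufferA, bufferB) would compare the two addresses instead.
    }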
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
index 39e2543..39c00f4 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.cpp
@@ -7,11 +7,13 @@
 
 #include <armnn/Exceptions.hpp>
 
+#include <doctest/doctest.h>
+
 namespace
 {
 
-#define TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(name) \
-BOOST_AUTO_TEST_CASE(Check##name##LayerVisitorNameAndDescriptor) \
+#define TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(name, testName) \
+TEST_CASE(#testName) \
 { \
     const char* layerName = "name##Layer"; \
     armnn::name##Descriptor descriptor = GetDescriptor<armnn::name##Descriptor>(); \
@@ -21,8 +23,8 @@
     layer->Accept(visitor); \
 }
 
-#define TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(name) \
-BOOST_AUTO_TEST_CASE(Check##name##LayerVisitorNameNullptrAndDescriptor) \
+#define TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(name, testName) \
+TEST_CASE(#testName) \
 { \
     armnn::name##Descriptor descriptor = GetDescriptor<armnn::name##Descriptor>(); \
     Test##name##LayerVisitor visitor(descriptor); \
@@ -31,10 +33,6 @@
     layer->Accept(visitor); \
 }
 
-#define TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(name) \
-TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(name) \
-TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(name)
-
 template<typename Descriptor> Descriptor GetDescriptor();
 
 template<>
@@ -273,35 +271,93 @@
 
 } // anonymous namespace
 
-BOOST_AUTO_TEST_SUITE(TestNameAndDescriptorLayerVisitor)
+TEST_SUITE("TestNameAndDescriptorLayerVisitor")
+{
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Activation, CheckActivationLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(ArgMinMax, CheckArgMinMaxLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(DepthToSpace, CheckDepthToSpaceLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(BatchToSpaceNd, CheckBatchToSpaceNdLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Comparison, CheckComparisonLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Concat, CheckConcatLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(ElementwiseUnary, CheckElementwiseUnaryLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Fill, CheckFillLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Gather, CheckGatherLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(InstanceNormalization,
+                                                  CheckInstanceNormalizationLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(L2Normalization, CheckL2NormalizationLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(LogicalBinary, CheckLogicalBinaryLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(LogSoftmax, CheckLogSoftmaxLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Mean, CheckMeanLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Normalization, CheckNormalizationLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Pad, CheckPadLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Permute, CheckPermuteLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Pooling2d, CheckPooling2dLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Reshape, CheckReshapeLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Resize, CheckResizeLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Slice, CheckSliceLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Softmax, CheckSoftmaxLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(SpaceToBatchNd, CheckSpaceToBatchNdLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(SpaceToDepth, CheckSpaceToDepthLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Splitter, CheckSplitterLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Stack, CheckStackLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(StridedSlice, CheckStridedSliceLayerVisitorNameAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_AND_DESCRIPTOR(Transpose, CheckTransposeLayerVisitorNameAndDescriptor)
 
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Activation)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(ArgMinMax)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(DepthToSpace)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(BatchToSpaceNd)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Comparison)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Concat)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(ElementwiseUnary)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Fill)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Gather)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(InstanceNormalization)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(L2Normalization)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(LogicalBinary)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(LogSoftmax)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Mean)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Normalization)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Pad)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Permute)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Pooling2d)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Reshape)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Resize)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Slice)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Softmax)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(SpaceToBatchNd)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(SpaceToDepth)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Splitter)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Stack)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(StridedSlice)
-TEST_SUITE_NAME_AND_DESCRIPTOR_LAYER_VISITOR(Transpose)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Activation,
+    CheckActivationLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(ArgMinMax,
+    CheckArgMinMaxLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(DepthToSpace,
+    CheckDepthToSpaceLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(BatchToSpaceNd,
+    CheckBatchToSpaceNdLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Comparison,
+    CheckComparisonLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Concat,
+    CheckConcatLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(ElementwiseUnary,
+    CheckElementwiseUnaryLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Fill,
+    CheckFillLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Gather,
+    CheckGatherLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(InstanceNormalization,
+    CheckInstanceNormalizationLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(L2Normalization,
+    CheckL2NormalizationLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(LogicalBinary,
+    CheckLogicalBinaryLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(LogSoftmax,
+    CheckLogSoftmaxLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Mean,
+    CheckMeanLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Normalization,
+    CheckNormalizationLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Pad,
+    CheckPadLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Permute,
+    CheckPermuteLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Pooling2d,
+    CheckPooling2dLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Reshape,
+    CheckReshapeLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Resize,
+    CheckResizeLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Slice,
+    CheckSliceLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Softmax,
+    CheckSoftmaxLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(SpaceToBatchNd,
+    CheckSpaceToBatchNdLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(SpaceToDepth,
+    CheckSpaceToDepthLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Splitter,
+    CheckSplitterLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Stack,
+    CheckStackLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(StridedSlice,
+    CheckStridedSliceLayerVisitorNameNullptrAndDescriptor)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR_AND_DESCRIPTOR(Transpose,
+    CheckTransposeLayerVisitorNameNullptrAndDescriptor)
 
-BOOST_AUTO_TEST_SUITE_END()
+}
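The second macro parameter exists because doctest's TEST_CASE takes a string rather than an identifier, so the old Check##name##... token pasting can no longer build the test name; each instantiation now spells out the full name and the macro stringifies it with #testName. A self-contained sketch of the pattern, with a hypothetical macro and hypothetical names:

    #include <doctest/doctest.h>

    #include <string>

    #define EXAMPLE_LAYER_VISITOR_TEST(name, testName) \
    TEST_CASE(#testName) \
    { \
        /* the doctest case name is the stringised second argument */ \
        CHECK(std::string(#testName).find(#name) != std::string::npos); \
    }

    EXAMPLE_LAYER_VISITOR_TEST(Pooling2d, CheckPooling2dLayerVisitorNameAndDescriptor)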
diff --git a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
index c911caa..a3c1420 100644
--- a/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
+++ b/src/armnn/test/TestNameAndDescriptorLayerVisitor.hpp
@@ -6,8 +6,7 @@
 
 #include "TestLayerVisitor.hpp"
 
-
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 namespace
 {
diff --git a/src/armnn/test/TestNameOnlyLayerVisitor.cpp b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
index 971d7ee..00d65f8 100644
--- a/src/armnn/test/TestNameOnlyLayerVisitor.cpp
+++ b/src/armnn/test/TestNameOnlyLayerVisitor.cpp
@@ -7,13 +7,13 @@
 
 #include <Network.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 namespace
 {
 
-#define TEST_CASE_CHECK_LAYER_VISITOR_NAME(name) \
-BOOST_AUTO_TEST_CASE(Check##name##LayerVisitorName) \
+#define TEST_CASE_CHECK_LAYER_VISITOR_NAME(name, testName) \
+TEST_CASE(#testName) \
 { \
     Test##name##LayerVisitor visitor("name##Layer"); \
     armnn::NetworkImpl net; \
@@ -21,8 +21,8 @@
     layer->Accept(visitor); \
 }
 
-#define TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(name) \
-BOOST_AUTO_TEST_CASE(Check##name##LayerVisitorNameNullptr) \
+#define TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(name, testName) \
+TEST_CASE(#testName) \
 { \
     Test##name##LayerVisitor visitor; \
     armnn::NetworkImpl net; \
@@ -30,26 +30,35 @@
     layer->Accept(visitor); \
 }
 
-#define TEST_SUITE_NAME_ONLY_LAYER_VISITOR(name) \
-TEST_CASE_CHECK_LAYER_VISITOR_NAME(name) \
-TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(name)
-
 } // anonymous namespace
 
-BOOST_AUTO_TEST_SUITE(TestNameOnlyLayerVisitor)
+TEST_SUITE("TestNameOnlyLayerVisitor")
+{
+TEST_CASE_CHECK_LAYER_VISITOR_NAME(Addition, CheckAdditionLayerVisitorName)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Addition, CheckAdditionLayerVisitorNameNullptr)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME(Dequantize, CheckDequantizeLayerVisitorName)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Dequantize, CheckDequantizeLayerVisitorNameNullptr)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME(Division, CheckDivisionLayerVisitorName)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Division, CheckDivisionLayerVisitorNameNullptr)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME(Floor, CheckFloorLayerVisitorName)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Floor, CheckFloorLayerVisitorNameNullptr)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME(Maximum, CheckMaximumLayerVisitorName)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Maximum, CheckMaximumLayerVisitorNameNullptr)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME(Merge, CheckMergeLayerVisitorName)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Merge, CheckMergeLayerVisitorNameNullptr)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME(Minimum, CheckMinimumLayerVisitorName)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Minimum, CheckMinimumLayerVisitorNameNullptr)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME(Multiplication, CheckMultiplicationLayerVisitorName)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Multiplication, CheckMultiplicationLayerVisitorNameNullptr)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME(Prelu, CheckPreluLayerVisitorName)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Prelu, CheckPreluLayerVisitorNameNullptr)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME(Quantize, CheckQuantizeLayerVisitorName)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Quantize, CheckQuantizeLayerVisitorNameNullptr)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME(Rank, CheckRankLayerVisitorName)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Rank, CheckRankLayerVisitorNameNullptr)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME(Subtraction, CheckSubtractionLayerVisitorName)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Subtraction, CheckSubtractionLayerVisitorNameNullptr)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME(Switch, CheckSwitchLayerVisitorName)
+TEST_CASE_CHECK_LAYER_VISITOR_NAME_NULLPTR(Switch, CheckSwitchLayerVisitorNameNullptr)
 
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Addition)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Dequantize)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Division)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Floor)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Maximum)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Merge)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Minimum)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Multiplication)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Prelu)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Quantize)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Rank)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Subtraction)
-TEST_SUITE_NAME_ONLY_LAYER_VISITOR(Switch)
-
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/UnitTests.cpp b/src/armnn/test/UnitTests.cpp
index a587e9b..cf532a7 100644
--- a/src/armnn/test/UnitTests.cpp
+++ b/src/armnn/test/UnitTests.cpp
@@ -2,12 +2,13 @@
 // Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
-#define BOOST_TEST_MODULE UnitTests
-#include <boost/test/unit_test.hpp>
+
+#ifndef DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
+#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
+#endif
+#include <doctest/doctest.h>
 
 #include "UnitTests.hpp"
-#include <armnn/Logging.hpp>
-#include <armnn/utility/NumericCast.hpp>
 
 struct ConfigureLoggingFixture
 {
@@ -17,21 +18,20 @@
     }
 };
 
-BOOST_GLOBAL_FIXTURE(ConfigureLoggingFixture);
 
-BOOST_AUTO_TEST_SUITE(LoggerSuite)
 
-BOOST_AUTO_TEST_CASE(LoggerTest)
+TEST_SUITE("LoggerSuite")
+{
+TEST_CASE_FIXTURE(ConfigureLoggingFixture, "LoggerTest")
 {
     std::stringstream ss;
-
     {
         struct StreamRedirector
         {
         public:
             StreamRedirector(std::ostream& stream, std::streambuf* newStreamBuffer)
-                : m_Stream(stream)
-                , m_BackupBuffer(m_Stream.rdbuf(newStreamBuffer))
+            : m_Stream(stream)
+            , m_BackupBuffer(m_Stream.rdbuf(newStreamBuffer))
             {}
             ~StreamRedirector() { m_Stream.rdbuf(m_BackupBuffer); }
 
@@ -40,14 +40,12 @@
             std::streambuf* m_BackupBuffer;
         };
 
-
         StreamRedirector redirect(std::cout, ss.rdbuf());
 
         using namespace armnn;
         SetLogFilter(LogSeverity::Trace);
         SetAllLoggingSinks(true, false, false);
 
-
         ARMNN_LOG(trace) << "My trace message; " << -2;
         ARMNN_LOG(debug) << "My debug message; " << -1;
         ARMNN_LOG(info) << "My info message; " << 0;
@@ -56,15 +54,14 @@
         ARMNN_LOG(fatal) << "My fatal message; "  << 3;
 
         SetLogFilter(LogSeverity::Fatal);
-
     }
 
-    BOOST_CHECK(ss.str().find("Trace: My trace message; -2") != std::string::npos);
-    BOOST_CHECK(ss.str().find("Debug: My debug message; -1") != std::string::npos);
-    BOOST_CHECK(ss.str().find("Info: My info message; 0") != std::string::npos);
-    BOOST_CHECK(ss.str().find("Warning: My warning message; 1") != std::string::npos);
-    BOOST_CHECK(ss.str().find("Error: My error message; 2") != std::string::npos);
-    BOOST_CHECK(ss.str().find("Fatal: My fatal message; 3") != std::string::npos);
+    CHECK(ss.str().find("Trace: My trace message; -2") != std::string::npos);
+    CHECK(ss.str().find("Debug: My debug message; -1") != std::string::npos);
+    CHECK(ss.str().find("Info: My info message; 0") != std::string::npos);
+    CHECK(ss.str().find("Warning: My warning message; 1") != std::string::npos);
+    CHECK(ss.str().find("Error: My error message; 2") != std::string::npos);
+    CHECK(ss.str().find("Fatal: My fatal message; 3") != std::string::npos);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
\ No newline at end of file
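For context: DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN makes doctest generate main() in this translation unit, replacing BOOST_TEST_MODULE, and TEST_CASE_FIXTURE stands in for the removed BOOST_GLOBAL_FIXTURE. Note that the fixture is now constructed per test case rather than once per run, which is sufficient for the logging test. A minimal sketch of the pattern, with hypothetical names:

    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN   // doctest supplies main()
    #include <doctest/doctest.h>

    struct ExampleFixture
    {
        ExampleFixture()  { /* per-case setup, e.g. configure logging */ }
        ~ExampleFixture() { /* per-case teardown */ }
    };

    TEST_CASE_FIXTURE(ExampleFixture, "UsesTheFixture")
    {
        // The body runs as a member of a class derived from ExampleFixture,
        // so the constructor has already run.
        CHECK(true);
    }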
diff --git a/src/armnn/test/UnitTests.hpp b/src/armnn/test/UnitTests.hpp
index bb91c4d..e4a8b96 100644
--- a/src/armnn/test/UnitTests.hpp
+++ b/src/armnn/test/UnitTests.hpp
@@ -14,7 +14,7 @@
 
 #include "TensorHelpers.hpp"
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 inline void ConfigureLoggingTest()
 {
@@ -28,7 +28,7 @@
 //      using FactoryType = armnn::ClWorkloadFactory;
 //      using FactoryType = armnn::NeonWorkloadFactory;
 
-/// Executes BOOST_TEST on CompareTensors() return value so that the predicate_result message is reported.
+/// Executes CHECK_MESSAGE on the CompareTensors() return value so that the comparison message is reported.
 /// If the test reports itself as not supported then the tensors are not compared.
 /// Additionally this checks that the supportedness reported by the test matches the name of the test.
 /// Unsupported tests must be 'tagged' by including "UNSUPPORTED" in their name.
@@ -40,8 +40,8 @@
 void CompareTestResultIfSupported(const std::string& testName, const LayerTestResult<T, n>& testResult)
 {
     bool testNameIndicatesUnsupported = testName.find("UNSUPPORTED") != std::string::npos;
-    BOOST_CHECK_MESSAGE(testNameIndicatesUnsupported != testResult.m_Supported,
-                        "The test name does not match the supportedness it is reporting");
+    CHECK_MESSAGE(testNameIndicatesUnsupported != testResult.m_Supported,
+                  "The test name does not match the supportedness it is reporting");
     if (testResult.m_Supported)
     {
         auto result = CompareTensors(testResult.m_ActualData,
@@ -49,7 +49,7 @@
                                      testResult.m_ActualShape,
                                      testResult.m_ExpectedShape,
                                      testResult.m_CompareBoolean);
-        BOOST_TEST(result.m_Result, result.m_Message.str());
+        CHECK_MESSAGE(result.m_Result, result.m_Message.str());
     }
 }
 
@@ -59,15 +59,15 @@
     bool testNameIndicatesUnsupported = testName.find("UNSUPPORTED") != std::string::npos;
     for (unsigned int i = 0; i < testResult.size(); ++i)
     {
-        BOOST_CHECK_MESSAGE(testNameIndicatesUnsupported != testResult[i].m_Supported,
-                            "The test name does not match the supportedness it is reporting");
+        CHECK_MESSAGE(testNameIndicatesUnsupported != testResult[i].m_Supported,
+                      "The test name does not match the supportedness it is reporting");
         if (testResult[i].m_Supported)
         {
             auto result = CompareTensors(testResult[i].m_ActualData,
                                          testResult[i].m_ExpectedData,
                                          testResult[i].m_ActualShape,
                                          testResult[i].m_ExpectedShape);
-            BOOST_TEST(result.m_Result, result.m_Message.str());
+            CHECK_MESSAGE(result.m_Result, result.m_Message.str());
         }
     }
 }
@@ -106,19 +106,31 @@
 }
 
 #define ARMNN_SIMPLE_TEST_CASE(TestName, TestFunction) \
-    BOOST_AUTO_TEST_CASE(TestName) \
+    TEST_CASE(#TestName) \
     { \
         TestFunction(); \
     }
 
 #define ARMNN_AUTO_TEST_CASE(TestName, TestFunction, ...) \
-    BOOST_AUTO_TEST_CASE(TestName) \
+    TEST_CASE(#TestName) \
+    { \
+        RunTestFunction<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
+    }
+
+#define ARMNN_AUTO_TEST_FIXTURE(TestName, Fixture, TestFunction, ...) \
+    TEST_CASE_FIXTURE(Fixture, #TestName) \
     { \
         RunTestFunction<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
     }
 
 #define ARMNN_AUTO_TEST_CASE_WITH_THF(TestName, TestFunction, ...) \
-    BOOST_AUTO_TEST_CASE(TestName) \
+    TEST_CASE(#TestName) \
+    { \
+        RunTestFunctionUsingTensorHandleFactory<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
+    }
+
+#define ARMNN_AUTO_TEST_FIXTURE_WITH_THF(TestName, Fixture, TestFunction, ...) \
+    TEST_CASE_FIXTURE(Fixture, #TestName) \
     { \
         RunTestFunctionUsingTensorHandleFactory<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
     }
@@ -152,25 +164,25 @@
 }
 
 #define ARMNN_COMPARE_REF_AUTO_TEST_CASE(TestName, TestFunction, ...) \
-    BOOST_AUTO_TEST_CASE(TestName) \
+    TEST_CASE(#TestName) \
     { \
         CompareRefTestFunction<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
     }
 
 #define ARMNN_COMPARE_REF_AUTO_TEST_CASE_WITH_THF(TestName, TestFunction, ...) \
-    BOOST_AUTO_TEST_CASE(TestName) \
+    TEST_CASE(#TestName) \
     { \
         CompareRefTestFunctionUsingTensorHandleFactory<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
     }
 
 #define ARMNN_COMPARE_REF_FIXTURE_TEST_CASE(TestName, Fixture, TestFunction, ...) \
-    BOOST_FIXTURE_TEST_CASE(TestName, Fixture) \
+    TEST_CASE_FIXTURE(Fixture, #TestName) \
     { \
         CompareRefTestFunction<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
     }
 
 #define ARMNN_COMPARE_REF_FIXTURE_TEST_CASE_WITH_THF(TestName, Fixture, TestFunction, ...) \
-    BOOST_FIXTURE_TEST_CASE(TestName, Fixture) \
+    TEST_CASE_FIXTURE(Fixture, #TestName) \
     { \
         CompareRefTestFunctionUsingTensorHandleFactory<FactoryType>(#TestName, &TestFunction, ##__VA_ARGS__); \
     }
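The new ARMNN_AUTO_TEST_FIXTURE and _WITH_THF variants mirror the existing macros, with the test name stringified for doctest. As a rough illustration of a call site and its expansion (the fixture and test-function names here are hypothetical):

    // ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ExampleTest, ExampleFixture, ExampleTestImpl)
    // expands, approximately, to:
    TEST_CASE_FIXTURE(ExampleFixture, "ExampleTest")
    {
        RunTestFunctionUsingTensorHandleFactory<FactoryType>("ExampleTest", &ExampleTestImpl);
    }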
diff --git a/src/armnn/test/UtilityTests.cpp b/src/armnn/test/UtilityTests.cpp
index bad6c22..b3b6c3b 100644
--- a/src/armnn/test/UtilityTests.cpp
+++ b/src/armnn/test/UtilityTests.cpp
@@ -3,7 +3,7 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #define ARMNN_POLYMORPHIC_CAST_TESTABLE
 #define ARMNN_NUMERIC_CAST_TESTABLE
@@ -17,9 +17,9 @@
 #include <limits>
 
 // Tests of include/Utility files
-BOOST_AUTO_TEST_SUITE(UtilityTests)
-
-BOOST_AUTO_TEST_CASE(PolymorphicDowncast)
+TEST_SUITE("UtilityTests")
+{
+TEST_CASE("PolymorphicDowncast")
 {
     using namespace armnn;
     class Base
@@ -44,19 +44,19 @@
     Child1 child1;
     Base* base1 = &child1;
     auto ptr1 = dynamic_cast<Child1*>(base1);
-    BOOST_CHECK(ptr1 != nullptr);
-    BOOST_CHECK_NO_THROW(armnn::PolymorphicDowncast<Child1*>(base1));
-    BOOST_CHECK(armnn::PolymorphicDowncast<Child1*>(base1) == ptr1);
+    CHECK(ptr1 != nullptr);
+    CHECK_NOTHROW(armnn::PolymorphicDowncast<Child1*>(base1));
+    CHECK(armnn::PolymorphicDowncast<Child1*>(base1) == ptr1);
 
     auto ptr2 = dynamic_cast<Child2*>(base1);
-    BOOST_CHECK(ptr2 == nullptr);
-    BOOST_CHECK_THROW(armnn::PolymorphicDowncast<Child2*>(base1), std::bad_cast);
+    CHECK(ptr2 == nullptr);
+    CHECK_THROWS_AS(armnn::PolymorphicDowncast<Child2*>(base1), std::bad_cast);
 
     armnn::IgnoreUnused(ptr1, ptr2);
 }
 
 
-BOOST_AUTO_TEST_CASE(PolymorphicPointerDowncast_SharedPointer)
+TEST_CASE("PolymorphicPointerDowncast_SharedPointer")
 {
     using namespace armnn;
     class Base
@@ -81,19 +81,19 @@
     std::shared_ptr<Base> base1 = std::make_shared<Child1>();
 
     std::shared_ptr<Child1> ptr1 = std::static_pointer_cast<Child1>(base1);
-    BOOST_CHECK(ptr1);
-    BOOST_CHECK_NO_THROW(armnn::PolymorphicPointerDowncast<Child1>(base1));
-    BOOST_CHECK(armnn::PolymorphicPointerDowncast<Child1>(base1) == ptr1);
+    CHECK(ptr1);
+    CHECK_NOTHROW(armnn::PolymorphicPointerDowncast<Child1>(base1));
+    CHECK(armnn::PolymorphicPointerDowncast<Child1>(base1) == ptr1);
 
     auto ptr2 = std::dynamic_pointer_cast<Child2>(base1);
-    BOOST_CHECK(!ptr2);
-    BOOST_CHECK_THROW(armnn::PolymorphicPointerDowncast<Child2>(base1), std::bad_cast);
+    CHECK(!ptr2);
+    CHECK_THROWS_AS(armnn::PolymorphicPointerDowncast<Child2>(base1), std::bad_cast);
 
     armnn::IgnoreUnused(ptr1, ptr2);
 }
 
 
-BOOST_AUTO_TEST_CASE(PolymorphicPointerDowncast_BuildInPointer)
+TEST_CASE("PolymorphicPointerDowncast_BuildInPointer")
 {
     using namespace armnn;
     class Base
@@ -118,68 +118,68 @@
     Child1 child1;
     Base* base1 = &child1;
     auto ptr1 = dynamic_cast<Child1*>(base1);
-    BOOST_CHECK(ptr1 != nullptr);
-    BOOST_CHECK_NO_THROW(armnn::PolymorphicPointerDowncast<Child1>(base1));
-    BOOST_CHECK(armnn::PolymorphicPointerDowncast<Child1>(base1) == ptr1);
+    CHECK(ptr1 != nullptr);
+    CHECK_NOTHROW(armnn::PolymorphicPointerDowncast<Child1>(base1));
+    CHECK(armnn::PolymorphicPointerDowncast<Child1>(base1) == ptr1);
 
     auto ptr2 = dynamic_cast<Child2*>(base1);
-    BOOST_CHECK(ptr2 == nullptr);
-    BOOST_CHECK_THROW(armnn::PolymorphicPointerDowncast<Child2>(base1), std::bad_cast);
+    CHECK(ptr2 == nullptr);
+    CHECK_THROWS_AS(armnn::PolymorphicPointerDowncast<Child2>(base1), std::bad_cast);
 
     armnn::IgnoreUnused(ptr1, ptr2);
 }
 
 
-BOOST_AUTO_TEST_CASE(NumericCast)
+TEST_CASE("NumericCast")
 {
     using namespace armnn;
 
     // To 8 bit
-    BOOST_CHECK_THROW(numeric_cast<unsigned char>(-1), std::bad_cast);
-    BOOST_CHECK_THROW(numeric_cast<unsigned char>(1 << 8), std::bad_cast);
-    BOOST_CHECK_THROW(numeric_cast<unsigned char>(1L << 16), std::bad_cast);
-    BOOST_CHECK_THROW(numeric_cast<unsigned char>(1LL << 32), std::bad_cast);
+    CHECK_THROWS_AS(numeric_cast<unsigned char>(-1), std::bad_cast);
+    CHECK_THROWS_AS(numeric_cast<unsigned char>(1 << 8), std::bad_cast);
+    CHECK_THROWS_AS(numeric_cast<unsigned char>(1L << 16), std::bad_cast);
+    CHECK_THROWS_AS(numeric_cast<unsigned char>(1LL << 32), std::bad_cast);
 
-    BOOST_CHECK_THROW(numeric_cast<signed char>((1L << 8)*-1), std::bad_cast);
-    BOOST_CHECK_THROW(numeric_cast<signed char>((1L << 15)*-1), std::bad_cast);
-    BOOST_CHECK_THROW(numeric_cast<signed char>((1LL << 31)*-1), std::bad_cast);
+    CHECK_THROWS_AS(numeric_cast<signed char>((1L << 8)*-1), std::bad_cast);
+    CHECK_THROWS_AS(numeric_cast<signed char>((1L << 15)*-1), std::bad_cast);
+    CHECK_THROWS_AS(numeric_cast<signed char>((1LL << 31)*-1), std::bad_cast);
 
-    BOOST_CHECK_NO_THROW(numeric_cast<unsigned char>(1U));
-    BOOST_CHECK_NO_THROW(numeric_cast<unsigned char>(1L));
-    BOOST_CHECK_NO_THROW(numeric_cast<signed char>(-1));
-    BOOST_CHECK_NO_THROW(numeric_cast<signed char>(-1L));
-    BOOST_CHECK_NO_THROW(numeric_cast<signed char>((1 << 7)*-1));
+    CHECK_NOTHROW(numeric_cast<unsigned char>(1U));
+    CHECK_NOTHROW(numeric_cast<unsigned char>(1L));
+    CHECK_NOTHROW(numeric_cast<signed char>(-1));
+    CHECK_NOTHROW(numeric_cast<signed char>(-1L));
+    CHECK_NOTHROW(numeric_cast<signed char>((1 << 7)*-1));
 
     // To 16 bit
-    BOOST_CHECK_THROW(numeric_cast<uint16_t>(-1), std::bad_cast);
-    BOOST_CHECK_THROW(numeric_cast<uint16_t>(1L << 16), std::bad_cast);
-    BOOST_CHECK_THROW(numeric_cast<uint16_t>(1LL << 32), std::bad_cast);
+    CHECK_THROWS_AS(numeric_cast<uint16_t>(-1), std::bad_cast);
+    CHECK_THROWS_AS(numeric_cast<uint16_t>(1L << 16), std::bad_cast);
+    CHECK_THROWS_AS(numeric_cast<uint16_t>(1LL << 32), std::bad_cast);
 
-    BOOST_CHECK_THROW(numeric_cast<int16_t>(1L << 15), std::bad_cast);
-    BOOST_CHECK_THROW(numeric_cast<int16_t>(1LL << 31), std::bad_cast);
+    CHECK_THROWS_AS(numeric_cast<int16_t>(1L << 15), std::bad_cast);
+    CHECK_THROWS_AS(numeric_cast<int16_t>(1LL << 31), std::bad_cast);
 
-    BOOST_CHECK_NO_THROW(numeric_cast<uint16_t>(1L << 8));
-    BOOST_CHECK_NO_THROW(numeric_cast<int16_t>(1L << 7));
-    BOOST_CHECK_NO_THROW(numeric_cast<int16_t>((1L << 15)*-1));
+    CHECK_NOTHROW(numeric_cast<uint16_t>(1L << 8));
+    CHECK_NOTHROW(numeric_cast<int16_t>(1L << 7));
+    CHECK_NOTHROW(numeric_cast<int16_t>((1L << 15)*-1));
 
-    BOOST_CHECK_NO_THROW(numeric_cast<int16_t>(1U << 8));
-    BOOST_CHECK_NO_THROW(numeric_cast<int16_t>(1U << 14));
+    CHECK_NOTHROW(numeric_cast<int16_t>(1U << 8));
+    CHECK_NOTHROW(numeric_cast<int16_t>(1U << 14));
 
     // To 32 bit
-    BOOST_CHECK_NO_THROW(numeric_cast<uint32_t>(1));
-    BOOST_CHECK_NO_THROW(numeric_cast<uint32_t>(1 << 8));
-    BOOST_CHECK_NO_THROW(numeric_cast<uint32_t>(1L << 16));
-    BOOST_CHECK_NO_THROW(numeric_cast<uint32_t>(1LL << 31));
+    CHECK_NOTHROW(numeric_cast<uint32_t>(1));
+    CHECK_NOTHROW(numeric_cast<uint32_t>(1 << 8));
+    CHECK_NOTHROW(numeric_cast<uint32_t>(1L << 16));
+    CHECK_NOTHROW(numeric_cast<uint32_t>(1LL << 31));
 
-    BOOST_CHECK_NO_THROW(numeric_cast<int32_t>(-1));
-    BOOST_CHECK_NO_THROW(numeric_cast<int32_t>((1L << 8)*-1));
-    BOOST_CHECK_NO_THROW(numeric_cast<int32_t>((1L << 16)*-1));
-    BOOST_CHECK_NO_THROW(numeric_cast<int32_t>((1LL << 31)*-1));
+    CHECK_NOTHROW(numeric_cast<int32_t>(-1));
+    CHECK_NOTHROW(numeric_cast<int32_t>((1L << 8)*-1));
+    CHECK_NOTHROW(numeric_cast<int32_t>((1L << 16)*-1));
+    CHECK_NOTHROW(numeric_cast<int32_t>((1LL << 31)*-1));
 
-    BOOST_CHECK_NO_THROW(numeric_cast<int32_t>(1U));
-    BOOST_CHECK_NO_THROW(numeric_cast<int32_t>(1U << 8));
-    BOOST_CHECK_NO_THROW(numeric_cast<int32_t>(1U << 16));
-    BOOST_CHECK_NO_THROW(numeric_cast<int32_t>(1U << 30));
+    CHECK_NOTHROW(numeric_cast<int32_t>(1U));
+    CHECK_NOTHROW(numeric_cast<int32_t>(1U << 8));
+    CHECK_NOTHROW(numeric_cast<int32_t>(1U << 16));
+    CHECK_NOTHROW(numeric_cast<int32_t>(1U << 30));
 
     float float_max = std::numeric_limits<float>::max();
     float float_min = std::numeric_limits<float>::lowest();
@@ -195,59 +195,59 @@
     auto double_max = std::numeric_limits<double>::max();
 
     // Float to signed integer
-    BOOST_CHECK_NO_THROW(numeric_cast<int32_t>(1.324f));
-    BOOST_CHECK(1 == numeric_cast<int32_t>(1.324f));
-    BOOST_CHECK_NO_THROW(numeric_cast<int32_t>(-1.0f));
-    BOOST_CHECK(-1 == numeric_cast<int32_t>(-1.0f));
+    CHECK_NOTHROW(numeric_cast<int32_t>(1.324f));
+    CHECK(1 == numeric_cast<int32_t>(1.324f));
+    CHECK_NOTHROW(numeric_cast<int32_t>(-1.0f));
+    CHECK(-1 == numeric_cast<int32_t>(-1.0f));
 
-    BOOST_CHECK_NO_THROW(numeric_cast<int8_t>(static_cast<float>(int8_max)));
-    BOOST_CHECK_NO_THROW(numeric_cast<int16_t>(static_cast<float>(int16_max)));
-    BOOST_CHECK_NO_THROW(numeric_cast<int32_t>(static_cast<float>(int32_max)));
+    CHECK_NOTHROW(numeric_cast<int8_t>(static_cast<float>(int8_max)));
+    CHECK_NOTHROW(numeric_cast<int16_t>(static_cast<float>(int16_max)));
+    CHECK_NOTHROW(numeric_cast<int32_t>(static_cast<float>(int32_max)));
 
-    BOOST_CHECK_THROW(numeric_cast<int8_t>(float_max), std::bad_cast);
-    BOOST_CHECK_THROW(numeric_cast<int16_t>(float_max), std::bad_cast);
-    BOOST_CHECK_THROW(numeric_cast<int32_t>(float_max), std::bad_cast);
+    CHECK_THROWS_AS(numeric_cast<int8_t>(float_max), std::bad_cast);
+    CHECK_THROWS_AS(numeric_cast<int16_t>(float_max), std::bad_cast);
+    CHECK_THROWS_AS(numeric_cast<int32_t>(float_max), std::bad_cast);
 
-    BOOST_CHECK_THROW(numeric_cast<int8_t>(float_min), std::bad_cast);
-    BOOST_CHECK_THROW(numeric_cast<int16_t>(float_min), std::bad_cast);
-    BOOST_CHECK_THROW(numeric_cast<int32_t>(float_min), std::bad_cast);
+    CHECK_THROWS_AS(numeric_cast<int8_t>(float_min), std::bad_cast);
+    CHECK_THROWS_AS(numeric_cast<int16_t>(float_min), std::bad_cast);
+    CHECK_THROWS_AS(numeric_cast<int32_t>(float_min), std::bad_cast);
 
     // Signed integer to float
-    BOOST_CHECK_NO_THROW(numeric_cast<float>(1));
-    BOOST_CHECK(1.0 == numeric_cast<float>(1));
-    BOOST_CHECK_NO_THROW(numeric_cast<float>(-1));
-    BOOST_CHECK(-1.0 == numeric_cast<float>(-1));
+    CHECK_NOTHROW(numeric_cast<float>(1));
+    CHECK(1.0 == numeric_cast<float>(1));
+    CHECK_NOTHROW(numeric_cast<float>(-1));
+    CHECK(-1.0 == numeric_cast<float>(-1));
 
-    BOOST_CHECK_NO_THROW(numeric_cast<float>(int8_max));
-    BOOST_CHECK_NO_THROW(numeric_cast<float>(int16_max));
-    BOOST_CHECK_NO_THROW(numeric_cast<float>(int32_max));
+    CHECK_NOTHROW(numeric_cast<float>(int8_max));
+    CHECK_NOTHROW(numeric_cast<float>(int16_max));
+    CHECK_NOTHROW(numeric_cast<float>(int32_max));
 
-    BOOST_CHECK_NO_THROW(numeric_cast<float>(int8_min));
-    BOOST_CHECK_NO_THROW(numeric_cast<float>(int16_min));
-    BOOST_CHECK_NO_THROW(numeric_cast<float>(int32_min));
+    CHECK_NOTHROW(numeric_cast<float>(int8_min));
+    CHECK_NOTHROW(numeric_cast<float>(int16_min));
+    CHECK_NOTHROW(numeric_cast<float>(int32_min));
 
     // Unsigned integer to float
-    BOOST_CHECK_NO_THROW(numeric_cast<float>(1U));
-    BOOST_CHECK(1.0 == numeric_cast<float>(1U));
+    CHECK_NOTHROW(numeric_cast<float>(1U));
+    CHECK(1.0 == numeric_cast<float>(1U));
 
-    BOOST_CHECK_NO_THROW(numeric_cast<float>(uint8_max));
-    BOOST_CHECK_NO_THROW(numeric_cast<float>(uint16_max));
-    BOOST_CHECK_NO_THROW(numeric_cast<float>(uint32_max));
+    CHECK_NOTHROW(numeric_cast<float>(uint8_max));
+    CHECK_NOTHROW(numeric_cast<float>(uint16_max));
+    CHECK_NOTHROW(numeric_cast<float>(uint32_max));
 
     // Float to unsigned integer
-    BOOST_CHECK_NO_THROW(numeric_cast<uint32_t>(1.43243f));
-    BOOST_CHECK(1 == numeric_cast<uint32_t>(1.43243f));
+    CHECK_NOTHROW(numeric_cast<uint32_t>(1.43243f));
+    CHECK(1 == numeric_cast<uint32_t>(1.43243f));
 
-    BOOST_CHECK_THROW(numeric_cast<uint32_t>(-1.1f), std::bad_cast);
-    BOOST_CHECK_THROW(numeric_cast<uint32_t>(float_max), std::bad_cast);
+    CHECK_THROWS_AS(numeric_cast<uint32_t>(-1.1f), std::bad_cast);
+    CHECK_THROWS_AS(numeric_cast<uint32_t>(float_max), std::bad_cast);
 
     // Double checks
-    BOOST_CHECK_THROW(numeric_cast<int32_t>(double_max), std::bad_cast);
-    BOOST_CHECK_THROW(numeric_cast<int32_t>(double_max), std::bad_cast);
-    BOOST_CHECK_THROW(numeric_cast<float>(double_max), std::bad_cast);
-    BOOST_CHECK_NO_THROW(numeric_cast<double>(int32_max));
-    BOOST_CHECK_NO_THROW(numeric_cast<long double>(int32_max));
+    CHECK_THROWS_AS(numeric_cast<int32_t>(double_max), std::bad_cast);
+    CHECK_THROWS_AS(numeric_cast<int32_t>(double_max), std::bad_cast);
+    CHECK_THROWS_AS(numeric_cast<float>(double_max), std::bad_cast);
+    CHECK_NOTHROW(numeric_cast<double>(int32_max));
+    CHECK_NOTHROW(numeric_cast<long double>(int32_max));
 
     }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnn/test/UtilsTests.cpp b/src/armnn/test/UtilsTests.cpp
index f2ca95d..1599d0c 100644
--- a/src/armnn/test/UtilsTests.cpp
+++ b/src/armnn/test/UtilsTests.cpp
@@ -2,7 +2,7 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 
 #include <armnn/BackendHelper.hpp>
@@ -15,52 +15,52 @@
 #include <Graph.hpp>
 #include <ResolveType.hpp>
 
-BOOST_AUTO_TEST_SUITE(Utils)
-
-BOOST_AUTO_TEST_CASE(DataTypeSize)
+TEST_SUITE("Utils")
 {
-    BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::Float32) == 4);
-    BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::QAsymmU8) == 1);
-    BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::Signed32) == 4);
-    BOOST_TEST(armnn::GetDataTypeSize(armnn::DataType::Boolean) == 1);
+TEST_CASE("DataTypeSize")
+{
+    CHECK(armnn::GetDataTypeSize(armnn::DataType::Float32) == 4);
+    CHECK(armnn::GetDataTypeSize(armnn::DataType::QAsymmU8) == 1);
+    CHECK(armnn::GetDataTypeSize(armnn::DataType::Signed32) == 4);
+    CHECK(armnn::GetDataTypeSize(armnn::DataType::Boolean) == 1);
 }
 
-BOOST_AUTO_TEST_CASE(PermuteDescriptorWithTooManyMappings)
+TEST_CASE("PermuteDescriptorWithTooManyMappings")
 {
-    BOOST_CHECK_THROW(armnn::PermuteDescriptor({ 0u, 1u, 2u, 3u, 4u, 5u }), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(armnn::PermuteDescriptor({ 0u, 1u, 2u, 3u, 4u, 5u }), armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(PermuteDescriptorWithInvalidMappings1d)
+TEST_CASE("PermuteDescriptorWithInvalidMappings1d")
 {
-    BOOST_CHECK_THROW(armnn::PermuteDescriptor({ 1u }), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(armnn::PermuteDescriptor({ 1u }), armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(PermuteDescriptorWithInvalidMappings2d)
+TEST_CASE("PermuteDescriptorWithInvalidMappings2d")
 {
-    BOOST_CHECK_THROW(armnn::PermuteDescriptor({ 2u, 0u }), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(armnn::PermuteDescriptor({ 2u, 0u }), armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(PermuteDescriptorWithInvalidMappings3d)
+TEST_CASE("PermuteDescriptorWithInvalidMappings3d")
 {
-    BOOST_CHECK_THROW(armnn::PermuteDescriptor({ 0u, 3u, 1u }), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(armnn::PermuteDescriptor({ 0u, 3u, 1u }), armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(PermuteDescriptorWithInvalidMappings4d)
+TEST_CASE("PermuteDescriptorWithInvalidMappings4d")
 {
-    BOOST_CHECK_THROW(armnn::PermuteDescriptor({ 0u, 1u, 2u, 4u }), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(armnn::PermuteDescriptor({ 0u, 1u, 2u, 4u }), armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(PermuteDescriptorWithInvalidMappings5d)
+TEST_CASE("PermuteDescriptorWithInvalidMappings5d")
 {
-    BOOST_CHECK_THROW(armnn::PermuteDescriptor({ 0u, 1u, 2u, 3u, 5u }), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(armnn::PermuteDescriptor({ 0u, 1u, 2u, 3u, 5u }), armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(PermuteDescriptorWithDuplicatedMappings)
+TEST_CASE("PermuteDescriptorWithDuplicatedMappings")
 {
-    BOOST_CHECK_THROW(armnn::PermuteDescriptor({ 1u, 1u, 0u }), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(armnn::PermuteDescriptor({ 1u, 1u, 0u }), armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(HalfType)
+TEST_CASE("HalfType")
 {
     using namespace half_float::literal;
     armnn::Half a = 1.0_h;
@@ -69,25 +69,25 @@
     armnn::Half c(b);
 
     // Test half type
-    BOOST_CHECK_EQUAL(a, b);
-    BOOST_CHECK_EQUAL(sizeof(c), 2);
+    CHECK_EQ(a, b);
+    CHECK_EQ(sizeof(c), 2);
 
     // Test half type is floating point type
-    BOOST_CHECK(std::is_floating_point<armnn::Half>::value);
+    CHECK(std::is_floating_point<armnn::Half>::value);
 
     // Test utility function returns correct type.
     using ResolvedType = armnn::ResolveType<armnn::DataType::Float16>;
     constexpr bool isHalfType = std::is_same<armnn::Half, ResolvedType>::value;
-    BOOST_CHECK(isHalfType);
+    CHECK(isHalfType);
 
     //Test utility functions return correct size
-    BOOST_CHECK(GetDataTypeSize(armnn::DataType::Float16) == 2);
+    CHECK(GetDataTypeSize(armnn::DataType::Float16) == 2);
 
     //Test utility functions return correct name
-    BOOST_CHECK((GetDataTypeName(armnn::DataType::Float16) == std::string("Float16")));
+    CHECK((GetDataTypeName(armnn::DataType::Float16) == std::string("Float16")));
 }
 
-BOOST_AUTO_TEST_CASE(BFloatType)
+TEST_CASE("BFloatType")
 {
     uint16_t v = 16256;
     armnn::BFloat16 a(v);
@@ -95,83 +95,83 @@
     armnn::BFloat16 zero;
 
     // Test BFloat16 type
-    BOOST_CHECK_EQUAL(sizeof(a), 2);
-    BOOST_CHECK_EQUAL(a, b);
-    BOOST_CHECK_EQUAL(a.Val(), v);
-    BOOST_CHECK_EQUAL(a, 1.0f);
-    BOOST_CHECK_EQUAL(zero, 0.0f);
+    CHECK_EQ(sizeof(a), 2);
+    CHECK_EQ(a, b);
+    CHECK_EQ(a.Val(), v);
+    CHECK_EQ(a, 1.0f);
+    CHECK_EQ(zero, 0.0f);
 
     // Infinity
     float infFloat = std::numeric_limits<float>::infinity();
     armnn::BFloat16 infBF(infFloat);
-    BOOST_CHECK_EQUAL(infBF, armnn::BFloat16::Inf());
+    CHECK_EQ(infBF, armnn::BFloat16::Inf());
 
     // NaN
     float nan = std::numeric_limits<float>::quiet_NaN();
     armnn::BFloat16 nanBF(nan);
-    BOOST_CHECK_EQUAL(nanBF, armnn::BFloat16::Nan());
+    CHECK_EQ(nanBF, armnn::BFloat16::Nan());
 
     // Test utility function returns correct type.
     using ResolvedType = armnn::ResolveType<armnn::DataType::BFloat16>;
     constexpr bool isBFloat16Type = std::is_same<armnn::BFloat16, ResolvedType>::value;
-    BOOST_CHECK(isBFloat16Type);
+    CHECK(isBFloat16Type);
 
     //Test utility functions return correct size
-    BOOST_CHECK(GetDataTypeSize(armnn::DataType::BFloat16) == 2);
+    CHECK(GetDataTypeSize(armnn::DataType::BFloat16) == 2);
 
     //Test utility functions return correct name
-    BOOST_CHECK((GetDataTypeName(armnn::DataType::BFloat16) == std::string("BFloat16")));
+    CHECK((GetDataTypeName(armnn::DataType::BFloat16) == std::string("BFloat16")));
 }
 
-BOOST_AUTO_TEST_CASE(Float32ToBFloat16Test)
+TEST_CASE("Float32ToBFloat16Test")
 {
     // LSB = 0, R = 0 -> round down
     armnn::BFloat16 roundDown0 = armnn::BFloat16::Float32ToBFloat16(1.704735E38f); // 0x7F004000
-    BOOST_CHECK_EQUAL(roundDown0.Val(), 0x7F00);
+    CHECK_EQ(roundDown0.Val(), 0x7F00);
     // LSB = 1, R = 0 -> round down
     armnn::BFloat16 roundDown1 = armnn::BFloat16::Float32ToBFloat16(9.18355E-41f); // 0x00010000
-    BOOST_CHECK_EQUAL(roundDown1.Val(), 0x0001);
+    CHECK_EQ(roundDown1.Val(), 0x0001);
     // LSB = 0, R = 1 all 0 -> round down
     armnn::BFloat16 roundDown2 = armnn::BFloat16::Float32ToBFloat16(1.14794E-40f); // 0x00014000
-    BOOST_CHECK_EQUAL(roundDown2.Val(), 0x0001);
+    CHECK_EQ(roundDown2.Val(), 0x0001);
     // LSB = 1, R = 1 -> round up
     armnn::BFloat16 roundUp = armnn::BFloat16::Float32ToBFloat16(-2.0234377f); // 0xC0018001
-    BOOST_CHECK_EQUAL(roundUp.Val(), 0xC002);
+    CHECK_EQ(roundUp.Val(), 0xC002);
     // LSB = 0, R = 1 -> round up
     armnn::BFloat16 roundUp1 = armnn::BFloat16::Float32ToBFloat16(4.843037E-35f); // 0x0680C000
-    BOOST_CHECK_EQUAL(roundUp1.Val(), 0x0681);
+    CHECK_EQ(roundUp1.Val(), 0x0681);
     // Max positive value -> infinity
     armnn::BFloat16 maxPositive = armnn::BFloat16::Float32ToBFloat16(std::numeric_limits<float>::max()); // 0x7F7FFFFF
-    BOOST_CHECK_EQUAL(maxPositive, armnn::BFloat16::Inf());
+    CHECK_EQ(maxPositive, armnn::BFloat16::Inf());
     // Max negative value -> -infinity
     armnn::BFloat16 maxNeg = armnn::BFloat16::Float32ToBFloat16(std::numeric_limits<float>::lowest()); // 0xFF7FFFFF
-    BOOST_CHECK_EQUAL(maxNeg.Val(), 0xFF80);
+    CHECK_EQ(maxNeg.Val(), 0xFF80);
     // Min positive value
     armnn::BFloat16 minPositive = armnn::BFloat16::Float32ToBFloat16(1.1754942E-38f); // 0x007FFFFF
-    BOOST_CHECK_EQUAL(minPositive.Val(), 0x0080);
+    CHECK_EQ(minPositive.Val(), 0x0080);
     // Min negative value
     armnn::BFloat16 minNeg = armnn::BFloat16::Float32ToBFloat16(-1.1754942E-38f); // 0x807FFFFF
-    BOOST_CHECK_EQUAL(minNeg.Val(), 0x8080);
+    CHECK_EQ(minNeg.Val(), 0x8080);
 }
 
-BOOST_AUTO_TEST_CASE(BFloat16ToFloat32Test)
+TEST_CASE("BFloat16ToFloat32Test")
 {
     armnn::BFloat16 bf0(1.5f);
-    BOOST_CHECK_EQUAL(bf0.ToFloat32(), 1.5f);
+    CHECK_EQ(bf0.ToFloat32(), 1.5f);
     armnn::BFloat16 bf1(-5.525308E-25f);
-    BOOST_CHECK_EQUAL(bf1.ToFloat32(), -5.525308E-25f);
+    CHECK_EQ(bf1.ToFloat32(), -5.525308E-25f);
     armnn::BFloat16 bf2(-2.0625f);
-    BOOST_CHECK_EQUAL(bf2.ToFloat32(), -2.0625f);
+    CHECK_EQ(bf2.ToFloat32(), -2.0625f);
     uint16_t v = 32639;
     armnn::BFloat16 bf3(v);
-    BOOST_CHECK_EQUAL(bf3.ToFloat32(), 3.3895314E38f);
+    CHECK_EQ(bf3.ToFloat32(), 3.3895314E38f);
     // Infinity
-    BOOST_CHECK_EQUAL(armnn::BFloat16::Inf().ToFloat32(), std::numeric_limits<float>::infinity());
+    CHECK_EQ(armnn::BFloat16::Inf().ToFloat32(), std::numeric_limits<float>::infinity());
     // NaN
-    BOOST_CHECK(std::isnan(armnn::BFloat16::Nan().ToFloat32()));
+    CHECK(std::isnan(armnn::BFloat16::Nan().ToFloat32()));
 }
 
-BOOST_AUTO_TEST_CASE(GraphTopologicalSortSimpleTest)
+TEST_CASE("GraphTopologicalSortSimpleTest")
 {
     std::map<int, std::vector<int>> graph;
 
@@ -192,13 +192,13 @@
     std::vector<int> output;
     bool sortCompleted = armnnUtils::GraphTopologicalSort<int>(targetNodes, getNodeInputs, output);
 
-    BOOST_TEST(sortCompleted);
+    CHECK(sortCompleted);
 
     std::vector<int> correctResult = {5, 4, 2, 0, 3, 1};
-    BOOST_CHECK_EQUAL_COLLECTIONS(output.begin(), output.end(), correctResult.begin(), correctResult.end());
+    CHECK(std::equal(output.begin(), output.end(), correctResult.begin(), correctResult.end()));
 }
 
-BOOST_AUTO_TEST_CASE(GraphTopologicalSortVariantTest)
+TEST_CASE("GraphTopologicalSortVariantTest")
 {
     std::map<int, std::vector<int>> graph;
 
@@ -220,13 +220,13 @@
     std::vector<int> output;
     bool sortCompleted = armnnUtils::GraphTopologicalSort<int>(targetNodes, getNodeInputs, output);
 
-    BOOST_TEST(sortCompleted);
+    CHECK(sortCompleted);
 
     std::vector<int> correctResult = {6, 5, 3, 4, 2, 0, 1};
-    BOOST_CHECK_EQUAL_COLLECTIONS(output.begin(), output.end(), correctResult.begin(), correctResult.end());
+    CHECK(std::equal(output.begin(), output.end(), correctResult.begin(), correctResult.end()));
 }
 
-BOOST_AUTO_TEST_CASE(CyclicalGraphTopologicalSortTest)
+TEST_CASE("CyclicalGraphTopologicalSortTest")
 {
     std::map<int, std::vector<int>> graph;
 
@@ -244,32 +244,32 @@
     std::vector<int> output;
     bool sortCompleted = armnnUtils::GraphTopologicalSort<int>(targetNodes, getNodeInputs, output);
 
-    BOOST_TEST(!sortCompleted);
+    CHECK(!sortCompleted);
 }
 
-BOOST_AUTO_TEST_CASE(PermuteQuantizationDim)
+TEST_CASE("PermuteQuantizationDim")
 {
     std::vector<float> scales {1.0f, 1.0f};
 
     // Set QuantizationDim to be index 1
     const armnn::TensorInfo perChannelInfo({ 1, 2, 3, 4 }, armnn::DataType::Float32, scales, 1U);
-    BOOST_CHECK(perChannelInfo.GetQuantizationDim().value() == 1U);
+    CHECK(perChannelInfo.GetQuantizationDim().value() == 1U);
 
     // Permute so that index 1 moves to final index i.e. index 3
     armnn::PermutationVector mappings({ 0, 3, 2, 1 });
     auto permutedPerChannel = armnnUtils::Permuted(perChannelInfo, mappings);
 
     // Check that QuantizationDim is in index 3
-    BOOST_CHECK(permutedPerChannel.GetQuantizationDim().value() == 3U);
+    CHECK(permutedPerChannel.GetQuantizationDim().value() == 3U);
 
     // Even if there is only a single scale the quantization dim still exists and needs to be permuted
     std::vector<float> scale {1.0f};
     const armnn::TensorInfo perChannelInfo1({ 1, 2, 3, 4 }, armnn::DataType::Float32, scale, 1U);
     auto permuted = armnnUtils::Permuted(perChannelInfo1, mappings);
-    BOOST_CHECK(permuted.GetQuantizationDim().value() == 3U);
+    CHECK(permuted.GetQuantizationDim().value() == 3U);
 }
 
-BOOST_AUTO_TEST_CASE(PermuteVectorIterator)
+TEST_CASE("PermuteVectorIterator")
 {
     // We're slightly breaking the spirit of std::array.end() because we're using it as a
     // variable length rather than fixed length. This test is to use a couple of iterators and
@@ -278,7 +278,7 @@
     // Create zero length.
     armnn::PermutationVector zeroPVector({});
     // Begin should be equal to end.
-    BOOST_CHECK(zeroPVector.begin() == zeroPVector.end());
+    CHECK(zeroPVector.begin() == zeroPVector.end());
 
     // Create length 4. Summing the 4 values should be 6.
     armnn::PermutationVector fourPVector({ 0, 3, 2, 1 });
@@ -287,7 +287,7 @@
     {
         sum += it;
     }
-    BOOST_CHECK(sum == 6);
+    CHECK(sum == 6);
     // Directly use begin and end, make sure there are 4 iterations.
     unsigned int iterations = 0;
     auto itr = fourPVector.begin();
@@ -296,7 +296,7 @@
         ++iterations;
         itr++;
     }
-    BOOST_CHECK(iterations == 4);
+    CHECK(iterations == 4);
 
     // Do the same with 2 elements.
     armnn::PermutationVector twoPVector({ 0, 1 });
@@ -307,20 +307,20 @@
         ++iterations;
         itr++;
     }
-    BOOST_CHECK(iterations == 2);
+    CHECK(iterations == 2);
 }
 
 #if defined(ARMNNREF_ENABLED)
-BOOST_AUTO_TEST_CASE(LayerSupportHandle)
+TEST_CASE("LayerSupportHandle")
 {
     auto layerSupportObject = armnn::GetILayerSupportByBackendId("CpuRef");
     armnn::TensorInfo input;
     std::string reasonIfUnsupported;
     // InputLayer always supported for CpuRef
-    BOOST_CHECK_EQUAL(layerSupportObject.IsInputSupported(input, reasonIfUnsupported), true);
+    CHECK_EQ(layerSupportObject.IsInputSupported(input, reasonIfUnsupported), true);
 
-    BOOST_CHECK(layerSupportObject.IsBackendRegistered());
+    CHECK(layerSupportObject.IsBackendRegistered());
 }
 #endif
 
-BOOST_AUTO_TEST_SUITE_END()
+}
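
The two topological-sort tests above replace BOOST_CHECK_EQUAL_COLLECTIONS with the four-iterator overload of std::equal, which (since C++14) also returns false when the ranges differ in length. A standalone sketch of that pattern for reference; the test and variable names are illustrative only, and <algorithm> is needed for std::equal:

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN // illustrative; a real build defines the runner in one TU
#include <doctest/doctest.h>

#include <algorithm>
#include <vector>

TEST_CASE("CollectionsCompareEqual")
{
    std::vector<int> output        = {5, 4, 2, 0, 3, 1};
    std::vector<int> correctResult = {5, 4, 2, 0, 3, 1};

    // The four-iterator std::equal compares lengths as well as elements, making it
    // a reasonable stand-in for BOOST_CHECK_EQUAL_COLLECTIONS. On failure it only
    // reports true/false rather than listing the mismatching elements.
    CHECK(std::equal(output.begin(), output.end(),
                     correctResult.begin(), correctResult.end()));
}
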
diff --git a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
index d0d728b..36a4507 100644
--- a/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
+++ b/src/armnn/test/optimizations/AddBroadcastReshapeLayerTests.cpp
@@ -8,11 +8,12 @@
 
 #include <Optimizer.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
 using namespace optimizations;
 
 void AddBroadcastReshapeLayerOptimizerTest(const TensorInfo& info0,
@@ -36,7 +37,7 @@
     input1->GetOutputSlot().Connect(add->GetInputSlot(1));
     add->GetOutputSlot().Connect(output->GetInputSlot(0));
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<AdditionLayer>,
@@ -46,7 +47,7 @@
     armnn::Optimizer::Pass(graph, MakeOptimizations(AddBroadcastReshapeLayer()));
 
     // Broadcast reshape layer has been added to the graph correctly
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<ReshapeLayer>,
@@ -54,15 +55,15 @@
                              &IsLayerOfType<OutputLayer>));
 
     Layer* const reshapeLayer = GetFirstLayerWithName(graph, reshapeLayerName);
-    BOOST_TEST(reshapeLayer);
+    CHECK(reshapeLayer);
     auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo();
 
     // Tensorshape and the data type are correct
-    BOOST_TEST((addedReshapeTensorInfo.GetShape() == expectedReshapeShape));
-    BOOST_TEST((addedReshapeTensorInfo.GetDataType() == expectedDataType));
+    CHECK((addedReshapeTensorInfo.GetShape() == expectedReshapeShape));
+    CHECK((addedReshapeTensorInfo.GetDataType() == expectedDataType));
 }
 
-BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerSimpleTest)
+TEST_CASE("AddBroadcastReshapeLayerSimpleTest")
 {
     const TensorInfo info0({ 1, 2, 3, 5 }, DataType::Float32);
     const TensorInfo info1({ 1 }, DataType::Float32);
@@ -71,7 +72,7 @@
                                           DataType::Float32);
 }
 
-BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayer1DTest)
+TEST_CASE("AddBroadcastReshapeLayer1DTest")
 {
     const TensorInfo info0({ 1, 2, 3, 5 }, DataType::Float32);
     const TensorInfo info1({ 5 }, DataType::Float32);
@@ -81,7 +82,7 @@
                                           DataType::Float32);
 }
 
-BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayer2DTest)
+TEST_CASE("AddBroadcastReshapeLayer2DTest")
 {
     const TensorInfo info0({ 1, 2, 3, 5 }, DataType::Float32);
     const TensorInfo info1({ 3, 5 }, DataType::Float32);
@@ -91,7 +92,7 @@
                                           DataType::Float32);
 }
 
-BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayer3DTest)
+TEST_CASE("AddBroadcastReshapeLayer3DTest")
 {
     const TensorInfo info0({ 2, 1, 1, 1 }, DataType::Float32);
     const TensorInfo info1({ 3, 4, 5 }, DataType::Float32);
@@ -101,7 +102,7 @@
                                           DataType::Float32);
 }
 
-BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayer3DMergedTest)
+TEST_CASE("AddBroadcastReshapeLayer3DMergedTest")
 {
     const TensorInfo info0({ 2, 3, 1, 1 }, DataType::Float32);
     const TensorInfo info1({ 3, 4, 5 }, DataType::Float32);
@@ -111,7 +112,7 @@
                                           DataType::Float32);
 }
 
-BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerSubtractionTest)
+TEST_CASE("AddBroadcastReshapeLayerSubtractionTest")
 {
     Graph graph;
     const TensorInfo info0({ 5 }, DataType::Float32);
@@ -130,7 +131,7 @@
     input1->GetOutputSlot().Connect(sub->GetInputSlot(1));
     sub->GetOutputSlot().Connect(output->GetInputSlot(0));
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<SubtractionLayer>,
@@ -140,7 +141,7 @@
     armnn::Optimizer::Pass(graph, MakeOptimizations(AddBroadcastReshapeLayer()));
 
     // Broadcast reshape layer has been added to the graph correctly
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<ReshapeLayer>,
@@ -148,15 +149,15 @@
                              &IsLayerOfType<OutputLayer>));
 
     Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:sub-0");
-    BOOST_TEST(reshapeLayer);
+    CHECK(reshapeLayer);
     auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo();
 
     // Tensorshape and the data type are correct
-    BOOST_TEST((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 1, 5 })));
-    BOOST_TEST((addedReshapeTensorInfo.GetDataType() == DataType::Float32));
+    CHECK((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 1, 5 })));
+    CHECK((addedReshapeTensorInfo.GetDataType() == DataType::Float32));
 }
 
-BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerDivisionTest)
+TEST_CASE("AddBroadcastReshapeLayerDivisionTest")
 {
     Graph graph;
     const TensorInfo info0({ 1, 4, 5 }, DataType::QAsymmS8);
@@ -175,7 +176,7 @@
     input1->GetOutputSlot().Connect(div->GetInputSlot(1));
     div->GetOutputSlot().Connect(output->GetInputSlot(0));
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<DivisionLayer>,
@@ -185,7 +186,7 @@
     armnn::Optimizer::Pass(graph, MakeOptimizations(AddBroadcastReshapeLayer()));
 
     // Broadcast reshape layer has been added to the graph correctly
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<ReshapeLayer>,
@@ -193,15 +194,15 @@
                              &IsLayerOfType<OutputLayer>));
 
     Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:div-0");
-    BOOST_TEST(reshapeLayer);
+    CHECK(reshapeLayer);
     auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo();
 
     // Tensorshape and the data type are correct
-    BOOST_TEST((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 4, 5 })));
-    BOOST_TEST((addedReshapeTensorInfo.GetDataType() == DataType::QAsymmS8));
+    CHECK((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 4, 5 })));
+    CHECK((addedReshapeTensorInfo.GetDataType() == DataType::QAsymmS8));
 }
 
-BOOST_AUTO_TEST_CASE(AddBroadcastReshapeLayerMultiplicationTest)
+TEST_CASE("AddBroadcastReshapeLayerMultiplicationTest")
 {
     Graph graph;
     const TensorInfo info0({ 3, 5 }, DataType::QAsymmU8);
@@ -220,7 +221,7 @@
     input1->GetOutputSlot().Connect(mul->GetInputSlot(1));
     mul->GetOutputSlot().Connect(output->GetInputSlot(0));
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<MultiplicationLayer>,
@@ -230,7 +231,7 @@
     armnn::Optimizer::Pass(graph, MakeOptimizations(AddBroadcastReshapeLayer()));
 
     // Broadcast reshape layer has been added to the graph correctly
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<ReshapeLayer>,
@@ -238,15 +239,15 @@
                              &IsLayerOfType<OutputLayer>));
 
     Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:mul-0");
-    BOOST_TEST(reshapeLayer);
+    CHECK(reshapeLayer);
     auto addedReshapeTensorInfo = reshapeLayer->GetOutputSlot().GetTensorInfo();
 
     // Tensorshape and the data type are correct
-    BOOST_TEST((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 3, 5 })));
-    BOOST_TEST((addedReshapeTensorInfo.GetDataType() == DataType::QAsymmU8));
+    CHECK((addedReshapeTensorInfo.GetShape() == TensorShape({ 1, 1, 3, 5 })));
+    CHECK((addedReshapeTensorInfo.GetDataType() == DataType::QAsymmU8));
 }
 
-BOOST_AUTO_TEST_CASE(AddNoBroadcastReshapeLayerTest)
+TEST_CASE("AddNoBroadcastReshapeLayerTest")
 {
     Graph graph;
     const TensorInfo info0({ 1, 1, 1, 1 }, DataType::QAsymmU8);
@@ -265,7 +266,7 @@
     input1->GetOutputSlot().Connect(mul->GetInputSlot(1));
     mul->GetOutputSlot().Connect(output->GetInputSlot(0));
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<MultiplicationLayer>,
@@ -275,17 +276,17 @@
     armnn::Optimizer::Pass(graph, MakeOptimizations(AddBroadcastReshapeLayer()));
 
     // Broadcast reshape layer has not been added to the graph
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<MultiplicationLayer>,
                              &IsLayerOfType<OutputLayer>));
 
     Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:mul-0");
-    BOOST_TEST(!reshapeLayer);
+    CHECK(!reshapeLayer);
 }
 
-BOOST_AUTO_TEST_CASE(ReshapeParentConstLayerTest)
+TEST_CASE("ReshapeParentConstLayerTest")
 {
     Graph graph;
     const TensorInfo info0({ 1, 2, 3, 5 }, DataType::QAsymmU8);
@@ -309,7 +310,7 @@
     constant->GetOutputSlot().Connect(mul->GetInputSlot(1));
     mul->GetOutputSlot().Connect(output->GetInputSlot(0));
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<ConstantLayer>,
                              &IsLayerOfType<MultiplicationLayer>,
@@ -319,22 +320,22 @@
     armnn::Optimizer::Pass(graph, MakeOptimizations(AddBroadcastReshapeLayer()));
 
     // Broadcast reshape layer has not been added to the graph
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<ConstantLayer>,
                              &IsLayerOfType<MultiplicationLayer>,
                              &IsLayerOfType<OutputLayer>));
 
     TensorShape expectedShape = TensorShape{ 1, 1, 1, 5 };
-    BOOST_TEST(constant->m_LayerOutput.get()->GetTensorInfo().GetShape() == expectedShape);
+    CHECK(constant->m_LayerOutput.get()->GetTensorInfo().GetShape() == expectedShape);
 
-    BOOST_TEST(constant->m_LayerOutput.get()->GetTensorInfo().GetNumDimensions() == info0.GetNumDimensions());
+    CHECK(constant->m_LayerOutput.get()->GetTensorInfo().GetNumDimensions() == info0.GetNumDimensions());
 
     Layer* const reshapeLayer = GetFirstLayerWithName(graph, "Reshape_for:mul-0");
-    BOOST_TEST(!reshapeLayer);
+    CHECK(!reshapeLayer);
 }
 
-BOOST_AUTO_TEST_CASE(ReshapeParentConstAddLayerMultipleConnectionsTest)
+TEST_CASE("ReshapeParentConstAddLayerMultipleConnectionsTest")
 {
     // In this test case we recreate the situation where an Addition layer has
     // a constant second term, e.g. [1,512] + [1]. The AddBroadcastReshapeLayer
@@ -367,7 +368,7 @@
     // This second connection should prevent the modification of the const output tensor.
     constant->GetOutputSlot().Connect(add2->GetInputSlot(1));
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<ConstantLayer>,
                              &IsLayerOfType<AdditionLayer>,
@@ -378,7 +379,7 @@
     armnn::Optimizer::Pass(graph, MakeOptimizations(AddBroadcastReshapeLayer()));
 
     // Broadcast reshape should have been added before each addition layer.
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<ConstantLayer>,
                              &IsLayerOfType<ReshapeLayer>,
@@ -388,14 +389,14 @@
                              &IsLayerOfType<OutputLayer>));
 
     // Ensure the output shape of the constant hasn't changed.
-    BOOST_TEST(constant->m_LayerOutput.get()->GetTensorInfo().GetShape() == constantTermInfo.GetShape());
+    CHECK(constant->m_LayerOutput.get()->GetTensorInfo().GetShape() == constantTermInfo.GetShape());
     // There should be two extra reshape layers with appropriate names.
     Layer* const reshapeLayer1 = GetFirstLayerWithName(graph, "Reshape_for:add1-1");
     Layer* const reshapeLayer2 = GetFirstLayerWithName(graph, "Reshape_for:add2-1");
-    BOOST_TEST(reshapeLayer1);
-    BOOST_TEST(reshapeLayer2);
+    CHECK(reshapeLayer1);
+    CHECK(reshapeLayer2);
 }
 
 
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
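
The same structural conversion recurs in every file in this patch: BOOST_AUTO_TEST_SUITE(X) ... BOOST_AUTO_TEST_SUITE_END() becomes a braced TEST_SUITE("X") { ... } block, with the assertion macros swapped one-for-one. A minimal compilable sketch of that shape, using the doctest-supplied main for brevity (the ArmNN build presumably provides its own runner elsewhere); the test case name and the lambda are illustrative only:

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN // illustrative; normally defined in exactly one TU
#include <doctest/doctest.h>

#include <stdexcept>

TEST_SUITE("Optimizer")                   // replaces BOOST_AUTO_TEST_SUITE(Optimizer)
{

TEST_CASE("MacroMappingExample")          // replaces BOOST_AUTO_TEST_CASE(MacroMappingExample)
{
    const auto alwaysThrows = []() { throw std::runtime_error("boom"); };

    CHECK(1 + 1 == 2);                                    // BOOST_TEST / BOOST_CHECK
    CHECK_EQ(sizeof(char), 1u);                           // BOOST_CHECK_EQUAL
    CHECK_THROWS_AS(alwaysThrows(), std::runtime_error);  // BOOST_CHECK_THROW
}

}                                         // replaces BOOST_AUTO_TEST_SUITE_END()
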
diff --git a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
index e4c1f2f..b78a1bf 100644
--- a/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsBFloatTests.cpp
@@ -8,14 +8,15 @@
 #include <BFloat16.hpp>
 #include <Optimizer.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
 using namespace armnn::optimizations;
 
-BOOST_AUTO_TEST_CASE(ConvertConstantsFloatToBFloatTest)
+TEST_CASE("ConvertConstantsFloatToBFloatTest")
 {
     armnn::Graph graph;
 
@@ -48,27 +49,27 @@
     fc->GetOutputSlot().Connect(output->GetInputSlot(0));
 
     // Check tensor data type before conversion
-    BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
+    CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
 
     // Run the optimizer
     armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsFloatToBFloat()));
 
     // Check tensor data type after conversion
-    BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16);
+    CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16);
 
     // Check whether data matches expected Bf16 data
     const BFloat16* data = fc->m_Weight->GetConstTensor<BFloat16>();
-    BOOST_CHECK(data[0] == BFloat16(0.0f));
-    BOOST_CHECK(data[1] == BFloat16(-1.0f));
-    BOOST_CHECK(data[2] == BFloat16(3.796875f)); // 0x4073
-    BOOST_CHECK(data[3] == BFloat16(3.1072295E29f)); // 0x707B
-    BOOST_CHECK(data[4] == BFloat16(9.131327E-10f)); // 0x307B
-    BOOST_CHECK(data[5] == BFloat16(-3.796875f)); // 0xC073
-    BOOST_CHECK(data[6] == BFloat16(-3.1072295E29f)); // 0xF07B
-    BOOST_CHECK(data[7] == BFloat16(-9.131327E-10f)); // 0xB07B
+    CHECK(data[0] == BFloat16(0.0f));
+    CHECK(data[1] == BFloat16(-1.0f));
+    CHECK(data[2] == BFloat16(3.796875f)); // 0x4073
+    CHECK(data[3] == BFloat16(3.1072295E29f)); // 0x707B
+    CHECK(data[4] == BFloat16(9.131327E-10f)); // 0x307B
+    CHECK(data[5] == BFloat16(-3.796875f)); // 0xC073
+    CHECK(data[6] == BFloat16(-3.1072295E29f)); // 0xF07B
+    CHECK(data[7] == BFloat16(-9.131327E-10f)); // 0xB07B
 }
 
-BOOST_AUTO_TEST_CASE(ConvertConstantsBFloatToFloatTest)
+TEST_CASE("ConvertConstantsBFloatToFloatTest")
 {
     armnn::Graph graph;
 
@@ -104,24 +105,24 @@
     fc->GetOutputSlot().Connect(output->GetInputSlot(0));
 
     //Test the tensor info is correct.
-    BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16);
+    CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16);
 
     // Run the optimizer
     armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsBFloatToFloat()));
 
     //Test the tensor info is correct.
-    BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
+    CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
 
     // Now test the data matches float32 data
     const float* data = fc->m_Weight->GetConstTensor<float>();
-    BOOST_CHECK(data[0] == 0.0f);
-    BOOST_CHECK(data[1] == -1.0f);
-    BOOST_CHECK(data[2] == 3.796875f);
-    BOOST_CHECK(data[3] == 3.1072295E29f);
-    BOOST_CHECK(data[4] == 9.131327E-10f);
-    BOOST_CHECK(data[5] == -3.796875f);
-    BOOST_CHECK(data[6] == -3.1072295E29f);
-    BOOST_CHECK(data[7] == -9.131327E-10f);
+    CHECK(data[0] == 0.0f);
+    CHECK(data[1] == -1.0f);
+    CHECK(data[2] == 3.796875f);
+    CHECK(data[3] == 3.1072295E29f);
+    CHECK(data[4] == 9.131327E-10f);
+    CHECK(data[5] == -3.796875f);
+    CHECK(data[6] == -3.1072295E29f);
+    CHECK(data[7] == -9.131327E-10f);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
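
The constant-conversion tests above and below check weight payloads element by element (CHECK(data[0] == BFloat16(0.0f)); and so on), which doctest handles directly. Where a loop is preferred, CAPTURE can record the index so a failure identifies the offending element; a small standalone sketch with illustrative names and values:

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN // illustrative; normally defined in exactly one TU
#include <doctest/doctest.h>

#include <cstddef>
#include <vector>

TEST_CASE("BufferElementwiseCompare")
{
    const std::vector<float> expected = {0.0f, -1.0f, 3.796875f, -3.796875f};
    const std::vector<float> actual   = {0.0f, -1.0f, 3.796875f, -3.796875f};

    REQUIRE_EQ(actual.size(), expected.size()); // stop early if the sizes differ

    for (std::size_t n = 0; n < expected.size(); ++n)
    {
        CAPTURE(n);                       // the index is printed if the CHECK below fails
        CHECK(actual[n] == expected[n]);
    }
}
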
diff --git a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
index 1dfe7f4..e6cca4f 100644
--- a/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsFloatToHalfTests.cpp
@@ -8,14 +8,15 @@
 #include <Optimizer.hpp>
 #include <Half.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
 using namespace armnn::optimizations;
 
-BOOST_AUTO_TEST_CASE(ConvertConstantsFloatToHalfTest)
+TEST_CASE("ConvertConstantsFloatToHalfTest")
 {
     armnn::Graph graph;
 
@@ -41,20 +42,20 @@
     fc->GetOutputSlot().Connect(output->GetInputSlot(0));
 
     // Check tensor data type before conversion
-    BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
+    CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
 
     // Run the optimizer
     armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsFloatToHalf()));
 
     // Check tensor data type after conversion
-    BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
+    CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
 
     // Check whether data matches expected fp16 data
     const Half* data = fc->m_Weight->GetConstTensor<Half>();
-    BOOST_CHECK(data[0] == Half(1.0f));
-    BOOST_CHECK(data[1] == Half(2.0f));
-    BOOST_CHECK(data[2] == Half(3.0f));
-    BOOST_CHECK(data[3] == Half(4.0f));
+    CHECK(data[0] == Half(1.0f));
+    CHECK(data[1] == Half(2.0f));
+    CHECK(data[2] == Half(3.0f));
+    CHECK(data[3] == Half(4.0f));
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
index 1ddf526..2ec1279 100644
--- a/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
+++ b/src/armnn/test/optimizations/ConvertConstantsHalfToFloatTests.cpp
@@ -7,12 +7,13 @@
 
 #include <Optimizer.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
 using namespace armnn::optimizations;
 
-BOOST_AUTO_TEST_CASE(ConvertConstantsHalfToFloatTest)
+TEST_CASE("ConvertConstantsHalfToFloatTest")
 {
     armnn::Graph graph;
 
@@ -41,20 +42,20 @@
     fc->GetOutputSlot().Connect(output->GetInputSlot(0));
 
     //Test the tensor info is correct.
-    BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
+    CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float16);
 
     // Run the optimizer
     armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(ConvertConstantsHalfToFloat()));
 
     //Test the tensor info is correct.
-    BOOST_CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
+    CHECK(fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::Float32);
 
     // Now test the data matches float32 data
     const float* data = fc->m_Weight->GetConstTensor<float>();
-    BOOST_CHECK(1.0f == data[0]);
-    BOOST_CHECK(2.0f == data[1]);
-    BOOST_CHECK(3.0f == data[2]);
-    BOOST_CHECK(4.0f == data[3]);
+    CHECK(1.0f == data[0]);
+    CHECK(2.0f == data[1]);
+    CHECK(3.0f == data[2]);
+    CHECK(4.0f == data[3]);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/FoldPadTests.cpp b/src/armnn/test/optimizations/FoldPadTests.cpp
index 20cfab1..7b4ac41 100644
--- a/src/armnn/test/optimizations/FoldPadTests.cpp
+++ b/src/armnn/test/optimizations/FoldPadTests.cpp
@@ -6,15 +6,16 @@
 #include "LayersFwd.hpp"
 #include <Network.hpp>
 #include <test/TestUtils.hpp>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 #include <backendsCommon/TensorHandle.hpp>
 #include <Optimizer.hpp>
 
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
 using namespace armnn;
 using namespace armnn::optimizations;
 
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoConvolution2dLayer)
+TEST_CASE("FoldPadLayerIntoConvolution2dLayer")
 {
     Graph              graph;
     const unsigned int inputShape[]   = {1, 2, 2, 3};
@@ -67,7 +68,7 @@
             (conv2dLayerParams.m_BiasEnabled == false) && (conv2dLayerParams.m_DataLayout == DataLayout::NHWC);
     };
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<PadLayer>,
                              checkSimpleConv2d,
@@ -85,13 +86,13 @@
             (conv2dLayerParams.m_BiasEnabled == false) && (conv2dLayerParams.m_DataLayout == DataLayout::NHWC);
     };
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              checkPadFoldedIntoConv2d,
                              &IsLayerOfType<OutputLayer>));
 }
 
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoDepthwiseConvolution2dLayer)
+TEST_CASE("FoldPadLayerIntoDepthwiseConvolution2dLayer")
 {
     Graph              graph;
     const unsigned int inputShape[]   = {1, 2, 2, 3};
@@ -146,7 +147,7 @@
             (depthwiseConv2dLayerParams.m_DataLayout == DataLayout::NHWC);
     };
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<PadLayer>,
                              checkSimpleDepthwiseConv2d,
@@ -166,13 +167,13 @@
             (depthwiseConv2dLayerParams.m_DataLayout == DataLayout::NHWC);
     };
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              checkPadFoldedIntoDepthwiseConv2d,
                              &IsLayerOfType<OutputLayer>));
 }
 
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer)
+TEST_CASE("FoldPadLayerIntoPooling2dLayer")
 {
     Graph              graph;
     const unsigned int inputShape[]  = {1, 2, 2, 3};
@@ -218,7 +219,7 @@
             (pool2dLayer->GetParameters() == pooling2dDescriptor);
     };
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<PadLayer>,
                              checkSimplePool2d,
@@ -248,13 +249,13 @@
             (pool2dLayerParams.m_PadBottom == 1) && (pool2dLayerParams.m_PaddingMethod == PaddingMethod::IgnoreValue);
     };
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              checkPadFoldedIntoPool2d,
                              &IsLayerOfType<OutputLayer>));
 }
 
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2d_PadWithMultipleOutputsShouldNotBeOptimized)
+TEST_CASE("FoldPadLayerIntoPooling2d_PadWithMultipleOutputsShouldNotBeOptimized")
 {
     // In this test case we'll set up a pad layer with two outputs. One goes to a pooling layer and the other
     // goes to an output layer. FoldPadLayerIntoPooling2d should not optimize this graph as it uses the
@@ -308,7 +309,7 @@
     };
 
     // Initial sequence.
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<PadLayer>,
                              checkSimplePool2d,
@@ -318,7 +319,7 @@
     armnn::Optimizer::Pass(graph, MakeOptimizations(FoldPadIntoPooling2d()));
 
     // The network should not change.
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<PadLayer>,
                              checkSimplePool2d,
@@ -326,7 +327,7 @@
                              &IsLayerOfType<OutputLayer>));
 }
 
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_PoolingLayerWithExcludePaddingShouldNotTakeMorePadding)
+TEST_CASE("FoldPadLayerIntoPooling2dLayer_PoolingLayerWithExcludePaddingShouldNotTakeMorePadding")
 {
     // In this test setup input, Pad layer, Pooling layer that includes padding, output layer. The optimization
     // should not work as the pooling layer already includes an existing pad and specifies PaddingMethod::Exclude.
@@ -380,7 +381,7 @@
             (pool2dLayer->GetParameters() == pooling2dDescriptor);
     };
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<PadLayer>,
                              checkSimplePool2d,
@@ -389,14 +390,14 @@
     armnn::Optimizer::Pass(graph, MakeOptimizations(FoldPadIntoPooling2d()));
 
     // The optimization should not have modified the graph.
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<PadLayer>,
                              checkSimplePool2d,
                              &IsLayerOfType<OutputLayer>));
 }
 
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_MaxPoolingLayerWithLargePadValueShouldNotBeFolded)
+TEST_CASE("FoldPadLayerIntoPooling2dLayer_MaxPoolingLayerWithLargePadValueShouldNotBeFolded")
 {
     // In this test setup input, Pad layer with a large pad value, Max Pooling layer, output layer. The optimization
     // should not work as the pad value will modify the result of the max pooling layer.
@@ -447,7 +448,7 @@
             (pool2dLayer->GetParameters() == pooling2dDescriptor);
     };
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<PadLayer>,
                              checkSimplePool2d,
@@ -456,7 +457,7 @@
     armnn::Optimizer::Pass(graph, MakeOptimizations(FoldPadIntoPooling2d()));
 
     // The optimization should not have modified the graph.
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(),
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<PadLayer>,
                              checkSimplePool2d,
@@ -464,7 +465,7 @@
 }
 
 #if defined(ARMNNREF_ENABLED)
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoPooling2dLayer_ExecuteInferenceWithAndWithoutOptimization)
+TEST_CASE("FoldPadLayerIntoPooling2dLayer_ExecuteInferenceWithAndWithoutOptimization")
 {
     // The idea of this test is to run a simple pad+pool2d network twice. Once
     // with FoldPadLayerIntoPooling2dLayer enabled and a second time with it
@@ -523,7 +524,7 @@
         IOptimizedNetworkPtr optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
         // Load network into runtime
         NetworkId            networkIdentifier;
-        BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
+        CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
 
         InputTensors inputTensors{{0, ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}};
 
@@ -544,7 +545,7 @@
 
         // Optimize and load and execute it a second time.
         optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
-        BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
+        CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
         std::vector<float> goldenData(32, 0.0f);
         std::vector<float> padOutputData(72, 0.0f);
         OutputTensors      goldenTensors{{0, Tensor(outputInfo, goldenData.data())},
@@ -552,7 +553,7 @@
         run->EnqueueWorkload(networkIdentifier, inputTensors, goldenTensors);
 
         // Now we can compare goldenData against optimizedData. They should be the same.
-        BOOST_TEST(std::equal(goldenData.begin(), goldenData.end(), optimizedData.begin()));
+        CHECK(std::equal(goldenData.begin(), goldenData.end(), optimizedData.begin()));
     }
     catch (const std::exception& e)
     {
@@ -561,7 +562,7 @@
     }
 }
 
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutOptimization)
+TEST_CASE("FoldPadLayerIntoConv2dLayer_ExecuteInferenceWithAndWithoutOptimization")
 {
     // The idea of this test is to run a simple pad+conv2d network twice. Once
     // with FoldPadLayerIntoConv2dLayer enabled and a second time with it
@@ -641,7 +642,7 @@
         IOptimizedNetworkPtr optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
         // Load network into runtime
         NetworkId            networkIdentifier;
-        BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
+        CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
 
         InputTensors inputTensors{{0, ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}};
 
@@ -662,7 +663,7 @@
 
         // Optimize and load and execute it a second time.
         optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
-        BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
+        CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
         std::vector<float> goldenData(100, 0.0f);
         std::vector<float> padOutputData(108, 0.0f);
         OutputTensors      goldenTensors{{0, Tensor(outputInfo, goldenData.data())},
@@ -670,7 +671,7 @@
         run->EnqueueWorkload(networkIdentifier, inputTensors, goldenTensors);
 
         // Now we can compare goldenData against optimizedData. They should be the same.
-        BOOST_TEST(std::equal(goldenData.begin(), goldenData.end(), optimizedData.begin()));
+        CHECK(std::equal(goldenData.begin(), goldenData.end(), optimizedData.begin()));
     }
     catch (const std::exception& e)
     {
@@ -679,7 +680,7 @@
     }
 }
 
-BOOST_AUTO_TEST_CASE(FoldPadLayerIntoDepthwiseConv2dLayer_ExecuteInferenceWithAndWithoutOptimization)
+TEST_CASE("FoldPadLayerIntoDepthwiseConv2dLayer_ExecuteInferenceWithAndWithoutOptimization")
 {
     // The idea of this test is to run a simple pad+depthwiseconv2d network twice. Once
     // with FoldPadLayerIntoDepthwiseConv2dLayer enabled and a second time with it
@@ -759,7 +760,7 @@
         IOptimizedNetworkPtr optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
         // Load network into runtime
         NetworkId            networkIdentifier;
-        BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
+        CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
 
         InputTensors inputTensors{{0, ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}};
 
@@ -780,7 +781,7 @@
 
         // Optimize and load and execute it a second time.
         optimizedNetwork = Optimize(*network, {Compute::CpuRef}, run->GetDeviceSpec());
-        BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
+        CHECK(run->LoadNetwork(networkIdentifier, std::move(optimizedNetwork)) == Status::Success);
         std::vector<float> goldenData(300, 0.0f);
         std::vector<float> padOutputData(108, 0.0f);
         OutputTensors      goldenTensors{{0, Tensor(outputInfo, goldenData.data())},
@@ -788,7 +789,7 @@
         run->EnqueueWorkload(networkIdentifier, inputTensors, goldenTensors);
 
         // Now we can compare goldenData against optimizedData. They should be the same.
-        BOOST_TEST(std::equal(goldenData.begin(), goldenData.end(), optimizedData.begin()));
+        CHECK(std::equal(goldenData.begin(), goldenData.end(), optimizedData.begin()));
     }
     catch (const std::exception& e)
     {
@@ -798,4 +799,4 @@
 }
 #endif
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
index f93fa77..384b14c 100644
--- a/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
+++ b/src/armnn/test/optimizations/Fp32NetworkToBf16ConverterTests.cpp
@@ -7,12 +7,13 @@
 
 #include <Optimizer.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
 using namespace armnn::optimizations;
 
-BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationNoConversionTest)
+TEST_CASE("Fp32NetworkToBf16OptimizationNoConversionTest")
 {
     armnn::Graph graph;
 
@@ -31,18 +32,18 @@
     input->GetOutputSlot().Connect(floor->GetInputSlot(0));
     floor->GetOutputSlot().Connect(output->GetInputSlot(0));
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>));
 
     // Run the optimizer
     armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter()));
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::FloorLayer>,
                              &IsLayerOfType<armnn::OutputLayer>));
 }
 
-BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationConv2DTest)
+TEST_CASE("Fp32NetworkToBf16OptimizationConv2DTest")
 {
     armnn::Graph graph;
 
@@ -82,37 +83,37 @@
     input->GetOutputSlot().Connect(conv->GetInputSlot(0));
     conv->GetOutputSlot().Connect(output->GetInputSlot(0));
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::Convolution2dLayer>, &IsLayerOfType<armnn::OutputLayer>));
 
     // Run the optimizer
     armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter()));
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>, &IsLayerOfType<armnn::Convolution2dLayer>,
                              &IsLayerOfType<armnn::OutputLayer>));
 
     armnn::TensorInfo inputTensor = conv->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
     armnn::TensorInfo outputTensor = conv->GetOutputSlot(0).GetTensorInfo();
-    BOOST_TEST((conv->GetDataType() == armnn::DataType::BFloat16));
-    BOOST_TEST((conv->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
-    BOOST_TEST((conv->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
-    BOOST_TEST((inputTensor.GetDataType() == armnn::DataType::BFloat16));
-    BOOST_TEST((outputTensor.GetDataType() == armnn::DataType::Float32));
+    CHECK((conv->GetDataType() == armnn::DataType::BFloat16));
+    CHECK((conv->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
+    CHECK((conv->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
+    CHECK((inputTensor.GetDataType() == armnn::DataType::BFloat16));
+    CHECK((outputTensor.GetDataType() == armnn::DataType::Float32));
 
     // Check whether data matches expected Bf16 data
     const armnn::BFloat16* data = conv->m_Weight->GetConstTensor<armnn::BFloat16>();
-    BOOST_CHECK(data[0] == armnn::BFloat16(0.0f));
-    BOOST_CHECK(data[1] == armnn::BFloat16(-1.0f));
-    BOOST_CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
-    BOOST_CHECK(data[3] == armnn::BFloat16(3.1072295E29f)); // 0x707B
-    BOOST_CHECK(data[4] == armnn::BFloat16(9.131327E-10f)); // 0x307B
-    BOOST_CHECK(data[5] == armnn::BFloat16(-3.796875f)); // 0xC073
-    BOOST_CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
-    BOOST_CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
+    CHECK(data[0] == armnn::BFloat16(0.0f));
+    CHECK(data[1] == armnn::BFloat16(-1.0f));
+    CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
+    CHECK(data[3] == armnn::BFloat16(3.1072295E29f)); // 0x707B
+    CHECK(data[4] == armnn::BFloat16(9.131327E-10f)); // 0x307B
+    CHECK(data[5] == armnn::BFloat16(-3.796875f)); // 0xC073
+    CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
+    CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
 }
 
-BOOST_AUTO_TEST_CASE(Fp32NetworkToBf16OptimizationFullyConnectedTest)
+TEST_CASE("Fp32NetworkToBf16OptimizationFullyConnectedTest")
 {
     armnn::Graph graph;
 
@@ -152,35 +153,35 @@
     input->GetOutputSlot().Connect(fc->GetInputSlot(0));
     fc->GetOutputSlot().Connect(output->GetInputSlot(0));
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::FullyConnectedLayer>, &IsLayerOfType<armnn::OutputLayer>));
 
     // Run the optimizer
     armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToBf16Converter()));
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::ConvertFp32ToBf16Layer>, &IsLayerOfType<armnn::FullyConnectedLayer>,
                              &IsLayerOfType<armnn::OutputLayer>));
 
     armnn::TensorInfo inputTensor = fc->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
     armnn::TensorInfo outputTensor = fc->GetOutputSlot(0).GetTensorInfo();
-    BOOST_TEST((fc->GetDataType() == armnn::DataType::BFloat16));
-    BOOST_TEST((fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
-    BOOST_TEST((fc->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
-    BOOST_TEST((inputTensor.GetDataType() == armnn::DataType::BFloat16));
-    BOOST_TEST((outputTensor.GetDataType() == armnn::DataType::Float32));
+    CHECK((fc->GetDataType() == armnn::DataType::BFloat16));
+    CHECK((fc->m_Weight->GetTensorInfo().GetDataType() == armnn::DataType::BFloat16));
+    CHECK((fc->m_Bias->GetTensorInfo().GetDataType() == armnn::DataType::Float32));
+    CHECK((inputTensor.GetDataType() == armnn::DataType::BFloat16));
+    CHECK((outputTensor.GetDataType() == armnn::DataType::Float32));
 
     // Check whether data matches expected Bf16 data
     const armnn::BFloat16* data = fc->m_Weight->GetConstTensor<armnn::BFloat16>();
-    BOOST_CHECK(data[0] == armnn::BFloat16(0.0f));
-    BOOST_CHECK(data[1] == armnn::BFloat16(-1.0f));
-    BOOST_CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
-    BOOST_CHECK(data[3] == armnn::BFloat16(3.1072295E29f)); // 0x707B
-    BOOST_CHECK(data[4] == armnn::BFloat16(9.131327E-10f)); // 0x307B
-    BOOST_CHECK(data[5] == armnn::BFloat16(-3.796875f)); // 0xC073
-    BOOST_CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
-    BOOST_CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
+    CHECK(data[0] == armnn::BFloat16(0.0f));
+    CHECK(data[1] == armnn::BFloat16(-1.0f));
+    CHECK(data[2] == armnn::BFloat16(3.796875f)); // 0x4073
+    CHECK(data[3] == armnn::BFloat16(3.1072295E29f)); // 0x707B
+    CHECK(data[4] == armnn::BFloat16(9.131327E-10f)); // 0x307B
+    CHECK(data[5] == armnn::BFloat16(-3.796875f)); // 0xC073
+    CHECK(data[6] == armnn::BFloat16(-3.1072295E29f)); // 0xF07B
+    CHECK(data[7] == armnn::BFloat16(-9.131327E-10f)); // 0xB07B
 }
 
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp b/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp
index 16037a8..e2ac1bd 100644
--- a/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp
+++ b/src/armnn/test/optimizations/Fp32NetworkToFp16ConverterTests.cpp
@@ -7,12 +7,13 @@
 
 #include <Optimizer.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
 using namespace armnn::optimizations;
 
-BOOST_AUTO_TEST_CASE(Fp32NetworkToFp16OptimizationTest)
+TEST_CASE("Fp32NetworkToFp16OptimizationTest")
 {
     armnn::Graph graph;
 
@@ -31,15 +32,15 @@
     input->GetOutputSlot().Connect(floor->GetInputSlot(0));
     floor->GetOutputSlot().Connect(output->GetInputSlot(0));
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>));
 
     // Run the optimizer
     armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(Fp32NetworkToFp16Converter()));
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>, &IsLayerOfType<armnn::FloorLayer>,
                              &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>, &IsLayerOfType<armnn::OutputLayer>));
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
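
The FuseActivationTests changes below map BOOST_CHECK_CLOSE onto doctest::Approx. One porting nuance worth keeping in mind: BOOST_CHECK_CLOSE takes its tolerance as a percentage, while Approx::epsilon() takes a relative fraction, so a tolerance value carried over unchanged becomes roughly 100x looser. A standalone sketch with illustrative values:

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN // illustrative; normally defined in exactly one TU
#include <doctest/doctest.h>

TEST_CASE("ApproxToleranceMapping")
{
    const float expected = 10.0f;
    const float measured = 10.0004f;

    // BOOST_CHECK_CLOSE(measured, expected, 0.01) accepts a 0.01 percent relative
    // error; the matching Approx epsilon is that value divided by 100.
    const double boostPercentTolerance = 0.01;
    CHECK(measured == doctest::Approx(expected).epsilon(boostPercentTolerance / 100.0));

    // Passing the percentage straight through still compiles, but the accepted
    // error bound grows by roughly a factor of 100.
    CHECK(measured == doctest::Approx(expected).epsilon(boostPercentTolerance));
}
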
diff --git a/src/armnn/test/optimizations/FuseActivationTests.cpp b/src/armnn/test/optimizations/FuseActivationTests.cpp
index 71a554b..9e33213 100644
--- a/src/armnn/test/optimizations/FuseActivationTests.cpp
+++ b/src/armnn/test/optimizations/FuseActivationTests.cpp
@@ -10,15 +10,15 @@
 #include <armnn/INetwork.hpp>
 #include <test/TestUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <QuantizeHelper.hpp>
 #include <string>
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(Optimizer)
-
+TEST_SUITE("Optimizer")
+{
 namespace armnn
 {
 
@@ -352,8 +352,8 @@
             (layer->GetNameStr() == "fused-activation-into-receiverLayer");
     };
 
-    BOOST_CHECK(3 == graphFused.GetNumLayers());
-    BOOST_TEST(CheckSequence(graphFused.cbegin(),
+    CHECK(3 == graphFused.GetNumLayers());
+    CHECK(CheckSequence(graphFused.cbegin(),
                              graphFused.cend(),
                              &IsLayerOfType<InputLayer>,
                              checkFusedConv2d,
@@ -361,7 +361,7 @@
 
     // Load network into runtime
     NetworkId networkIdentifier;
-    BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optNetFused)) == Status::Success);
+    CHECK(run->LoadNetwork(networkIdentifier, std::move(optNetFused)) == Status::Success);
 
     //Creates structures for inputs and outputs.
     std::vector<float> data = GetVector<float>(LayerTest::inputSize, 1.0f, 0.1f);
@@ -374,7 +374,7 @@
         {0, Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputDataFused.data())}};
 
     // Execute network
-    BOOST_TEST(run->EnqueueWorkload(networkIdentifier, inputTensorsFused, outputTensorsFused) == Status::Success);
+    CHECK(run->EnqueueWorkload(networkIdentifier, inputTensorsFused, outputTensorsFused) == Status::Success);
 
     // SECOND NETWORK: NotFused
     // Construct ArmNN network
@@ -388,8 +388,8 @@
 
     Graph& graphNotFused = GetGraphForTesting(optNetNotFused.get());
 
-    BOOST_CHECK(5 == graphNotFused.GetNumLayers());
-    BOOST_TEST(CheckSequence(graphNotFused.cbegin(),
+    CHECK(5 == graphNotFused.GetNumLayers());
+    CHECK(CheckSequence(graphNotFused.cbegin(),
                              graphNotFused.cend(),
                              &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<LayerType>,
@@ -399,7 +399,7 @@
 
     // Load network into runtime
     NetworkId networkIdentifierNotFused;
-    BOOST_TEST(runNotFused->LoadNetwork(networkIdentifierNotFused, std::move(optNetNotFused)) == Status::Success);
+    CHECK(runNotFused->LoadNetwork(networkIdentifierNotFused, std::move(optNetNotFused)) == Status::Success);
 
     //Creates structures for inputs and outputs.
     std::vector<T> inputDataNotFused = armnnUtils::QuantizedVector<T>(data, scale, offset);
@@ -413,14 +413,14 @@
         {1, Tensor(runNotFused->GetOutputTensorInfo(networkIdentifierNotFused, 1), outputData2NotFused.data())}};
 
     // Execute network
-    BOOST_TEST(runNotFused->EnqueueWorkload(networkIdentifierNotFused, inputTensorsNotFused, outputTensorsNotFused)
+    CHECK(runNotFused->EnqueueWorkload(networkIdentifierNotFused, inputTensorsNotFused, outputTensorsNotFused)
                == Status::Success);
 
     // Check that the output of the fused-activation matches the output of the activation in the "NotFused" network
     for (unsigned int n = 0; n < outputDataFused.size(); ++n)
     {
-        BOOST_CHECK_CLOSE(static_cast<float>(outputDataFused[n]), static_cast<float>(outputDataNotFused[n]),
-                          T(tolerance));
+        auto outputNotFused = static_cast<float>(outputDataNotFused[n]);
+        CHECK(static_cast<float>(outputDataFused[n]) == doctest::Approx(outputNotFused).epsilon(tolerance));
     }
 }
 
@@ -445,7 +445,7 @@
 
         // Load network into runtime
         NetworkId networkIdentifier;
-        BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optNetFused)) == Status::Success);
+        CHECK(run->LoadNetwork(networkIdentifier, std::move(optNetFused)) == Status::Success);
 
         //Creates structures for inputs and outputs.
         std::vector<float> data           = GetVector<float>(LayerTest::inputSize, 1.0f, 0.1f);
@@ -476,7 +476,7 @@
 using namespace armnn;
 #if defined(ARMCOMPUTENEON_ENABLED)
 // ReLu fused into Receiver Layers Float32
-BOOST_AUTO_TEST_CASE(FuseReLUIntoConvFloat32CpuAccTest)
+TEST_CASE("FuseReLUIntoConvFloat32CpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -484,7 +484,7 @@
     FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
         (activationDescriptor, 0.0001f, Compute::CpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseReLUIntoDWConvFloat32CpuAccTest)
+TEST_CASE("FuseReLUIntoDWConvFloat32CpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -492,7 +492,7 @@
     FuseActivationIntoPreviousLayerTest<DWConvolution2dTest<DataType::Float32>, DataType::Float32>
         (activationDescriptor, 0.0001f, Compute::CpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseReLUIntoFullyConnectedFloat32CpuAccTest)
+TEST_CASE("FuseReLUIntoFullyConnectedFloat32CpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -500,7 +500,7 @@
     FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::Float32>, DataType::Float32>
         (activationDescriptor, 0.0001f, Compute::CpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseReLUIntoBatchNormFloat32CpuAccTest)
+TEST_CASE("FuseReLUIntoBatchNormFloat32CpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -510,7 +510,7 @@
 }
 
 // BoundedReLu fused into Receiver Layers Float32
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoConvFloat32CpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoConvFloat32CpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -520,7 +520,7 @@
     FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
         (activationDescriptor, 0.0001f, Compute::CpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoDWConvFloat32CpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoDWConvFloat32CpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -530,7 +530,7 @@
     FuseActivationIntoPreviousLayerTest < DWConvolution2dTest < DataType::Float32 > , DataType::Float32 >
         (activationDescriptor, 0.0001f, Compute::CpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoFullyConnectedFloat32CpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoFullyConnectedFloat32CpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -540,7 +540,7 @@
     FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::Float32>, DataType::Float32>
         (activationDescriptor, 0.0001f, Compute::CpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoBatchNormFloat32CpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoBatchNormFloat32CpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -552,7 +552,7 @@
 }
 
 // ReLU fused into Receiver Layers QAsymmU8
-BOOST_AUTO_TEST_CASE(FuseReLUIntoConvQAsymmU8CpuAccTest)
+TEST_CASE("FuseReLUIntoConvQAsymmU8CpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -560,7 +560,7 @@
     FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
         (activationDescriptor, 0.0001f, Compute::CpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseReLUIntoDWConvQAsymmU8CpuAccTest)
+TEST_CASE("FuseReLUIntoDWConvQAsymmU8CpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -568,7 +568,7 @@
     FuseActivationIntoPreviousLayerTest<DWConvolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
         (activationDescriptor, 0.0001f, Compute::CpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseReLUIntoFullyConnectedQAsymmU8CpuAccTest)
+TEST_CASE("FuseReLUIntoFullyConnectedQAsymmU8CpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -578,7 +578,7 @@
 }
 
 // BoundedReLu fused into Receiver Layers QAsymmS8
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoConvQASymmS8CpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoConvQASymmS8CpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -588,7 +588,7 @@
     FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::QAsymmS8>, DataType::QAsymmS8>
         (activationDescriptor, 0.0001f, Compute::CpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoDWConvQASymmS8CpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoDWConvQASymmS8CpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -598,7 +598,7 @@
     FuseActivationIntoPreviousLayerTest < DWConvolution2dTest < DataType::QAsymmS8 > , DataType::QAsymmS8 >
         (activationDescriptor, 0.0001f, Compute::CpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoFullyConnectedQASymmS8CpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoFullyConnectedQASymmS8CpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -610,7 +610,7 @@
 }
 
 // TanH fused into Receiver Layers Float32
-BOOST_AUTO_TEST_CASE(FuseTanHIntoConvFloat32CpuAccTest)
+TEST_CASE("FuseTanHIntoConvFloat32CpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::TanH;
@@ -620,7 +620,7 @@
 }
 
 // HardSwish fused into Receiver Layers Float32
-BOOST_AUTO_TEST_CASE(FuseHardSwishIntoConvFloat32CpuAccTest)
+TEST_CASE("FuseHardSwishIntoConvFloat32CpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::HardSwish;
@@ -630,7 +630,7 @@
 }
 
 // Test that all receiver layers follow by all activation layers work, either fused or not fused
-BOOST_AUTO_TEST_CASE(LayerFollowedByActivationFloat32CpuAccTest)
+TEST_CASE("LayerFollowedByActivationFloat32CpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     for (int i = 0; i != 12; ++i)
@@ -638,17 +638,17 @@
         activationDescriptor.m_Function = static_cast<ActivationFunction>(i);
         activationDescriptor.m_A = 1.0f;
         activationDescriptor.m_B = -1.0f;
-        BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
+        CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
             (activationDescriptor, Compute::CpuAcc)), "Convolution + Activation function " << i);
-        BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<DWConvolution2dTest<DataType::Float32>, DataType::Float32>
+        CHECK_MESSAGE((FuseActivationSimpleTest<DWConvolution2dTest<DataType::Float32>, DataType::Float32>
             (activationDescriptor, Compute::CpuAcc)), "DepthwiseConvolution + Activation function " << i);
-        BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::Float32>, DataType::Float32>
+        CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::Float32>, DataType::Float32>
             (activationDescriptor, Compute::CpuAcc)), "FullyConnected + Activation function " << i);
-        BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<BatchNormTest<DataType::Float32>, DataType::Float32>
+        CHECK_MESSAGE((FuseActivationSimpleTest<BatchNormTest<DataType::Float32>, DataType::Float32>
             (activationDescriptor, Compute::CpuAcc)), "BatchNorm + Activation function " << i);
     }
 }
-BOOST_AUTO_TEST_CASE(LayerFollowedByActivationFloat16CpuAccTest)
+TEST_CASE("LayerFollowedByActivationFloat16CpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     for (int i = 0; i != 12; ++i)
@@ -656,59 +656,59 @@
         activationDescriptor.m_Function = static_cast<ActivationFunction>(i);
         activationDescriptor.m_A = 1.0f;
         activationDescriptor.m_B = -1.0f;
-        BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::Float16>, DataType::Float16>
+        CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::Float16>, DataType::Float16>
             (activationDescriptor, Compute::CpuAcc)), "Convolution + Activation function " << i);
-        BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<DWConvolution2dTest<DataType::Float16>, DataType::Float16>
+        CHECK_MESSAGE((FuseActivationSimpleTest<DWConvolution2dTest<DataType::Float16>, DataType::Float16>
             (activationDescriptor, Compute::CpuAcc)), "DepthwiseConvolution + Activation function " << i);
-        BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::Float16>, DataType::Float16>
+        CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::Float16>, DataType::Float16>
             (activationDescriptor, Compute::CpuAcc)), "FullyConnected + Activation function " << i);
-        BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<BatchNormTest<DataType::Float16>, DataType::Float16>
+        CHECK_MESSAGE((FuseActivationSimpleTest<BatchNormTest<DataType::Float16>, DataType::Float16>
             (activationDescriptor, Compute::CpuAcc)), "BatchNorm + Activation function " << i);
     }
 }
-BOOST_AUTO_TEST_CASE(LayerFollowedByActivationQAsymmU8CpuAccTest)
+TEST_CASE("LayerFollowedByActivationQAsymmU8CpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
 
     activationDescriptor.m_Function = ActivationFunction::Sigmoid;
-    BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+    CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
         (activationDescriptor, Compute::CpuAcc, 1.f / 256.f, 0)), "Convolution + Activation function " <<
         static_cast<int>(activationDescriptor.m_Function));
-    BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+    CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
         (activationDescriptor, Compute::CpuAcc, 1.f / 256.f, 0)), "FullyConnected + Activation function " <<
         static_cast<int>(activationDescriptor.m_Function));
 
     activationDescriptor.m_Function = ActivationFunction::TanH;
-    BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+    CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
         (activationDescriptor, Compute::CpuAcc, 1.f / 128.f, 128)), "Convolution + Activation function " <<
         static_cast<int>(activationDescriptor.m_Function));
-    BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+    CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
         (activationDescriptor, Compute::CpuAcc, 1.f / 128.f, 128)), "FullyConnected + Activation function " <<
         static_cast<int>(activationDescriptor.m_Function));
 
     activationDescriptor.m_Function = ActivationFunction::ReLu;
-    BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+    CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
         (activationDescriptor, Compute::CpuAcc)), "Convolution + Activation function " <<
         static_cast<int>(activationDescriptor.m_Function));
-    BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+    CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
         (activationDescriptor, Compute::CpuAcc)), "FullyConnected + Activation function " <<
         static_cast<int>(activationDescriptor.m_Function));
 
     activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
     activationDescriptor.m_A = 1.0f;
     activationDescriptor.m_B = -1.0f;
-    BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+    CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
         (activationDescriptor, Compute::CpuAcc)), "Convolution + Activation function " <<
         static_cast<int>(activationDescriptor.m_Function));
-    BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+    CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
         (activationDescriptor, Compute::CpuAcc)), "FullyConnected + Activation function " <<
         static_cast<int>(activationDescriptor.m_Function));
 
     activationDescriptor.m_Function = ActivationFunction::HardSwish;
-    BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+    CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
         (activationDescriptor, Compute::CpuAcc)), "Convolution + Activation function " <<
         static_cast<int>(activationDescriptor.m_Function));
-    BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+    CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
         (activationDescriptor, Compute::CpuAcc)), "FullyConnected + Activation function " <<
         static_cast<int>(activationDescriptor.m_Function));
 }
@@ -716,7 +716,7 @@
 
 #if defined(ARMCOMPUTECL_ENABLED)
 // ReLu fused into Receiver Layers Float32
-BOOST_AUTO_TEST_CASE(FuseReLUIntoConvFloat32GpuAccTest)
+TEST_CASE("FuseReLUIntoConvFloat32GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -724,7 +724,7 @@
     FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseReLUIntoDWConvFloat32GpuAccTest)
+TEST_CASE("FuseReLUIntoDWConvFloat32GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -732,7 +732,7 @@
     FuseActivationIntoPreviousLayerTest<DWConvolution2dTest<DataType::Float32>, DataType::Float32>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseReLUIntoFullyConnectedFloat32GpuAccTest)
+TEST_CASE("FuseReLUIntoFullyConnectedFloat32GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -740,7 +740,7 @@
     FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::Float32>, DataType::Float32>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseReLUIntoBatchNormFloat32GpuAccTest)
+TEST_CASE("FuseReLUIntoBatchNormFloat32GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -748,7 +748,7 @@
     FuseActivationIntoPreviousLayerTest<BatchNormTest<DataType::Float32>, DataType::Float32>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseReLUIntoMulFloat32GpuAccTest)
+TEST_CASE("FuseReLUIntoMulFloat32GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -756,7 +756,7 @@
     FuseActivationIntoPreviousLayerTest<MultiplicationTest<DataType::Float32>, DataType::Float32>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseReLUIntoAddFloat32GpuAccTest)
+TEST_CASE("FuseReLUIntoAddFloat32GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -764,7 +764,7 @@
     FuseActivationIntoPreviousLayerTest<AdditionTest<DataType::Float32>, DataType::Float32>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseReLUIntoSubFloat32GpuAccTest)
+TEST_CASE("FuseReLUIntoSubFloat32GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -772,7 +772,7 @@
     FuseActivationIntoPreviousLayerTest<SubtractionTest<DataType::Float32>, DataType::Float32>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseReLUIntoDivFloat32GpuAccTest)
+TEST_CASE("FuseReLUIntoDivFloat32GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -782,7 +782,7 @@
 }
 
 // BoundedReLu fused into Receiver Layers Float32
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoConvFloat32GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoConvFloat32GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -792,7 +792,7 @@
     FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoDWConvFloat32GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoDWConvFloat32GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -802,7 +802,7 @@
     FuseActivationIntoPreviousLayerTest<DWConvolution2dTest<DataType::Float32>, DataType::Float32>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoFullyConnectedFloat32GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoFullyConnectedFloat32GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -812,7 +812,7 @@
     FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::Float32>, DataType::Float32>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoBatchNormFloat32GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoBatchNormFloat32GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -822,7 +822,7 @@
     FuseActivationIntoPreviousLayerTest<BatchNormTest<DataType::Float32>, DataType::Float32>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoMulFloat32GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoMulFloat32GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -832,7 +832,7 @@
     FuseActivationIntoPreviousLayerTest<MultiplicationTest<DataType::Float32>, DataType::Float32>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoAddFloat32GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoAddFloat32GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -842,7 +842,7 @@
     FuseActivationIntoPreviousLayerTest<AdditionTest<DataType::Float32>, DataType::Float32>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoSubFloat32GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoSubFloat32GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -852,7 +852,7 @@
     FuseActivationIntoPreviousLayerTest<SubtractionTest<DataType::Float32>, DataType::Float32>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoDivFloat32GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoDivFloat32GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -864,7 +864,7 @@
 }
 
 // ReLu fused into Receiver Layers Float16
-BOOST_AUTO_TEST_CASE(FuseReLUIntoConvFloat16GpuAccTest)
+TEST_CASE("FuseReLUIntoConvFloat16GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -872,7 +872,7 @@
     FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float16>, DataType::Float16>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseReLUIntoDWConvFloat16GpuAccTest)
+TEST_CASE("FuseReLUIntoDWConvFloat16GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -880,7 +880,7 @@
     FuseActivationIntoPreviousLayerTest<DWConvolution2dTest<DataType::Float16>, DataType::Float16>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseReLUIntoFullyConnectedFloat16GpuAccTest)
+TEST_CASE("FuseReLUIntoFullyConnectedFloat16GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -888,7 +888,7 @@
     FuseActivationIntoPreviousLayerTest<FullyConnectedTest<DataType::Float16>, DataType::Float16>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseReLUIntoBatchNormFloat16GpuAccTest)
+TEST_CASE("FuseReLUIntoBatchNormFloat16GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -896,7 +896,7 @@
     FuseActivationIntoPreviousLayerTest<BatchNormTest<DataType::Float16>, DataType::Float16>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseReLUIntoMulFloat16GpuAccTest)
+TEST_CASE("FuseReLUIntoMulFloat16GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -904,7 +904,7 @@
     FuseActivationIntoPreviousLayerTest<MultiplicationTest<DataType::Float16>, DataType::Float16>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseReLUIntoAddFloat16GpuAccTest)
+TEST_CASE("FuseReLUIntoAddFloat16GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -912,7 +912,7 @@
     FuseActivationIntoPreviousLayerTest<AdditionTest<DataType::Float16>, DataType::Float16>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseReLUIntoSubFloat16GpuAccTest)
+TEST_CASE("FuseReLUIntoSubFloat16GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -920,7 +920,7 @@
     FuseActivationIntoPreviousLayerTest<SubtractionTest<DataType::Float16>, DataType::Float16>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseReLUIntoDivFloat16GpuAccTest)
+TEST_CASE("FuseReLUIntoDivFloat16GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -930,7 +930,7 @@
 }
 
 // ReLU fused into Receiver Layers QAsymmU8
-BOOST_AUTO_TEST_CASE(FuseReLUQIntoConvAsymmU8GpuAccTest)
+TEST_CASE("FuseReLUQIntoConvAsymmU8GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -938,7 +938,7 @@
     FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseReLUQIntoDWConvAsymmU8GpuAccTest)
+TEST_CASE("FuseReLUQIntoDWConvAsymmU8GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -946,7 +946,7 @@
     FuseActivationIntoPreviousLayerTest<DWConvolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseReLUQIntoFullyConnectedAsymmU8GpuAccTest)
+TEST_CASE("FuseReLUQIntoFullyConnectedAsymmU8GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::ReLu;
@@ -956,7 +956,7 @@
 }
 
 // BoundedReLu fused into Receiver Layers QAsymmS8
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoConvQASymmS8GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoConvQASymmS8GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -966,7 +966,7 @@
     FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::QAsymmS8>, DataType::QAsymmS8>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoDWConvQASymmS8GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoDWConvQASymmS8GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -976,7 +976,7 @@
     FuseActivationIntoPreviousLayerTest < DWConvolution2dTest < DataType::QAsymmS8 > , DataType::QAsymmS8 >
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseBoundedReLUIntoFullyConnectedQASymmS8GpuAccTest)
+TEST_CASE("FuseBoundedReLUIntoFullyConnectedQASymmS8GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
@@ -988,7 +988,7 @@
 }
 
 // TanH fused into Receiver Layers Float32
-BOOST_AUTO_TEST_CASE(FuseTanHIntoConvFloat32GpuAccTest)
+TEST_CASE("FuseTanHIntoConvFloat32GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::TanH;
@@ -996,7 +996,7 @@
     FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseTanHIntoMulFloat32GpuAccTest)
+TEST_CASE("FuseTanHIntoMulFloat32GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::TanH;
@@ -1004,7 +1004,7 @@
     FuseActivationIntoPreviousLayerTest<MultiplicationTest<DataType::Float32>, DataType::Float32>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseTanHIntoAddFloat32GpuAccTest)
+TEST_CASE("FuseTanHIntoAddFloat32GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::TanH;
@@ -1012,7 +1012,7 @@
     FuseActivationIntoPreviousLayerTest<AdditionTest<DataType::Float32>, DataType::Float32>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseTanHIntoSubFloat32GpuAccTest)
+TEST_CASE("FuseTanHIntoSubFloat32GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::TanH;
@@ -1020,7 +1020,7 @@
     FuseActivationIntoPreviousLayerTest<SubtractionTest<DataType::Float32>, DataType::Float32>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseTanHIntoDivFloat32GpuAccTest)
+TEST_CASE("FuseTanHIntoDivFloat32GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::TanH;
@@ -1030,7 +1030,7 @@
 }
 
 // HardSwish fused into Receiver Layers Float32
-BOOST_AUTO_TEST_CASE(FuseHardSwishIntoConvFloat32GpuAccTest)
+TEST_CASE("FuseHardSwishIntoConvFloat32GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::HardSwish;
@@ -1038,7 +1038,7 @@
     FuseActivationIntoPreviousLayerTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseHardSwishIntoMulFloat32GpuAccTest)
+TEST_CASE("FuseHardSwishIntoMulFloat32GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::HardSwish;
@@ -1046,7 +1046,7 @@
     FuseActivationIntoPreviousLayerTest<MultiplicationTest<DataType::Float32>, DataType::Float32>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseHardSwishIntoAddFloat32GpuAccTest)
+TEST_CASE("FuseHardSwishIntoAddFloat32GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::HardSwish;
@@ -1054,7 +1054,7 @@
     FuseActivationIntoPreviousLayerTest<AdditionTest<DataType::Float32>, DataType::Float32>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseHardSwishIntoSubFloat32GpuAccTest)
+TEST_CASE("FuseHardSwishIntoSubFloat32GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::HardSwish;
@@ -1062,7 +1062,7 @@
     FuseActivationIntoPreviousLayerTest<SubtractionTest<DataType::Float32>, DataType::Float32>
         (activationDescriptor, 0.0001f, Compute::GpuAcc);
 }
-BOOST_AUTO_TEST_CASE(FuseHardSwishIntoDivFloat32GpuAccTest)
+TEST_CASE("FuseHardSwishIntoDivFloat32GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     activationDescriptor.m_Function = ActivationFunction::HardSwish;
@@ -1072,7 +1072,7 @@
 }
 
 // Test that all receiver layers follow by all activation layers work, either fused or not fused
-BOOST_AUTO_TEST_CASE(LayerFollowedByActivationFloat32GpuAccTest)
+TEST_CASE("LayerFollowedByActivationFloat32GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     for (int i = 0; i != 12; ++i)
@@ -1082,26 +1082,26 @@
         activationDescriptor.m_B = -1.0f;
         if (activationDescriptor.m_Function != ActivationFunction::Elu)
         {
-            BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
+            CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::Float32>, DataType::Float32>
                 (activationDescriptor, Compute::GpuAcc)), "Convolution + Activation function " << i);
-            BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<DWConvolution2dTest<DataType::Float32>, DataType::Float32>
+            CHECK_MESSAGE((FuseActivationSimpleTest<DWConvolution2dTest<DataType::Float32>, DataType::Float32>
                 (activationDescriptor, Compute::GpuAcc)), "DepthwiseConvolution + Activation function " << i);
-            BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::Float32>, DataType::Float32>
+            CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::Float32>, DataType::Float32>
                 (activationDescriptor, Compute::GpuAcc)), "FullyConnected + Activation function " << i);
-            BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<BatchNormTest<DataType::Float32>, DataType::Float32>
+            CHECK_MESSAGE((FuseActivationSimpleTest<BatchNormTest<DataType::Float32>, DataType::Float32>
                 (activationDescriptor, Compute::GpuAcc)), "BatchNorm + Activation function " << i);
-            BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<MultiplicationTest<DataType::Float32>, DataType::Float32>
+            CHECK_MESSAGE((FuseActivationSimpleTest<MultiplicationTest<DataType::Float32>, DataType::Float32>
                 (activationDescriptor, Compute::GpuAcc)), "Multiplication + Activation function " << i);
-            BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<AdditionTest<DataType::Float32>, DataType::Float32>
+            CHECK_MESSAGE((FuseActivationSimpleTest<AdditionTest<DataType::Float32>, DataType::Float32>
                 (activationDescriptor, Compute::GpuAcc)), "Addition + Activation function " << i);
-            BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<SubtractionTest<DataType::Float32>, DataType::Float32>
+            CHECK_MESSAGE((FuseActivationSimpleTest<SubtractionTest<DataType::Float32>, DataType::Float32>
                 (activationDescriptor, Compute::GpuAcc)), "Subtraction + Activation function " << i);
-            BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<DivisionTest<DataType::Float32>, DataType::Float32>
+            CHECK_MESSAGE((FuseActivationSimpleTest<DivisionTest<DataType::Float32>, DataType::Float32>
                 (activationDescriptor, Compute::GpuAcc)), "Division + Activation function " << i);
         }
     }
 }
-BOOST_AUTO_TEST_CASE(LayerFollowedByActivationFloat16GpuAccTest)
+TEST_CASE("LayerFollowedByActivationFloat16GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
     for (int i = 0; i != 12; ++i)
@@ -1111,71 +1111,71 @@
         activationDescriptor.m_B = -1.0f;
         if (activationDescriptor.m_Function != ActivationFunction::Elu)
         {
-            BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::Float16>, DataType::Float16>
+            CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::Float16>, DataType::Float16>
                 (activationDescriptor, Compute::GpuAcc)), "Convolution + Activation function " << i);
-            BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<DWConvolution2dTest<DataType::Float16>, DataType::Float16>
+            CHECK_MESSAGE((FuseActivationSimpleTest<DWConvolution2dTest<DataType::Float16>, DataType::Float16>
                 (activationDescriptor, Compute::GpuAcc)), "Depthwise + Activation function " << i);
-            BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::Float16>, DataType::Float16>
+            CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::Float16>, DataType::Float16>
                 (activationDescriptor, Compute::GpuAcc)), "FullyConnected + Activation function " << i);
-            BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<BatchNormTest<DataType::Float16>, DataType::Float16>
+            CHECK_MESSAGE((FuseActivationSimpleTest<BatchNormTest<DataType::Float16>, DataType::Float16>
                 (activationDescriptor, Compute::GpuAcc)), "BatchNorm + Activation function " << i);
-            BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<MultiplicationTest<DataType::Float16>, DataType::Float16>
+            CHECK_MESSAGE((FuseActivationSimpleTest<MultiplicationTest<DataType::Float16>, DataType::Float16>
                 (activationDescriptor, Compute::GpuAcc)), "Multiplication + Activation function " << i);
-            BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<AdditionTest<DataType::Float16>, DataType::Float16>
+            CHECK_MESSAGE((FuseActivationSimpleTest<AdditionTest<DataType::Float16>, DataType::Float16>
                 (activationDescriptor, Compute::GpuAcc)), "Addition + Activation function " << i);
-            BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<SubtractionTest<DataType::Float16>, DataType::Float16>
+            CHECK_MESSAGE((FuseActivationSimpleTest<SubtractionTest<DataType::Float16>, DataType::Float16>
                 (activationDescriptor, Compute::GpuAcc)), "Subtraction + Activation function " << i);
-            BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<DivisionTest<DataType::Float16>, DataType::Float16>
+            CHECK_MESSAGE((FuseActivationSimpleTest<DivisionTest<DataType::Float16>, DataType::Float16>
                 (activationDescriptor, Compute::GpuAcc)), "Division + Activation function " << i);
         }
     }
 }
-BOOST_AUTO_TEST_CASE(LayerFollowedByActivationQAsymmU8GpuAccTest)
+TEST_CASE("LayerFollowedByActivationQAsymmU8GpuAccTest")
 {
     ActivationDescriptor activationDescriptor;
 
     activationDescriptor.m_Function = ActivationFunction::Sigmoid;
-    BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+    CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
         (activationDescriptor, Compute::GpuAcc, 1.f / 256.f, 0)), "Convolution + Activation function " <<
         static_cast<int>(activationDescriptor.m_Function));
-    BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+    CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
         (activationDescriptor, Compute::GpuAcc, 1.f / 256.f, 0)), "FullyConnected + Activation function " <<
         static_cast<int>(activationDescriptor.m_Function));
 
     activationDescriptor.m_Function = ActivationFunction::TanH;
-    BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+    CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
         (activationDescriptor, Compute::GpuAcc, 1.f / 128.f, 128)), "Convolution + Activation function " <<
         static_cast<int>(activationDescriptor.m_Function));
-    BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+    CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
         (activationDescriptor, Compute::GpuAcc, 1.f / 128.f, 128)), "FullyConnected + Activation function " <<
         static_cast<int>(activationDescriptor.m_Function));
 
     activationDescriptor.m_Function = ActivationFunction::ReLu;
-    BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+    CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
         (activationDescriptor, Compute::GpuAcc)), "Convolution + Activation function " <<
         static_cast<int>(activationDescriptor.m_Function));
-    BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+    CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
         (activationDescriptor, Compute::GpuAcc)), "FullyConnected + Activation function " <<
         static_cast<int>(activationDescriptor.m_Function));
 
     activationDescriptor.m_Function = ActivationFunction::BoundedReLu;
     activationDescriptor.m_A = 1.0f;
     activationDescriptor.m_B = -1.0f;
-    BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+    CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
         (activationDescriptor, Compute::GpuAcc)), "Convolution + Activation function " <<
         static_cast<int>(activationDescriptor.m_Function));
-    BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+    CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
         (activationDescriptor, Compute::GpuAcc)), "FullyConnected + Activation function " <<
         static_cast<int>(activationDescriptor.m_Function));
 
     activationDescriptor.m_Function = ActivationFunction::HardSwish;
-    BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+    CHECK_MESSAGE((FuseActivationSimpleTest<Convolution2dTest<DataType::QAsymmU8>, DataType::QAsymmU8>
         (activationDescriptor, Compute::GpuAcc)), "Convolution + Activation function " <<
         static_cast<int>(activationDescriptor.m_Function));
-    BOOST_CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
+    CHECK_MESSAGE((FuseActivationSimpleTest<FullyConnectedTest<DataType::QAsymmU8>, DataType::QAsymmU8>
         (activationDescriptor, Compute::GpuAcc)), "FullyConnected + Activation function " <<
         static_cast<int>(activationDescriptor.m_Function));
 }
 #endif
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
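
For reference, the file above follows the same mechanical mapping used across the whole patch; the standalone sketch below summarises it (the DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN define and the test names are illustrative only and are not part of the ArmNN build).

// Minimal standalone sketch of the Boost.Test -> doctest mapping applied in this file.
// Assumption: single-header doctest only; nothing here is wired into the ArmNN build.
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>

TEST_SUITE("Optimizer")
{                                              // braced scope replaces BOOST_AUTO_TEST_SUITE/_SUITE_END
TEST_CASE("MacroMappingSketch")                // BOOST_AUTO_TEST_CASE(Name) -> TEST_CASE("Name")
{
    int numLayers = 3;
    CHECK(3 == numLayers);                     // BOOST_CHECK / BOOST_TEST -> CHECK
    CHECK_MESSAGE(numLayers > 0,               // BOOST_CHECK_MESSAGE -> CHECK_MESSAGE,
                  "layer count " << numLayers); // message still built with operator<<
}
}
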
diff --git a/src/armnn/test/optimizations/FuseBatchNormTests.cpp b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
index be66c5e..671f565 100644
--- a/src/armnn/test/optimizations/FuseBatchNormTests.cpp
+++ b/src/armnn/test/optimizations/FuseBatchNormTests.cpp
@@ -10,12 +10,12 @@
 #include <armnn/INetwork.hpp>
 #include <test/TestUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(Optimizer)
-
+TEST_SUITE("Optimizer")
+{
 namespace
 {
 
@@ -194,8 +194,8 @@
                (layer->GetNameStr() == "fused-batchNorm-into-convolution");
     };
 
-    BOOST_CHECK(3 == graphFused.GetNumLayers());
-    BOOST_TEST(CheckSequence(graphFused.cbegin(),
+    CHECK(3 == graphFused.GetNumLayers());
+    CHECK(CheckSequence(graphFused.cbegin(),
                              graphFused.cend(),
                              &IsLayerOfType<InputLayer>,
                              checkFusedConv2d,
@@ -203,7 +203,7 @@
 
     // Load network into runtime
     NetworkId networkIdentifier;
-    BOOST_TEST(run->LoadNetwork(networkIdentifier, std::move(optNetFused)) == Status::Success);
+    CHECK(run->LoadNetwork(networkIdentifier, std::move(optNetFused)) == Status::Success);
 
     //Creates structures for inputs and outputs.
     std::vector<T> inputDataFused = GetVector<T>(48, 1.0f, 0.1f);
@@ -235,8 +235,8 @@
 
     Graph& graphNotFused = GetGraphForTesting(optNetNotFused.get());
 
-    BOOST_CHECK(5 == graphNotFused.GetNumLayers());
-    BOOST_TEST(CheckSequence(graphNotFused.cbegin(),
+    CHECK(5 == graphNotFused.GetNumLayers());
+    CHECK(CheckSequence(graphNotFused.cbegin(),
                              graphNotFused.cend(),
                              &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<ConvLayerType>,
@@ -246,7 +246,7 @@
 
     // Load network into runtime
     NetworkId networkIdentifierNotFused;
-    BOOST_TEST(runNotFused->LoadNetwork(networkIdentifierNotFused, std::move(optNetNotFused)) == Status::Success);
+    CHECK(runNotFused->LoadNetwork(networkIdentifierNotFused, std::move(optNetNotFused)) == Status::Success);
 
     //Creates structures for inputs and outputs.
     std::vector<T> inputDataNotFused = GetVector<T>(48, 1.0f, 0.1f);
@@ -269,33 +269,34 @@
     runNotFused->EnqueueWorkload(networkIdentifierNotFused, inputTensorsNotFused, outputTensorsNotFused);
 
     // Check the output of the fused-convolution matches with the output of the batchNormm in the "NotFused" network
+    auto epsilon = T(tolerance);
     for (unsigned int n = 0; n < outputDataFused.size(); ++n)
     {
-        BOOST_CHECK_CLOSE(outputDataFused[n], outputDataNotFused[n], T(tolerance));
+        CHECK_EQ(outputDataFused[n], doctest::Approx(outputDataNotFused[n]).epsilon(epsilon));
     }
 }
 
 // This unit test needs the reference backend, it's not available if the reference backend is not built
 #if defined(ARMNNREF_ENABLED)
-BOOST_AUTO_TEST_CASE(FuseBatchNormIntoConv2DFloat32Test)
+TEST_CASE("FuseBatchNormIntoConv2DFloat32Test")
 {
     FuseBatchNormIntoConvTest<Conv2dTest, DataType::Float32>(false, 0.0001f, armnn::Compute::CpuRef);
 }
 
-BOOST_AUTO_TEST_CASE(FuseBatchNormIntoConv2DFloat16Test)
+TEST_CASE("FuseBatchNormIntoConv2DFloat16Test")
 {
     FuseBatchNormIntoConvTest<Conv2dTest, DataType::Float16>(false, 0.1f, armnn::Compute::CpuRef);
 }
 
-BOOST_AUTO_TEST_CASE(FuseBatchNormIntoDepthwiseConv2DFloat32Test)
+TEST_CASE("FuseBatchNormIntoDepthwiseConv2DFloat32Test")
 {
     FuseBatchNormIntoConvTest<DepthwiseConv2dTest, DataType::Float32>(true, 0.0001f,armnn::Compute::CpuRef);
 }
 
-BOOST_AUTO_TEST_CASE(FuseBatchNormIntoDepthwiseConv2DFloat16Test)
+TEST_CASE("FuseBatchNormIntoDepthwiseConv2DFloat16Test")
 {
     FuseBatchNormIntoConvTest<DepthwiseConv2dTest, DataType::Float16>(true, 0.1f,armnn::Compute::CpuRef);
 }
 #endif
 
-BOOST_AUTO_TEST_SUITE_END()
+}
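One subtlety worth flagging in the BOOST_CHECK_CLOSE conversions in the two fusion test files above: as far as I can tell, Boost's tolerance argument is a percentage while doctest's Approx::epsilon is a relative fraction, so reusing the same numeric tolerance makes the doctest check roughly 100x more permissive. A hedged sketch of the closer equivalent follows (values and names are illustrative only, not taken from the patch).

// Sketch only, under the assumption that BOOST_CHECK_CLOSE(a, b, t) means
// "within t percent" while doctest::Approx(b).epsilon(e) means "within the
// relative fraction e". Dividing the old tolerance by 100 keeps the strictness.
#include <doctest/doctest.h>

TEST_CASE("ApproxToleranceSketch")
{
    float fused     = 1.0000f;
    float notFused  = 1.0009f;   // about 0.09% apart
    float tolerance = 0.1f;      // old Boost-style tolerance, in percent

    CHECK(fused == doctest::Approx(notFused).epsilon(tolerance / 100.0f));
}
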
diff --git a/src/armnn/test/optimizations/InsertDebugLayerTests.cpp b/src/armnn/test/optimizations/InsertDebugLayerTests.cpp
index 38b6397..03d0d22 100644
--- a/src/armnn/test/optimizations/InsertDebugLayerTests.cpp
+++ b/src/armnn/test/optimizations/InsertDebugLayerTests.cpp
@@ -7,12 +7,13 @@
 
 #include <Optimizer.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
 using namespace armnn::optimizations;
 
-BOOST_AUTO_TEST_CASE(InsertDebugOptimizationTest)
+TEST_CASE("InsertDebugOptimizationTest")
 {
     armnn::Graph graph;
 
@@ -31,15 +32,15 @@
     input->GetOutputSlot().Connect(floor->GetInputSlot(0));
     floor->GetOutputSlot().Connect(output->GetInputSlot(0));
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>));
 
     // Run the optimizer
     armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(InsertDebugLayer()));
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::DebugLayer>, &IsLayerOfType<armnn::FloorLayer>,
                              &IsLayerOfType<armnn::DebugLayer>, &IsLayerOfType<armnn::OutputLayer>));
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
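
Since several of the optimization test files in this patch now open the same TEST_SUITE("Optimizer") scope, it is worth noting (my understanding of doctest, not stated in the patch) that suites are grouped purely by name, so these cases should all appear under one "Optimizer" suite and can be filtered together at run time; the binary name below is assumed.

// Assumed usage, shown as comments (binary name and exact invocation not taken from the patch):
//
//   ./UnitTests --test-suite=Optimizer            run every test case registered in "Optimizer"
//   ./UnitTests -ts=Optimizer -tc="*Permute*"     narrow further by test-case name pattern
//
// A case declared in any translation unit inside TEST_SUITE("Optimizer") { ... }
// is registered under that same suite name.
#include <doctest/doctest.h>

TEST_SUITE("Optimizer")
{
TEST_CASE("SuiteGroupingSketch")
{
    CHECK(true);   // lands in the same "Optimizer" suite as the cases in the files above
}
}
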
diff --git a/src/armnn/test/optimizations/MovePermuteUpTests.cpp b/src/armnn/test/optimizations/MovePermuteUpTests.cpp
index 13c6926..38a65a6 100644
--- a/src/armnn/test/optimizations/MovePermuteUpTests.cpp
+++ b/src/armnn/test/optimizations/MovePermuteUpTests.cpp
@@ -7,12 +7,13 @@
 
 #include <Optimizer.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
 using namespace armnn::optimizations;
 
-BOOST_AUTO_TEST_CASE(MovePermuteUpTest)
+TEST_CASE("MovePermuteUpTest")
 {
     const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float32);
     const armnn::TensorInfo permuted({ 1, 3, 5, 2 }, armnn::DataType::Float32);
@@ -66,7 +67,7 @@
         ->GetOutputHandler()
         .SetTensorInfo(info);
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::MultiplicationLayer>, &IsLayerOfType<armnn::MemCopyLayer>,
                              &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::FakeQuantizationLayer>,
@@ -76,7 +77,7 @@
     armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(MovePermuteUp()));
 
     // The permute is moved to the top. New permutes for layers with multiple inputs.
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::PermuteLayer>,
                              &IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::MultiplicationLayer>,
@@ -86,7 +87,7 @@
 
     std::list<std::string> testRelatedLayers = { permuteLayerName };
 
-    BOOST_TEST(CheckRelatedLayers<armnn::PermuteLayer>(graph, testRelatedLayers));
+    CHECK(CheckRelatedLayers<armnn::PermuteLayer>(graph, testRelatedLayers));
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/MoveTransposeUpTests.cpp b/src/armnn/test/optimizations/MoveTransposeUpTests.cpp
index cb41ff0..68d277a 100644
--- a/src/armnn/test/optimizations/MoveTransposeUpTests.cpp
+++ b/src/armnn/test/optimizations/MoveTransposeUpTests.cpp
@@ -7,12 +7,13 @@
 
 #include <Optimizer.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
 using namespace armnn::optimizations;
 
-BOOST_AUTO_TEST_CASE(MoveTransposeUpTest)
+TEST_CASE("MoveTransposeUpTest")
 {
     const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float32);
     const armnn::TensorInfo transposed({ 1, 3, 5, 2 }, armnn::DataType::Float32);
@@ -67,7 +68,7 @@
         ->GetOutputHandler()
         .SetTensorInfo(info);
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::MultiplicationLayer>, &IsLayerOfType<armnn::MemCopyLayer>,
                              &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::FakeQuantizationLayer>,
@@ -77,7 +78,7 @@
     armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(MoveTransposeUp()));
 
     // The transpose is moved to the top. New transposes for layers with multiple inputs.
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::TransposeLayer>,
                              &IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::MultiplicationLayer>,
@@ -87,7 +88,7 @@
 
     std::list<std::string> testRelatedLayers = { transposeLayerName };
 
-    BOOST_TEST(CheckRelatedLayers<armnn::TransposeLayer>(graph, testRelatedLayers));
+    CHECK(CheckRelatedLayers<armnn::TransposeLayer>(graph, testRelatedLayers));
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/OptimizeConsecutiveReshapesTests.cpp b/src/armnn/test/optimizations/OptimizeConsecutiveReshapesTests.cpp
index 8c3c435..694b103 100644
--- a/src/armnn/test/optimizations/OptimizeConsecutiveReshapesTests.cpp
+++ b/src/armnn/test/optimizations/OptimizeConsecutiveReshapesTests.cpp
@@ -7,12 +7,13 @@
 
 #include <Optimizer.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
 using namespace armnn::optimizations;
 
-BOOST_AUTO_TEST_CASE(OptimizeConsecutiveReshapesTest)
+TEST_CASE("OptimizeConsecutiveReshapesTest")
 {
     armnn::Graph graph;
 
@@ -39,7 +40,7 @@
         reshape1->GetOutputHandler().SetTensorInfo(info1);
         reshape2->GetOutputHandler().SetTensorInfo(info2);
 
-        BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+        CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                                  &IsLayerOfType<armnn::ReshapeLayer>, &IsLayerOfType<armnn::ReshapeLayer>,
                                  &IsLayerOfType<armnn::OutputLayer>));
 
@@ -53,13 +54,13 @@
         };
 
         // The two reshapes are replaced by a single equivalent reshape.
-        BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, checkReshape,
+        CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, checkReshape,
                                  &IsLayerOfType<armnn::OutputLayer>));
 
         // Check the new reshape layer has the other two reshapes as related layers
         std::list<std::string> testRelatedLayers = { reshape2Name, reshape1Name };
 
-        BOOST_TEST(CheckRelatedLayers<armnn::ReshapeLayer>(graph, testRelatedLayers));
+        CHECK(CheckRelatedLayers<armnn::ReshapeLayer>(graph, testRelatedLayers));
     }
 
     {
@@ -72,9 +73,9 @@
         armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(OptimizeConsecutiveReshapes()));
 
         // The two reshapes are removed.
-        BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+        CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                                  &IsLayerOfType<armnn::OutputLayer>));
     }
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/OptimizeInverseConversionsTests.cpp b/src/armnn/test/optimizations/OptimizeInverseConversionsTests.cpp
index d87113c..4b6dfe5 100644
--- a/src/armnn/test/optimizations/OptimizeInverseConversionsTests.cpp
+++ b/src/armnn/test/optimizations/OptimizeInverseConversionsTests.cpp
@@ -7,14 +7,15 @@
 
 #include <Optimizer.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
 using namespace armnn::optimizations;
 
-BOOST_AUTO_TEST_CASE(OptimizeInverseConversionsTest)
+TEST_CASE("OptimizeInverseConversionsTest")
 {
     armnn::Graph graph;
 
@@ -32,7 +33,7 @@
     graph.InsertNewLayer<armnn::ConvertFp16ToFp32Layer>(output->GetInputSlot(0), "convert3");
     graph.InsertNewLayer<armnn::ConvertFp32ToFp16Layer>(output->GetInputSlot(0), "convert4");
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::ConvertFp32ToFp16Layer>,
                              &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>, &IsLayerOfType<armnn::Convolution2dLayer>,
                              &IsLayerOfType<armnn::ConvertFp16ToFp32Layer>,
@@ -42,8 +43,8 @@
         graph, armnn::MakeOptimizations(OptimizeInverseConversionsFp16(), OptimizeInverseConversionsFp32()));
 
     // Check that all consecutive inverse conversions are removed
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::Convolution2dLayer>, &IsLayerOfType<armnn::OutputLayer>));
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp b/src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp
index 0664ef7..98c84d4 100644
--- a/src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp
+++ b/src/armnn/test/optimizations/OptimizeInversePermutesTests.cpp
@@ -7,14 +7,15 @@
 
 #include <Optimizer.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
 using namespace armnn::optimizations;
 
-BOOST_AUTO_TEST_CASE(OptimizeInversePermutesTest)
+TEST_CASE("OptimizeInversePermutesTest")
 {
     armnn::Graph graph;
 
@@ -28,18 +29,18 @@
     graph.InsertNewLayer<armnn::PermuteLayer>(output->GetInputSlot(0), armnn::PermuteDescriptor({ 0, 3, 1, 2 }),
                                               "perm0312");
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::PermuteLayer>,
                              &IsLayerOfType<armnn::OutputLayer>));
 
     armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(OptimizeInversePermutes()));
 
     // The permutes are removed.
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::OutputLayer>));
 }
 
-BOOST_AUTO_TEST_CASE(OptimizeInverseTransposesTest)
+TEST_CASE("OptimizeInverseTransposesTest")
 {
     armnn::Graph graph;
 
@@ -55,15 +56,15 @@
                                                 armnn::TransposeDescriptor({ 0, 2, 3, 1 }),
                                                 "transpose0231");
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::TransposeLayer>,
                              &IsLayerOfType<armnn::OutputLayer>));
 
     armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(OptimizeInverseTransposes()));
 
     // The permutes are removed.
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::OutputLayer>));
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp b/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp
index ab990e7..e91e16f 100644
--- a/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp
+++ b/src/armnn/test/optimizations/PermuteAndBatchToSpaceAsDepthToSpaceTests.cpp
@@ -8,11 +8,12 @@
 #include <Network.hpp>
 #include <Optimizer.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
 using namespace armnn::optimizations;
 
 namespace
@@ -83,13 +84,13 @@
 
 /// Tests that the optimization performed by PermuteAndBatchToSpaceAsDepthToSpace is as expected.
 /// Note this does not ensure the correctness of the optimization - that is done in the below test.
-BOOST_AUTO_TEST_CASE(PermuteAndBatchToSpaceAsDepthToSpaceOptimizerTest)
+TEST_CASE("PermuteAndBatchToSpaceAsDepthToSpaceOptimizerTest")
 {
     std::unique_ptr<NetworkImpl> network = CreateTestNetworkImpl();
     Graph graph         = network.get()->GetGraph();
 
     // Confirm initial graph is as we expect
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, &IsLayerOfType<PermuteLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, &IsLayerOfType<PermuteLayer>,
                              &IsLayerOfType<BatchToSpaceNdLayer>, &IsLayerOfType<OutputLayer>));
 
     // Perform the optimization which should merge the two layers into a DepthToSpace
@@ -103,23 +104,23 @@
                layer->GetOutputHandler().GetTensorInfo() == TensorInfo({ 1, 4, 6, 1 }, DataType::Float32);
     };
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, checkDepthToSpace,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, checkDepthToSpace,
                              &IsLayerOfType<OutputLayer>));
 
     // Check the new layer has the two merged layers listed as related layers
     std::list<std::string> testRelatedLayers = { "batchToSpace", "permute" };
-    BOOST_TEST(CheckRelatedLayers<DepthToSpaceLayer>(graph, testRelatedLayers));
+    CHECK(CheckRelatedLayers<DepthToSpaceLayer>(graph, testRelatedLayers));
 }
 
 /// Tests that the optimization performed by PermuteAndBatchToSpaceAsDepthToSpace is as expected.
 /// Note this does not ensure the correctness of the optimization - that is done in the below test.
-BOOST_AUTO_TEST_CASE(TransposeAndBatchToSpaceAsDepthToSpaceOptimizerTest)
+TEST_CASE("TransposeAndBatchToSpaceAsDepthToSpaceOptimizerTest")
 {
     std::unique_ptr<NetworkImpl> network = CreateTransposeTestNetworkImpl();
     Graph graph         = network.get()->GetGraph();
 
     // Confirm initial graph is as we expect
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, &IsLayerOfType<TransposeLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, &IsLayerOfType<TransposeLayer>,
                              &IsLayerOfType<BatchToSpaceNdLayer>, &IsLayerOfType<OutputLayer>));
 
     // Perform the optimization which should merge the two layers into a DepthToSpace
@@ -133,12 +134,12 @@
                layer->GetOutputHandler().GetTensorInfo() == TensorInfo({ 1, 4, 6, 1 }, DataType::Float32);
     };
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, checkDepthToSpace,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<InputLayer>, checkDepthToSpace,
                              &IsLayerOfType<OutputLayer>));
 
     // Check the new layer has the two merged layers listed as related layers
     std::list<std::string> testRelatedLayers = { "batchToSpace", "permute" };
-    BOOST_TEST(CheckRelatedLayers<DepthToSpaceLayer>(graph, testRelatedLayers));
+    CHECK(CheckRelatedLayers<DepthToSpaceLayer>(graph, testRelatedLayers));
 }
 
 // This unit test needs the reference backend, it's not available if the reference backend is not built
@@ -208,7 +209,7 @@
 
 /// Tests that a optimization performed by PermuteAndBatchToSpaceAsDepthToSpace does not change the behaviour
 /// of the network (i.e. it still produces the correct output).
-BOOST_AUTO_TEST_CASE(PermuteAndBatchToSpaceAsDepthToSpaceCorrectnessTest)
+TEST_CASE("PermuteAndBatchToSpaceAsDepthToSpaceCorrectnessTest")
 {
     INetworkPtr network = CreateTestNetwork();
 
@@ -217,7 +218,7 @@
 
     // Confirm that the optimization has actually taken place
     const Graph& optGraph = GetGraphForTesting(optimizedNetwork.get());
-    BOOST_TEST(CheckSequence(optGraph.cbegin(), optGraph.cend(), &IsLayerOfType<InputLayer>,
+    CHECK(CheckSequence(optGraph.cbegin(), optGraph.cend(), &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<DepthToSpaceLayer>, &IsLayerOfType<OutputLayer>));
 
     // Load the graph into a runtime so we can check it produces the correct output
@@ -250,12 +251,12 @@
         -3.0f, -4.0f,   -30.0f, -40.0f,    -300.0f, -400.0f,
         // clang-format on
     };
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
 }
 
 /// Tests that a optimization performed by PermuteAndBatchToSpaceAsDepthToSpace does not change the behaviour
 /// of the network (i.e. it still produces the correct output).
-BOOST_AUTO_TEST_CASE(TransposeAndBatchToSpaceAsDepthToSpaceCorrectnessTest)
+TEST_CASE("TransposeAndBatchToSpaceAsDepthToSpaceCorrectnessTest")
 {
     INetworkPtr network = CreateTransposeTestNetwork();
 
@@ -264,7 +265,7 @@
 
     // Confirm that the optimization has actually taken place
     const Graph& optGraph = GetGraphForTesting(optimizedNetwork.get());
-    BOOST_TEST(CheckSequence(optGraph.cbegin(), optGraph.cend(), &IsLayerOfType<InputLayer>,
+    CHECK(CheckSequence(optGraph.cbegin(), optGraph.cend(), &IsLayerOfType<InputLayer>,
                              &IsLayerOfType<DepthToSpaceLayer>, &IsLayerOfType<OutputLayer>));
 
     // Load the graph into a runtime so we can check it produces the correct output
@@ -297,8 +298,8 @@
             -3.0f, -4.0f,   -30.0f, -40.0f,    -300.0f, -400.0f,
             // clang-format on
     };
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
 }
 #endif
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/PermuteAsReshapeTests.cpp b/src/armnn/test/optimizations/PermuteAsReshapeTests.cpp
index 3f3c254..fdd0a6d 100644
--- a/src/armnn/test/optimizations/PermuteAsReshapeTests.cpp
+++ b/src/armnn/test/optimizations/PermuteAsReshapeTests.cpp
@@ -7,14 +7,15 @@
 
 #include <Optimizer.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
 using namespace armnn::optimizations;
 
-BOOST_AUTO_TEST_CASE(PermuteAsReshapeTest)
+TEST_CASE("PermuteAsReshapeTest")
 {
     armnn::Graph graph;
 
@@ -36,7 +37,7 @@
         ->GetOutputHandler()
         .SetTensorInfo(infoOut);
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::OutputLayer>));
 
     armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(PermuteAsReshape()));
@@ -50,11 +51,11 @@
                (reshapeLayer->GetOutputHandler().GetTensorInfo().GetShape() == infoOut.GetShape());
     };
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, checkReshape,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, checkReshape,
                              &IsLayerOfType<armnn::OutputLayer>));
 
     std::list<std::string> testRelatedLayers = { permuteLayerName };
-    BOOST_TEST(CheckRelatedLayers<armnn::ReshapeLayer>(graph, testRelatedLayers));
+    CHECK(CheckRelatedLayers<armnn::ReshapeLayer>(graph, testRelatedLayers));
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp b/src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp
index b42c0a2..df9a0db 100644
--- a/src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp
+++ b/src/armnn/test/optimizations/ReduceMultipleAxesTests.cpp
@@ -8,12 +8,12 @@
 
 #include <armnn/INetwork.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(Optimizer)
-
+TEST_SUITE("Optimizer_ReduceMultipleAxes")
+{
 INetworkPtr CreateSimpleReduceNetwork(ReduceDescriptor reduceDescriptor,
                                       TensorShape& inputShape,
                                       TensorShape& outputShape)
@@ -22,10 +22,10 @@
     INetworkPtr network = INetwork::Create();
 
     const std::string layerName("reduce_layer");
-    const TensorInfo inputInfo (inputShape, DataType::Float32);
+    const TensorInfo inputInfo(inputShape, DataType::Float32);
     const TensorInfo outputInfo(outputShape, DataType::Float32);
 
-    IConnectableLayer* const inputLayer  = network->AddInputLayer(0);
+    IConnectableLayer* const inputLayer = network->AddInputLayer(0);
     IConnectableLayer* const reduceLayer = network->AddReduceLayer(reduceDescriptor, layerName.c_str());
     IConnectableLayer* const outputLayer1 = network->AddOutputLayer(0);
     IConnectableLayer* const outputLayer2 = network->AddOutputLayer(1);
@@ -56,37 +56,37 @@
     Graph& graph = GetGraphForTesting(optNet.get());
     if (numOfAxes == 2)
     {
-        BOOST_CHECK(graph.GetNumLayers() == 5);
-        BOOST_TEST(CheckSequence(graph.cbegin(),
-                                 graph.cend(),
-                                 &IsLayerOfType<InputLayer>,
-                                 &IsLayerOfType<ReduceLayer>,
-                                 &IsLayerOfType<ReduceLayer>,
-                                 &IsLayerOfType<OutputLayer>,
-                                 &IsLayerOfType<OutputLayer>));
-    }
-    else
+        CHECK(graph.GetNumLayers() == 5);
+        CHECK(CheckSequence(graph.cbegin(),
+                            graph.cend(),
+                            &IsLayerOfType<InputLayer>,
+                            &IsLayerOfType<ReduceLayer>,
+                            &IsLayerOfType<ReduceLayer>,
+                            &IsLayerOfType<OutputLayer>,
+                            &IsLayerOfType<OutputLayer>));
+    }
+    else
     {
-        BOOST_CHECK(graph.GetNumLayers() == 6);
-        BOOST_TEST(CheckSequence(graph.cbegin(),
-                                 graph.cend(),
-                                 &IsLayerOfType<InputLayer>,
-                                 &IsLayerOfType<ReduceLayer>,
-                                 &IsLayerOfType<ReduceLayer>,
-                                 &IsLayerOfType<ReduceLayer>,
-                                 &IsLayerOfType<OutputLayer>,
-                                 &IsLayerOfType<OutputLayer>));
+        CHECK(graph.GetNumLayers() == 6);
+        CHECK(CheckSequence(graph.cbegin(),
+                            graph.cend(),
+                            &IsLayerOfType<InputLayer>,
+                            &IsLayerOfType<ReduceLayer>,
+                            &IsLayerOfType<ReduceLayer>,
+                            &IsLayerOfType<ReduceLayer>,
+                            &IsLayerOfType<OutputLayer>,
+                            &IsLayerOfType<OutputLayer>));
     }
 
     // Get last layer in new chain, layers name follow 0, 1, 2 pattern
     std::string layerName = "reduce_layer_" + std::to_string(numOfAxes - 1);
     Layer* const reduceLayer = GetFirstLayerWithName(graph, layerName);
-    BOOST_TEST(reduceLayer);
+    CHECK(reduceLayer);
     auto reduceTensorInfo = reduceLayer->GetOutputSlot().GetTensorInfo();
 
     // Tensorshape and the data type are correct
-    BOOST_TEST((reduceTensorInfo.GetShape() == outputShape));
-    BOOST_TEST((reduceTensorInfo.GetDataType() == DataType::Float32));
+    CHECK((reduceTensorInfo.GetShape() == outputShape));
+    CHECK((reduceTensorInfo.GetDataType() == DataType::Float32));
 
     // Load network into runtime
     NetworkId networkIdentifier;
@@ -95,45 +94,45 @@
     // Create input and output tensors
     std::vector<float> outputData(expectedOutput.size());
     InputTensors inputTensors
-    {
-        { 0, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data()) }
-    };
+        {
+            {0, armnn::ConstTensor(run->GetInputTensorInfo(networkIdentifier, 0), inputData.data())}
+        };
     OutputTensors outputTensors
-    {
-        { 0, armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputData.data()) },
-        { 1, armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 1), outputData.data()) }
-    };
+        {
+            {0, armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputData.data())},
+            {1, armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 1), outputData.data())}
+        };
 
     // Run inference
     run->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
 
     // Checks the results
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
 }
 
 void ReduceSumWithTwoAxesKeepDimsTest(Compute backendId)
 {
     armnn::ReduceDescriptor reduceDescriptor;
-    reduceDescriptor.m_vAxis = { 1, 2 };
+    reduceDescriptor.m_vAxis = {1, 2};
     reduceDescriptor.m_KeepDims = true;
     reduceDescriptor.m_ReduceOperation = armnn::ReduceOperation::Sum;
 
-    TensorShape inputShape  = { 1, 3, 2, 4 };
-    TensorShape outputShape = { 1, 1, 1, 4 };
+    TensorShape inputShape = {1, 3, 2, 4};
+    TensorShape outputShape = {1, 1, 1, 4};
 
     // Construct ArmNN network
     INetworkPtr network = CreateSimpleReduceNetwork(reduceDescriptor, inputShape, outputShape);
 
     // Creates structures for input & output.
-    const std::vector<float> inputData({  1.0f,   2.0f,   3.0f,   4.0f,
-                                          5.0f,   6.0f,   7.0f,   8.0f,
+    const std::vector<float> inputData({1.0f, 2.0f, 3.0f, 4.0f,
+                                        5.0f, 6.0f, 7.0f, 8.0f,
 
-                                          10.0f,  20.0f,  30.0f,  40.0f,
-                                          50.0f,  60.0f,  70.0f,  80.0f,
+                                        10.0f, 20.0f, 30.0f, 40.0f,
+                                        50.0f, 60.0f, 70.0f, 80.0f,
 
-                                          100.0f, 200.0f, 300.0f, 400.0f,
-                                          500.0f, 600.0f, 700.0f, 800.0f });
-    const std::vector<float> expectedOutput({ 666.0f, 888.0f, 1110.0f, 1332.0f });
+                                        100.0f, 200.0f, 300.0f, 400.0f,
+                                        500.0f, 600.0f, 700.0f, 800.0f});
+    const std::vector<float> expectedOutput({666.0f, 888.0f, 1110.0f, 1332.0f});
 
     ReduceWithMultipleAxesTest(network,
                                outputShape,
@@ -146,26 +145,26 @@
 void ReduceSumWithTwoAxesTest(Compute backendId)
 {
     armnn::ReduceDescriptor reduceDescriptor;
-    reduceDescriptor.m_vAxis = { 1, 2 };
+    reduceDescriptor.m_vAxis = {1, 2};
     reduceDescriptor.m_KeepDims = false;
     reduceDescriptor.m_ReduceOperation = armnn::ReduceOperation::Sum;
 
-    TensorShape inputShape  = { 1, 3, 2, 4 };
-    TensorShape outputShape = { 1, 4 };
+    TensorShape inputShape = {1, 3, 2, 4};
+    TensorShape outputShape = {1, 4};
 
     // Construct ArmNN network
     INetworkPtr network = CreateSimpleReduceNetwork(reduceDescriptor, inputShape, outputShape);
 
     // Creates structures for input & output.
-    const std::vector<float> inputData({  1.0f,   2.0f,   3.0f,   4.0f,
-                                          5.0f,   6.0f,   7.0f,   8.0f,
+    const std::vector<float> inputData({1.0f, 2.0f, 3.0f, 4.0f,
+                                        5.0f, 6.0f, 7.0f, 8.0f,
 
-                                          10.0f,  20.0f,  30.0f,  40.0f,
-                                          50.0f,  60.0f,  70.0f,  80.0f,
+                                        10.0f, 20.0f, 30.0f, 40.0f,
+                                        50.0f, 60.0f, 70.0f, 80.0f,
 
-                                          100.0f, 200.0f, 300.0f, 400.0f,
-                                          500.0f, 600.0f, 700.0f, 800.0f });
-    const std::vector<float> expectedOutput({ 666.0f, 888.0f, 1110.0f, 1332.0f });
+                                        100.0f, 200.0f, 300.0f, 400.0f,
+                                        500.0f, 600.0f, 700.0f, 800.0f});
+    const std::vector<float> expectedOutput({666.0f, 888.0f, 1110.0f, 1332.0f});
 
     ReduceWithMultipleAxesTest(network,
                                outputShape,
@@ -178,29 +177,29 @@
 void ReduceSumWithThreeAxesKeepDimsTest(Compute backendId)
 {
     armnn::ReduceDescriptor reduceDescriptor;
-    reduceDescriptor.m_vAxis = { 0, 2, 3 };
+    reduceDescriptor.m_vAxis = {0, 2, 3};
     reduceDescriptor.m_KeepDims = true;
     reduceDescriptor.m_ReduceOperation = armnn::ReduceOperation::Sum;
 
-    TensorShape inputShape  = { 2, 2, 2, 2 };
-    TensorShape outputShape = { 1, 2, 1, 1 };
+    TensorShape inputShape = {2, 2, 2, 2};
+    TensorShape outputShape = {1, 2, 1, 1};
 
     // Construct ArmNN network
     INetworkPtr network = CreateSimpleReduceNetwork(reduceDescriptor, inputShape, outputShape);
 
     // Creates structures for input & output.
-    const std::vector<float> inputData({  1.0f,   2.0f,
-                                          3.0f,   4.0f,
+    const std::vector<float> inputData({1.0f, 2.0f,
+                                        3.0f, 4.0f,
 
-                                          5.0f,   6.0f,
-                                          7.0f,   8.0f,
+                                        5.0f, 6.0f,
+                                        7.0f, 8.0f,
 
-                                          10.0f,  20.0f,
-                                          30.0f,  40.0f,
+                                        10.0f, 20.0f,
+                                        30.0f, 40.0f,
 
-                                          50.0f,  60.0f,
-                                          70.0f,  80.0f });
-    const std::vector<float> expectedOutput({ 110.0f, 286.0f });
+                                        50.0f, 60.0f,
+                                        70.0f, 80.0f});
+    const std::vector<float> expectedOutput({110.0f, 286.0f});
 
     ReduceWithMultipleAxesTest(network,
                                outputShape,
@@ -213,29 +212,29 @@
 void ReduceSumWithThreeAxesTest(Compute backendId)
 {
     armnn::ReduceDescriptor reduceDescriptor;
-    reduceDescriptor.m_vAxis = { 0, 2, 3 };
+    reduceDescriptor.m_vAxis = {0, 2, 3};
     reduceDescriptor.m_KeepDims = false;
     reduceDescriptor.m_ReduceOperation = armnn::ReduceOperation::Sum;
 
-    TensorShape inputShape  = { 2, 2, 2, 2 };
-    TensorShape outputShape = { 2 };
+    TensorShape inputShape = {2, 2, 2, 2};
+    TensorShape outputShape = {2};
 
     // Construct ArmNN network
     INetworkPtr network = CreateSimpleReduceNetwork(reduceDescriptor, inputShape, outputShape);
 
     // Creates structures for input & output.
-    const std::vector<float> inputData({  1.0f,   2.0f,
-                                          3.0f,   4.0f,
+    const std::vector<float> inputData({1.0f, 2.0f,
+                                        3.0f, 4.0f,
 
-                                          5.0f,   6.0f,
-                                          7.0f,   8.0f,
+                                        5.0f, 6.0f,
+                                        7.0f, 8.0f,
 
-                                          10.0f,  20.0f,
-                                          30.0f,  40.0f,
+                                        10.0f, 20.0f,
+                                        30.0f, 40.0f,
 
-                                          50.0f,  60.0f,
-                                          70.0f,  80.0f });
-    const std::vector<float> expectedOutput({ 110.0f, 286.0f });
+                                        50.0f, 60.0f,
+                                        70.0f, 80.0f});
+    const std::vector<float> expectedOutput({110.0f, 286.0f});
 
     ReduceWithMultipleAxesTest(network,
                                outputShape,
@@ -247,47 +246,47 @@
 
 using namespace armnn;
 #if defined(ARMCOMPUTENEON_ENABLED)
-BOOST_AUTO_TEST_CASE(ReduceSumWithTwoAxesKeepDimsCpuAccTest)
+TEST_CASE("ReduceSumWithTwoAxesKeepDimsCpuAccTest")
 {
     ReduceSumWithTwoAxesKeepDimsTest(Compute::CpuAcc);
 }
 
-BOOST_AUTO_TEST_CASE(ReduceSumWithTwoAxesCpuAccTest)
+TEST_CASE("ReduceSumWithTwoAxesCpuAccTest")
 {
     ReduceSumWithTwoAxesTest(Compute::CpuAcc);
 }
 
-BOOST_AUTO_TEST_CASE(ReduceSumWithThreeAxesKeepDimsCpuAccTest)
+TEST_CASE("ReduceSumWithThreeAxesKeepDimsCpuAccTest")
 {
     ReduceSumWithThreeAxesKeepDimsTest(Compute::CpuAcc);
 }
 
-BOOST_AUTO_TEST_CASE(ReduceSumWithThreeAxesCpuAccTest)
+TEST_CASE("ReduceSumWithThreeAxesCpuAccTest")
 {
     ReduceSumWithThreeAxesTest(Compute::CpuAcc);
 }
 #endif
 
 #if defined(ARMCOMPUTECL_ENABLED)
-BOOST_AUTO_TEST_CASE(ReduceSumWithTwoAxesKeepDimsGpuAccTest)
+TEST_CASE("ReduceSumWithTwoAxesKeepDimsGpuAccTest")
 {
     ReduceSumWithTwoAxesKeepDimsTest(Compute::GpuAcc);
 }
 
-BOOST_AUTO_TEST_CASE(ReduceSumWithTwoAxesGpuAccTest)
+TEST_CASE("ReduceSumWithTwoAxesGpuAccTest")
 {
     ReduceSumWithTwoAxesTest(Compute::GpuAcc);
 }
 
-BOOST_AUTO_TEST_CASE(ReduceSumWithThreeAxesKeepDimsGpuAccTest)
+TEST_CASE("ReduceSumWithThreeAxesKeepDimsGpuAccTest")
 {
     ReduceSumWithThreeAxesKeepDimsTest(Compute::GpuAcc);
 }
 
-BOOST_AUTO_TEST_CASE(ReduceSumWithThreeAxesGpuAccTest)
+TEST_CASE("ReduceSumWithThreeAxesGpuAccTest")
 {
     ReduceSumWithThreeAxesTest(Compute::GpuAcc);
 }
 #endif
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp b/src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp
index 1c97267..069d284 100644
--- a/src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp
+++ b/src/armnn/test/optimizations/SquashEqualSiblingsTests.cpp
@@ -7,14 +7,15 @@
 
 #include <Optimizer.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
 using namespace armnn::optimizations;
 
-BOOST_AUTO_TEST_CASE(SquashEqualSiblingsTest)
+TEST_CASE("SquashEqualSiblingsTest")
 {
     armnn::Graph graph;
 
@@ -54,7 +55,7 @@
     layer->GetOutputSlot().Connect(graph.AddLayer<armnn::OutputLayer>(outputId++, "")->GetInputSlot(0));
     input->GetOutputSlot().Connect(layer->GetInputSlot(0));
 
-    BOOST_TEST(CheckSequence(
+    CHECK(CheckSequence(
         graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, &IsLayerOfType<armnn::PermuteLayer>,
         &IsLayerOfType<armnn::ReshapeLayer>, &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::ReshapeLayer>,
         &IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::OutputLayer>, &IsLayerOfType<armnn::OutputLayer>,
@@ -64,11 +65,11 @@
 
     // The permutes and reshapes are squashed.
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::PermuteLayer>, &IsLayerOfType<armnn::ReshapeLayer>,
                              &IsLayerOfType<armnn::FloorLayer>, &IsLayerOfType<armnn::OutputLayer>,
                              &IsLayerOfType<armnn::OutputLayer>, &IsLayerOfType<armnn::OutputLayer>,
                              &IsLayerOfType<armnn::OutputLayer>, &IsLayerOfType<armnn::OutputLayer>));
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnn/test/optimizations/TransposeAsReshapeTests.cpp b/src/armnn/test/optimizations/TransposeAsReshapeTests.cpp
index 1c9f15c..5d1d950 100644
--- a/src/armnn/test/optimizations/TransposeAsReshapeTests.cpp
+++ b/src/armnn/test/optimizations/TransposeAsReshapeTests.cpp
@@ -7,14 +7,15 @@
 
 #include <Optimizer.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(Optimizer)
+TEST_SUITE("Optimizer")
+{
 using namespace armnn::optimizations;
 
-BOOST_AUTO_TEST_CASE(TransposeAsReshapeTest)
+TEST_CASE("TransposeAsReshapeTest")
 {
     armnn::Graph graph;
 
@@ -36,7 +37,7 @@
             ->GetOutputHandler()
             .SetTensorInfo(infoOut);
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>,
                              &IsLayerOfType<armnn::TransposeLayer>, &IsLayerOfType<armnn::OutputLayer>));
 
     armnn::Optimizer::Pass(graph, armnn::MakeOptimizations(TransposeAsReshape()));
@@ -50,11 +51,11 @@
                (reshapeLayer->GetOutputHandler().GetTensorInfo().GetShape() == infoOut.GetShape());
     };
 
-    BOOST_TEST(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, checkReshape,
+    CHECK(CheckSequence(graph.cbegin(), graph.cend(), &IsLayerOfType<armnn::InputLayer>, checkReshape,
                              &IsLayerOfType<armnn::OutputLayer>));
 
     std::list<std::string> testRelatedLayers = { transposeLayerName };
-    BOOST_TEST(CheckRelatedLayers<armnn::ReshapeLayer>(graph, testRelatedLayers));
+    CHECK(CheckRelatedLayers<armnn::ReshapeLayer>(graph, testRelatedLayers));
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnnDeserializer/test/DeserializeAbs.cpp b/src/armnnDeserializer/test/DeserializeAbs.cpp
index 97f9b50..bb03ecd 100644
--- a/src/armnnDeserializer/test/DeserializeAbs.cpp
+++ b/src/armnnDeserializer/test/DeserializeAbs.cpp
@@ -3,14 +3,15 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include <armnnDeserializer/IDeserializer.hpp>
 
+#include <doctest/doctest.h>
+
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_Abs")
+{
     struct AbsFixture : public ParserFlatbuffersSerializeFixture
     {
         explicit AbsFixture(const std::string &inputShape,
@@ -108,7 +109,7 @@
         {}
     };
 
-    BOOST_FIXTURE_TEST_CASE(SimpleAbsTest, SimpleAbsFixture)
+    TEST_CASE_FIXTURE(SimpleAbsFixture, "SimpleAbsTest")
     {
         RunTest<4, armnn::DataType::Float32>(
                 0,
@@ -116,4 +117,4 @@
                 {{"OutputLayer", { 100.0f, 50.5f, 25.9999f, 0.5f , 0.0f, 1.5555f, 25.5f, 100.0f }}});
     }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
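Aside: for fixture-based tests the argument order is reversed during conversion: BOOST_FIXTURE_TEST_CASE(TestName, FixtureType) becomes TEST_CASE_FIXTURE(FixtureType, "TestName"). A standalone sketch of the doctest side, using an illustrative fixture that is not part of ArmNN:

#include <doctest/doctest.h>

// Illustrative fixture; doctest constructs a fresh instance for every test case that uses it.
struct CounterFixture
{
    int m_Count = 0;
};

TEST_SUITE("FixtureExample")
{
TEST_CASE_FIXTURE(CounterFixture, "StartsAtZero")
{
    // The test body is compiled as a member of a class derived from CounterFixture,
    // so fixture members are accessed directly, exactly as in the converted tests above.
    CHECK(m_Count == 0);
    ++m_Count;
    CHECK(m_Count == 1);
}
}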
diff --git a/src/armnnDeserializer/test/DeserializeActivation.cpp b/src/armnnDeserializer/test/DeserializeActivation.cpp
index a5f71c9..dc3dcb4 100644
--- a/src/armnnDeserializer/test/DeserializeActivation.cpp
+++ b/src/armnnDeserializer/test/DeserializeActivation.cpp
@@ -3,14 +3,15 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include <armnnDeserializer/IDeserializer.hpp>
 
+#include <doctest/doctest.h>
+
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(DeserializeParser)
-
+TEST_SUITE("DeserializeParser_Activation")
+{
 struct ActivationFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit ActivationFixture(const std::string& inputShape,
@@ -141,7 +142,7 @@
 };
 
 
-BOOST_FIXTURE_TEST_CASE(ActivationReluQuantisedAsymm8, SimpleActivationFixture)
+TEST_CASE_FIXTURE(SimpleActivationFixture, "ActivationReluQuantisedAsymm8")
 {
     RunTest<4, armnn::DataType::QAsymmU8>(
             0,
@@ -149,7 +150,7 @@
             {{"OutputLayer", {10, 0, 2, 0}}});
 }
 
-BOOST_FIXTURE_TEST_CASE(ActivationReluFloat32, SimpleActivationFixture2)
+TEST_CASE_FIXTURE(SimpleActivationFixture2, "ActivationReluFloat32")
 {
     RunTest<4, armnn::DataType::Float32>(
             0,
@@ -158,7 +159,7 @@
 }
 
 
-BOOST_FIXTURE_TEST_CASE(ActivationBoundedReluQuantisedAsymm8, SimpleActivationFixture3)
+TEST_CASE_FIXTURE(SimpleActivationFixture3, "ActivationBoundedReluQuantisedAsymm8")
 {
     RunTest<4, armnn::DataType::QAsymmU8>(
             0,
@@ -166,7 +167,7 @@
             {{"OutputLayer", {5, 0, 2, 0}}});
 }
 
-BOOST_FIXTURE_TEST_CASE(ActivationBoundedReluFloat32, SimpleActivationFixture4)
+TEST_CASE_FIXTURE(SimpleActivationFixture4, "ActivationBoundedReluFloat32")
 {
     RunTest<4, armnn::DataType::Float32>(
             0,
@@ -174,4 +175,4 @@
             {{"OutputLayer", {5, 0, 5, 3}}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnDeserializer/test/DeserializeAdd.cpp b/src/armnnDeserializer/test/DeserializeAdd.cpp
index 0161208..b2a645b 100644
--- a/src/armnnDeserializer/test/DeserializeAdd.cpp
+++ b/src/armnnDeserializer/test/DeserializeAdd.cpp
@@ -3,16 +3,17 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include <armnnDeserializer/IDeserializer.hpp>
 
 #include <armnn/utility/IgnoreUnused.hpp>
 
+#include <doctest/doctest.h>
+
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_Add")
+{
 struct AddFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit AddFixture(const std::string & inputShape1,
@@ -142,7 +143,7 @@
                                      "Float32") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(AddQuantisedAsymm8, SimpleAddFixture)
+TEST_CASE_FIXTURE(SimpleAddFixture, "AddQuantisedAsymm8")
 {
   RunTest<2, armnn::DataType::QAsymmU8>(
       0,
@@ -151,7 +152,7 @@
       {{"OutputLayer", { 4, 6, 8, 10 }}});
 }
 
-BOOST_FIXTURE_TEST_CASE(AddFloat32, SimpleAddFixture2)
+TEST_CASE_FIXTURE(SimpleAddFixture2, "AddFloat32")
 {
     RunTest<4, armnn::DataType::Float32>(
     0,
@@ -160,4 +161,4 @@
     {{"OutputLayer", { 116, 93, 236, 15 }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnDeserializer/test/DeserializeArgMinMax.cpp b/src/armnnDeserializer/test/DeserializeArgMinMax.cpp
index 6358b53..5a79228 100644
--- a/src/armnnDeserializer/test/DeserializeArgMinMax.cpp
+++ b/src/armnnDeserializer/test/DeserializeArgMinMax.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include "../Deserializer.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(DeserializeParser)
-
+TEST_SUITE("DeserializeParser_ArgMinMax")
+{
 struct ArgMinMaxFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit ArgMinMaxFixture(const std::string& inputShape,
@@ -123,7 +122,7 @@
                                                 "Max") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ArgMinMax, SimpleArgMinMaxFixture)
+TEST_CASE_FIXTURE(SimpleArgMinMaxFixture, "ArgMinMax")
 {
     RunTest<3, armnn::DataType::Float32, armnn::DataType::Signed64>(
             0,
@@ -131,4 +130,4 @@
             {{"OutputLayer",{ 3l }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnDeserializer/test/DeserializeBatchNormalization.cpp b/src/armnnDeserializer/test/DeserializeBatchNormalization.cpp
index ba3f01e..03eb371 100644
--- a/src/armnnDeserializer/test/DeserializeBatchNormalization.cpp
+++ b/src/armnnDeserializer/test/DeserializeBatchNormalization.cpp
@@ -3,14 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include <armnnDeserializer/IDeserializer.hpp>
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_BatchNormalization")
+{
 struct BatchNormalizationFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit BatchNormalizationFixture(const std::string &inputShape,
@@ -159,7 +158,7 @@
                                                  "NHWC"){}
 };
 
-BOOST_FIXTURE_TEST_CASE(BatchNormalizationFloat32, BatchNormFixture)
+TEST_CASE_FIXTURE(BatchNormFixture, "BatchNormalizationFloat32")
 {
     RunTest<4, armnn::DataType::Float32>(0,
                                          {{"InputLayer", { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f }}},
@@ -168,4 +167,4 @@
                                            1.4138602f,  2.12079024f,  2.8277204f }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnDeserializer/test/DeserializeBatchToSpaceNd.cpp b/src/armnnDeserializer/test/DeserializeBatchToSpaceNd.cpp
index 2105792..fea695d 100644
--- a/src/armnnDeserializer/test/DeserializeBatchToSpaceNd.cpp
+++ b/src/armnnDeserializer/test/DeserializeBatchToSpaceNd.cpp
@@ -3,14 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include <armnnDeserializer/IDeserializer.hpp>
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_BatchToSpaceND")
+{
 struct BatchToSpaceNdFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit BatchToSpaceNdFixture(const std::string &inputShape,
@@ -116,7 +115,7 @@
                                                           "Float32") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SimpleBatchToSpaceNdFloat32, SimpleBatchToSpaceNdFixture)
+TEST_CASE_FIXTURE(SimpleBatchToSpaceNdFixture, "SimpleBatchToSpaceNdFloat32")
 {
     RunTest<4, armnn::DataType::Float32>(0,
                                          {
@@ -133,4 +132,4 @@
                                          });
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnDeserializer/test/DeserializeCast.cpp b/src/armnnDeserializer/test/DeserializeCast.cpp
index c8e3e10..b56b6ce 100644
--- a/src/armnnDeserializer/test/DeserializeCast.cpp
+++ b/src/armnnDeserializer/test/DeserializeCast.cpp
@@ -9,13 +9,10 @@
 #include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
-#include <boost/test/unit_test.hpp>
-
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
-
+TEST_SUITE("Deserializer_Cast")
+{
 struct CastFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit CastFixture(const std::string& inputShape,
@@ -112,12 +109,12 @@
                                       "Float32") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SimpleCast, SimpleCastFixture)
+TEST_CASE_FIXTURE(SimpleCastFixture, "SimpleCast")
 {
-RunTest<2, armnn::DataType::Signed32 , armnn::DataType::Float32>(
-0,
-{{"inputTensor",  { 0,   -1,   5,   -100,   200,   -255 }}},
-{{"outputTensor", { 0.0f, -1.0f, 5.0f, -100.0f, 200.0f, -255.0f }}});
+    RunTest<2, armnn::DataType::Signed32 , armnn::DataType::Float32>(
+        0,
+        {{"inputTensor",  { 0,   -1,   5,   -100,   200,   -255 }}},
+        {{"outputTensor", { 0.0f, -1.0f, 5.0f, -100.0f, 200.0f, -255.0f }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnDeserializer/test/DeserializeComparison.cpp b/src/armnnDeserializer/test/DeserializeComparison.cpp
index c995c92..a941f12 100644
--- a/src/armnnDeserializer/test/DeserializeComparison.cpp
+++ b/src/armnnDeserializer/test/DeserializeComparison.cpp
@@ -9,12 +9,10 @@
 #include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
-#include <boost/test/unit_test.hpp>
-
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_Comparison")
+{
 #define DECLARE_SIMPLE_COMPARISON_FIXTURE(operation, dataType) \
 struct Simple##operation##dataType##Fixture : public SimpleComparisonFixture \
 { \
@@ -24,7 +22,7 @@
 
 #define DECLARE_SIMPLE_COMPARISON_TEST_CASE(operation, dataType) \
 DECLARE_SIMPLE_COMPARISON_FIXTURE(operation, dataType) \
-BOOST_FIXTURE_TEST_CASE(operation##dataType, Simple##operation##dataType##Fixture) \
+TEST_CASE_FIXTURE(Simple##operation##dataType##Fixture, #operation#dataType) \
 { \
     using T = armnn::ResolveType<armnn::DataType::dataType>; \
     constexpr float   qScale  = 1.f; \
@@ -259,4 +257,4 @@
 DECLARE_SIMPLE_COMPARISON_TEST_CASE(LessOrEqual,    QAsymmU8)
 DECLARE_SIMPLE_COMPARISON_TEST_CASE(NotEqual,       QAsymmU8)
 
-BOOST_AUTO_TEST_SUITE_END()
+}
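Aside: the converted DECLARE_SIMPLE_COMPARISON_TEST_CASE macro keeps the original Boost test names by stringising both arguments (#operation#dataType); the two adjacent string literals then concatenate into a single doctest name such as "EqualFloat32". A standalone sketch of that mechanism (MAKE_NAME is illustrative and not part of the patch):

#include <doctest/doctest.h>

#include <cstring>

// Stringise both tokens; "Equal" "Float32" are adjacent literals and concatenate.
#define MAKE_NAME(operation, dataType) #operation #dataType

TEST_CASE("MacroNameConcatenation")
{
    CHECK(std::strcmp(MAKE_NAME(Equal, Float32), "EqualFloat32") == 0);
}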
diff --git a/src/armnnDeserializer/test/DeserializeConstant.cpp b/src/armnnDeserializer/test/DeserializeConstant.cpp
index 7aceb7b..682e8a1 100644
--- a/src/armnnDeserializer/test/DeserializeConstant.cpp
+++ b/src/armnnDeserializer/test/DeserializeConstant.cpp
@@ -3,14 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include <armnnDeserializer/IDeserializer.hpp>
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(DeserializeParser)
-
+TEST_SUITE("DeserializeParser_Constant")
+{
 struct ConstantAddFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit ConstantAddFixture(const std::string & shape,
@@ -140,7 +139,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SimpleConstantAddQuantisedAsymm8, SimpleConstantAddFixture)
+TEST_CASE_FIXTURE(SimpleConstantAddFixture, "SimpleConstantAddQuantisedAsymm8")
 {
     RunTest<2, armnn::DataType::QAsymmU8>(
             0,
@@ -148,4 +147,4 @@
             { 2, 4, 6, 8, 10, 12 });
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnnDeserializer/test/DeserializeConvolution2d.cpp b/src/armnnDeserializer/test/DeserializeConvolution2d.cpp
index b09c095..6461250 100644
--- a/src/armnnDeserializer/test/DeserializeConvolution2d.cpp
+++ b/src/armnnDeserializer/test/DeserializeConvolution2d.cpp
@@ -3,14 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include <armnnDeserializer/IDeserializer.hpp>
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_Convolution2D")
+{
 struct Convolution2dFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit Convolution2dFixture(const std::string & inputShape1,
@@ -130,7 +129,7 @@
                                      "Float32") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(Convolution2dFloat32, SimpleConvolution2dFixture)
+TEST_CASE_FIXTURE(SimpleConvolution2dFixture, "Convolution2dFloat32")
 {
     RunTest<4, armnn::DataType::Float32>(
             0,
@@ -138,4 +137,4 @@
             {{"OutputLayer", {23, 33, 24, 91, 99, 48, 26, 50, 19}}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnDeserializer/test/DeserializeDepthToSpace.cpp b/src/armnnDeserializer/test/DeserializeDepthToSpace.cpp
index 55de807..e446a2d 100644
--- a/src/armnnDeserializer/test/DeserializeDepthToSpace.cpp
+++ b/src/armnnDeserializer/test/DeserializeDepthToSpace.cpp
@@ -7,12 +7,10 @@
 
 #include <armnnDeserializer/IDeserializer.hpp>
 
-#include <boost/test/unit_test.hpp>
-
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_DepthToSpace")
+{
 struct DepthToSpaceFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit DepthToSpaceFixture(const std::string& inputShape,
@@ -115,7 +113,7 @@
                                                        "Float32") {}     // data type
 };
 
-BOOST_FIXTURE_TEST_CASE(DepthToSpaceFloat32, DepthToSpaceFloat32Fixture)
+TEST_CASE_FIXTURE(DepthToSpaceFloat32Fixture, "DepthToSpaceFloat32")
 {
     RunTest<4, armnn::DataType::Float32>(
         0,
@@ -133,4 +131,4 @@
         });
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnDeserializer/test/DeserializeDivision.cpp b/src/armnnDeserializer/test/DeserializeDivision.cpp
index a77cb92..41e65d4 100644
--- a/src/armnnDeserializer/test/DeserializeDivision.cpp
+++ b/src/armnnDeserializer/test/DeserializeDivision.cpp
@@ -3,14 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include <armnnDeserializer/IDeserializer.hpp>
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_Division")
+{
 struct DivisionFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit DivisionFixture(const std::string & inputShape1,
@@ -138,7 +137,7 @@
                                                "Float32") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(DivisionQuantisedAsymm8, SimpleDivisionFixture)
+TEST_CASE_FIXTURE(SimpleDivisionFixture, "DivisionQuantisedAsymm8")
 {
     RunTest<2, armnn::DataType::QAsymmU8>(
         0,
@@ -147,7 +146,7 @@
         {{"OutputLayer", { 0, 5, 3,  3 }}});
 }
 
-BOOST_FIXTURE_TEST_CASE(DivisionFloat32, SimpleDivisionFixture2)
+TEST_CASE_FIXTURE(SimpleDivisionFixture2, "DivisionFloat32")
 {
     RunTest<4, armnn::DataType::Float32>(
         0,
@@ -156,4 +155,4 @@
         {{"OutputLayer", { 20,  5,  226, 3 }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnDeserializer/test/DeserializeFill.cpp b/src/armnnDeserializer/test/DeserializeFill.cpp
index 7043479..2a961b4 100644
--- a/src/armnnDeserializer/test/DeserializeFill.cpp
+++ b/src/armnnDeserializer/test/DeserializeFill.cpp
@@ -3,14 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include <armnnDeserializer/IDeserializer.hpp>
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_Fill")
+{
 struct FillFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit FillFixture()
@@ -123,7 +122,7 @@
     SimpleFillFixture() : FillFixture() {}
 };
 
-BOOST_FIXTURE_TEST_CASE(Fill, SimpleFillFixture)
+TEST_CASE_FIXTURE(SimpleFillFixture, "Fill")
 {
     RunTest<4, armnn::DataType::Signed32, armnn::DataType::Float32>(
             0,
@@ -131,4 +130,4 @@
             {{"OutputLayer",{ 1, 1, 1, 1, 1, 1, 1, 1, 1}}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnDeserializer/test/DeserializeFloor.cpp b/src/armnnDeserializer/test/DeserializeFloor.cpp
index 8ce235e..b4b584d 100644
--- a/src/armnnDeserializer/test/DeserializeFloor.cpp
+++ b/src/armnnDeserializer/test/DeserializeFloor.cpp
@@ -3,14 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include <armnnDeserializer/IDeserializer.hpp>
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_Floor")
+{
 struct FloorFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit FloorFixture(const std::string& shape,
@@ -97,7 +96,7 @@
                                         "Float32") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(Floor, SimpleFloorFixture)
+TEST_CASE_FIXTURE(SimpleFloorFixture, "Floor")
 {
     RunTest<4, armnn::DataType::Float32>(
             4,
@@ -106,4 +105,4 @@
 }
 
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnDeserializer/test/DeserializeFullyConnected.cpp b/src/armnnDeserializer/test/DeserializeFullyConnected.cpp
index 66269dc..da2db08 100644
--- a/src/armnnDeserializer/test/DeserializeFullyConnected.cpp
+++ b/src/armnnDeserializer/test/DeserializeFullyConnected.cpp
@@ -3,14 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include <armnnDeserializer/IDeserializer.hpp>
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(DeserializeParser)
-
+TEST_SUITE("DeserializeParser_FullyConnected")
+{
 struct FullyConnectedFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit FullyConnectedFixture(const std::string & inputShape1,
@@ -128,7 +127,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE(FullyConnectedWithNoBias, FullyConnectedWithNoBiasFixture)
+TEST_CASE_FIXTURE(FullyConnectedWithNoBiasFixture, "FullyConnectedWithNoBias")
 {
     RunTest<2, armnn::DataType::QAsymmU8>(
          0,
@@ -136,4 +135,4 @@
          {{"OutputLayer", { 400/2 }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnDeserializer/test/DeserializeGather.cpp b/src/armnnDeserializer/test/DeserializeGather.cpp
index 0590e70..47919c4 100644
--- a/src/armnnDeserializer/test/DeserializeGather.cpp
+++ b/src/armnnDeserializer/test/DeserializeGather.cpp
@@ -3,14 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include <armnnDeserializer/IDeserializer.hpp>
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_Gather")
+{
 struct GatherFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit GatherFixture(const std::string& inputShape,
@@ -133,7 +132,7 @@
                                                  "[ 2, 3, 2, 3 ]", "0", "Float32", "IntData") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(GatherFloat32, SimpleGatherFixtureFloat32)
+TEST_CASE_FIXTURE(SimpleGatherFixtureFloat32, "GatherFloat32")
 {
     RunTest<4, armnn::DataType::Float32>(0,
                                          {{"InputLayer", {  1,  2,  3,
@@ -156,5 +155,5 @@
                                                             4,  5,  6 }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
 
diff --git a/src/armnnDeserializer/test/DeserializeInstanceNormalization.cpp b/src/armnnDeserializer/test/DeserializeInstanceNormalization.cpp
index 5f63429..0d29246 100644
--- a/src/armnnDeserializer/test/DeserializeInstanceNormalization.cpp
+++ b/src/armnnDeserializer/test/DeserializeInstanceNormalization.cpp
@@ -8,10 +8,8 @@
 
 #include <string>
 
-#include <boost/test/unit_test.hpp>
-
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_InstanceNormalization")
+{
 struct InstanceNormalizationFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit InstanceNormalizationFixture(const std::string &inputShape,
@@ -120,7 +118,7 @@
                                                                        "NHWC") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(InstanceNormalizationFloat32, InstanceNormalizationFloat32Fixture)
+TEST_CASE_FIXTURE(InstanceNormalizationFloat32Fixture, "InstanceNormalizationFloat32")
 {
     RunTest<4, armnn::DataType::Float32>(
         0,
@@ -152,4 +150,4 @@
         });
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnDeserializer/test/DeserializeL2Normalization.cpp b/src/armnnDeserializer/test/DeserializeL2Normalization.cpp
index c32343f..d9a512e 100644
--- a/src/armnnDeserializer/test/DeserializeL2Normalization.cpp
+++ b/src/armnnDeserializer/test/DeserializeL2Normalization.cpp
@@ -3,14 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include <armnnDeserializer/IDeserializer.hpp>
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_L2Normalization")
+{
 struct L2NormalizationFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit L2NormalizationFixture(const std::string &inputShape,
@@ -114,7 +113,7 @@
                                            "0.00000001"){}
 };
 
-BOOST_FIXTURE_TEST_CASE(L2NormalizationFloat32, L2NormFixture)
+TEST_CASE_FIXTURE(L2NormFixture, "L2NormalizationFloat32")
 {
     // 1 / sqrt(1^2 + 2^2 + 3^2)
     const float approxInvL2Norm = 0.267261f;
@@ -126,7 +125,7 @@
                                                            3.0f * approxInvL2Norm }}});
 }
 
-BOOST_FIXTURE_TEST_CASE(L2NormalizationEpsilonLimitFloat32, L2NormFixture)
+TEST_CASE_FIXTURE(L2NormFixture, "L2NormalizationEpsilonLimitFloat32")
 {
     // 1 / sqrt(1e-8)
     const float approxInvL2Norm = 10000;
@@ -138,4 +137,4 @@
                                                            0.00000003f * approxInvL2Norm }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnDeserializer/test/DeserializeLogSoftmax.cpp b/src/armnnDeserializer/test/DeserializeLogSoftmax.cpp
index 201b64b..d437ac8 100644
--- a/src/armnnDeserializer/test/DeserializeLogSoftmax.cpp
+++ b/src/armnnDeserializer/test/DeserializeLogSoftmax.cpp
@@ -6,10 +6,8 @@
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include <armnnDeserializer/IDeserializer.hpp>
 
-#include <boost/test/unit_test.hpp>
-
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_LogSoftmax")
+{
 struct LogSoftmaxFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit LogSoftmaxFixture(const std::string &shape,
@@ -111,7 +109,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE(LogSoftmaxFloat32, LogSoftmaxFloat32Fixture)
+TEST_CASE_FIXTURE(LogSoftmaxFloat32Fixture, "LogSoftmaxFloat32")
 {
     RunTest<4, armnn::DataType::Float32>(
         0,
@@ -125,4 +123,4 @@
         });
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnDeserializer/test/DeserializeMean.cpp b/src/armnnDeserializer/test/DeserializeMean.cpp
index e8eebdf..6c82183 100644
--- a/src/armnnDeserializer/test/DeserializeMean.cpp
+++ b/src/armnnDeserializer/test/DeserializeMean.cpp
@@ -3,14 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include <armnnDeserializer/IDeserializer.hpp>
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_Mean")
+{
 struct MeanFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit MeanFixture(const std::string &inputShape,
@@ -113,7 +112,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SimpleMean, SimpleMeanFixture)
+TEST_CASE_FIXTURE(SimpleMeanFixture, "SimpleMean")
 {
     RunTest<4, armnn::DataType::Float32>(
          0,
@@ -121,4 +120,4 @@
          {{"OutputLayer", { 2.0f, 2.0f }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnnDeserializer/test/DeserializeMultiplication.cpp b/src/armnnDeserializer/test/DeserializeMultiplication.cpp
index 313c78c..0956fc5 100644
--- a/src/armnnDeserializer/test/DeserializeMultiplication.cpp
+++ b/src/armnnDeserializer/test/DeserializeMultiplication.cpp
@@ -8,12 +8,10 @@
 
 #include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/test/unit_test.hpp>
-
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_Multiplication")
+{
 struct MultiplicationFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit MultiplicationFixture(const std::string & inputShape1,
@@ -143,7 +141,7 @@
                                                            "Float32") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(MultiplicationQuantisedAsymm8, SimpleMultiplicationFixture)
+TEST_CASE_FIXTURE(SimpleMultiplicationFixture, "MultiplicationQuantisedAsymm8")
 {
   RunTest<2, armnn::DataType::QAsymmU8>(
       0,
@@ -152,7 +150,7 @@
       {{"OutputLayer", { 0, 5, 12, 21 }}});
 }
 
-BOOST_FIXTURE_TEST_CASE(MultiplicationFloat32, SimpleMultiplicationFixture2)
+TEST_CASE_FIXTURE(SimpleMultiplicationFixture2, "MultiplicationFloat32")
 {
     RunTest<4, armnn::DataType::Float32>(
     0,
@@ -161,4 +159,4 @@
     {{"OutputLayer", { 500, 320, 226, 108 }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnDeserializer/test/DeserializeNormalization.cpp b/src/armnnDeserializer/test/DeserializeNormalization.cpp
index de68167..a398bfa 100644
--- a/src/armnnDeserializer/test/DeserializeNormalization.cpp
+++ b/src/armnnDeserializer/test/DeserializeNormalization.cpp
@@ -3,14 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include <armnnDeserializer/IDeserializer.hpp>
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_Normalization")
+{
 struct NormalizationFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit NormalizationFixture(const std::string &inputShape,
@@ -116,7 +115,7 @@
 };
 
 
-BOOST_FIXTURE_TEST_CASE(Float32NormalizationNhwcDataLayout, FloatNhwcLocalBrightnessAcrossNormalizationFixture)
+TEST_CASE_FIXTURE(FloatNhwcLocalBrightnessAcrossNormalizationFixture, "Float32NormalizationNhwcDataLayout")
 {
     RunTest<4, armnn::DataType::Float32>(0, { 1.0f, 2.0f, 3.0f, 4.0f,
                                               5.0f, 6.0f, 7.0f, 8.0f },
@@ -130,7 +129,7 @@
         "Float32", "1", "0", "NCHW") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(Float32NormalizationNchwDataLayout, FloatNchwLocalBrightnessWithinNormalizationFixture)
+TEST_CASE_FIXTURE(FloatNchwLocalBrightnessWithinNormalizationFixture, "Float32NormalizationNchwDataLayout")
 {
     RunTest<4, armnn::DataType::Float32>(0, { 1.0f, 2.0f, 3.0f, 4.0f,
                                               5.0f, 6.0f, 7.0f, 8.0f },
@@ -138,5 +137,4 @@
                                               0.0285714f, 0.0342857f, 0.04f, 0.0457143f });
 }
 
-
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnnDeserializer/test/DeserializePad.cpp b/src/armnnDeserializer/test/DeserializePad.cpp
index c2d21e5..43de229 100644
--- a/src/armnnDeserializer/test/DeserializePad.cpp
+++ b/src/armnnDeserializer/test/DeserializePad.cpp
@@ -3,14 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include <armnnDeserializer/IDeserializer.hpp>
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_Pad")
+{
 struct PadFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit PadFixture(const std::string &inputShape,
@@ -110,7 +109,7 @@
                                     "QuantisedAsymm8") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SimplePadQuantisedAsymm8, SimplePadFixture)
+TEST_CASE_FIXTURE(SimplePadFixture, "SimplePadQuantisedAsymm8")
 {
     RunTest<3, armnn::DataType::QAsymmU8>(0,
                                                  {
@@ -126,4 +125,4 @@
                                                  });
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnDeserializer/test/DeserializePermute.cpp b/src/armnnDeserializer/test/DeserializePermute.cpp
index 8cfe25b..7b474e2 100644
--- a/src/armnnDeserializer/test/DeserializePermute.cpp
+++ b/src/armnnDeserializer/test/DeserializePermute.cpp
@@ -3,14 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include <armnnDeserializer/IDeserializer.hpp>
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_Permute")
+{
 struct PermuteFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit PermuteFixture(const std::string &inputShape,
@@ -110,7 +109,7 @@
                                               "QuantisedAsymm8") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SimplePermute2DQuantisedAsymm8, SimplePermute2DFixture)
+TEST_CASE_FIXTURE(SimplePermute2DFixture, "SimplePermute2DQuantisedAsymm8")
 {
     RunTest<2, armnn::DataType::QAsymmU8>(0,
                                                  { 1, 2, 3, 4, 5, 6 },
@@ -125,7 +124,7 @@
                                               "QuantisedAsymm8") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SimplePermute4DQuantisedAsymm8, SimplePermute4DFixture)
+TEST_CASE_FIXTURE(SimplePermute4DFixture, "SimplePermute4DQuantisedAsymm8")
 {
     RunTest<4, armnn::DataType::QAsymmU8>(0,
                                                  {  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12,
@@ -134,4 +133,4 @@
                                                     3, 15,  7, 19, 11, 23,  4, 16,  8, 20, 12, 24 });
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnDeserializer/test/DeserializePooling2d.cpp b/src/armnnDeserializer/test/DeserializePooling2d.cpp
index bf7b8ae..1c6fe9c 100644
--- a/src/armnnDeserializer/test/DeserializePooling2d.cpp
+++ b/src/armnnDeserializer/test/DeserializePooling2d.cpp
@@ -3,14 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include <armnnDeserializer/IDeserializer.hpp>
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_Pooling2d")
+{
 struct Pooling2dFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit Pooling2dFixture(const std::string &inputShape,
@@ -141,30 +140,30 @@
                                                   "Float32", "NHWC", "L2") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(Pooling2dFloat32Avg, SimpleAvgPooling2dFixture)
+TEST_CASE_FIXTURE(SimpleAvgPooling2dFixture, "Pooling2dFloat32Avg")
 {
     RunTest<4, armnn::DataType::Float32>(0, { 2, 3, 5, 2 }, { 3 });
 }
 
-BOOST_FIXTURE_TEST_CASE(Pooling2dQuantisedAsymm8Avg, SimpleAvgPooling2dFixture2)
+TEST_CASE_FIXTURE(SimpleAvgPooling2dFixture2, "Pooling2dQuantisedAsymm8Avg")
 {
     RunTest<4, armnn::DataType::QAsymmU8>(0,{ 20, 40, 60, 80 },{ 50 });
 }
 
-BOOST_FIXTURE_TEST_CASE(Pooling2dFloat32Max, SimpleMaxPooling2dFixture)
+TEST_CASE_FIXTURE(SimpleMaxPooling2dFixture, "Pooling2dFloat32Max")
 {
     RunTest<4, armnn::DataType::Float32>(0, { 2, 5, 5, 2 }, { 5 });
 }
 
-BOOST_FIXTURE_TEST_CASE(Pooling2dQuantisedAsymm8Max, SimpleMaxPooling2dFixture2)
+TEST_CASE_FIXTURE(SimpleMaxPooling2dFixture2, "Pooling2dQuantisedAsymm8Max")
 {
     RunTest<4, armnn::DataType::QAsymmU8>(0,{ 20, 40, 60, 80 },{ 80 });
 }
 
-BOOST_FIXTURE_TEST_CASE(Pooling2dFloat32L2, SimpleL2Pooling2dFixture)
+TEST_CASE_FIXTURE(SimpleL2Pooling2dFixture, "Pooling2dFloat32L2")
 {
     RunTest<4, armnn::DataType::Float32>(0, { 2, 3, 5, 2 }, { 3.2403703f });
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
 
diff --git a/src/armnnDeserializer/test/DeserializeRank.cpp b/src/armnnDeserializer/test/DeserializeRank.cpp
index 920283f..3cb3459 100644
--- a/src/armnnDeserializer/test/DeserializeRank.cpp
+++ b/src/armnnDeserializer/test/DeserializeRank.cpp
@@ -3,14 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include <armnnDeserializer/IDeserializer.hpp>
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_Rank")
+{
 struct RankFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit RankFixture(const std::string &inputShape,
@@ -120,32 +119,32 @@
     SimpleRankDimSize4Fixture() : RankFixture("[ 2, 2, 1, 1 ]", "Float32") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(RankDimSize1Float16, SimpleRankDimSize1Fixture)
+TEST_CASE_FIXTURE(SimpleRankDimSize1Fixture, "RankDimSize1Float16")
 {
     RunTest<1, armnn::DataType::QSymmS16, armnn::DataType::Signed32>( 0,
                                                                       { 1, 2, 3, 4, 5, 6, 7, 8 },
                                                                       { 1 });
 }
 
-BOOST_FIXTURE_TEST_CASE(RankDimSize2QAsymmU8, SimpleRankDimSize2Fixture)
+TEST_CASE_FIXTURE(SimpleRankDimSize2Fixture, "RankDimSize2QAsymmU8")
 {
     RunTest<1, armnn::DataType::QSymmS8, armnn::DataType::Signed32>( 0,
                                                                     { 1, 2, 3, 4, 5, 6, 7, 8, 9 },
                                                                     { 2 });
 }
 
-BOOST_FIXTURE_TEST_CASE(RankDimSize3Signed32, SimpleRankDimSize3Fixture)
+TEST_CASE_FIXTURE(SimpleRankDimSize3Fixture, "RankDimSize3Signed32")
 {
     RunTest<1, armnn::DataType::Signed32, armnn::DataType::Signed32>( 0,
                                                                     { 111, 85, 226, 3 },
                                                                     { 3 });
 }
 
-BOOST_FIXTURE_TEST_CASE(RankDimSize4Float32, SimpleRankDimSize4Fixture)
+TEST_CASE_FIXTURE(SimpleRankDimSize4Fixture, "RankDimSize4Float32")
 {
     RunTest<1, armnn::DataType::Float32, armnn::DataType::Signed32>( 0,
                                                                    { 111, 85, 226, 3 },
                                                                    { 4 });
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnnDeserializer/test/DeserializeReduceSum.cpp b/src/armnnDeserializer/test/DeserializeReduceSum.cpp
index 326560f..1d72b0f 100644
--- a/src/armnnDeserializer/test/DeserializeReduceSum.cpp
+++ b/src/armnnDeserializer/test/DeserializeReduceSum.cpp
@@ -3,14 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include "../Deserializer.hpp"
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_ReduceSum")
+{
 struct ReduceSumFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit ReduceSumFixture(const std::string& inputShape,
@@ -114,7 +113,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SimpleReduceSum, SimpleReduceSumFixture)
+TEST_CASE_FIXTURE(SimpleReduceSumFixture, "SimpleReduceSum")
 {
     RunTest<4, armnn::DataType::Float32>(
          0,
@@ -122,4 +121,4 @@
          {{"OutputLayer", { 6.0f, 6.0f }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnDeserializer/test/DeserializeReshape.cpp b/src/armnnDeserializer/test/DeserializeReshape.cpp
index c771a23..5a1984a 100644
--- a/src/armnnDeserializer/test/DeserializeReshape.cpp
+++ b/src/armnnDeserializer/test/DeserializeReshape.cpp
@@ -3,14 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include <armnnDeserializer/IDeserializer.hpp>
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_Reshape")
+{
 struct ReshapeFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit ReshapeFixture(const std::string &inputShape,
@@ -109,14 +108,14 @@
                                              "Float32") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ReshapeQuantisedAsymm8, SimpleReshapeFixture)
+TEST_CASE_FIXTURE(SimpleReshapeFixture, "ReshapeQuantisedAsymm8")
 {
     RunTest<2, armnn::DataType::QAsymmU8>(0,
                                                 { 1, 2, 3, 4, 5, 6, 7, 8, 9 },
                                                 { 1, 2, 3, 4, 5, 6, 7, 8, 9 });
 }
 
-BOOST_FIXTURE_TEST_CASE(ReshapeFloat32, SimpleReshapeFixture2)
+TEST_CASE_FIXTURE(SimpleReshapeFixture2, "ReshapeFloat32")
 {
     RunTest<4, armnn::DataType::Float32>(0,
                                         { 111, 85, 226, 3 },
@@ -124,4 +123,4 @@
 }
 
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnnDeserializer/test/DeserializeResizeBilinear.cpp b/src/armnnDeserializer/test/DeserializeResizeBilinear.cpp
index a9e5522..da2be7b 100644
--- a/src/armnnDeserializer/test/DeserializeResizeBilinear.cpp
+++ b/src/armnnDeserializer/test/DeserializeResizeBilinear.cpp
@@ -3,14 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include <armnnDeserializer/IDeserializer.hpp>
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_ResizeBilinear")
+{
 struct ResizeBilinearFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit ResizeBilinearFixture(const std::string& inputShape,
@@ -116,7 +115,7 @@
                                                           "Float32") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SimpleResizeBilinearFloat32, SimpleResizeBilinearFixture)
+TEST_CASE_FIXTURE(SimpleResizeBilinearFixture, "SimpleResizeBilinearFloat32")
 {
     RunTest<4, armnn::DataType::Float32>(0,
                                          {
@@ -128,4 +127,4 @@
                                          });
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnDeserializer/test/DeserializeRsqrt.cpp b/src/armnnDeserializer/test/DeserializeRsqrt.cpp
index 68de783..2264f7f 100644
--- a/src/armnnDeserializer/test/DeserializeRsqrt.cpp
+++ b/src/armnnDeserializer/test/DeserializeRsqrt.cpp
@@ -3,14 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include <armnnDeserializer/IDeserializer.hpp>
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_Rsqrt")
+{
 struct RsqrtFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit RsqrtFixture(const std::string & inputShape,
@@ -102,7 +101,7 @@
                                     "Float32") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(Rsqrt2d, Rsqrt2dFixture)
+TEST_CASE_FIXTURE(Rsqrt2dFixture, "Rsqrt2d")
 {
   RunTest<2, armnn::DataType::Float32>(
       0,
@@ -113,4 +112,4 @@
 }
 
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnDeserializer/test/DeserializeSlice.cpp b/src/armnnDeserializer/test/DeserializeSlice.cpp
index bc5cea6..e0bfb21 100644
--- a/src/armnnDeserializer/test/DeserializeSlice.cpp
+++ b/src/armnnDeserializer/test/DeserializeSlice.cpp
@@ -7,12 +7,10 @@
 
 #include <armnnDeserializer/IDeserializer.hpp>
 
-#include <boost/test/unit_test.hpp>
-
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_Slice")
+{
 struct SliceFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit SliceFixture(const std::string& inputShape,
@@ -115,7 +113,7 @@
                                         "Float32") {}     // data type
 };
 
-BOOST_FIXTURE_TEST_CASE(SimpleSliceFloat32, SimpleSliceFixture)
+TEST_CASE_FIXTURE(SimpleSliceFixture, "SimpleSliceFloat32")
 {
     RunTest<4, armnn::DataType::Float32>(
         0,
@@ -155,4 +153,4 @@
         });
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnDeserializer/test/DeserializeSpaceToBatchNd.cpp b/src/armnnDeserializer/test/DeserializeSpaceToBatchNd.cpp
index 6ee2a3f..0c8675b 100644
--- a/src/armnnDeserializer/test/DeserializeSpaceToBatchNd.cpp
+++ b/src/armnnDeserializer/test/DeserializeSpaceToBatchNd.cpp
@@ -3,14 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include <armnnDeserializer/IDeserializer.hpp>
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_SpaceToBatchND")
+{
 struct SpaceToBatchNdFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit SpaceToBatchNdFixture(const std::string &inputShape,
@@ -116,7 +115,7 @@
                                                           "Float32") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SimpleSpaceToBatchNdFloat32, SimpleSpaceToBatchNdFixture)
+TEST_CASE_FIXTURE(SimpleSpaceToBatchNdFixture, "SimpleSpaceToBatchNdFloat32")
 {
     RunTest<4, armnn::DataType::Float32>(0,
                                          {
@@ -137,4 +136,4 @@
                                          });
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnDeserializer/test/DeserializeStridedSlice.cpp b/src/armnnDeserializer/test/DeserializeStridedSlice.cpp
index 6634222..e3ef71e 100644
--- a/src/armnnDeserializer/test/DeserializeStridedSlice.cpp
+++ b/src/armnnDeserializer/test/DeserializeStridedSlice.cpp
@@ -3,14 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include <armnnDeserializer/IDeserializer.hpp>
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_StridedSlice")
+{
 struct StridedSliceFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit StridedSliceFixture(const std::string& inputShape,
@@ -134,7 +133,7 @@
                                                       "Float32") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SimpleStridedSliceFloat32, SimpleStridedSliceFixture)
+TEST_CASE_FIXTURE(SimpleStridedSliceFixture, "SimpleStridedSliceFloat32")
 {
     RunTest<4, armnn::DataType::Float32>(0,
                                          {
@@ -163,7 +162,7 @@
                                                     "Float32") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(StridedSliceMaskFloat32, StridedSliceMaskFixture)
+TEST_CASE_FIXTURE(StridedSliceMaskFixture, "StridedSliceMaskFloat32")
 {
     RunTest<4, armnn::DataType::Float32>(0,
                                          {
@@ -178,4 +177,4 @@
                                          });
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnDeserializer/test/DeserializeSubtraction.cpp b/src/armnnDeserializer/test/DeserializeSubtraction.cpp
index 789653c..63e7c91 100644
--- a/src/armnnDeserializer/test/DeserializeSubtraction.cpp
+++ b/src/armnnDeserializer/test/DeserializeSubtraction.cpp
@@ -3,14 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include <armnnDeserializer/IDeserializer.hpp>
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_Subtraction")
+{
 struct SubtractionFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit SubtractionFixture(const std::string & inputShape1,
@@ -145,7 +144,7 @@
                                                              "Float32") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SubtractionQuantisedAsymm8, SimpleSubtractionFixture)
+TEST_CASE_FIXTURE(SimpleSubtractionFixture, "SubtractionQuantisedAsymm8")
 {
     RunTest<2, armnn::DataType::QAsymmU8>(
         0,
@@ -154,7 +153,7 @@
         {{"outputLayer", { 1, 3, 5, 7 }}});
 }
 
-BOOST_FIXTURE_TEST_CASE(SubtractionFloat32, SimpleSubtractionFixture2)
+TEST_CASE_FIXTURE(SimpleSubtractionFixture2, "SubtractionFloat32")
 {
     RunTest<2, armnn::DataType::Float32>(
         0,
@@ -163,7 +162,7 @@
         {{"outputLayer", { 1, 3, 5, 7 }}});
 }
 
-BOOST_FIXTURE_TEST_CASE(SubtractionBroadcast, SimpleSubtractionFixtureBroadcast)
+TEST_CASE_FIXTURE(SimpleSubtractionFixtureBroadcast, "SubtractionBroadcast")
 {
     RunTest<2, armnn::DataType::Float32>(
         0,
@@ -172,4 +171,4 @@
         {{"outputLayer", { 2, 3, 4, 5 }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnDeserializer/test/DeserializeTranspose.cpp b/src/armnnDeserializer/test/DeserializeTranspose.cpp
index 3e5ad56..fe656d1 100644
--- a/src/armnnDeserializer/test/DeserializeTranspose.cpp
+++ b/src/armnnDeserializer/test/DeserializeTranspose.cpp
@@ -3,14 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersSerializeFixture.hpp"
 #include <armnnDeserializer/IDeserializer.hpp>
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(Deserializer)
-
+TEST_SUITE("Deserializer_Transpose")
+{
 struct TransposeFixture : public ParserFlatbuffersSerializeFixture
 {
     explicit TransposeFixture(const std::string &inputShape,
@@ -110,7 +109,7 @@
                                                   "QuantisedAsymm8") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SimpleTranspose2DQuantisedAsymm8, SimpleTranspose2DFixture)
+TEST_CASE_FIXTURE(SimpleTranspose2DFixture, "SimpleTranspose2DQuantisedAsymm8")
 {
     RunTest<2, armnn::DataType::QAsymmU8>(0,
                                                  { 1, 2, 3, 4, 5, 6 },
@@ -125,7 +124,7 @@
                                                   "QuantisedAsymm8") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SimpleTranspose4DQuantisedAsymm8, SimpleTranspose4DFixture)
+TEST_CASE_FIXTURE(SimpleTranspose4DFixture, "SimpleTranspose4DQuantisedAsymm8")
 {
     RunTest<4, armnn::DataType::QAsymmU8>(0,
                                                  {  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12,
@@ -134,4 +133,4 @@
                                                     3, 15,  7, 19, 11, 23,  4, 16,  8, 20, 12, 24 });
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
index a62cb96..ac8d4b3 100644
--- a/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
+++ b/src/armnnDeserializer/test/ParserFlatbuffersSerializeFixture.hpp
@@ -19,6 +19,7 @@
 #include <ResolveType.hpp>
 
 #include <fmt/format.h>
+#include <doctest/doctest.h>
 
 #include <vector>
 
@@ -154,12 +155,12 @@
                       const float scale, const int64_t zeroPoint)
     {
         armnn::IgnoreUnused(name);
-        BOOST_CHECK_EQUAL(shapeSize, tensors->dimensions()->size());
-        BOOST_CHECK_EQUAL_COLLECTIONS(shape.begin(), shape.end(),
-                                      tensors->dimensions()->begin(), tensors->dimensions()->end());
-        BOOST_CHECK_EQUAL(tensorType.dataType(), tensors->dataType());
-        BOOST_CHECK_EQUAL(scale, tensors->quantizationScale());
-        BOOST_CHECK_EQUAL(zeroPoint, tensors->quantizationOffset());
+        CHECK_EQ(shapeSize, tensors->dimensions()->size());
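+        // doctest has no direct equivalent of BOOST_CHECK_EQUAL_COLLECTIONS, so compare the ranges element-wise.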
+        CHECK(std::equal(shape.begin(), shape.end(),
+                         tensors->dimensions()->begin(), tensors->dimensions()->end()));
+        CHECK_EQ(tensorType.dataType(), tensors->dataType());
+        CHECK_EQ(scale, tensors->quantizationScale());
+        CHECK_EQ(zeroPoint, tensors->quantizationOffset());
     }
 };
 
@@ -241,6 +242,6 @@
         auto outputExpected = it.second;
         auto result = CompareTensors(outputExpected, outputStorage[it.first],
                                      bindingInfo.second.GetShape(), bindingInfo.second.GetShape());
-        BOOST_TEST(result.m_Result, result.m_Message.str());
+        CHECK_MESSAGE(result.m_Result, result.m_Message.str());
     }
 }
diff --git a/src/armnnOnnxParser/test/Addition.cpp b/src/armnnOnnxParser/test/Addition.cpp
index 6fc8eb1..d18c277 100644
--- a/src/armnnOnnxParser/test/Addition.cpp
+++ b/src/armnnOnnxParser/test/Addition.cpp
@@ -3,12 +3,11 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "armnnOnnxParser/IOnnxParser.hpp"
 #include  "ParserPrototxtFixture.hpp"
 
-BOOST_AUTO_TEST_SUITE(OnnxParser)
-
+TEST_SUITE("OnnxParser_Addition")
+{
 struct AddMainFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
 {
     AddMainFixture(const std::string& dataType)
@@ -383,37 +382,37 @@
     AddInvalidScalarFixture() : AddScalarFixture("6") { }
 };
 
-BOOST_FIXTURE_TEST_CASE(ValidAddTest, AddValidFixture)
+TEST_CASE_FIXTURE(AddValidFixture, "ValidAddTest")
 {
     RunTest<4>({{"Input0", {1.0f, 2.0f, -3.0f, -4.0f}},
                 {"Input1", {1.0f, 2.0f, 3.0, 4.0f}}}, {{"Output", {2.0, 4.0, 0, 0.0}}});
 }
 
-BOOST_FIXTURE_TEST_CASE(IncorrectDataTypeAdd, AddInvalidFixture)
+TEST_CASE_FIXTURE(AddInvalidFixture, "IncorrectDataTypeAdd")
 {
-   BOOST_CHECK_THROW(Setup(), armnn::ParseException);
+   CHECK_THROWS_AS(Setup(), armnn::ParseException);
 }
 
-BOOST_FIXTURE_TEST_CASE(InvalidBroadcastAdd, AddInvalidBroadcastFixture)
+TEST_CASE_FIXTURE(AddInvalidBroadcastFixture, "InvalidBroadcastAdd")
 {
-   BOOST_CHECK_THROW(Setup(), armnn::ParseException);
+   CHECK_THROWS_AS(Setup(), armnn::ParseException);
 }
 
-BOOST_FIXTURE_TEST_CASE(ValidBroadcastAdd, AddValidBroadcastFixture)
+TEST_CASE_FIXTURE(AddValidBroadcastFixture, "ValidBroadcastAdd")
 {
     RunTest<4>({{"Input0", {1.0f, 2.0f, -3.0f, -4.0f}},
                 {"Input1", {1.0f, 2.0f, 3.0, 4.0f}}}, {{"Output", {2.0, 4.0, 0, 0.0}}});
 }
 
-BOOST_FIXTURE_TEST_CASE(ValidAddScalarTest, AddValidScalarFixture)
+TEST_CASE_FIXTURE(AddValidScalarFixture, "ValidAddScalarTest")
 {
     RunTest<4>({{"Input0", {1.0f, 2.0f, -3.0f, -4.0f}},
                 {"Input1", {-8.0f}}}, {{"Output", {-7.0, -6.0, -11.0, -12.0}}});
 }
 
-BOOST_FIXTURE_TEST_CASE(IncorrectDataTypeAddScalar, AddInvalidScalarFixture)
+TEST_CASE_FIXTURE(AddInvalidScalarFixture, "IncorrectDataTypeAddScalar")
 {
-    BOOST_CHECK_THROW(Setup(), armnn::ParseException);
+    CHECK_THROWS_AS(Setup(), armnn::ParseException);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnnOnnxParser/test/BatchNorm.cpp b/src/armnnOnnxParser/test/BatchNorm.cpp
index bbe9616..f582dbd 100644
--- a/src/armnnOnnxParser/test/BatchNorm.cpp
+++ b/src/armnnOnnxParser/test/BatchNorm.cpp
@@ -3,12 +3,11 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "armnnOnnxParser/IOnnxParser.hpp"
 #include  "ParserPrototxtFixture.hpp"
 
-BOOST_AUTO_TEST_SUITE(OnnxParser)
-
+TEST_SUITE("OnnxParser_BatchNorm")
+{
 struct BatchNormalizationMainFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
 {
     BatchNormalizationMainFixture()
@@ -164,7 +163,7 @@
     }
 };
 
-BOOST_FIXTURE_TEST_CASE(ValidBatchNormalizationTest, BatchNormalizationMainFixture)
+TEST_CASE_FIXTURE(BatchNormalizationMainFixture, "ValidBatchNormalizationTest")
 {
     RunTest<4>({{"Input", {1, 2, 3, 4, 5, 6, 7, 8, 9}}},             // Input data.
                {{"Output", {-2.8277204f, -2.12079024f, -1.4138602f,
@@ -332,11 +331,11 @@
     }
 };
 
-BOOST_FIXTURE_TEST_CASE(ValidBatchNormalizationBisTest, BatchNormalizationBisFixture)
+TEST_CASE_FIXTURE(BatchNormalizationBisFixture, "ValidBatchNormalizationBisTest")
 {
     RunTest<4>({{"Input", {-1, 0.0, 1, 2, 3.0, 4.0}}},           // Input data.
                {{"Output", {-0.999995f, 0.0, 0.999995f,
                             -0.22474074f, 1.0f, 2.2247407f}}});  // Expected output data.
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnOnnxParser/test/Clip.cpp b/src/armnnOnnxParser/test/Clip.cpp
index 6420304..b0447bc 100644
--- a/src/armnnOnnxParser/test/Clip.cpp
+++ b/src/armnnOnnxParser/test/Clip.cpp
@@ -3,12 +3,11 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "armnnOnnxParser/IOnnxParser.hpp"
 #include "ParserPrototxtFixture.hpp"
 
-BOOST_AUTO_TEST_SUITE(OnnxParser)
-
+TEST_SUITE("OnnxParser_Clip")
+{
 struct ClipMainFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
 {
     ClipMainFixture(std::string min, std::string max)
@@ -68,7 +67,7 @@
     ClipFixture() : ClipMainFixture("2", "3.5") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ValidClipTest, ClipFixture)
+TEST_CASE_FIXTURE(ClipFixture, "ValidClipTest")
 {
     RunTest<1>({{"Input",  { -1.5f, 1.25f, 3.5f, 8.0, 2.5}}},
                {{ "Output", { 2.0f, 2.0f, 3.5f, 3.5, 2.5}}});
@@ -79,7 +78,7 @@
     ClipNoMaxInputFixture() : ClipMainFixture("0", std::string()) {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ValidNoMaxInputClipTest, ClipNoMaxInputFixture)
+TEST_CASE_FIXTURE(ClipNoMaxInputFixture, "ValidNoMaxInputClipTest")
 {
     RunTest<1>({{"Input",  { -1.5f, -5.25f, -0.5f, 8.0f, std::numeric_limits<float>::max() }}},
                {{ "Output", { 0.0f, 0.0f, 0.0f, 8.0f, std::numeric_limits<float>::max() }}});
@@ -90,7 +89,7 @@
     ClipNoMinInputFixture() : ClipMainFixture(std::string(), "6") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ValidNoMinInputClipTest, ClipNoMinInputFixture)
+TEST_CASE_FIXTURE(ClipNoMinInputFixture, "ValidNoMinInputClipTest")
 {
     RunTest<1>({{"Input",   { std::numeric_limits<float>::lowest(), -5.25f, -0.5f, 8.0f, 200.0f }}},
                {{ "Output", { std::numeric_limits<float>::lowest(), -5.25f, -0.5f, 6.0f, 6.0f }}});
@@ -101,7 +100,7 @@
     ClipNoInputFixture() : ClipMainFixture(std::string(), std::string()) {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ValidNoInputClipTest, ClipNoInputFixture)
+TEST_CASE_FIXTURE(ClipNoInputFixture, "ValidNoInputClipTest")
 {
     RunTest<1>({{"Input",   { std::numeric_limits<float>::lowest(), -1.25f, 3.5f, 8.0f,
                               std::numeric_limits<float>::max()}}},
@@ -109,4 +108,4 @@
                               std::numeric_limits<float>::max()}}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnOnnxParser/test/Const.cpp b/src/armnnOnnxParser/test/Const.cpp
index 8777757..35df4e9 100644
--- a/src/armnnOnnxParser/test/Const.cpp
+++ b/src/armnnOnnxParser/test/Const.cpp
@@ -3,12 +3,11 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "armnnOnnxParser/IOnnxParser.hpp"
 #include  "ParserPrototxtFixture.hpp"
 
-BOOST_AUTO_TEST_SUITE(OnnxParser)
-
+TEST_SUITE("OnnxParser_Const")
+{
 struct ConstMainFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
 {
     ConstMainFixture(const std::string& dataType)
@@ -74,14 +73,14 @@
     ConstInvalidFixture() : ConstMainFixture("10") { }
 };
 
-BOOST_FIXTURE_TEST_CASE(ValidConstTest, ConstValidFixture)
+TEST_CASE_FIXTURE(ConstValidFixture, "ValidConstTest")
 {
     RunTest<1>({ }, {{ "Output" , {0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0}}});
 }
 
-BOOST_FIXTURE_TEST_CASE(IncorrectDataTypeConst, ConstInvalidFixture)
+TEST_CASE_FIXTURE(ConstInvalidFixture, "IncorrectDataTypeConst")
 {
-   BOOST_CHECK_THROW( Setup(), armnn::ParseException);
+   CHECK_THROWS_AS( Setup(), armnn::ParseException);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnOnnxParser/test/Constructor.cpp b/src/armnnOnnxParser/test/Constructor.cpp
index 76095e1..0fc3a1e 100644
--- a/src/armnnOnnxParser/test/Constructor.cpp
+++ b/src/armnnOnnxParser/test/Constructor.cpp
@@ -3,14 +3,15 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "armnnOnnxParser/IOnnxParser.hpp"
 
-BOOST_AUTO_TEST_SUITE(OnnxParser)
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_CASE(Create)
+TEST_SUITE("OnnxParser_Constructor")
+{
+TEST_CASE("Create")
 {
     armnnOnnxParser::IOnnxParserPtr parser(armnnOnnxParser::IOnnxParser::Create());
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnOnnxParser/test/Conv2D.cpp b/src/armnnOnnxParser/test/Conv2D.cpp
index a38cc19..0603881 100644
--- a/src/armnnOnnxParser/test/Conv2D.cpp
+++ b/src/armnnOnnxParser/test/Conv2D.cpp
@@ -3,12 +3,11 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "armnnOnnxParser/IOnnxParser.hpp"
 #include  "ParserPrototxtFixture.hpp"
 
-BOOST_AUTO_TEST_SUITE(OnnxParser)
-
+TEST_SUITE("OnnxParser_Conv2D")
+{
 struct SimpleConv2DFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
 {
     SimpleConv2DFixture()
@@ -578,7 +577,7 @@
     }
 };
 
-BOOST_FIXTURE_TEST_CASE(ValidConvTest, SimpleConv2DFixture)
+TEST_CASE_FIXTURE(SimpleConv2DFixture, "ValidConvTest")
 {
     RunTest<4>({{"Input", {1.0, 2.0, 3.0,
                            4.0, 5.0, 6.0,
@@ -588,7 +587,7 @@
                            7.0 * 4 + 8.0 * 1 + 9.0 * 2}}});
 }
 
-BOOST_FIXTURE_TEST_CASE(ValidConvWithBiasTest, Conv2DWithBiasesFixture)
+TEST_CASE_FIXTURE(Conv2DWithBiasesFixture, "ValidConvWithBiasTest")
 {
     RunTest<4>({{"Input", {1.0, 2.0,
                            3.0, 4.0}}},
@@ -598,7 +597,7 @@
                            4.0 * 2 + 0 * 1 + 0 * 0 + 0 * 6 + 10}}});
 }
 
-BOOST_FIXTURE_TEST_CASE(ValidConvDimReducTest, Conv2DDimReducingFixture)
+TEST_CASE_FIXTURE(Conv2DDimReducingFixture, "ValidConvDimReducTest")
 {
     RunTest<4>({{"Input", {1.0, 2.0, 3.0, 4.0, -1, -2, 3, 4, 1 , 1, 1, 1 }}},
               {{"Output", {-1 * 1 + 2 * -1, -1 * 2 + 2 * -2,
@@ -606,7 +605,7 @@
                            1, 2, 3, 4}}});
 }
 
-BOOST_FIXTURE_TEST_CASE(ValidConvWithDilationTest, Conv2DwithDilationFixture)
+TEST_CASE_FIXTURE(Conv2DwithDilationFixture, "ValidConvWithDilationTest")
 {
     RunTest<4>({{"Input", {1.0, 2.0, 3.0, 4.0, 5.0, 6.0,
                            7.0, 8.0, 9.0, 10.0, 11.0, 12.0,
@@ -617,4 +616,4 @@
                {{"Output", {39.0, 58.0, 153.0, 172.0 }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnOnnxParser/test/CreateNetwork.cpp b/src/armnnOnnxParser/test/CreateNetwork.cpp
index 1e3e1b4..eeeaca8 100644
--- a/src/armnnOnnxParser/test/CreateNetwork.cpp
+++ b/src/armnnOnnxParser/test/CreateNetwork.cpp
@@ -3,13 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include  "armnnOnnxParser/IOnnxParser.hpp"
+#include <doctest/doctest.h>
+
 #include "google/protobuf/stubs/logging.h"
 
-BOOST_AUTO_TEST_SUITE(OnnxParser)
-
-BOOST_AUTO_TEST_CASE(CreateNetworkFromString)
+TEST_SUITE("OnnxParser_CreateNetwork")
+{
+TEST_CASE("CreateNetworkFromString")
 {
   std::string TestModel = R"(
                           ir_version: 3
@@ -43,21 +44,21 @@
     armnnOnnxParser::IOnnxParserPtr parser(armnnOnnxParser::IOnnxParser::Create());
 
     armnn::INetworkPtr network = parser->CreateNetworkFromString(TestModel.c_str());
-    BOOST_TEST(network.get());
+    CHECK(network.get());
 }
 
-BOOST_AUTO_TEST_CASE(CreateNetworkFromStringWithNullptr)
+TEST_CASE("CreateNetworkFromStringWithNullptr")
 {
     armnnOnnxParser::IOnnxParserPtr parser(armnnOnnxParser::IOnnxParser::Create());
-    BOOST_CHECK_THROW(parser->CreateNetworkFromString(""), armnn::InvalidArgumentException );
+    CHECK_THROWS_AS(parser->CreateNetworkFromString(""), armnn::InvalidArgumentException );
 }
 
-BOOST_AUTO_TEST_CASE(CreateNetworkWithInvalidString)
+TEST_CASE("CreateNetworkWithInvalidString")
 {
     auto silencer = google::protobuf::LogSilencer(); //get rid of errors from protobuf
     armnnOnnxParser::IOnnxParserPtr parser(armnnOnnxParser::IOnnxParser::Create());
-    BOOST_CHECK_THROW(parser->CreateNetworkFromString( "I'm not a model so I should raise an error" ),
+    CHECK_THROWS_AS(parser->CreateNetworkFromString( "I'm not a model so I should raise an error" ),
                       armnn::ParseException );
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnOnnxParser/test/DepthConv.cpp b/src/armnnOnnxParser/test/DepthConv.cpp
index fdeb21c..21d99bb 100644
--- a/src/armnnOnnxParser/test/DepthConv.cpp
+++ b/src/armnnOnnxParser/test/DepthConv.cpp
@@ -3,12 +3,11 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "armnnOnnxParser/IOnnxParser.hpp"
 #include  "ParserPrototxtFixture.hpp"
 
-BOOST_AUTO_TEST_SUITE(OnnxParser)
-
+TEST_SUITE("OnnxParser_DepthConv")
+{
 struct SimpleDepthConv2DFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
 {
     SimpleDepthConv2DFixture()
@@ -153,10 +152,10 @@
 };
 
 
-BOOST_FIXTURE_TEST_CASE(ValidDepthConvTest, SimpleDepthConv2DFixture)
+TEST_CASE_FIXTURE(SimpleDepthConv2DFixture, "ValidDepthConvTest")
 {
     RunTest<4>({{"Input", { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12}}},
                {{"Output", { 10, 52, 126 }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnOnnxParser/test/Flatten.cpp b/src/armnnOnnxParser/test/Flatten.cpp
index 1ba509e..46ac0df 100644
--- a/src/armnnOnnxParser/test/Flatten.cpp
+++ b/src/armnnOnnxParser/test/Flatten.cpp
@@ -3,12 +3,11 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "armnnOnnxParser/IOnnxParser.hpp"
 #include  "ParserPrototxtFixture.hpp"
 
-BOOST_AUTO_TEST_SUITE(OnnxParser)
-
+TEST_SUITE("OnnxParser_Flatter")
+{
 struct FlattenMainFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
 {
     FlattenMainFixture(const std::string& dataType)
@@ -382,7 +381,7 @@
     FlattenInvalidAxisFixture() : FlattenInvalidNegativeAxisFixture("1") { }
 };
 
-BOOST_FIXTURE_TEST_CASE(ValidFlattenTest, FlattenValidFixture)
+TEST_CASE_FIXTURE(FlattenValidFixture, "ValidFlattenTest")
 {
     RunTest<2>({{"Input",
                           { 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f,
@@ -394,7 +393,7 @@
                             1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f }}});
 }
 
-BOOST_FIXTURE_TEST_CASE(ValidFlattenDefaultTest, FlattenDefaultValidFixture)
+TEST_CASE_FIXTURE(FlattenDefaultValidFixture, "ValidFlattenDefaultTest")
 {
     RunTest<2>({{"Input",
                     { 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f,
@@ -406,7 +405,7 @@
                         1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f }}});
 }
 
-BOOST_FIXTURE_TEST_CASE(ValidFlattenAxisZeroTest, FlattenAxisZeroValidFixture)
+TEST_CASE_FIXTURE(FlattenAxisZeroValidFixture, "ValidFlattenAxisZeroTest")
 {
     RunTest<2>({{"Input",
                     { 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f,
@@ -418,7 +417,7 @@
                         1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f }}});
 }
 
-BOOST_FIXTURE_TEST_CASE(ValidFlattenNegativeAxisTest, FlattenNegativeAxisValidFixture)
+TEST_CASE_FIXTURE(FlattenNegativeAxisValidFixture, "ValidFlattenNegativeAxisTest")
 {
     RunTest<2>({{"Input",
                     { 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f,
@@ -430,14 +429,14 @@
                         1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f }}});
 }
 
-BOOST_FIXTURE_TEST_CASE(IncorrectDataTypeFlatten, FlattenInvalidFixture)
+TEST_CASE_FIXTURE(FlattenInvalidFixture, "IncorrectDataTypeFlatten")
 {
-    BOOST_CHECK_THROW(Setup(), armnn::ParseException);
+    CHECK_THROWS_AS(Setup(), armnn::ParseException);
 }
 
-BOOST_FIXTURE_TEST_CASE(IncorrectAxisFlatten, FlattenInvalidAxisFixture)
+TEST_CASE_FIXTURE(FlattenInvalidAxisFixture, "IncorrectAxisFlatten")
 {
-    BOOST_CHECK_THROW(Setup(), armnn::ParseException);
+    CHECK_THROWS_AS(Setup(), armnn::ParseException);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnOnnxParser/test/FullyConnected.cpp b/src/armnnOnnxParser/test/FullyConnected.cpp
index 133b8a6..53094c1 100644
--- a/src/armnnOnnxParser/test/FullyConnected.cpp
+++ b/src/armnnOnnxParser/test/FullyConnected.cpp
@@ -3,12 +3,11 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "armnnOnnxParser/IOnnxParser.hpp"
 #include  "ParserPrototxtFixture.hpp"
 
-BOOST_AUTO_TEST_SUITE(OnnxParser)
-
+TEST_SUITE("OnnxParser_FullyConnected")
+{
 // A MatMul in isolation, not connected to an add. Should result in a non-biased FullyConnected layer.
 struct MatMulFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
 {
@@ -93,7 +92,7 @@
     }
 };
 
-BOOST_FIXTURE_TEST_CASE(MatMul, MatMulFixture)
+TEST_CASE_FIXTURE(MatMulFixture, "MatMul")
 {
     RunTest<1>({{"Input", { 2 }}}, {{"Output", { 34 }}});
 }
@@ -225,7 +224,7 @@
     }
 };
 
-BOOST_FIXTURE_TEST_CASE(FullyConnected, FullyConnectedFixture)
+TEST_CASE_FIXTURE(FullyConnectedFixture, "FullyConnected")
 {
     RunTest<1>({{"Input", { 3 }}}, {{"Output", { 7 }}});
 }
@@ -430,7 +429,7 @@
     }
 };
 
-BOOST_FIXTURE_TEST_CASE(MatMulUsedInTwoFc, MatMulUsedInTwoFcFixture)
+TEST_CASE_FIXTURE(MatMulUsedInTwoFcFixture, "MatMulUsedInTwoFc")
 {
     RunTest<1>({{"Input", { 3 }}}, {{"Output", { 23 }}});
 }
@@ -593,9 +592,9 @@
     }
 };
 
-BOOST_FIXTURE_TEST_CASE(MatMulUsedInTwoFcStaggered, MatMulUsedInTwoFcStaggeredFixture)
+TEST_CASE_FIXTURE(MatMulUsedInTwoFcStaggeredFixture, "MatMulUsedInTwoFcStaggered")
 {
     RunTest<1>({{"Input", { 3 }}}, {{"Output", { 13 }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnOnnxParser/test/GetInputsOutputs.cpp b/src/armnnOnnxParser/test/GetInputsOutputs.cpp
index 5bb3095..ac06cba 100644
--- a/src/armnnOnnxParser/test/GetInputsOutputs.cpp
+++ b/src/armnnOnnxParser/test/GetInputsOutputs.cpp
@@ -2,17 +2,16 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
-#include <boost/test/unit_test.hpp>
+
 #include "../OnnxParser.hpp"
 #include  "ParserPrototxtFixture.hpp"
 #include <onnx/onnx.pb.h>
 #include "google/protobuf/stubs/logging.h"
 
-
 using ModelPtr = std::unique_ptr<onnx::ModelProto>;
 
-BOOST_AUTO_TEST_SUITE(OnnxParser)
-
+TEST_SUITE("OnnxParser_GetInputsOutputs")
+{
 struct GetInputsOutputsMainFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
 {
     explicit GetInputsOutputsMainFixture()
@@ -66,21 +65,21 @@
 };
 
 
-BOOST_FIXTURE_TEST_CASE(GetInput, GetInputsOutputsMainFixture)
+TEST_CASE_FIXTURE(GetInputsOutputsMainFixture, "GetInput")
 {
     ModelPtr model = armnnOnnxParser::OnnxParserImpl::LoadModelFromString(m_Prototext.c_str());
     std::vector<std::string> tensors = armnnOnnxParser::OnnxParserImpl::GetInputs(model);
-    BOOST_CHECK_EQUAL(1, tensors.size());
-    BOOST_CHECK_EQUAL("Input", tensors[0]);
+    CHECK_EQ(1, tensors.size());
+    CHECK_EQ("Input", tensors[0]);
 
 }
 
-BOOST_FIXTURE_TEST_CASE(GetOutput, GetInputsOutputsMainFixture)
+TEST_CASE_FIXTURE(GetInputsOutputsMainFixture, "GetOutput")
 {
     ModelPtr model = armnnOnnxParser::OnnxParserImpl::LoadModelFromString(m_Prototext.c_str());
     std::vector<std::string> tensors = armnnOnnxParser::OnnxParserImpl::GetOutputs(model);
-    BOOST_CHECK_EQUAL(1, tensors.size());
-    BOOST_CHECK_EQUAL("Output", tensors[0]);
+    CHECK_EQ(1, tensors.size());
+    CHECK_EQ("Output", tensors[0]);
 }
 
 struct GetEmptyInputsOutputsFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
@@ -137,22 +136,22 @@
     }
 };
 
-BOOST_FIXTURE_TEST_CASE(GetEmptyInputs, GetEmptyInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetEmptyInputsOutputsFixture, "GetEmptyInputs")
 {
     ModelPtr model = armnnOnnxParser::OnnxParserImpl::LoadModelFromString(m_Prototext.c_str());
     std::vector<std::string> tensors = armnnOnnxParser::OnnxParserImpl::GetInputs(model);
-    BOOST_CHECK_EQUAL(0, tensors.size());
+    CHECK_EQ(0, tensors.size());
 }
 
-BOOST_AUTO_TEST_CASE(GetInputsNullModel)
+TEST_CASE("GetInputsNullModel")
 {
-    BOOST_CHECK_THROW(armnnOnnxParser::OnnxParserImpl::LoadModelFromString(""), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(armnnOnnxParser::OnnxParserImpl::LoadModelFromString(""), armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(GetOutputsNullModel)
+TEST_CASE("GetOutputsNullModel")
 {
     auto silencer = google::protobuf::LogSilencer(); //get rid of errors from protobuf
-    BOOST_CHECK_THROW(armnnOnnxParser::OnnxParserImpl::LoadModelFromString("nknnk"), armnn::ParseException);
+    CHECK_THROWS_AS(armnnOnnxParser::OnnxParserImpl::LoadModelFromString("nknnk"), armnn::ParseException);
 }
 
 struct GetInputsMultipleFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
@@ -241,15 +240,13 @@
     }
 };
 
-BOOST_FIXTURE_TEST_CASE(GetInputsMultipleInputs, GetInputsMultipleFixture)
+TEST_CASE_FIXTURE(GetInputsMultipleFixture, "GetInputsMultipleInputs")
 {
     ModelPtr model = armnnOnnxParser::OnnxParserImpl::LoadModelFromString(m_Prototext.c_str());
     std::vector<std::string> tensors = armnnOnnxParser::OnnxParserImpl::GetInputs(model);
-    BOOST_CHECK_EQUAL(2, tensors.size());
-    BOOST_CHECK_EQUAL("Input0", tensors[0]);
-    BOOST_CHECK_EQUAL("Input1", tensors[1]);
+    CHECK_EQ(2, tensors.size());
+    CHECK_EQ("Input0", tensors[0]);
+    CHECK_EQ("Input1", tensors[1]);
 }
 
-
-
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnOnnxParser/test/Pooling.cpp b/src/armnnOnnxParser/test/Pooling.cpp
index 85bf914..73d113d 100644
--- a/src/armnnOnnxParser/test/Pooling.cpp
+++ b/src/armnnOnnxParser/test/Pooling.cpp
@@ -3,12 +3,11 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "armnnOnnxParser/IOnnxParser.hpp"
 #include  "ParserPrototxtFixture.hpp"
 
-BOOST_AUTO_TEST_SUITE(OnnxParser)
-
+TEST_SUITE("OnnxParser_Pooling")
+{
 struct PoolingMainFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
 {
     PoolingMainFixture(const std::string& dataType, const std::string& op)
@@ -110,7 +109,7 @@
     MaxPoolInvalidFixture() : PoolingMainFixture("10", "\"MaxPool\"") { }
 };
 
-BOOST_FIXTURE_TEST_CASE(ValidMaxPoolTest, MaxPoolValidFixture)
+TEST_CASE_FIXTURE(MaxPoolValidFixture, "ValidMaxPoolTest")
 {
     RunTest<4>({{"Input", {1.0f, 2.0f, 3.0f, -4.0f}}}, {{"Output", {3.0f}}});
 }
@@ -217,12 +216,12 @@
     }
 };
 
-BOOST_FIXTURE_TEST_CASE(AveragePoolValid, AvgPoolValidFixture)
+TEST_CASE_FIXTURE(AvgPoolValidFixture, "AveragePoolValid")
 {
     RunTest<4>({{"Input", {1.0f, 2.0f, 3.0f, -4.0f}}}, {{"Output", {0.5}}});
 }
 
-BOOST_FIXTURE_TEST_CASE(ValidAvgWithPadTest, PoolingWithPadFixture)
+TEST_CASE_FIXTURE(PoolingWithPadFixture, "ValidAvgWithPadTest")
 {
     RunTest<4>({{"Input", {1.0f, 2.0f, 3.0f, -4.0f}}}, {{"Output", {1.0/8.0}}});
 }
@@ -297,14 +296,14 @@
     }
 };
 
-BOOST_FIXTURE_TEST_CASE(GlobalAvgTest, GlobalAvgFixture)
+TEST_CASE_FIXTURE(GlobalAvgFixture, "GlobalAvgTest")
 {
     RunTest<4>({{"Input", {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0}}}, {{"Output", {10/4.0, 26/4.0}}});
 }
 
-BOOST_FIXTURE_TEST_CASE(IncorrectDataTypeMaxPool, MaxPoolInvalidFixture)
+TEST_CASE_FIXTURE(MaxPoolInvalidFixture, "IncorrectDataTypeMaxPool")
 {
-   BOOST_CHECK_THROW(Setup(), armnn::ParseException);
+   CHECK_THROWS_AS(Setup(), armnn::ParseException);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnOnnxParser/test/ProtoxtFixture.cpp b/src/armnnOnnxParser/test/ProtoxtFixture.cpp
index 917f6f0..067b440 100644
--- a/src/armnnOnnxParser/test/ProtoxtFixture.cpp
+++ b/src/armnnOnnxParser/test/ProtoxtFixture.cpp
@@ -3,12 +3,11 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include  "armnnOnnxParser/IOnnxParser.hpp"
 #include  "ParserPrototxtFixture.hpp"
 
-BOOST_AUTO_TEST_SUITE(OnnxParser)
-
+TEST_SUITE("OnnxParser_PrototxtFixture")
+{
 struct ProtoxtTestFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
 {
     ProtoxtTestFixture()
@@ -65,17 +64,17 @@
 };
 
 
-BOOST_FIXTURE_TEST_CASE(ProtoxtTest, ProtoxtTestFixture)
+TEST_CASE_FIXTURE(ProtoxtTestFixture, "ProtoxtTest")
 {
     //TODO : add a test to check if the inputs and outputs are correctly inferred.
 }
 
-BOOST_FIXTURE_TEST_CASE(ProtoxtTestWithBadInputs, ProtoxtTestFixture)
+TEST_CASE_FIXTURE(ProtoxtTestFixture, "ProtoxtTestWithBadInputs")
 {
 
-   // BOOST_CHECK_THROW(RunTest<4>({{ "InexistantInput" , {0.0, 1.0, 2.0, 3.0}}},
+   // CHECK_THROWS_AS(RunTest<4>({{ "InexistantInput" , {0.0, 1.0, 2.0, 3.0}}},
    //                              {{ "InexistantOutput" , {0.0, 1.0, 2.0, 3.0}}}),
    //                   armnn::InvalidArgumentException );
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnOnnxParser/test/Relu.cpp b/src/armnnOnnxParser/test/Relu.cpp
index a7f3bf3..031872e 100644
--- a/src/armnnOnnxParser/test/Relu.cpp
+++ b/src/armnnOnnxParser/test/Relu.cpp
@@ -3,12 +3,11 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "armnnOnnxParser/IOnnxParser.hpp"
 #include  "ParserPrototxtFixture.hpp"
 
-BOOST_AUTO_TEST_SUITE(OnnxParser)
-
+TEST_SUITE("OnnxParser_Relu")
+{
 struct ReluMainFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
 {
     ReluMainFixture()
@@ -61,10 +60,10 @@
     }
 };
 
-BOOST_FIXTURE_TEST_CASE(ValidReluTest, ReluMainFixture)
+TEST_CASE_FIXTURE(ReluMainFixture, "ValidReluTest")
 {
     RunTest<1>({{"Input",  { -1.0f, -0.5f, 1.25f, -3.0f}}},
                {{ "Output", { 0.0f, 0.0f, 1.25f, 0.0f}}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnOnnxParser/test/Reshape.cpp b/src/armnnOnnxParser/test/Reshape.cpp
index 119a406..e9bcd27 100644
--- a/src/armnnOnnxParser/test/Reshape.cpp
+++ b/src/armnnOnnxParser/test/Reshape.cpp
@@ -3,12 +3,11 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "armnnOnnxParser/IOnnxParser.hpp"
 #include  "ParserPrototxtFixture.hpp"
 
-BOOST_AUTO_TEST_SUITE(OnnxParser)
-
+TEST_SUITE("OnnxParser_Reshape")
+{
 struct ReshapeMainFixture : public armnnUtils::ParserPrototxtFixture<armnnOnnxParser::IOnnxParser>
 {
     ReshapeMainFixture(const std::string& dataType)
@@ -189,12 +188,12 @@
     ReshapeInvalidFixture() : ReshapeMainFixture("10") { }
 };
 
-BOOST_FIXTURE_TEST_CASE(ValidReshapeTest, ReshapeValidFixture)
+TEST_CASE_FIXTURE(ReshapeValidFixture, "ValidReshapeTest")
 {
     RunTest<2>({{"Input", { 0.0f, 1.0f, 2.0f, 3.0f }}}, {{"Output", { 0.0f, 1.0f, 2.0f, 3.0f }}});
 }
 
-BOOST_FIXTURE_TEST_CASE(ValidRank4ReshapeTest, ReshapeValidRank4Fixture)
+TEST_CASE_FIXTURE(ReshapeValidRank4Fixture, "ValidRank4ReshapeTest")
 {
     RunTest<2>(
         {{"Input",
@@ -207,9 +206,9 @@
                      1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f, 1.0f, 2.0f, 3.0f, 4.0f}}});
 }
 
-BOOST_FIXTURE_TEST_CASE(IncorrectDataTypeReshape, ReshapeInvalidFixture)
+TEST_CASE_FIXTURE(ReshapeInvalidFixture, "IncorrectDataTypeReshape")
 {
-   BOOST_CHECK_THROW(Setup(), armnn::ParseException);
+   CHECK_THROWS_AS(Setup(), armnn::ParseException);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnSerializer/test/ActivationSerializationTests.cpp b/src/armnnSerializer/test/ActivationSerializationTests.cpp
index fbe1ae0..fb99e0b 100644
--- a/src/armnnSerializer/test/ActivationSerializationTests.cpp
+++ b/src/armnnSerializer/test/ActivationSerializationTests.cpp
@@ -11,12 +11,12 @@
 #include <armnnDeserializer/IDeserializer.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <sstream>
 
-BOOST_AUTO_TEST_SUITE(SerializerTests)
-
+TEST_SUITE("SerializerTests")
+{
 class VerifyActivationName : public armnn::IStrategy
 {
 public:
@@ -29,12 +29,12 @@
         IgnoreUnused(layer, descriptor, constants, id);
         if (layer->GetType() == armnn::LayerType::Activation)
         {
-            BOOST_TEST(name == "activation");
+            CHECK(std::string(name) == "activation");
         }
     }
 };
 
-BOOST_AUTO_TEST_CASE(ActivationSerialization)
+TEST_CASE("ActivationSerialization")
 {
     armnnDeserializer::IDeserializerPtr parser = armnnDeserializer::IDeserializer::Create();
 
@@ -97,8 +97,7 @@
         {0, armnn::Tensor(run->GetOutputTensorInfo(networkIdentifier, 0), outputData.data())}
     };
     run->EnqueueWorkload(networkIdentifier, inputTensors, outputTensors);
-    BOOST_CHECK_EQUAL_COLLECTIONS(outputData.begin(), outputData.end(),
-    expectedOutputData.begin(), expectedOutputData.end());
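+    // No collection-comparison macro in doctest; std::equal checks the two ranges element by element.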
+    CHECK(std::equal(outputData.begin(), outputData.end(), expectedOutputData.begin(), expectedOutputData.end()));
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnSerializer/test/ComparisonSerializationTests.cpp b/src/armnnSerializer/test/ComparisonSerializationTests.cpp
index 3aee9a7..88778b3 100644
--- a/src/armnnSerializer/test/ComparisonSerializationTests.cpp
+++ b/src/armnnSerializer/test/ComparisonSerializationTests.cpp
@@ -12,11 +12,10 @@
 #include <armnnDeserializer/IDeserializer.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-
-BOOST_AUTO_TEST_SUITE(SerializerTests)
-
+TEST_SUITE("SerializerTests")
+{
 struct ComparisonModel
 {
     ComparisonModel(const std::string& layerName,
@@ -68,7 +67,7 @@
                 VerifyNameAndConnections(layer, name);
                 const armnn::ComparisonDescriptor& layerDescriptor =
                         static_cast<const armnn::ComparisonDescriptor&>(descriptor);
-                BOOST_CHECK(layerDescriptor.m_Operation == m_Descriptor.m_Operation);
+                CHECK(layerDescriptor.m_Operation == m_Descriptor.m_Operation);
                 break;
             }
             default:
@@ -82,7 +81,7 @@
     armnn::ComparisonDescriptor m_Descriptor;
 };
 
-BOOST_AUTO_TEST_CASE(SerializeEqual)
+TEST_CASE("SerializeEqual")
 {
     const std::string layerName("equal");
 
@@ -95,13 +94,13 @@
     ComparisonModel model(layerName, inputInfo, outputInfo, descriptor);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*model.m_network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     ComparisonLayerVerifier verifier(layerName, { inputInfo, inputInfo }, { outputInfo }, descriptor);
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeGreater)
+TEST_CASE("SerializeGreater")
 {
     const std::string layerName("greater");
 
@@ -114,10 +113,10 @@
     ComparisonModel model(layerName, inputInfo, outputInfo, descriptor);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*model.m_network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     ComparisonLayerVerifier verifier(layerName, { inputInfo, inputInfo }, { outputInfo }, descriptor);
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnSerializer/test/LstmSerializationTests.cpp b/src/armnnSerializer/test/LstmSerializationTests.cpp
index 4705c0b..c2bc873 100644
--- a/src/armnnSerializer/test/LstmSerializationTests.cpp
+++ b/src/armnnSerializer/test/LstmSerializationTests.cpp
@@ -14,13 +14,12 @@
 #include <armnn/LstmParams.hpp>
 #include <armnn/QuantizedLstmParams.hpp>
 
-#include <boost/test/unit_test.hpp>
-
+#include <doctest/doctest.h>
 #include <fmt/format.h>
 
 
-BOOST_AUTO_TEST_SUITE(SerializerTests)
-
+TEST_SUITE("SerializerTests")
+{
 template<typename Descriptor>
 armnn::LstmInputParams ConstantVector2LstmInputParams(const std::vector<armnn::ConstTensor>& constants,
                                                       Descriptor& descriptor)
@@ -175,7 +174,7 @@
     armnn::LstmInputParams m_InputParams;
 };
 
-BOOST_AUTO_TEST_CASE(SerializeDeserializeLstmCifgPeepholeNoProjection)
+TEST_CASE("SerializeDeserializeLstmCifgPeepholeNoProjection")
 {
     armnn::LstmDescriptor descriptor;
     descriptor.m_ActivationFunc = 4;
@@ -278,7 +277,7 @@
     lstmLayer->GetOutputSlot(3).SetTensorInfo(outputStateTensorInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     VerifyLstmLayer<armnn::LstmDescriptor> checker(
         layerName,
@@ -289,7 +288,7 @@
     deserializedNetwork->ExecuteStrategy(checker);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeDeserializeLstmNoCifgWithPeepholeAndProjection)
+TEST_CASE("SerializeDeserializeLstmNoCifgWithPeepholeAndProjection")
 {
     armnn::LstmDescriptor descriptor;
     descriptor.m_ActivationFunc = 4;
@@ -424,7 +423,7 @@
     lstmLayer->GetOutputSlot(3).SetTensorInfo(outputStateTensorInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     VerifyLstmLayer<armnn::LstmDescriptor> checker(
         layerName,
@@ -435,7 +434,7 @@
     deserializedNetwork->ExecuteStrategy(checker);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeDeserializeLstmNoCifgWithPeepholeWithProjectionWithLayerNorm)
+TEST_CASE("SerializeDeserializeLstmNoCifgWithPeepholeWithProjectionWithLayerNorm")
 {
     armnn::LstmDescriptor descriptor;
     descriptor.m_ActivationFunc = 4;
@@ -589,7 +588,7 @@
     lstmLayer->GetOutputSlot(3).SetTensorInfo(outputStateTensorInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     VerifyLstmLayer<armnn::LstmDescriptor> checker(
             layerName,
@@ -600,7 +599,7 @@
     deserializedNetwork->ExecuteStrategy(checker);
 }
 
-BOOST_AUTO_TEST_CASE(EnsureLstmLayersBackwardCompatibility)
+TEST_CASE("EnsureLstmLayersBackwardCompatibility")
 {
     // The hex data below is a flat buffer containing a lstm layer with no Cifg, with peephole and projection
     // enabled. That data was obtained before additional layer normalization parameters where added to the
@@ -1220,7 +1219,7 @@
         DeserializeNetwork(std::string(lstmNoCifgWithPeepholeAndProjectionModel.begin(),
                                        lstmNoCifgWithPeepholeAndProjectionModel.end()));
 
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     // generating the same model parameters which where used to serialize the model (Layer norm is not specified)
     armnn::LstmDescriptor descriptor;
@@ -1428,7 +1427,7 @@
     armnn::QuantizedLstmInputParams m_InputParams;
 };
 
-BOOST_AUTO_TEST_CASE(SerializeDeserializeQuantizedLstm)
+TEST_CASE("SerializeDeserializeQuantizedLstm")
 {
     const uint32_t batchSize = 1;
     const uint32_t inputSize = 2;
@@ -1600,7 +1599,7 @@
     quantizedLstmLayer->GetOutputSlot(1).SetTensorInfo(outputStateTensorInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     VerifyQuantizedLstmLayer checker(layerName,
                                      {inputTensorInfo, cellStateTensorInfo, outputStateTensorInfo},
@@ -1610,7 +1609,7 @@
     deserializedNetwork->ExecuteStrategy(checker);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeDeserializeQLstmBasic)
+TEST_CASE("SerializeDeserializeQLstmBasic")
 {
     armnn::QLstmDescriptor descriptor;
 
@@ -1755,7 +1754,7 @@
     qLstmLayer->GetOutputSlot(2).SetTensorInfo(outputStateInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     VerifyLstmLayer<armnn::QLstmDescriptor> checker(
             layerName,
@@ -1767,7 +1766,7 @@
     deserializedNetwork->ExecuteStrategy(checker);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeDeserializeQLstmCifgLayerNorm)
+TEST_CASE("SerializeDeserializeQLstmCifgLayerNorm")
 {
     armnn::QLstmDescriptor descriptor;
 
@@ -1944,7 +1943,7 @@
     qLstmLayer->GetOutputSlot(2).SetTensorInfo(outputStateInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     VerifyLstmLayer<armnn::QLstmDescriptor> checker(layerName,
                                                     {inputInfo, cellStateInfo, outputStateInfo},
@@ -1955,7 +1954,7 @@
     deserializedNetwork->ExecuteStrategy(checker);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeDeserializeQLstmAdvanced)
+TEST_CASE("SerializeDeserializeQLstmAdvanced")
 {
     armnn::QLstmDescriptor descriptor;
 
@@ -2185,7 +2184,7 @@
     qLstmLayer->GetOutputSlot(2).SetTensorInfo(outputStateInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     VerifyLstmLayer<armnn::QLstmDescriptor> checker(layerName,
                                                     {inputInfo, cellStateInfo, outputStateInfo},
@@ -2196,4 +2195,4 @@
     deserializedNetwork->ExecuteStrategy(checker);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnSerializer/test/SerializerTestUtils.cpp b/src/armnnSerializer/test/SerializerTestUtils.cpp
index 586d2a0..caa1157 100644
--- a/src/armnnSerializer/test/SerializerTestUtils.cpp
+++ b/src/armnnSerializer/test/SerializerTestUtils.cpp
@@ -6,6 +6,8 @@
 #include "SerializerTestUtils.hpp"
 #include "../Serializer.hpp"
 
+#include <doctest/doctest.h>
+
 using armnnDeserializer::IDeserializer;
 
 LayerVerifierBase::LayerVerifierBase(const std::string& layerName,
@@ -37,34 +39,33 @@
 
 void LayerVerifierBase::VerifyNameAndConnections(const armnn::IConnectableLayer* layer, const char* name)
 {
-    BOOST_TEST(name == m_LayerName.c_str());
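+    // Compare via std::string so the assertion is by value rather than pointer identity.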
+    CHECK(std::string(name) == m_LayerName.c_str());
 
-    BOOST_TEST(layer->GetNumInputSlots() == m_InputTensorInfos.size());
-    BOOST_TEST(layer->GetNumOutputSlots() == m_OutputTensorInfos.size());
+    CHECK(layer->GetNumInputSlots() == m_InputTensorInfos.size());
+    CHECK(layer->GetNumOutputSlots() == m_OutputTensorInfos.size());
 
     for (unsigned int i = 0; i < m_InputTensorInfos.size(); i++)
     {
         const armnn::IOutputSlot* connectedOutput = layer->GetInputSlot(i).GetConnection();
-        BOOST_CHECK(connectedOutput);
+        CHECK(connectedOutput);
 
         const armnn::TensorInfo& connectedInfo = connectedOutput->GetTensorInfo();
-        BOOST_TEST(connectedInfo.GetShape() == m_InputTensorInfos[i].GetShape());
-        BOOST_TEST(
+        CHECK(connectedInfo.GetShape() == m_InputTensorInfos[i].GetShape());
+        CHECK(
             GetDataTypeName(connectedInfo.GetDataType()) == GetDataTypeName(m_InputTensorInfos[i].GetDataType()));
 
-        BOOST_TEST(connectedInfo.GetQuantizationScale() == m_InputTensorInfos[i].GetQuantizationScale());
-        BOOST_TEST(connectedInfo.GetQuantizationOffset() == m_InputTensorInfos[i].GetQuantizationOffset());
+        CHECK(connectedInfo.GetQuantizationScale() == m_InputTensorInfos[i].GetQuantizationScale());
+        CHECK(connectedInfo.GetQuantizationOffset() == m_InputTensorInfos[i].GetQuantizationOffset());
     }
 
     for (unsigned int i = 0; i < m_OutputTensorInfos.size(); i++)
     {
         const armnn::TensorInfo& outputInfo = layer->GetOutputSlot(i).GetTensorInfo();
-        BOOST_TEST(outputInfo.GetShape() == m_OutputTensorInfos[i].GetShape());
-        BOOST_TEST(
-            GetDataTypeName(outputInfo.GetDataType()) == GetDataTypeName(m_OutputTensorInfos[i].GetDataType()));
+        CHECK(outputInfo.GetShape() == m_OutputTensorInfos[i].GetShape());
+        CHECK(GetDataTypeName(outputInfo.GetDataType()) == GetDataTypeName(m_OutputTensorInfos[i].GetDataType()));
 
-        BOOST_TEST(outputInfo.GetQuantizationScale() == m_OutputTensorInfos[i].GetQuantizationScale());
-        BOOST_TEST(outputInfo.GetQuantizationOffset() == m_OutputTensorInfos[i].GetQuantizationOffset());
+        CHECK(outputInfo.GetQuantizationScale() == m_OutputTensorInfos[i].GetQuantizationScale());
+        CHECK(outputInfo.GetQuantizationOffset() == m_OutputTensorInfos[i].GetQuantizationOffset());
     }
 }
 
@@ -74,23 +75,23 @@
 {
     if (expectedPtr == nullptr)
     {
-        BOOST_CHECK_MESSAGE(actualPtr == nullptr, tensorName + " should not exist");
+        CHECK_MESSAGE(actualPtr == nullptr, tensorName + " should not exist");
     }
     else
     {
-        BOOST_CHECK_MESSAGE(actualPtr != nullptr, tensorName + " should have been set");
+        CHECK_MESSAGE(actualPtr != nullptr, tensorName + " should have been set");
         if (actualPtr != nullptr)
         {
             const armnn::TensorInfo& expectedInfo = expectedPtr->GetInfo();
             const armnn::TensorInfo& actualInfo = actualPtr->GetInfo();
 
-            BOOST_CHECK_MESSAGE(expectedInfo.GetShape() == actualInfo.GetShape(),
+            CHECK_MESSAGE(expectedInfo.GetShape() == actualInfo.GetShape(),
                                 tensorName + " shapes don't match");
-            BOOST_CHECK_MESSAGE(
+            CHECK_MESSAGE(
                     GetDataTypeName(expectedInfo.GetDataType()) == GetDataTypeName(actualInfo.GetDataType()),
                     tensorName + " data types don't match");
 
-            BOOST_CHECK_MESSAGE(expectedPtr->GetNumBytes() == actualPtr->GetNumBytes(),
+            CHECK_MESSAGE(expectedPtr->GetNumBytes() == actualPtr->GetNumBytes(),
                                 tensorName + " (GetNumBytes) data sizes do not match");
             if (expectedPtr->GetNumBytes() == actualPtr->GetNumBytes())
             {
@@ -106,7 +107,7 @@
                         break;
                     }
                 }
-                BOOST_CHECK_MESSAGE(same, tensorName + " data does not match");
+                CHECK_MESSAGE(same, tensorName + " data does not match");
             }
         }
     }
@@ -114,8 +115,8 @@
 
 void CompareConstTensor(const armnn::ConstTensor& tensor1, const armnn::ConstTensor& tensor2)
 {
-    BOOST_TEST(tensor1.GetShape() == tensor2.GetShape());
-    BOOST_TEST(GetDataTypeName(tensor1.GetDataType()) == GetDataTypeName(tensor2.GetDataType()));
+    CHECK(tensor1.GetShape() == tensor2.GetShape());
+    CHECK(GetDataTypeName(tensor1.GetDataType()) == GetDataTypeName(tensor2.GetDataType()));
 
     switch (tensor1.GetDataType())
     {
@@ -138,8 +139,8 @@
             break;
         default:
             // Note that Float16 is not yet implemented
-            BOOST_TEST_MESSAGE("Unexpected datatype");
-            BOOST_TEST(false);
+            MESSAGE("Unexpected datatype");
+            CHECK(false);
     }
 }
 
diff --git a/src/armnnSerializer/test/SerializerTestUtils.hpp b/src/armnnSerializer/test/SerializerTestUtils.hpp
index e085d2e..c6f148b 100644
--- a/src/armnnSerializer/test/SerializerTestUtils.hpp
+++ b/src/armnnSerializer/test/SerializerTestUtils.hpp
@@ -12,8 +12,8 @@
 #include <random>
 #include <vector>
 
-#include <boost/test/unit_test.hpp>
-
+#include <cstdlib>
+#include <doctest/doctest.h>
 
 armnn::INetworkPtr DeserializeNetwork(const std::string& serializerString);
 
@@ -82,7 +82,7 @@
 protected:
     void VerifyDescriptor(const Descriptor& descriptor)
     {
-        BOOST_CHECK(descriptor == m_Descriptor);
+        CHECK(descriptor == m_Descriptor);
     }
 
     Descriptor m_Descriptor;
@@ -93,12 +93,12 @@
 {
     T typedData1 = static_cast<T>(data1);
     T typedData2 = static_cast<T>(data2);
-    BOOST_CHECK(typedData1);
-    BOOST_CHECK(typedData2);
+    CHECK(typedData1);
+    CHECK(typedData2);
 
     for (unsigned int i = 0; i < numElements; i++)
     {
-        BOOST_TEST(typedData1[i] == typedData2[i]);
+        CHECK(typedData1[i] == typedData2[i]);
     }
 }
 
@@ -161,7 +161,7 @@
     static std::default_random_engine generator;
 
     std::vector<DataType> randomData(size);
-    std::generate(randomData.begin(), randomData.end(), []() { return distribution(generator); });
+    std::generate(randomData.begin(), randomData.end(), []() { return distribution(generator); });
 
     return randomData;
 }
\ No newline at end of file
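For reference, the hunks above follow a uniform macro mapping: BOOST_AUTO_TEST_SUITE(X) ... BOOST_AUTO_TEST_SUITE_END() becomes a braced TEST_SUITE("X") { ... } block, BOOST_AUTO_TEST_CASE(Name) becomes TEST_CASE("Name"), and BOOST_TEST / BOOST_CHECK / BOOST_CHECK_MESSAGE / BOOST_TEST_MESSAGE map to CHECK / CHECK / CHECK_MESSAGE / MESSAGE. A minimal standalone sketch of that pattern follows; it is illustrative only (the suite name, case name, and values are placeholders, not ArmNN code) and assumes nothing beyond the doctest single header.

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN   // only needed to build this sketch on its own
#include <doctest/doctest.h>

#include <string>

TEST_SUITE("MigrationSketch")                // was BOOST_AUTO_TEST_SUITE(MigrationSketch)
{

TEST_CASE("BasicChecks")                     // was BOOST_AUTO_TEST_CASE(BasicChecks)
{
    const std::string layerName("example");
    CHECK(layerName == "example");                                    // was BOOST_TEST / BOOST_CHECK
    CHECK_MESSAGE(layerName.size() == 7u, "unexpected name length");  // was BOOST_CHECK_MESSAGE
    MESSAGE("informational output only");                             // was BOOST_TEST_MESSAGE
}

}                                            // closing brace replaces BOOST_AUTO_TEST_SUITE_END()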
diff --git a/src/armnnSerializer/test/SerializerTests.cpp b/src/armnnSerializer/test/SerializerTests.cpp
index 3d6dd58..4cb1a81 100644
--- a/src/armnnSerializer/test/SerializerTests.cpp
+++ b/src/armnnSerializer/test/SerializerTests.cpp
@@ -17,13 +17,13 @@
 #include <random>
 #include <vector>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 using armnnDeserializer::IDeserializer;
 
-BOOST_AUTO_TEST_SUITE(SerializerTests)
-
-BOOST_AUTO_TEST_CASE(SerializeAbs)
+TEST_SUITE("SerializerTests")
+{
+TEST_CASE("SerializeAbs")
 {
     const std::string layerName("abs");
     const armnn::TensorInfo tensorInfo({1, 2, 3}, armnn::DataType::Float32);
@@ -43,13 +43,13 @@
     absLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBase verifier(layerName, {tensorInfo}, {tensorInfo});
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeAddition)
+TEST_CASE("SerializeAddition")
 {
     const std::string layerName("addition");
     const armnn::TensorInfo tensorInfo({1, 2, 3}, armnn::DataType::Float32);
@@ -70,7 +70,7 @@
 
     std::string serializedNetwork = SerializeNetwork(*network);
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(serializedNetwork);
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBase verifier(layerName, {tensorInfo, tensorInfo}, {tensorInfo});
     deserializedNetwork->ExecuteStrategy(verifier);
@@ -98,7 +98,7 @@
     argMinMaxLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBaseWithDescriptor<armnn::ArgMinMaxDescriptor> verifier(layerName,
                                                                          {inputInfo},
@@ -107,17 +107,17 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeArgMinMaxSigned32)
+TEST_CASE("SerializeArgMinMaxSigned32")
 {
     SerializeArgMinMaxTest(armnn::DataType::Signed32);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeArgMinMaxSigned64)
+TEST_CASE("SerializeArgMinMaxSigned64")
 {
     SerializeArgMinMaxTest(armnn::DataType::Signed64);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeBatchNormalization)
+TEST_CASE("SerializeBatchNormalization")
 {
     const std::string layerName("batchNormalization");
     const armnn::TensorInfo inputInfo ({ 1, 3, 3, 1 }, armnn::DataType::Float32);
@@ -161,14 +161,14 @@
     batchNormalizationLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBaseWithDescriptorAndConstants<armnn::BatchNormalizationDescriptor> verifier(
         layerName, {inputInfo}, {outputInfo}, descriptor, constants);
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeBatchToSpaceNd)
+TEST_CASE("SerializeBatchToSpaceNd")
 {
     const std::string layerName("spaceToBatchNd");
     const armnn::TensorInfo inputInfo({4, 1, 2, 2}, armnn::DataType::Float32);
@@ -191,7 +191,7 @@
     batchToSpaceNdLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBaseWithDescriptor<armnn::BatchToSpaceNdDescriptor> verifier(layerName,
                                                                               {inputInfo},
@@ -200,7 +200,7 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeCast)
+TEST_CASE("SerializeCast")
 {
         const std::string layerName("cast");
 
@@ -221,13 +221,13 @@
         castLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
         armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-        BOOST_CHECK(deserializedNetwork);
+        CHECK(deserializedNetwork);
 
         LayerVerifierBase verifier(layerName, {inputInfo}, {outputInfo});
         deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeComparison)
+TEST_CASE("SerializeComparison")
 {
     const std::string layerName("comparison");
 
@@ -253,7 +253,7 @@
     comparisonLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBaseWithDescriptor<armnn::ComparisonDescriptor> verifier(layerName,
                                                                           { inputInfo, inputInfo },
@@ -262,7 +262,7 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeConstant)
+TEST_CASE("SerializeConstant")
 {
     class ConstantLayerVerifier : public LayerVerifierBase
     {
@@ -324,13 +324,13 @@
     add->GetOutputSlot(0).SetTensorInfo(info);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     ConstantLayerVerifier verifier(layerName, {}, {info}, {constTensor});
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeConvolution2d)
+TEST_CASE("SerializeConvolution2d")
 {
     const std::string layerName("convolution2d");
     const armnn::TensorInfo inputInfo ({ 1, 5, 5, 1 }, armnn::DataType::Float32);
@@ -373,7 +373,7 @@
     convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     const std::vector<armnn::ConstTensor>& constants {weights, biases};
     LayerVerifierBaseWithDescriptorAndConstants<armnn::Convolution2dDescriptor> verifier(
@@ -381,7 +381,7 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeConvolution2dWithPerAxisParams)
+TEST_CASE("SerializeConvolution2dWithPerAxisParams")
 {
     using namespace armnn;
 
@@ -428,7 +428,7 @@
     convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     const std::vector<armnn::ConstTensor>& constants {weights, biases};
     LayerVerifierBaseWithDescriptorAndConstants<Convolution2dDescriptor> verifier(
@@ -436,7 +436,7 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeDepthToSpace)
+TEST_CASE("SerializeDepthToSpace")
 {
     const std::string layerName("depthToSpace");
 
@@ -459,13 +459,13 @@
     depthToSpaceLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBaseWithDescriptor<armnn::DepthToSpaceDescriptor> verifier(layerName, {inputInfo}, {outputInfo}, desc);
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeDepthwiseConvolution2d)
+TEST_CASE("SerializeDepthwiseConvolution2d")
 {
     const std::string layerName("depwiseConvolution2d");
     const armnn::TensorInfo inputInfo ({ 1, 5, 5, 3 }, armnn::DataType::Float32);
@@ -508,7 +508,7 @@
     depthwiseConvLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     const std::vector<armnn::ConstTensor>& constants {weights, biases};
     LayerVerifierBaseWithDescriptorAndConstants<armnn::DepthwiseConvolution2dDescriptor> verifier(
@@ -516,7 +516,7 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeDepthwiseConvolution2dWithPerAxisParams)
+TEST_CASE("SerializeDepthwiseConvolution2dWithPerAxisParams")
 {
     using namespace armnn;
 
@@ -565,7 +565,7 @@
     depthwiseConvLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     const std::vector<armnn::ConstTensor>& constants {weights, biases};
     LayerVerifierBaseWithDescriptorAndConstants<armnn::DepthwiseConvolution2dDescriptor> verifier(
@@ -573,7 +573,7 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeDequantize)
+TEST_CASE("SerializeDequantize")
 {
     const std::string layerName("dequantize");
     const armnn::TensorInfo inputInfo({ 1, 5, 2, 3 }, armnn::DataType::QAsymmU8, 0.5f, 1);
@@ -591,13 +591,13 @@
     dequantizeLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBase verifier(layerName, {inputInfo}, {outputInfo});
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeDeserializeDetectionPostProcess)
+TEST_CASE("SerializeDeserializeDetectionPostProcess")
 {
     const std::string layerName("detectionPostProcess");
 
@@ -656,7 +656,7 @@
     }
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     const std::vector<armnn::ConstTensor>& constants {anchors};
     LayerVerifierBaseWithDescriptorAndConstants<armnn::DetectionPostProcessDescriptor> verifier(
@@ -664,7 +664,7 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeDivision)
+TEST_CASE("SerializeDivision")
 {
     const std::string layerName("division");
     const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float32);
@@ -684,13 +684,13 @@
     divisionLayer->GetOutputSlot(0).SetTensorInfo(info);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBase verifier(layerName, {info, info}, {info});
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeDeserializeEqual)
+TEST_CASE("SerializeDeserializeEqual")
 {
     const std::string layerName("EqualLayer");
     const armnn::TensorInfo inputTensorInfo1 = armnn::TensorInfo({2, 1, 2, 4}, armnn::DataType::Float32);
@@ -713,13 +713,13 @@
     equalLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBase verifier(layerName, {inputTensorInfo1, inputTensorInfo2}, {outputTensorInfo});
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeFill)
+TEST_CASE("SerializeFill")
 {
     const std::string layerName("fill");
     const armnn::TensorInfo inputInfo({4}, armnn::DataType::Signed32);
@@ -739,14 +739,14 @@
     fillLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBaseWithDescriptor<armnn::FillDescriptor> verifier(layerName, {inputInfo}, {outputInfo}, descriptor);
 
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeFloor)
+TEST_CASE("SerializeFloor")
 {
     const std::string layerName("floor");
     const armnn::TensorInfo info({4,4}, armnn::DataType::Float32);
@@ -763,13 +763,13 @@
     floorLayer->GetOutputSlot(0).SetTensorInfo(info);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBase verifier(layerName, {info}, {info});
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeFullyConnected)
+TEST_CASE("SerializeFullyConnected")
 {
     const std::string layerName("fullyConnected");
     const armnn::TensorInfo inputInfo ({ 2, 5, 1, 1 }, armnn::DataType::Float32);
@@ -803,7 +803,7 @@
     fullyConnectedLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     const std::vector<armnn::ConstTensor> constants {weights, biases};
     LayerVerifierBaseWithDescriptorAndConstants<armnn::FullyConnectedDescriptor> verifier(
@@ -811,7 +811,7 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeFullyConnectedWeightsAsInputs)
+TEST_CASE("SerializeFullyConnectedWeightsAsInputs")
 {
     const std::string layerName("fullyConnected_weights_as_inputs");
     const armnn::TensorInfo inputInfo ({ 2, 5, 1, 1 }, armnn::DataType::Float32);
@@ -850,7 +850,7 @@
     fullyConnectedLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     const std::vector<armnn::ConstTensor> constants {};
     LayerVerifierBaseWithDescriptorAndConstants<armnn::FullyConnectedDescriptor> verifier(
@@ -858,7 +858,7 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeGather)
+TEST_CASE("SerializeGather")
 {
     using GatherDescriptor = armnn::GatherDescriptor;
     class GatherLayerVerifier : public LayerVerifierBaseWithDescriptor<GatherDescriptor>
@@ -886,7 +886,7 @@
                 {
                     VerifyNameAndConnections(layer, name);
                     const GatherDescriptor& layerDescriptor = static_cast<const GatherDescriptor&>(descriptor);
-                    BOOST_CHECK(layerDescriptor.m_Axis == m_Descriptor.m_Axis);
+                    CHECK(layerDescriptor.m_Axis == m_Descriptor.m_Axis);
                 }
             }
         }
@@ -922,7 +922,7 @@
     gatherLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     GatherLayerVerifier verifier(layerName, {paramsInfo, indicesInfo}, {outputInfo}, descriptor);
     deserializedNetwork->ExecuteStrategy(verifier);
@@ -932,7 +932,7 @@
 // NOTE: Until the deprecated AddGreaterLayer disappears this test checks that calling
 //       AddGreaterLayer places a ComparisonLayer into the serialized format and that
 //       when this deserialises we have a ComparisonLayer
-BOOST_AUTO_TEST_CASE(SerializeGreaterDeprecated)
+TEST_CASE("SerializeGreaterDeprecated")
 {
     const std::string layerName("greater");
 
@@ -958,14 +958,14 @@
     equalLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBase verifier(layerName, { inputInfo, inputInfo }, { outputInfo });
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
 
-BOOST_AUTO_TEST_CASE(SerializeInstanceNormalization)
+TEST_CASE("SerializeInstanceNormalization")
 {
     const std::string layerName("instanceNormalization");
     const armnn::TensorInfo info({ 1, 2, 1, 5 }, armnn::DataType::Float32);
@@ -989,14 +989,14 @@
     instanceNormLayer->GetOutputSlot(0).SetTensorInfo(info);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBaseWithDescriptor<armnn::InstanceNormalizationDescriptor> verifier(
             layerName, {info}, {info}, descriptor);
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeL2Normalization)
+TEST_CASE("SerializeL2Normalization")
 {
     const std::string l2NormLayerName("l2Normalization");
     const armnn::TensorInfo info({1, 2, 1, 5}, armnn::DataType::Float32);
@@ -1017,14 +1017,14 @@
     l2NormLayer->GetOutputSlot(0).SetTensorInfo(info);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBaseWithDescriptor<armnn::L2NormalizationDescriptor> verifier(
             l2NormLayerName, {info}, {info}, desc);
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(EnsureL2NormalizationBackwardCompatibility)
+TEST_CASE("EnsureL2NormalizationBackwardCompatibility")
 {
     // The hex data below is a flat buffer containing a simple network with one input
     // a L2Normalization layer and an output layer with dimensions as per the tensor infos below.
@@ -1066,7 +1066,7 @@
 
     armnn::INetworkPtr deserializedNetwork =
         DeserializeNetwork(std::string(l2NormalizationModel.begin(), l2NormalizationModel.end()));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     const std::string layerName("l2Normalization");
     const armnn::TensorInfo inputInfo = armnn::TensorInfo({1, 2, 1, 5}, armnn::DataType::Float32);
@@ -1081,7 +1081,7 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeLogicalBinary)
+TEST_CASE("SerializeLogicalBinary")
 {
     const std::string layerName("logicalBinaryAnd");
 
@@ -1107,14 +1107,14 @@
     logicalBinaryLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBaseWithDescriptor<armnn::LogicalBinaryDescriptor> verifier(
             layerName, { inputInfo, inputInfo }, { outputInfo }, descriptor);
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeLogicalUnary)
+TEST_CASE("SerializeLogicalUnary")
 {
     const std::string layerName("elementwiseUnaryLogicalNot");
 
@@ -1139,7 +1139,7 @@
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
 
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBaseWithDescriptor<armnn::ElementwiseUnaryDescriptor> verifier(
             layerName, { inputInfo }, { outputInfo }, descriptor);
@@ -1147,7 +1147,7 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeLogSoftmax)
+TEST_CASE("SerializeLogSoftmax")
 {
     const std::string layerName("log_softmax");
     const armnn::TensorInfo info({1, 10}, armnn::DataType::Float32);
@@ -1168,13 +1168,13 @@
     logSoftmaxLayer->GetOutputSlot(0).SetTensorInfo(info);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBaseWithDescriptor<armnn::LogSoftmaxDescriptor> verifier(layerName, {info}, {info}, descriptor);
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeMaximum)
+TEST_CASE("SerializeMaximum")
 {
     const std::string layerName("maximum");
     const armnn::TensorInfo info({ 1, 2, 2, 3 }, armnn::DataType::Float32);
@@ -1194,13 +1194,13 @@
     maximumLayer->GetOutputSlot(0).SetTensorInfo(info);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBase verifier(layerName, {info, info}, {info});
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeMean)
+TEST_CASE("SerializeMean")
 {
     const std::string layerName("mean");
     const armnn::TensorInfo inputInfo({1, 1, 3, 2}, armnn::DataType::Float32);
@@ -1222,13 +1222,13 @@
     meanLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBaseWithDescriptor<armnn::MeanDescriptor> verifier(layerName, {inputInfo}, {outputInfo}, descriptor);
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeMerge)
+TEST_CASE("SerializeMerge")
 {
     const std::string layerName("merge");
     const armnn::TensorInfo info({ 1, 2, 2, 3 }, armnn::DataType::Float32);
@@ -1248,7 +1248,7 @@
     mergeLayer->GetOutputSlot(0).SetTensorInfo(info);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBase verifier(layerName, {info, info}, {info});
     deserializedNetwork->ExecuteStrategy(verifier);
@@ -1298,7 +1298,7 @@
 // NOTE: Until the deprecated AddMergerLayer disappears this test checks that calling
 //       AddMergerLayer places a ConcatLayer into the serialized format and that
 //       when this deserialises we have a ConcatLayer
-BOOST_AUTO_TEST_CASE(SerializeMerger)
+TEST_CASE("SerializeMerger")
 {
     const std::string layerName("merger");
     const armnn::TensorInfo inputInfo = armnn::TensorInfo({2, 3, 2, 2}, armnn::DataType::Float32);
@@ -1327,13 +1327,13 @@
 
     std::string mergerLayerNetwork = SerializeNetwork(*network);
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(mergerLayerNetwork);
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     MergerLayerVerifier verifier(layerName, {inputInfo, inputInfo}, {outputInfo}, descriptor);
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(EnsureMergerLayerBackwardCompatibility)
+TEST_CASE("EnsureMergerLayerBackwardCompatibility")
 {
     // The hex data below is a flat buffer containing a simple network with two inputs
     // a merger layer (now deprecated) and an output layer with dimensions as per the tensor infos below.
@@ -1388,7 +1388,7 @@
     };
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(std::string(mergerModel.begin(), mergerModel.end()));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     const armnn::TensorInfo inputInfo  = armnn::TensorInfo({ 2, 3, 2, 2 }, armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo = armnn::TensorInfo({ 4, 3, 2, 2 }, armnn::DataType::Float32);
@@ -1402,7 +1402,7 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeConcat)
+TEST_CASE("SerializeConcat")
 {
     const std::string layerName("concat");
     const armnn::TensorInfo inputInfo = armnn::TensorInfo({2, 3, 2, 2}, armnn::DataType::Float32);
@@ -1429,7 +1429,7 @@
 
     std::string concatLayerNetwork = SerializeNetwork(*network);
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(concatLayerNetwork);
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     // NOTE: using the MergerLayerVerifier to ensure that it is a concat layer and not a
     //       merger layer that gets placed into the graph.
@@ -1437,7 +1437,7 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeMinimum)
+TEST_CASE("SerializeMinimum")
 {
     const std::string layerName("minimum");
     const armnn::TensorInfo info({ 1, 2, 2, 3 }, armnn::DataType::Float32);
@@ -1457,13 +1457,13 @@
     minimumLayer->GetOutputSlot(0).SetTensorInfo(info);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBase verifier(layerName, {info, info}, {info});
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeMultiplication)
+TEST_CASE("SerializeMultiplication")
 {
     const std::string layerName("multiplication");
     const armnn::TensorInfo info({ 1, 5, 2, 3 }, armnn::DataType::Float32);
@@ -1483,13 +1483,13 @@
     multiplicationLayer->GetOutputSlot(0).SetTensorInfo(info);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBase verifier(layerName, {info, info}, {info});
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializePrelu)
+TEST_CASE("SerializePrelu")
 {
     const std::string layerName("prelu");
 
@@ -1512,13 +1512,13 @@
     preluLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBase verifier(layerName, {inputTensorInfo, alphaTensorInfo}, {outputTensorInfo});
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeNormalization)
+TEST_CASE("SerializeNormalization")
 {
     const std::string layerName("normalization");
     const armnn::TensorInfo info({2, 1, 2, 2}, armnn::DataType::Float32);
@@ -1542,13 +1542,13 @@
     normalizationLayer->GetOutputSlot(0).SetTensorInfo(info);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBaseWithDescriptor<armnn::NormalizationDescriptor> verifier(layerName, {info}, {info}, desc);
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializePad)
+TEST_CASE("SerializePad")
 {
     const std::string layerName("pad");
     const armnn::TensorInfo inputTensorInfo = armnn::TensorInfo({1, 2, 3, 4}, armnn::DataType::Float32);
@@ -1568,7 +1568,7 @@
     padLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBaseWithDescriptor<armnn::PadDescriptor> verifier(layerName,
                                                                    {inputTensorInfo},
@@ -1577,7 +1577,7 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(EnsurePadBackwardCompatibility)
+TEST_CASE("EnsurePadBackwardCompatibility")
 {
     // The PadDescriptor is being extended with a float PadValue (so a value other than 0
     // can be used to pad the tensor.
@@ -1620,7 +1620,7 @@
     };
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(std::string(padModel.begin(), padModel.end()));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     const armnn::TensorInfo inputInfo  = armnn::TensorInfo({ 1, 2, 3, 4 }, armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo = armnn::TensorInfo({ 1, 3, 5, 7 }, armnn::DataType::Float32);
@@ -1631,7 +1631,7 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializePermute)
+TEST_CASE("SerializePermute")
 {
     const std::string layerName("permute");
     const armnn::TensorInfo inputTensorInfo({4, 3, 2, 1}, armnn::DataType::Float32);
@@ -1651,14 +1651,14 @@
     permuteLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBaseWithDescriptor<armnn::PermuteDescriptor> verifier(
             layerName, {inputTensorInfo}, {outputTensorInfo}, descriptor);
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializePooling2d)
+TEST_CASE("SerializePooling2d")
 {
     const std::string layerName("pooling2d");
     const armnn::TensorInfo inputInfo({1, 2, 2, 1}, armnn::DataType::Float32);
@@ -1690,14 +1690,14 @@
     pooling2dLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBaseWithDescriptor<armnn::Pooling2dDescriptor> verifier(
             layerName, {inputInfo}, {outputInfo}, desc);
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeQuantize)
+TEST_CASE("SerializeQuantize")
 {
     const std::string layerName("quantize");
     const armnn::TensorInfo info({ 1, 2, 2, 3 }, armnn::DataType::Float32);
@@ -1714,13 +1714,13 @@
     quantizeLayer->GetOutputSlot(0).SetTensorInfo(info);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBase verifier(layerName, {info}, {info});
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeRank)
+TEST_CASE("SerializeRank")
 {
     const std::string layerName("rank");
     const armnn::TensorInfo inputInfo({1, 9}, armnn::DataType::Float32);
@@ -1738,13 +1738,13 @@
     rankLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBase verifier(layerName, {inputInfo}, {outputInfo});
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeReduceSum)
+TEST_CASE("SerializeReduceSum")
 {
     const std::string layerName("Reduce_Sum");
     const armnn::TensorInfo inputInfo({1, 1, 3, 2}, armnn::DataType::Float32);
@@ -1766,13 +1766,13 @@
     reduceSumLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBaseWithDescriptor<armnn::ReduceDescriptor> verifier(layerName, {inputInfo}, {outputInfo}, descriptor);
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeReshape)
+TEST_CASE("SerializeReshape")
 {
     const std::string layerName("reshape");
     const armnn::TensorInfo inputInfo({1, 9}, armnn::DataType::Float32);
@@ -1792,14 +1792,14 @@
     reshapeLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBaseWithDescriptor<armnn::ReshapeDescriptor> verifier(
             layerName, {inputInfo}, {outputInfo}, descriptor);
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeResize)
+TEST_CASE("SerializeResize")
 {
     const std::string layerName("resize");
     const armnn::TensorInfo inputInfo  = armnn::TensorInfo({1, 3, 5, 5}, armnn::DataType::Float32);
@@ -1824,7 +1824,7 @@
     resizeLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBaseWithDescriptor<armnn::ResizeDescriptor> verifier(layerName, {inputInfo}, {outputInfo}, desc);
     deserializedNetwork->ExecuteStrategy(verifier);
@@ -1856,12 +1856,12 @@
                 VerifyNameAndConnections(layer, name);
                 const armnn::ResizeDescriptor& layerDescriptor =
                         static_cast<const armnn::ResizeDescriptor&>(descriptor);
-                BOOST_CHECK(layerDescriptor.m_Method             == armnn::ResizeMethod::Bilinear);
-                BOOST_CHECK(layerDescriptor.m_TargetWidth        == m_Descriptor.m_TargetWidth);
-                BOOST_CHECK(layerDescriptor.m_TargetHeight       == m_Descriptor.m_TargetHeight);
-                BOOST_CHECK(layerDescriptor.m_DataLayout         == m_Descriptor.m_DataLayout);
-                BOOST_CHECK(layerDescriptor.m_AlignCorners       == m_Descriptor.m_AlignCorners);
-                BOOST_CHECK(layerDescriptor.m_HalfPixelCenters   == m_Descriptor.m_HalfPixelCenters);
+                CHECK(layerDescriptor.m_Method             == armnn::ResizeMethod::Bilinear);
+                CHECK(layerDescriptor.m_TargetWidth        == m_Descriptor.m_TargetWidth);
+                CHECK(layerDescriptor.m_TargetHeight       == m_Descriptor.m_TargetHeight);
+                CHECK(layerDescriptor.m_DataLayout         == m_Descriptor.m_DataLayout);
+                CHECK(layerDescriptor.m_AlignCorners       == m_Descriptor.m_AlignCorners);
+                CHECK(layerDescriptor.m_HalfPixelCenters   == m_Descriptor.m_HalfPixelCenters);
                 break;
             }
             default:
@@ -1876,7 +1876,7 @@
 // NOTE: Until the deprecated AddResizeBilinearLayer disappears this test checks that
 //       calling AddResizeBilinearLayer places a ResizeLayer into the serialized format
 //       and that when this deserialises we have a ResizeLayer
-BOOST_AUTO_TEST_CASE(SerializeResizeBilinear)
+TEST_CASE("SerializeResizeBilinear")
 {
     const std::string layerName("resizeBilinear");
     const armnn::TensorInfo inputInfo  = armnn::TensorInfo({1, 3, 5, 5}, armnn::DataType::Float32);
@@ -1902,13 +1902,13 @@
     resizeLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     ResizeBilinearLayerVerifier verifier(layerName, {inputInfo}, {outputInfo}, desc);
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(EnsureResizeBilinearBackwardCompatibility)
+TEST_CASE("EnsureResizeBilinearBackwardCompatibility")
 {
     // The hex data below is a flat buffer containing a simple network with an input,
     // a ResizeBilinearLayer (now deprecated) and an output
@@ -1951,7 +1951,7 @@
 
     armnn::INetworkPtr deserializedNetwork =
         DeserializeNetwork(std::string(resizeBilinearModel.begin(), resizeBilinearModel.end()));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     const armnn::TensorInfo inputInfo  = armnn::TensorInfo({1, 3, 5, 5}, armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo = armnn::TensorInfo({1, 3, 2, 4}, armnn::DataType::Float32);
@@ -1964,7 +1964,7 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeSlice)
+TEST_CASE("SerializeSlice")
 {
     const std::string layerName{"slice"};
 
@@ -1986,13 +1986,13 @@
     sliceLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBaseWithDescriptor<armnn::SliceDescriptor> verifier(layerName, {inputInfo}, {outputInfo}, descriptor);
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeSoftmax)
+TEST_CASE("SerializeSoftmax")
 {
     const std::string layerName("softmax");
     const armnn::TensorInfo info({1, 10}, armnn::DataType::Float32);
@@ -2012,13 +2012,13 @@
     softmaxLayer->GetOutputSlot(0).SetTensorInfo(info);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBaseWithDescriptor<armnn::SoftmaxDescriptor> verifier(layerName, {info}, {info}, descriptor);
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeSpaceToBatchNd)
+TEST_CASE("SerializeSpaceToBatchNd")
 {
     const std::string layerName("spaceToBatchNd");
     const armnn::TensorInfo inputInfo({2, 1, 2, 4}, armnn::DataType::Float32);
@@ -2041,14 +2041,14 @@
     spaceToBatchNdLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBaseWithDescriptor<armnn::SpaceToBatchNdDescriptor> verifier(
             layerName, {inputInfo}, {outputInfo}, desc);
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeSpaceToDepth)
+TEST_CASE("SerializeSpaceToDepth")
 {
     const std::string layerName("spaceToDepth");
 
@@ -2071,14 +2071,14 @@
     spaceToDepthLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBaseWithDescriptor<armnn::SpaceToDepthDescriptor> verifier(
             layerName, {inputInfo}, {outputInfo}, desc);
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeSplitter)
+TEST_CASE("SerializeSplitter")
 {
     const unsigned int numViews = 3;
     const unsigned int numDimensions = 4;
@@ -2125,14 +2125,14 @@
     splitterLayer->GetOutputSlot(2).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBaseWithDescriptor<armnn::ViewsDescriptor> verifier(
             layerName, {inputInfo}, {outputInfo, outputInfo, outputInfo}, desc);
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeStack)
+TEST_CASE("SerializeStack")
 {
     const std::string layerName("stack");
 
@@ -2156,14 +2156,14 @@
     stackLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBaseWithDescriptor<armnn::StackDescriptor> verifier(
             layerName, {inputTensorInfo, inputTensorInfo}, {outputTensorInfo}, descriptor);
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeStandIn)
+TEST_CASE("SerializeStandIn")
 {
     const std::string layerName("standIn");
 
@@ -2190,14 +2190,14 @@
     standInLayer->GetOutputSlot(1).SetTensorInfo(tensorInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBaseWithDescriptor<armnn::StandInDescriptor> verifier(
             layerName, { tensorInfo, tensorInfo }, { tensorInfo, tensorInfo }, descriptor);
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeStridedSlice)
+TEST_CASE("SerializeStridedSlice")
 {
     const std::string layerName("stridedSlice");
     const armnn::TensorInfo inputInfo = armnn::TensorInfo({3, 2, 3, 1}, armnn::DataType::Float32);
@@ -2220,14 +2220,14 @@
     stridedSliceLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBaseWithDescriptor<armnn::StridedSliceDescriptor> verifier(
             layerName, {inputInfo}, {outputInfo}, desc);
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeSubtraction)
+TEST_CASE("SerializeSubtraction")
 {
     const std::string layerName("subtraction");
     const armnn::TensorInfo info({ 1, 4 }, armnn::DataType::Float32);
@@ -2247,13 +2247,13 @@
     subtractionLayer->GetOutputSlot(0).SetTensorInfo(info);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBase verifier(layerName, {info, info}, {info});
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeSwitch)
+TEST_CASE("SerializeSwitch")
 {
     class SwitchLayerVerifier : public LayerVerifierBase
     {
@@ -2312,13 +2312,13 @@
     switchLayer->GetOutputSlot(1).SetTensorInfo(info);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     SwitchLayerVerifier verifier(layerName, {info, info}, {info, info});
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeTranspose)
+TEST_CASE("SerializeTranspose")
 {
     const std::string layerName("transpose");
     const armnn::TensorInfo inputTensorInfo({4, 3, 2, 1}, armnn::DataType::Float32);
@@ -2338,14 +2338,14 @@
     transposeLayer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     LayerVerifierBaseWithDescriptor<armnn::TransposeDescriptor> verifier(
             layerName, {inputTensorInfo}, {outputTensorInfo}, descriptor);
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeTransposeConvolution2d)
+TEST_CASE("SerializeTransposeConvolution2d")
 {
     const std::string layerName("transposeConvolution2d");
     const armnn::TensorInfo inputInfo ({ 1, 7, 7, 1 }, armnn::DataType::Float32);
@@ -2386,7 +2386,7 @@
     convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     const std::vector<armnn::ConstTensor> constants {weights, biases};
     LayerVerifierBaseWithDescriptorAndConstants<armnn::TransposeConvolution2dDescriptor> verifier(
@@ -2394,7 +2394,7 @@
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_CASE(SerializeDeserializeNonLinearNetwork)
+TEST_CASE("SerializeDeserializeNonLinearNetwork")
 {
     class ConstantLayerVerifier : public LayerVerifierBase
     {
@@ -2456,10 +2456,10 @@
     add->GetOutputSlot(0).SetTensorInfo(info);
 
     armnn::INetworkPtr deserializedNetwork = DeserializeNetwork(SerializeNetwork(*network));
-    BOOST_CHECK(deserializedNetwork);
+    CHECK(deserializedNetwork);
 
     ConstantLayerVerifier verifier(layerName, {}, {info}, constTensor);
     deserializedNetwork->ExecuteStrategy(verifier);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Activations.cpp b/src/armnnTfLiteParser/test/Activations.cpp
index f74c22d..980edc4 100644
--- a/src/armnnTfLiteParser/test/Activations.cpp
+++ b/src/armnnTfLiteParser/test/Activations.cpp
@@ -3,12 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
+#include <doctest/doctest.h>
 
+TEST_SUITE("TensorflowLiteParser_Activations")
+{
 struct ActivationFixture : ParserFlatbuffersFixture
 {
 
@@ -68,7 +69,7 @@
 {
     ReLuFixture() : ActivationFixture("RELU", "FLOAT32") {}
 };
-BOOST_FIXTURE_TEST_CASE(ParseReLu, ReLuFixture)
+TEST_CASE_FIXTURE(ReLuFixture, "ParseReLu")
 {
     RunTest<2, armnn::DataType::Float32>(0, { -1.0f, -0.5f, 1.25f, -3.0f, 0.0f, 0.5f, -0.75f },
                                          { 0.0f, 0.0f, 1.25f, 0.0f, 0.0f, 0.5f, 0.0f });
@@ -78,7 +79,7 @@
 {
     ReLu6Fixture() : ActivationFixture("RELU6", "FLOAT32") {}
 };
-BOOST_FIXTURE_TEST_CASE(ParseReLu6, ReLu6Fixture)
+TEST_CASE_FIXTURE(ReLu6Fixture, "ParseReLu6")
 {
     RunTest<2, armnn::DataType::Float32>(0, { -1.0f, -0.5f, 7.25f, -3.0f, 0.0f, 0.5f, -0.75f },
                                          { 0.0f, 0.0f, 6.0f, 0.0f, 0.0f, 0.5f, 0.0f });
@@ -88,7 +89,7 @@
 {
     SigmoidFixture() : ActivationFixture("LOGISTIC", "FLOAT32") {}
 };
-BOOST_FIXTURE_TEST_CASE(ParseLogistic, SigmoidFixture)
+TEST_CASE_FIXTURE(SigmoidFixture, "ParseLogistic")
 {
     RunTest<2, armnn::DataType::Float32>(0, { -1.0f,     -0.5f,      4.0f,       -4.0f,  0.0f,      0.5f,     -0.75f },
                                          {0.268941f, 0.377541f, 0.982013f,  0.0179862f,  0.5f, 0.622459f,  0.320821f });
@@ -99,7 +100,7 @@
     TanHFixture() : ActivationFixture("TANH", "FLOAT32") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseTanH, TanHFixture)
+TEST_CASE_FIXTURE(TanHFixture, "ParseTanH")
 {
     RunTest<2, armnn::DataType::Float32>(0,
         { -0.1f,       -0.2f,         -0.3f,       -0.4f,    0.1f,         0.2f,              0.3f },
@@ -111,7 +112,7 @@
     EluFixture() : ActivationFixture("ELU", "FLOAT32") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseElu, EluFixture)
+TEST_CASE_FIXTURE(EluFixture, "ParseElu")
 {
     RunTest<2, armnn::DataType::Float32>(0,
                                          { -2.0f,           -1.0f,           -0.0f, 0.0f, 1.0f, 2.0f, 3.0f },
@@ -123,10 +124,11 @@
     HardSwishFixture() : ActivationFixture("HARD_SWISH", "FLOAT32") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseHardSwish, HardSwishFixture)
+TEST_CASE_FIXTURE(HardSwishFixture, "ParseHardSwish")
 {
     RunTest<2, armnn::DataType::Float32>(0,
                                          { -4.0f, -3.0f,        -2.9f,  1.2f,        2.2f, 3.0f, 4.0f },
                                          { -0.0f, -0.0f, -0.04833334f, 0.84f, 1.90666667f, 3.0f, 4.0f });
 }
-BOOST_AUTO_TEST_SUITE_END()
+
+}
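The fixture-based TfLite parser tests follow the same mapping, with two extra details visible above: BOOST_FIXTURE_TEST_CASE(Name, Fixture) becomes TEST_CASE_FIXTURE(Fixture, "Name"), so the fixture moves to the first argument and the case name becomes a string, and each file now opens its own uniquely named suite (for example TensorflowLiteParser_Activations) instead of reopening a shared TensorflowLiteParser suite. Below is a minimal sketch of the fixture form, using a placeholder fixture rather than ParserFlatbuffersFixture.

#include <doctest/doctest.h>

TEST_SUITE("TensorflowLiteParser_Sketch")
{

// Placeholder fixture; stands in for ParserFlatbuffersFixture-style setup.
struct SketchFixture
{
    SketchFixture() : m_Expected(6.0f) {}
    float m_Expected;
};

// was: BOOST_FIXTURE_TEST_CASE(SketchCase, SketchFixture)
TEST_CASE_FIXTURE(SketchFixture, "SketchCase")
{
    // Fixture members are accessed directly, exactly as with the Boost macro.
    CHECK(m_Expected == doctest::Approx(6.0f));
}

}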
diff --git a/src/armnnTfLiteParser/test/Addition.cpp b/src/armnnTfLiteParser/test/Addition.cpp
index deeb707..d7c207f 100644
--- a/src/armnnTfLiteParser/test/Addition.cpp
+++ b/src/armnnTfLiteParser/test/Addition.cpp
@@ -3,15 +3,16 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
+#include <doctest/doctest.h>
+
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Addition")
+{
 struct AddFixture : public ParserFlatbuffersFixture
 {
     explicit AddFixture(const std::string & inputShape1,
@@ -95,7 +96,7 @@
                                     "[ 2, 2 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SimpleAdd, SimpleAddFixture)
+TEST_CASE_FIXTURE(SimpleAddFixture, "SimpleAdd")
 {
   RunTest<2, armnn::DataType::QAsymmU8>(
       0,
@@ -104,4 +105,4 @@
       {{"outputTensor", { 4, 6, 8, 10 }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
\ No newline at end of file
diff --git a/src/armnnTfLiteParser/test/ArgMinMax.cpp b/src/armnnTfLiteParser/test/ArgMinMax.cpp
index ad99b48..77574b1 100644
--- a/src/armnnTfLiteParser/test/ArgMinMax.cpp
+++ b/src/armnnTfLiteParser/test/ArgMinMax.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <iostream>
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_ArgMinMax")
+{
 struct ArgMinMaxFixture : public ParserFlatbuffersFixture
 {
     explicit ArgMinMaxFixture(const std::string& operatorCode,
@@ -93,7 +92,7 @@
                                              "[ 3, 0, 0, 0 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseSimpleArgMax, SimpleArgMaxFixture)
+TEST_CASE_FIXTURE(SimpleArgMaxFixture, "ParseSimpleArgMax")
 {
     RunTest<3, armnn::DataType::Float32, armnn::DataType::Signed32>(
             0,
@@ -109,7 +108,7 @@
                                        "[ 0, 0, 0, 0 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseArgMax, ArgMaxFixture)
+TEST_CASE_FIXTURE(ArgMaxFixture, "ParseArgMax")
 {
     RunTest<3, armnn::DataType::Float32, armnn::DataType::Signed32>(
             0,
@@ -131,7 +130,7 @@
                                              "[ 3, 0, 0, 0 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseSimpleArgMin, SimpleArgMinFixture)
+TEST_CASE_FIXTURE(SimpleArgMinFixture, "ParseSimpleArgMin")
 {
     RunTest<3, armnn::DataType::Float32, armnn::DataType::Signed32>(
             0,
@@ -147,7 +146,7 @@
                                        "[ 0, 0, 0, 0 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseArgMin, ArgMinFixture)
+TEST_CASE_FIXTURE(ArgMinFixture, "ParseArgMin")
 {
     RunTest<3, armnn::DataType::Float32, armnn::DataType::Signed32>(
             0,
@@ -161,4 +160,4 @@
                                  0, 0, 0, 0 } } });
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/AvgPool2D.cpp b/src/armnnTfLiteParser/test/AvgPool2D.cpp
index a56e7e7..fdab4da 100644
--- a/src/armnnTfLiteParser/test/AvgPool2D.cpp
+++ b/src/armnnTfLiteParser/test/AvgPool2D.cpp
@@ -2,12 +2,11 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
-#include <boost/test/unit_test.hpp>
 #include "armnnTfLiteParser/ITfLiteParser.hpp"
 #include "ParserFlatbuffersFixture.hpp"
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_AvgPool2D")
+{
 struct AvgPool2DFixture : public ParserFlatbuffersFixture
 {
     explicit AvgPool2DFixture(std::string inputdim, std::string outputdim, std::string dataType)
@@ -96,25 +95,25 @@
     AvgPoolLiteFixture2DOutput() : AvgPool2DFixture("[ 1, 4, 4, 1 ]", "[ 1, 2, 2, 1 ]", "UINT8") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(AvgPoolLite1DOutput, AvgPoolLiteFixtureUint1DOutput)
+TEST_CASE_FIXTURE(AvgPoolLiteFixtureUint1DOutput, "AvgPoolLite1DOutput")
 {
     RunTest<4, armnn::DataType::QAsymmU8>(0, {2, 3, 5, 2 }, { 3 });
 }
 
-BOOST_FIXTURE_TEST_CASE(AvgPoolLiteFloat1DOutput, AvgPoolLiteFixtureFloat1DOutput)
+TEST_CASE_FIXTURE(AvgPoolLiteFixtureFloat1DOutput, "AvgPoolLiteFloat1DOutput")
 {
     RunTest<4, armnn::DataType::Float32>(0, { 2.0f, 3.0f, 5.0f, 2.0f },  { 3.0f });
 }
 
-BOOST_FIXTURE_TEST_CASE(AvgPoolLite2DOutput, AvgPoolLiteFixture2DOutput)
+TEST_CASE_FIXTURE(AvgPoolLiteFixture2DOutput, "AvgPoolLite2DOutput")
 {
     RunTest<4, armnn::DataType::QAsymmU8>(
         0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 4, 5, 2, 2 });
 }
 
-BOOST_FIXTURE_TEST_CASE(IncorrectDataTypeError, AvgPoolLiteFixtureFloat1DOutput)
+TEST_CASE_FIXTURE(AvgPoolLiteFixtureFloat1DOutput, "IncorrectDataTypeError")
 {
-    BOOST_CHECK_THROW((RunTest<4, armnn::DataType::QAsymmU8>(0, {2, 3, 5, 2 }, { 3 })), armnn::Exception);
+    CHECK_THROWS_AS((RunTest<4, armnn::DataType::QAsymmU8>(0, {2, 3, 5, 2 }, { 3 })), armnn::Exception);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
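The exception assertions also translate one-to-one: BOOST_CHECK_THROW(expr, ExceptionType) becomes CHECK_THROWS_AS(expr, ExceptionType) and BOOST_CHECK_NO_THROW(expr) becomes CHECK_NOTHROW(expr); as in the AvgPool2D test above, expressions containing commas stay wrapped in an extra set of parentheses so the preprocessor treats them as a single macro argument. A short sketch with a placeholder helper (ThrowIf is not an ArmNN function):

#include <doctest/doctest.h>

#include <stdexcept>

// Placeholder helper used only by this sketch.
static void ThrowIf(bool shouldThrow)
{
    if (shouldThrow)
    {
        throw std::runtime_error("expected failure");
    }
}

TEST_CASE("ExceptionCheckSketch")
{
    CHECK_THROWS_AS(ThrowIf(true), std::runtime_error);   // was BOOST_CHECK_THROW
    CHECK_NOTHROW(ThrowIf(false));                         // was BOOST_CHECK_NO_THROW
}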
diff --git a/src/armnnTfLiteParser/test/BatchToSpaceND.cpp b/src/armnnTfLiteParser/test/BatchToSpaceND.cpp
index 97f1828..f5285f8 100644
--- a/src/armnnTfLiteParser/test/BatchToSpaceND.cpp
+++ b/src/armnnTfLiteParser/test/BatchToSpaceND.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_BatchToSpaceND")
+{
 struct BatchToSpaceNDFixture : public ParserFlatbuffersFixture
 {
     explicit BatchToSpaceNDFixture(const std::string & inputShape,
@@ -105,7 +104,7 @@
                                                          "[ 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(BatchToSpaceNDTest1, BatchToSpaceNDFixtureTest1)
+TEST_CASE_FIXTURE(BatchToSpaceNDFixtureTest1, "BatchToSpaceNDTest1")
 {
     RunTest<4, armnn::DataType::Float32>
         (0,
@@ -142,7 +141,7 @@
                                                          "[ 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseBatchToSpaceNDTest2, BatchToSpaceNDFixtureTest2)
+TEST_CASE_FIXTURE(BatchToSpaceNDFixtureTest2, "ParseBatchToSpaceNDTest2")
 {
     RunTest<4, armnn::DataType::Float32>
         (0,
@@ -159,7 +158,7 @@
                                                          "[ 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseBatchToSpaceNDTest3, BatchToSpaceNDFixtureTest3)
+TEST_CASE_FIXTURE(BatchToSpaceNDFixtureTest3, "ParseBatchToSpaceNDTest3")
 {
     RunTest<4, armnn::DataType::Float32>
         (0,
@@ -167,4 +166,4 @@
          {{ "outputTensor", { 1.0f, 2.0f, 3.0f, 4.0f, 5.0f, 6.0f, 7.0f, 8.0f, 9.0f, 10.0f, 11.0f, 12.0f }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Cast.cpp b/src/armnnTfLiteParser/test/Cast.cpp
index e0f9c63..9971ee8 100644
--- a/src/armnnTfLiteParser/test/Cast.cpp
+++ b/src/armnnTfLiteParser/test/Cast.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Cast")
+{
 struct CastFixture : public ParserFlatbuffersFixture
 {
     explicit CastFixture(const std::string& inputShape,
@@ -76,7 +75,7 @@
                                       "FLOAT32") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SimpleCast, SimpleCastFixture)
+TEST_CASE_FIXTURE(SimpleCastFixture, "SimpleCast")
 {
 RunTest<2, armnn::DataType::Signed32 , armnn::DataType::Float32>(
 0,
@@ -84,5 +83,4 @@
 {{"outputTensor", { 0.0f, -1.0f, 5.0f, -100.0f, 200.0f, -255.0f }}});
 }
 
-
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnnTfLiteParser/test/Concatenation.cpp b/src/armnnTfLiteParser/test/Concatenation.cpp
index 8e31a3e..2407794 100644
--- a/src/armnnTfLiteParser/test/Concatenation.cpp
+++ b/src/armnnTfLiteParser/test/Concatenation.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Concatenation")
+{
 struct ConcatenationFixture : public ParserFlatbuffersFixture
 {
     explicit ConcatenationFixture(const std::string & inputShape1,
@@ -98,7 +97,7 @@
                                                              "-3" ) {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseConcatenationNegativeDim, ConcatenationFixtureNegativeDim)
+TEST_CASE_FIXTURE(ConcatenationFixtureNegativeDim, "ParseConcatenationNegativeDim")
 {
     RunTest<4, armnn::DataType::QAsymmU8>(
         0,
@@ -112,7 +111,7 @@
     ConcatenationFixtureNCHW() : ConcatenationFixture("[ 1, 1, 2, 2 ]", "[ 1, 1, 2, 2 ]", "[ 1, 2, 2, 2 ]", "1" ) {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseConcatenationNCHW, ConcatenationFixtureNCHW)
+TEST_CASE_FIXTURE(ConcatenationFixtureNCHW, "ParseConcatenationNCHW")
 {
     RunTest<4, armnn::DataType::QAsymmU8>(
         0,
@@ -126,7 +125,7 @@
     ConcatenationFixtureNHWC() : ConcatenationFixture("[ 1, 1, 2, 2 ]", "[ 1, 1, 2, 2 ]", "[ 1, 1, 2, 4 ]", "3" ) {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseConcatenationNHWC, ConcatenationFixtureNHWC)
+TEST_CASE_FIXTURE(ConcatenationFixtureNHWC, "ParseConcatenationNHWC")
 {
     RunTest<4, armnn::DataType::QAsymmU8>(
         0,
@@ -140,7 +139,7 @@
     ConcatenationFixtureDim1() : ConcatenationFixture("[ 1, 2, 3, 4 ]", "[ 1, 2, 3, 4 ]", "[ 1, 4, 3, 4 ]", "1" ) {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseConcatenationDim1, ConcatenationFixtureDim1)
+TEST_CASE_FIXTURE(ConcatenationFixtureDim1, "ParseConcatenationDim1")
 {
     RunTest<4, armnn::DataType::QAsymmU8>(
         0,
@@ -159,7 +158,7 @@
     ConcatenationFixtureDim3() : ConcatenationFixture("[ 1, 2, 3, 4 ]", "[ 1, 2, 3, 4 ]", "[ 1, 2, 3, 8 ]", "3" ) {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseConcatenationDim3, ConcatenationFixtureDim3)
+TEST_CASE_FIXTURE(ConcatenationFixtureDim3, "ParseConcatenationDim3")
 {
     RunTest<4, armnn::DataType::QAsymmU8>(
         0,
@@ -194,7 +193,7 @@
     ConcatenationFixture3DDim0() : ConcatenationFixture("[ 1, 2, 3]", "[ 2, 2, 3]", "[ 3, 2, 3]", "0" ) {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseConcatenation3DDim0, ConcatenationFixture3DDim0)
+TEST_CASE_FIXTURE(ConcatenationFixture3DDim0, "ParseConcatenation3DDim0")
 {
     RunTest<3, armnn::DataType::QAsymmU8>(
         0,
@@ -211,7 +210,7 @@
     ConcatenationFixture3DDim1() : ConcatenationFixture("[ 1, 2, 3]", "[ 1, 4, 3]", "[ 1, 6, 3]", "1" ) {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseConcatenation3DDim1, ConcatenationFixture3DDim1)
+TEST_CASE_FIXTURE(ConcatenationFixture3DDim1, "ParseConcatenation3DDim1")
 {
     RunTest<3, armnn::DataType::QAsymmU8>(
         0,
@@ -228,7 +227,7 @@
     ConcatenationFixture3DDim2() : ConcatenationFixture("[ 1, 2, 3]", "[ 1, 2, 6]", "[ 1, 2, 9]", "2" ) {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseConcatenation3DDim2, ConcatenationFixture3DDim2)
+TEST_CASE_FIXTURE(ConcatenationFixture3DDim2, "ParseConcatenation3DDim2")
 {
     RunTest<3, armnn::DataType::QAsymmU8>(
         0,
@@ -240,4 +239,4 @@
                               3,  4,  5, 12, 13, 14, 15, 16, 17 } } });
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Constant.cpp b/src/armnnTfLiteParser/test/Constant.cpp
index bfb76a9..641fd7b 100644
--- a/src/armnnTfLiteParser/test/Constant.cpp
+++ b/src/armnnTfLiteParser/test/Constant.cpp
@@ -3,7 +3,6 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
@@ -12,8 +11,8 @@
 
 using armnnTfLiteParser::TfLiteParserImpl;
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Constant")
+{
 struct ConstantAddFixture : public ParserFlatbuffersFixture
 {
     explicit ConstantAddFixture(const std::string & inputShape,
@@ -101,7 +100,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SimpleConstantAdd, SimpleConstantAddFixture)
+TEST_CASE_FIXTURE(SimpleConstantAddFixture, "SimpleConstantAdd")
 {
     RunTest<2, armnn::DataType::QAsymmU8>(
                 0,
@@ -110,4 +109,4 @@
                 );
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Conv2D.cpp b/src/armnnTfLiteParser/test/Conv2D.cpp
index a480a4e..dc5e697 100644
--- a/src/armnnTfLiteParser/test/Conv2D.cpp
+++ b/src/armnnTfLiteParser/test/Conv2D.cpp
@@ -3,13 +3,12 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 #include <sstream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Conv2D")
+{
 struct SimpleConv2DFixture : public ParserFlatbuffersFixture
 {
     explicit SimpleConv2DFixture()
@@ -87,7 +86,7 @@
     }
 };
 
-BOOST_FIXTURE_TEST_CASE( ParseSimpleConv2D, SimpleConv2DFixture )
+TEST_CASE_FIXTURE(SimpleConv2DFixture, "ParseSimpleConv2D")
 {
     RunTest<4, armnn::DataType::QAsymmU8>(
         0,
@@ -217,7 +216,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE( ParseConv2DWithBias, SimpleConv2DWithBiasesFixture )
+TEST_CASE_FIXTURE(SimpleConv2DWithBiasesFixture, "ParseConv2DWithBias")
 {
     RunTest<4, armnn::DataType::QAsymmU8>(
         0,
@@ -247,7 +246,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE( ParseDynamicConv2DWithBias, DynamicConv2DWithBiasesFixture )
+TEST_CASE_FIXTURE(DynamicConv2DWithBiasesFixture, "ParseDynamicConv2DWithBias")
 {
     RunTest<4,
         armnn::DataType::QAsymmU8,
@@ -288,7 +287,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE( ParseConv2D_112x112_out, Conv2DShapeTestFixture )
+TEST_CASE_FIXTURE(Conv2DShapeTestFixture, "ParseConv2D_112x112_out")
 {
 }
 
@@ -310,7 +309,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE( ParseConv2DAndReluWithBias, ReluConv2DWithBiasesFixture )
+TEST_CASE_FIXTURE(ReluConv2DWithBiasesFixture, "ParseConv2DAndReluWithBias")
 {
     uint8_t bias = 16;
     uint8_t outZero = 20;
@@ -353,7 +352,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE( ParseConv2DAndRelu6WithBias, Relu6Conv2DWithBiasesFixture )
+TEST_CASE_FIXTURE(Relu6Conv2DWithBiasesFixture, "ParseConv2DAndRelu6WithBias")
 {
     uint8_t relu6Min = 6 / 2; // divide by output scale
 
@@ -642,7 +641,7 @@
     }
 };
 
-BOOST_FIXTURE_TEST_CASE( ParsePerChannelConv2D, PerChannelConv2DFixture )
+TEST_CASE_FIXTURE(PerChannelConv2DFixture, "ParsePerChannelConv2D")
 {
     RunTest<4, armnn::DataType::QAsymmS8>(
         0,
@@ -664,4 +663,4 @@
         });
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/DepthToSpace.cpp b/src/armnnTfLiteParser/test/DepthToSpace.cpp
index efd1207..6b7e9c5 100644
--- a/src/armnnTfLiteParser/test/DepthToSpace.cpp
+++ b/src/armnnTfLiteParser/test/DepthToSpace.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_DepthToSpace")
+{
 struct DepthToSpaceFixture : public ParserFlatbuffersFixture
 {
     explicit DepthToSpaceFixture(const std::string& inputShape,
@@ -81,7 +80,7 @@
     SimpleDepthToSpaceFixture() : DepthToSpaceFixture("[ 1, 2, 2, 4 ]", "[ 1, 4, 4, 1 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseDepthToSpace, SimpleDepthToSpaceFixture)
+TEST_CASE_FIXTURE(SimpleDepthToSpaceFixture, "ParseDepthToSpace")
 {
     RunTest<4, armnn::DataType::Float32>
         (0,
@@ -95,4 +94,4 @@
                               11.f,  12.f,  15.f,  16.f }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp b/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
index 95ad2d5..757b23e 100644
--- a/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
+++ b/src/armnnTfLiteParser/test/DepthwiseConvolution2D.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_DepthwiseConvolution2D")
+{
 struct DepthwiseConvolution2dFixture : public ParserFlatbuffersFixture
 {
     explicit DepthwiseConvolution2dFixture(const std::string& inputShape,
@@ -131,7 +130,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DSame, DepthwiseConvolution2dSameFixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dSameFixture, "ParseDepthwiseConv2DSame")
 {
     RunTest<4, armnn::DataType::QAsymmU8>(
         0,
@@ -158,7 +157,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DValid, DepthwiseConvolution2dValidFixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dValidFixture, "ParseDepthwiseConv2DValid")
 {
     RunTest<4, armnn::DataType::QAsymmU8>(
         0,
@@ -183,7 +182,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DSameBias, DepthwiseConvolution2dSameBiasFixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dSameBiasFixture, "ParseDepthwiseConv2DSameBias")
 {
     RunTest<4, armnn::DataType::QAsymmU8>(
         0,
@@ -210,7 +209,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseDynamicDepthwiseConv2DSameBias, DynamicDepthwiseConvolution2dSameBiasFixture)
+TEST_CASE_FIXTURE(DynamicDepthwiseConvolution2dSameBiasFixture, "ParseDynamicDepthwiseConv2DSameBias")
 {
     RunTest<4, armnn::DataType::QAsymmU8, armnn::DataType::QAsymmU8>(0,
                                                       { { "inputTensor", { 0, 1, 2,
@@ -365,7 +364,7 @@
 };
 
 // No quantization meaning scale=1.0 and offset=0.0 and tensor quantization
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DNoQuant, DepthwiseConvolution2dNoQuantFixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dNoQuantFixture, "ParseDepthwiseConv2DNoQuant")
 {
     RunTest<4, armnn::DataType::QAsymmS8>(
         0,
@@ -397,7 +396,7 @@
 };
 
 // Uses per channel quantization on weights but with scales = 1.0 and offsets = 0.0
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterNoChannelQuant, DepthwiseConvolution2dNoChannelQuantFixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dNoChannelQuantFixture, "ParseDepthwiseConv2DFilterNoChannelQuant")
 {
     RunTest<4, armnn::DataType::QAsymmS8>(
         0,
@@ -432,8 +431,8 @@
 };
 
 // Weights are per channel quantized but all scales are set to the same value
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant,
-                        DepthwiseConvolution2dWeightsPerChannelQuantFixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuantFixture,
+                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant")
 {
     RunTest<4, armnn::DataType::QAsymmS8>(
         0,
@@ -468,8 +467,8 @@
 };
 
 // Uses per channel quantization on weights all scales are different in this test
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant1,
-                        DepthwiseConvolution2dWeightsPerChannelQuant1Fixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant1Fixture,
+                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant1")
 {
     RunTest<4, armnn::DataType::QAsymmS8>(
         0,
@@ -506,8 +505,8 @@
 
 // Uses per channel quantization on weights all scales are different in this test
 // Uses different shape for weights and input compared to the other tests above
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant2,
-                        DepthwiseConvolution2dWeightsPerChannelQuant2Fixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant2Fixture,
+                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant2")
 {
     RunTest<4, armnn::DataType::QAsymmS8>(
         0,
@@ -555,8 +554,8 @@
 };
 
 // Test for depthwise_multiplier different to one (M > 1)
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant4,
-                        DepthwiseConvolution2dWeightsPerChannelQuant4Fixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant4Fixture,
+                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant4")
 {
     RunTest<4, armnn::DataType::QAsymmS8>(
         0,
@@ -616,8 +615,8 @@
 };
 
 
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant6,
-                        DepthwiseConvolution2dWeightsPerChannelQuant6Fixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant6Fixture,
+                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant6")
 {
     RunTest<4, armnn::DataType::QAsymmS8>(
         0,
@@ -672,8 +671,8 @@
 };
 
 
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant1_1,
-                        DepthwiseConvolution2dWeightsPerChannelQuant1_1Fixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant1_1Fixture,
+                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant1_1")
 {
     RunTest<4, armnn::DataType::QAsymmS8>(
         0,
@@ -712,8 +711,8 @@
 };
 
 
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant1_2,
-                        DepthwiseConvolution2dWeightsPerChannelQuant1_2Fixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant1_2Fixture,
+                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant1_2")
 {
     RunTest<4, armnn::DataType::QAsymmS8>(
         0,
@@ -759,8 +758,8 @@
 };
 
 
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_1,
-                        DepthwiseConvolution2dWeightsPerChannelQuant4_1Fixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant4_1Fixture,
+                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_1")
 {
     RunTest<4, armnn::DataType::QAsymmS8>(
         0,
@@ -821,8 +820,8 @@
 };
 
 
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_2,
-                        DepthwiseConvolution2dWeightsPerChannelQuant4_2Fixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant4_2Fixture,
+                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_2")
 {
     RunTest<4, armnn::DataType::QAsymmS8>(
         0,
@@ -887,8 +886,8 @@
 };
 
 // Test for depthwise_multiplier different to one (M > 1)
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_5,
-                        DepthwiseConvolution2dWeightsPerChannelQuant4_5Fixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant4_5Fixture,
+                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_5")
 {
     RunTest<4, armnn::DataType::QAsymmS8>(
         0,
@@ -947,8 +946,8 @@
 };
 
 // Test for depthwise_multiplier different to one (M > 1)
-BOOST_FIXTURE_TEST_CASE(ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_3_1,
-                        DepthwiseConvolution2dWeightsPerChannelQuant4_3_1Fixture)
+TEST_CASE_FIXTURE(DepthwiseConvolution2dWeightsPerChannelQuant4_3_1Fixture,
+                  "ParseDepthwiseConv2DFilterWeightsPerChannelQuant4_3_1")
 {
     RunTest<4, armnn::DataType::QAsymmS8>(
         0,
@@ -974,4 +973,4 @@
           3, 4, 1, 1, 1, 3, 3, 2, 0, 0, 0, 0, 2, 4, 4, 8});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Dequantize.cpp b/src/armnnTfLiteParser/test/Dequantize.cpp
index 663f2ca..6de6fe5 100644
--- a/src/armnnTfLiteParser/test/Dequantize.cpp
+++ b/src/armnnTfLiteParser/test/Dequantize.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Dequantize")
+{
     struct DequantizeFixture : public ParserFlatbuffersFixture
     {
         explicit DequantizeFixture(const std::string & inputShape,
@@ -80,7 +79,7 @@
                                                              "UINT8") {}
     };
 
-    BOOST_FIXTURE_TEST_CASE(SimpleDequantizeQAsymm8, SimpleDequantizeFixtureQAsymm8)
+    TEST_CASE_FIXTURE(SimpleDequantizeFixtureQAsymm8, "SimpleDequantizeQAsymm8")
     {
         RunTest<2, armnn::DataType::QAsymmU8 , armnn::DataType::Float32>(
                 0,
@@ -95,7 +94,7 @@
                                                              "INT16") {}
     };
 
-    BOOST_FIXTURE_TEST_CASE(SimpleDequantizeQsymm16, SimpleDequantizeFixtureQSymm16)
+    TEST_CASE_FIXTURE(SimpleDequantizeFixtureQSymm16, "SimpleDequantizeQsymm16")
     {
         RunTest<2, armnn::DataType::QSymmS16 , armnn::DataType::Float32>(
                 0,
@@ -110,7 +109,7 @@
                                                              "INT8") {}
     };
 
-    BOOST_FIXTURE_TEST_CASE(SimpleDequantizeQAsymmS8, SimpleDequantizeFixtureQAsymmS8)
+    TEST_CASE_FIXTURE(SimpleDequantizeFixtureQAsymmS8, "SimpleDequantizeQAsymmS8")
     {
         RunTest<2, armnn::DataType::QAsymmS8 , armnn::DataType::Float32>(
                 0,
@@ -118,4 +117,4 @@
                 {{"outputTensor", { 0.0f, 1.5f, 7.5f, 190.5f, -192.0f, -1.5f }}});
     }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/DetectionPostProcess.cpp b/src/armnnTfLiteParser/test/DetectionPostProcess.cpp
index 304520c..e7ef740 100644
--- a/src/armnnTfLiteParser/test/DetectionPostProcess.cpp
+++ b/src/armnnTfLiteParser/test/DetectionPostProcess.cpp
@@ -12,10 +12,8 @@
 #include <armnn/utility/PolymorphicDowncast.hpp>
 #include <QuantizeHelper.hpp>
 
-#include <boost/test/unit_test.hpp>
-
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_DetectionPostProcess")
+{
 struct DetectionPostProcessFixture : ParserFlatbuffersFixture
 {
     explicit DetectionPostProcessFixture(const std::string& custom_options)
@@ -161,7 +159,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE( ParseDetectionPostProcess, ParseDetectionPostProcessCustomOptions )
+TEST_CASE_FIXTURE(ParseDetectionPostProcessCustomOptions, "ParseDetectionPostProcess")
 {
     Setup();
 
@@ -223,7 +221,7 @@
     RunTest<armnn::DataType::QAsymmU8, armnn::DataType::Float32>(0, input, output);
 }
 
-BOOST_FIXTURE_TEST_CASE(DetectionPostProcessGraphStructureTest, ParseDetectionPostProcessCustomOptions)
+TEST_CASE_FIXTURE(ParseDetectionPostProcessCustomOptions, "DetectionPostProcessGraphStructureTest")
 {
     /*
        Inputs:            box_encodings  scores
@@ -244,47 +242,47 @@
     armnn::Graph& graph = GetGraphForTesting(optimized.get());
 
     // Check the number of layers in the graph
-    BOOST_TEST((graph.GetNumInputs() == 2));
-    BOOST_TEST((graph.GetNumOutputs() == 4));
-    BOOST_TEST((graph.GetNumLayers() == 7));
+    CHECK((graph.GetNumInputs() == 2));
+    CHECK((graph.GetNumOutputs() == 4));
+    CHECK((graph.GetNumLayers() == 7));
 
     // Input layers
     armnn::Layer* boxEncodingLayer = GetFirstLayerWithName(graph, "box_encodings");
-    BOOST_TEST((boxEncodingLayer->GetType() == armnn::LayerType::Input));
-    BOOST_TEST(CheckNumberOfInputSlot(boxEncodingLayer, 0));
-    BOOST_TEST(CheckNumberOfOutputSlot(boxEncodingLayer, 1));
+    CHECK((boxEncodingLayer->GetType() == armnn::LayerType::Input));
+    CHECK(CheckNumberOfInputSlot(boxEncodingLayer, 0));
+    CHECK(CheckNumberOfOutputSlot(boxEncodingLayer, 1));
 
     armnn::Layer* scoresLayer = GetFirstLayerWithName(graph, "scores");
-    BOOST_TEST((scoresLayer->GetType() == armnn::LayerType::Input));
-    BOOST_TEST(CheckNumberOfInputSlot(scoresLayer, 0));
-    BOOST_TEST(CheckNumberOfOutputSlot(scoresLayer, 1));
+    CHECK((scoresLayer->GetType() == armnn::LayerType::Input));
+    CHECK(CheckNumberOfInputSlot(scoresLayer, 0));
+    CHECK(CheckNumberOfOutputSlot(scoresLayer, 1));
 
     // DetectionPostProcess layer
     armnn::Layer* detectionPostProcessLayer = GetFirstLayerWithName(graph, "DetectionPostProcess:0:0");
-    BOOST_TEST((detectionPostProcessLayer->GetType() == armnn::LayerType::DetectionPostProcess));
-    BOOST_TEST(CheckNumberOfInputSlot(detectionPostProcessLayer, 2));
-    BOOST_TEST(CheckNumberOfOutputSlot(detectionPostProcessLayer, 4));
+    CHECK((detectionPostProcessLayer->GetType() == armnn::LayerType::DetectionPostProcess));
+    CHECK(CheckNumberOfInputSlot(detectionPostProcessLayer, 2));
+    CHECK(CheckNumberOfOutputSlot(detectionPostProcessLayer, 4));
 
     // Output layers
     armnn::Layer* detectionBoxesLayer = GetFirstLayerWithName(graph, "detection_boxes");
-    BOOST_TEST((detectionBoxesLayer->GetType() == armnn::LayerType::Output));
-    BOOST_TEST(CheckNumberOfInputSlot(detectionBoxesLayer, 1));
-    BOOST_TEST(CheckNumberOfOutputSlot(detectionBoxesLayer, 0));
+    CHECK((detectionBoxesLayer->GetType() == armnn::LayerType::Output));
+    CHECK(CheckNumberOfInputSlot(detectionBoxesLayer, 1));
+    CHECK(CheckNumberOfOutputSlot(detectionBoxesLayer, 0));
 
     armnn::Layer* detectionClassesLayer = GetFirstLayerWithName(graph, "detection_classes");
-    BOOST_TEST((detectionClassesLayer->GetType() == armnn::LayerType::Output));
-    BOOST_TEST(CheckNumberOfInputSlot(detectionClassesLayer, 1));
-    BOOST_TEST(CheckNumberOfOutputSlot(detectionClassesLayer, 0));
+    CHECK((detectionClassesLayer->GetType() == armnn::LayerType::Output));
+    CHECK(CheckNumberOfInputSlot(detectionClassesLayer, 1));
+    CHECK(CheckNumberOfOutputSlot(detectionClassesLayer, 0));
 
     armnn::Layer* detectionScoresLayer = GetFirstLayerWithName(graph, "detection_scores");
-    BOOST_TEST((detectionScoresLayer->GetType() == armnn::LayerType::Output));
-    BOOST_TEST(CheckNumberOfInputSlot(detectionScoresLayer, 1));
-    BOOST_TEST(CheckNumberOfOutputSlot(detectionScoresLayer, 0));
+    CHECK((detectionScoresLayer->GetType() == armnn::LayerType::Output));
+    CHECK(CheckNumberOfInputSlot(detectionScoresLayer, 1));
+    CHECK(CheckNumberOfOutputSlot(detectionScoresLayer, 0));
 
     armnn::Layer* numDetectionsLayer = GetFirstLayerWithName(graph, "num_detections");
-    BOOST_TEST((numDetectionsLayer->GetType() == armnn::LayerType::Output));
-    BOOST_TEST(CheckNumberOfInputSlot(numDetectionsLayer, 1));
-    BOOST_TEST(CheckNumberOfOutputSlot(numDetectionsLayer, 0));
+    CHECK((numDetectionsLayer->GetType() == armnn::LayerType::Output));
+    CHECK(CheckNumberOfInputSlot(numDetectionsLayer, 1));
+    CHECK(CheckNumberOfOutputSlot(numDetectionsLayer, 0));
 
     // Check the connections
     armnn::TensorInfo boxEncodingTensor(armnn::TensorShape({ 1, 6, 4 }), armnn::DataType::QAsymmU8, 1, 1);
@@ -296,12 +294,12 @@
     armnn::TensorInfo detectionScoresTensor(armnn::TensorShape({ 1, 3 }), armnn::DataType::Float32, 0, 0);
     armnn::TensorInfo numDetectionsTensor(armnn::TensorShape({ 1} ), armnn::DataType::Float32, 0, 0);
 
-    BOOST_TEST(IsConnected(boxEncodingLayer, detectionPostProcessLayer, 0, 0, boxEncodingTensor));
-    BOOST_TEST(IsConnected(scoresLayer, detectionPostProcessLayer, 0, 1, scoresTensor));
-    BOOST_TEST(IsConnected(detectionPostProcessLayer, detectionBoxesLayer, 0, 0, detectionBoxesTensor));
-    BOOST_TEST(IsConnected(detectionPostProcessLayer, detectionClassesLayer, 1, 0, detectionClassesTensor));
-    BOOST_TEST(IsConnected(detectionPostProcessLayer, detectionScoresLayer, 2, 0, detectionScoresTensor));
-    BOOST_TEST(IsConnected(detectionPostProcessLayer, numDetectionsLayer, 3, 0, numDetectionsTensor));
+    CHECK(IsConnected(boxEncodingLayer, detectionPostProcessLayer, 0, 0, boxEncodingTensor));
+    CHECK(IsConnected(scoresLayer, detectionPostProcessLayer, 0, 1, scoresTensor));
+    CHECK(IsConnected(detectionPostProcessLayer, detectionBoxesLayer, 0, 0, detectionBoxesTensor));
+    CHECK(IsConnected(detectionPostProcessLayer, detectionClassesLayer, 1, 0, detectionClassesTensor));
+    CHECK(IsConnected(detectionPostProcessLayer, detectionScoresLayer, 2, 0, detectionScoresTensor));
+    CHECK(IsConnected(detectionPostProcessLayer, numDetectionsLayer, 3, 0, numDetectionsTensor));
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Div.cpp b/src/armnnTfLiteParser/test/Div.cpp
index 10be29d..736e821 100644
--- a/src/armnnTfLiteParser/test/Div.cpp
+++ b/src/armnnTfLiteParser/test/Div.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Div")
+{
 struct DivFixture : public ParserFlatbuffersFixture
 {
     explicit DivFixture(const std::string & inputShape1,
@@ -92,7 +91,7 @@
     SimpleDivFixture() : DivFixture("[ 1, 2, 2, 3 ]", "[ 1, 2, 2, 3 ]", "[ 1, 2, 2, 3 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseDiv, SimpleDivFixture)
+TEST_CASE_FIXTURE(SimpleDivFixture, "ParseDiv")
 {
     using armnn::DataType;
     float Inf = std::numeric_limits<float>::infinity();
@@ -118,7 +117,7 @@
     DynamicDivFixture() : DivFixture("[ 1, 2, 2, 3 ]", "[ 1, 2, 2, 3 ]", "[  ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseDynamicDiv, DynamicDivFixture)
+TEST_CASE_FIXTURE(DynamicDivFixture, "ParseDynamicDiv")
 {
     using armnn::DataType;
     float Inf = std::numeric_limits<float>::infinity();
@@ -138,4 +137,4 @@
                                                          1.0f,  1.0f, -1.0f } } }, true);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/ElementWiseUnary.cpp b/src/armnnTfLiteParser/test/ElementWiseUnary.cpp
index dc236d2..21718d8 100644
--- a/src/armnnTfLiteParser/test/ElementWiseUnary.cpp
+++ b/src/armnnTfLiteParser/test/ElementWiseUnary.cpp
@@ -3,14 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_ElementwiseUnary")
+{
 struct ElementWiseUnaryFixture : public ParserFlatbuffersFixture
 {
     explicit ElementWiseUnaryFixture(const std::string& operatorCode,
@@ -75,7 +74,7 @@
     SimpleAbsFixture() : ElementWiseUnaryFixture("ABS", "FLOAT32", "[ 2, 2 ]", "[ 2, 2 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseAbs, SimpleAbsFixture)
+TEST_CASE_FIXTURE(SimpleAbsFixture, "ParseAbs")
 {
     std::vector<float> inputValues
     {
@@ -99,7 +98,7 @@
     SimpleExpFixture() : ElementWiseUnaryFixture("EXP", "FLOAT32", "[ 1, 2, 3, 1 ]", "[ 1, 2, 3, 1 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseExp, SimpleExpFixture)
+TEST_CASE_FIXTURE(SimpleExpFixture, "ParseExp")
 {
     RunTest<4, armnn::DataType::Float32>(0, {{ "inputTensor", { 0.0f,  1.0f,  2.0f,
                                                                 3.0f,  4.0f,  5.0f} }},
@@ -112,7 +111,7 @@
     SimpleLogicalNotFixture() : ElementWiseUnaryFixture("LOGICAL_NOT", "BOOL", "[ 1, 1, 1, 4 ]", "[ 1, 1, 1, 4 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseLogicalNot, SimpleLogicalNotFixture)
+TEST_CASE_FIXTURE(SimpleLogicalNotFixture, "ParseLogicalNot")
 {
     RunTest<4, armnn::DataType::Boolean>(0, {{ "inputTensor", { 0, 1, 0, 1 } }},
                                             {{ "outputTensor",{ 1, 0, 1, 0 } } });
@@ -123,7 +122,7 @@
     SimpleNegFixture() : ElementWiseUnaryFixture("NEG", "FLOAT32", "[ 1, 2, 3, 1 ]", "[ 1, 2, 3, 1 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseNeg, SimpleNegFixture)
+TEST_CASE_FIXTURE(SimpleNegFixture, "ParseNeg")
 {
     RunTest<4, armnn::DataType::Float32>(0, {{ "inputTensor", { 0.0f, 1.0f, -2.0f,
                                                                 20.0855185f, -54.5980834f, 5.0f} }},
@@ -136,7 +135,7 @@
     SimpleRsqrtFixture() : ElementWiseUnaryFixture("RSQRT", "FLOAT32", "[ 1, 2, 3, 1 ]", "[ 1, 2, 3, 1 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseRsqrt, SimpleRsqrtFixture)
+TEST_CASE_FIXTURE(SimpleRsqrtFixture, "ParseRsqrt")
 {
     RunTest<4, armnn::DataType::Float32>(0, {{ "inputTensor", { 1.0f, 4.0f, 16.0f,
                                                                 25.0f, 64.0f, 100.0f } }},
@@ -144,5 +143,4 @@
                                                                 0.2f, 0.125f, 0.1f} }});
 }
 
-
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/FullyConnected.cpp b/src/armnnTfLiteParser/test/FullyConnected.cpp
index 1ce1b2f..521ab34 100644
--- a/src/armnnTfLiteParser/test/FullyConnected.cpp
+++ b/src/armnnTfLiteParser/test/FullyConnected.cpp
@@ -3,14 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_FullyConnected")
+{
 struct FullyConnectedFixture : public ParserFlatbuffersFixture
 {
     explicit FullyConnectedFixture(const std::string& inputShape,
@@ -122,7 +121,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE(FullyConnectedWithNoBias, FullyConnectedWithNoBiasFixture)
+TEST_CASE_FIXTURE(FullyConnectedWithNoBiasFixture, "FullyConnectedWithNoBias")
 {
     RunTest<2, armnn::DataType::QAsymmU8>(
         0,
@@ -142,7 +141,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseFullyConnectedWithBias, FullyConnectedWithBiasFixture)
+TEST_CASE_FIXTURE(FullyConnectedWithBiasFixture, "ParseFullyConnectedWithBias")
 {
     RunTest<2, armnn::DataType::QAsymmU8>(
         0,
@@ -162,7 +161,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE(FullyConnectedWithBiasMultipleOutputs, FullyConnectedWithBiasMultipleOutputsFixture)
+TEST_CASE_FIXTURE(FullyConnectedWithBiasMultipleOutputsFixture, "FullyConnectedWithBiasMultipleOutputs")
 {
     RunTest<2, armnn::DataType::QAsymmU8>(
             0,
@@ -182,9 +181,8 @@
     { }
 };
 
-BOOST_FIXTURE_TEST_CASE(
-    DynamicFullyConnectedWithBiasMultipleOutputs,
-    DynamicFullyConnectedWithBiasMultipleOutputsFixture)
+TEST_CASE_FIXTURE(DynamicFullyConnectedWithBiasMultipleOutputsFixture,
+                  "DynamicFullyConnectedWithBiasMultipleOutputs")
 {
     RunTest<2,
             armnn::DataType::QAsymmU8,
@@ -327,7 +325,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseFullyConnectedNonConstWeights, FullyConnectedNonConstWeights)
+TEST_CASE_FIXTURE(FullyConnectedNonConstWeights, "ParseFullyConnectedNonConstWeights")
 {
     RunTest<2, armnn::DataType::QAsymmS8,
             armnn::DataType::Signed32,
@@ -348,7 +346,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseFullyConnectedNonConstWeightsNoBias, FullyConnectedNonConstWeightsNoBias)
+TEST_CASE_FIXTURE(FullyConnectedNonConstWeightsNoBias, "ParseFullyConnectedNonConstWeightsNoBias")
 {
     RunTest<2, armnn::DataType::QAsymmS8,
             armnn::DataType::QAsymmS8>(
@@ -357,4 +355,4 @@
             {{"output", { 20 }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Gather.cpp b/src/armnnTfLiteParser/test/Gather.cpp
index 498d56d..3c0bd9d 100644
--- a/src/armnnTfLiteParser/test/Gather.cpp
+++ b/src/armnnTfLiteParser/test/Gather.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Gather")
+{
 struct GatherFixture : public ParserFlatbuffersFixture
 {
     explicit GatherFixture(const std::string& paramsShape,
@@ -95,7 +94,7 @@
     SimpleGatherFixture() : GatherFixture("[ 5, 2 ]", "[ 3, 2 ]", "[ 3 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseGather, SimpleGatherFixture)
+TEST_CASE_FIXTURE(SimpleGatherFixture, "ParseGather")
 {
     RunTest<2, armnn::DataType::Float32, armnn::DataType::Signed32, armnn::DataType::Float32>
         (0,
@@ -109,7 +108,7 @@
     GatherUint8Fixture() : GatherFixture("[ 8 ]", "[ 3 ]", "[ 3 ]", "UINT8") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseGatherUint8, GatherUint8Fixture)
+TEST_CASE_FIXTURE(GatherUint8Fixture, "ParseGatherUint8")
 {
     RunTest<1, armnn::DataType::QAsymmU8, armnn::DataType::Signed32, armnn::DataType::QAsymmU8>
         (0,
@@ -118,4 +117,4 @@
          {{ "outputTensor", { 8, 7, 6 }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/GetBuffer.cpp b/src/armnnTfLiteParser/test/GetBuffer.cpp
index 0e72522..9dfc9ff 100644
--- a/src/armnnTfLiteParser/test/GetBuffer.cpp
+++ b/src/armnnTfLiteParser/test/GetBuffer.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 #include <sstream>
 
 using armnnTfLiteParser::TfLiteParserImpl;
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_GetBuffer")
+{
 struct GetBufferFixture : public ParserFlatbuffersFixture
 {
     explicit GetBufferFixture()
@@ -93,12 +92,12 @@
     {
         for(long unsigned int i=0; i<bufferValues.size(); i++)
         {
-            BOOST_CHECK_EQUAL(TfLiteParserImpl::GetBuffer(model, bufferIndex)->data[i], bufferValues[i]);
+            CHECK_EQ(TfLiteParserImpl::GetBuffer(model, bufferIndex)->data[i], bufferValues[i]);
         }
     }
 };
 
-BOOST_FIXTURE_TEST_CASE(GetBufferCheckContents, GetBufferFixture)
+TEST_CASE_FIXTURE(GetBufferFixture, "GetBufferCheckContents")
 {
     //Check contents of buffer are correct
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
@@ -107,23 +106,23 @@
     CheckBufferContents(model, bufferValues, 2);
 }
 
-BOOST_FIXTURE_TEST_CASE(GetBufferCheckEmpty, GetBufferFixture)
+TEST_CASE_FIXTURE(GetBufferFixture, "GetBufferCheckEmpty")
 {
     //Check if test fixture buffers are empty or not
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
-    BOOST_CHECK(TfLiteParserImpl::GetBuffer(model, 0)->data.empty());
-    BOOST_CHECK(TfLiteParserImpl::GetBuffer(model, 1)->data.empty());
-    BOOST_CHECK(!TfLiteParserImpl::GetBuffer(model, 2)->data.empty());
-    BOOST_CHECK(TfLiteParserImpl::GetBuffer(model, 3)->data.empty());
+    CHECK(TfLiteParserImpl::GetBuffer(model, 0)->data.empty());
+    CHECK(TfLiteParserImpl::GetBuffer(model, 1)->data.empty());
+    CHECK(!TfLiteParserImpl::GetBuffer(model, 2)->data.empty());
+    CHECK(TfLiteParserImpl::GetBuffer(model, 3)->data.empty());
 }
 
-BOOST_FIXTURE_TEST_CASE(GetBufferCheckParseException, GetBufferFixture)
+TEST_CASE_FIXTURE(GetBufferFixture, "GetBufferCheckParseException")
 {
     //Check if armnn::ParseException thrown when invalid buffer index used
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
-    BOOST_CHECK_THROW(TfLiteParserImpl::GetBuffer(model, 4), armnn::Exception);
+    CHECK_THROWS_AS(TfLiteParserImpl::GetBuffer(model, 4), armnn::Exception);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/GetInputsOutputs.cpp b/src/armnnTfLiteParser/test/GetInputsOutputs.cpp
index 894de0c..398217f 100644
--- a/src/armnnTfLiteParser/test/GetInputsOutputs.cpp
+++ b/src/armnnTfLiteParser/test/GetInputsOutputs.cpp
@@ -2,15 +2,15 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
-#include <boost/test/unit_test.hpp>
+
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 using armnnTfLiteParser::TfLiteParserImpl;
 using ModelPtr = TfLiteParserImpl::ModelPtr;
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_GetInputsOutputs")
+{
 struct GetInputsOutputsMainFixture : public ParserFlatbuffersFixture
 {
     explicit GetInputsOutputsMainFixture(const std::string& inputs, const std::string& outputs)
@@ -150,100 +150,100 @@
     GetInputsOutputsFixture() : GetInputsOutputsMainFixture("[ 1 ]", "[ 0 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(GetEmptyInputs, GetEmptyInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetEmptyInputsOutputsFixture, "GetEmptyInputs")
 {
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
     TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetInputs(model, 0, 0);
-    BOOST_CHECK_EQUAL(0, tensors.size());
+    CHECK_EQ(0, tensors.size());
 }
 
-BOOST_FIXTURE_TEST_CASE(GetEmptyOutputs, GetEmptyInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetEmptyInputsOutputsFixture, "GetEmptyOutputs")
 {
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
     TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetOutputs(model, 0, 0);
-    BOOST_CHECK_EQUAL(0, tensors.size());
+    CHECK_EQ(0, tensors.size());
 }
 
-BOOST_FIXTURE_TEST_CASE(GetInputs, GetInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetInputsOutputsFixture, "GetInputs")
 {
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
     TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetInputs(model, 0, 0);
-    BOOST_CHECK_EQUAL(1, tensors.size());
+    CHECK_EQ(1, tensors.size());
     CheckTensors(tensors[0], 4, { 1, 2, 2, 1 }, tflite::TensorType::TensorType_UINT8, 1,
                       "InputTensor", { -1.2f }, { 25.5f }, { 0.25f }, { 10 });
 }
 
-BOOST_FIXTURE_TEST_CASE(GetOutputs, GetInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetInputsOutputsFixture, "GetOutputs")
 {
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
     TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetOutputs(model, 0, 0);
-    BOOST_CHECK_EQUAL(1, tensors.size());
+    CHECK_EQ(1, tensors.size());
     CheckTensors(tensors[0], 4, { 1, 1, 1, 1 }, tflite::TensorType::TensorType_UINT8, 0,
                       "OutputTensor", { 0.0f }, { 255.0f }, { 1.0f }, { 0 });
 }
 
-BOOST_FIXTURE_TEST_CASE(GetInputsMultipleInputs, GetInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetInputsOutputsFixture, "GetInputsMultipleInputs")
 {
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
     TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetInputs(model, 1, 0);
-    BOOST_CHECK_EQUAL(2, tensors.size());
+    CHECK_EQ(2, tensors.size());
     CheckTensors(tensors[0], 4, { 1, 3, 3, 1 }, tflite::TensorType::TensorType_UINT8, 0,
                       "ConvInputTensor", { }, { }, { 1.0f }, { 0 });
     CheckTensors(tensors[1], 4, { 1, 3, 3, 1 }, tflite::TensorType::TensorType_UINT8, 2,
                       "filterTensor", { 0.0f }, { 255.0f }, { 1.0f }, { 0 });
 }
 
-BOOST_FIXTURE_TEST_CASE(GetOutputs2, GetInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetInputsOutputsFixture, "GetOutputs2")
 {
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
     TfLiteParserImpl::TensorRawPtrVector tensors = TfLiteParserImpl::GetOutputs(model, 1, 0);
-    BOOST_CHECK_EQUAL(1, tensors.size());
+    CHECK_EQ(1, tensors.size());
     CheckTensors(tensors[0], 4, { 1, 1, 1, 1 }, tflite::TensorType::TensorType_UINT8, 1,
                       "ConvOutputTensor", { 0.0f }, { 511.0f }, { 2.0f }, { 0 });
 }
 
-BOOST_AUTO_TEST_CASE(GetInputsNullModel)
+TEST_CASE("GetInputsNullModel")
 {
-    BOOST_CHECK_THROW(TfLiteParserImpl::GetInputs(nullptr, 0, 0), armnn::ParseException);
+    CHECK_THROWS_AS(TfLiteParserImpl::GetInputs(nullptr, 0, 0), armnn::ParseException);
 }
 
-BOOST_AUTO_TEST_CASE(GetOutputsNullModel)
+TEST_CASE("GetOutputsNullModel")
 {
-    BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputs(nullptr, 0, 0), armnn::ParseException);
+    CHECK_THROWS_AS(TfLiteParserImpl::GetOutputs(nullptr, 0, 0), armnn::ParseException);
 }
 
-BOOST_FIXTURE_TEST_CASE(GetInputsInvalidSubgraph, GetInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetInputsOutputsFixture, "GetInputsInvalidSubgraph")
 {
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
-    BOOST_CHECK_THROW(TfLiteParserImpl::GetInputs(model, 2, 0), armnn::ParseException);
+    CHECK_THROWS_AS(TfLiteParserImpl::GetInputs(model, 2, 0), armnn::ParseException);
 }
 
-BOOST_FIXTURE_TEST_CASE(GetOutputsInvalidSubgraph, GetInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetInputsOutputsFixture, "GetOutputsInvalidSubgraph")
 {
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
-    BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputs(model, 2, 0), armnn::ParseException);
+    CHECK_THROWS_AS(TfLiteParserImpl::GetOutputs(model, 2, 0), armnn::ParseException);
 }
 
-BOOST_FIXTURE_TEST_CASE(GetInputsInvalidOperator, GetInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetInputsOutputsFixture, "GetInputsInvalidOperator")
 {
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
-    BOOST_CHECK_THROW(TfLiteParserImpl::GetInputs(model, 0, 1), armnn::ParseException);
+    CHECK_THROWS_AS(TfLiteParserImpl::GetInputs(model, 0, 1), armnn::ParseException);
 }
 
-BOOST_FIXTURE_TEST_CASE(GetOutputsInvalidOperator, GetInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetInputsOutputsFixture, "GetOutputsInvalidOperator")
 {
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
-    BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputs(model, 0, 1), armnn::ParseException);
+    CHECK_THROWS_AS(TfLiteParserImpl::GetOutputs(model, 0, 1), armnn::ParseException);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnnTfLiteParser/test/GetSubgraphInputsOutputs.cpp b/src/armnnTfLiteParser/test/GetSubgraphInputsOutputs.cpp
index 100e8e9..5c64449 100644
--- a/src/armnnTfLiteParser/test/GetSubgraphInputsOutputs.cpp
+++ b/src/armnnTfLiteParser/test/GetSubgraphInputsOutputs.cpp
@@ -2,7 +2,7 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
-#include <boost/test/unit_test.hpp>
+
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
@@ -10,8 +10,8 @@
 using ModelPtr = TfLiteParserImpl::ModelPtr;
 using TensorRawPtr = TfLiteParserImpl::TensorRawPtr;
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_GetSubgraphInputsOutputs")
+{
 struct GetSubgraphInputsOutputsMainFixture : public ParserFlatbuffersFixture
 {
     explicit GetSubgraphInputsOutputsMainFixture(const std::string& inputs, const std::string& outputs)
@@ -151,88 +151,88 @@
     GetSubgraphInputsOutputsFixture() : GetSubgraphInputsOutputsMainFixture("[ 1 ]", "[ 0 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(GetEmptySubgraphInputs, GetEmptySubgraphInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetEmptySubgraphInputsOutputsFixture, "GetEmptySubgraphInputs")
 {
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
     TfLiteParserImpl::TensorIdRawPtrVector subgraphTensors = TfLiteParserImpl::GetSubgraphInputs(model, 0);
-    BOOST_CHECK_EQUAL(0, subgraphTensors.size());
+    CHECK_EQ(0, subgraphTensors.size());
 }
 
-BOOST_FIXTURE_TEST_CASE(GetEmptySubgraphOutputs, GetEmptySubgraphInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetEmptySubgraphInputsOutputsFixture, "GetEmptySubgraphOutputs")
 {
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
     TfLiteParserImpl::TensorIdRawPtrVector subgraphTensors = TfLiteParserImpl::GetSubgraphOutputs(model, 0);
-    BOOST_CHECK_EQUAL(0, subgraphTensors.size());
+    CHECK_EQ(0, subgraphTensors.size());
 }
 
-BOOST_FIXTURE_TEST_CASE(GetSubgraphInputs, GetSubgraphInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetSubgraphInputsOutputsFixture, "GetSubgraphInputs")
 {
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
     TfLiteParserImpl::TensorIdRawPtrVector subgraphTensors = TfLiteParserImpl::GetSubgraphInputs(model, 0);
-    BOOST_CHECK_EQUAL(1, subgraphTensors.size());
-    BOOST_CHECK_EQUAL(1, subgraphTensors[0].first);
+    CHECK_EQ(1, subgraphTensors.size());
+    CHECK_EQ(1, subgraphTensors[0].first);
     CheckTensors(subgraphTensors[0].second, 4, { 1, 2, 2, 1 }, tflite::TensorType::TensorType_UINT8, 1,
                       "InputTensor", { -1.2f }, { 25.5f }, { 0.25f }, { 10 });
 }
 
-BOOST_FIXTURE_TEST_CASE(GetSubgraphOutputsSimpleQuantized, GetSubgraphInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetSubgraphInputsOutputsFixture, "GetSubgraphOutputsSimpleQuantized")
 {
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
     TfLiteParserImpl::TensorIdRawPtrVector subgraphTensors = TfLiteParserImpl::GetSubgraphOutputs(model, 0);
-    BOOST_CHECK_EQUAL(1, subgraphTensors.size());
-    BOOST_CHECK_EQUAL(0, subgraphTensors[0].first);
+    CHECK_EQ(1, subgraphTensors.size());
+    CHECK_EQ(0, subgraphTensors[0].first);
     CheckTensors(subgraphTensors[0].second, 4, { 1, 1, 1, 1 }, tflite::TensorType::TensorType_UINT8, 0,
                       "OutputTensor", { 0.0f }, { 255.0f }, { 1.0f }, { 0 });
 }
 
-BOOST_FIXTURE_TEST_CASE(GetSubgraphInputsEmptyMinMax, GetSubgraphInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetSubgraphInputsOutputsFixture, "GetSubgraphInputsEmptyMinMax")
 {
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
     TfLiteParserImpl::TensorIdRawPtrVector subgraphTensors = TfLiteParserImpl::GetSubgraphInputs(model, 1);
-    BOOST_CHECK_EQUAL(1, subgraphTensors.size());
-    BOOST_CHECK_EQUAL(0, subgraphTensors[0].first);
+    CHECK_EQ(1, subgraphTensors.size());
+    CHECK_EQ(0, subgraphTensors[0].first);
     CheckTensors(subgraphTensors[0].second, 4, { 1, 3, 3, 1 }, tflite::TensorType::TensorType_UINT8, 0,
                       "ConvInputTensor", { }, { }, { 1.0f }, { 0 });
 }
 
-BOOST_FIXTURE_TEST_CASE(GetSubgraphOutputs, GetSubgraphInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetSubgraphInputsOutputsFixture, "GetSubgraphOutputs")
 {
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
     TfLiteParserImpl::TensorIdRawPtrVector subgraphTensors = TfLiteParserImpl::GetSubgraphOutputs(model, 1);
-    BOOST_CHECK_EQUAL(1, subgraphTensors.size());
-    BOOST_CHECK_EQUAL(1, subgraphTensors[0].first);
+    CHECK_EQ(1, subgraphTensors.size());
+    CHECK_EQ(1, subgraphTensors[0].first);
     CheckTensors(subgraphTensors[0].second, 4, { 1, 1, 1, 1 }, tflite::TensorType::TensorType_UINT8, 1,
                       "ConvOutputTensor", { 0.0f }, { 511.0f }, { 2.0f }, { 0 });
 }
 
-BOOST_AUTO_TEST_CASE(GetSubgraphInputsNullModel)
+TEST_CASE("GetSubgraphInputsNullModel")
 {
-    BOOST_CHECK_THROW(TfLiteParserImpl::GetSubgraphInputs(nullptr, 0), armnn::ParseException);
+    CHECK_THROWS_AS(TfLiteParserImpl::GetSubgraphInputs(nullptr, 0), armnn::ParseException);
 }
 
-BOOST_AUTO_TEST_CASE(GetSubgraphOutputsNullModel)
+TEST_CASE("GetSubgraphOutputsNullModel")
 {
-    BOOST_CHECK_THROW(TfLiteParserImpl::GetSubgraphOutputs(nullptr, 0), armnn::ParseException);
+    CHECK_THROWS_AS(TfLiteParserImpl::GetSubgraphOutputs(nullptr, 0), armnn::ParseException);
 }
 
-BOOST_FIXTURE_TEST_CASE(GetSubgraphInputsInvalidSubgraph, GetSubgraphInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetSubgraphInputsOutputsFixture, "GetSubgraphInputsInvalidSubgraph")
 {
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
-    BOOST_CHECK_THROW(TfLiteParserImpl::GetSubgraphInputs(model, 2), armnn::ParseException);
+    CHECK_THROWS_AS(TfLiteParserImpl::GetSubgraphInputs(model, 2), armnn::ParseException);
 }
 
-BOOST_FIXTURE_TEST_CASE(GetSubgraphOutputsInvalidSubgraph, GetSubgraphInputsOutputsFixture)
+TEST_CASE_FIXTURE(GetSubgraphInputsOutputsFixture, "GetSubgraphOutputsInvalidSubgraph")
 {
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
-    BOOST_CHECK_THROW(TfLiteParserImpl::GetSubgraphOutputs(model, 2), armnn::ParseException);
+    CHECK_THROWS_AS(TfLiteParserImpl::GetSubgraphOutputs(model, 2), armnn::ParseException);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnnTfLiteParser/test/GetTensorIds.cpp b/src/armnnTfLiteParser/test/GetTensorIds.cpp
index f45f6e6..5b17dcd 100644
--- a/src/armnnTfLiteParser/test/GetTensorIds.cpp
+++ b/src/armnnTfLiteParser/test/GetTensorIds.cpp
@@ -2,15 +2,15 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
-#include <boost/test/unit_test.hpp>
+
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 using armnnTfLiteParser::TfLiteParserImpl;
 using ModelPtr = TfLiteParserImpl::ModelPtr;
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_GetTensorIds")
+{
 struct GetTensorIdsFixture : public ParserFlatbuffersFixture
 {
     explicit GetTensorIdsFixture(const std::string& inputs, const std::string& outputs)
@@ -89,82 +89,82 @@
     GetInputOutputTensorIdsFixture() : GetTensorIdsFixture("[ 0, 1, 2 ]", "[ 3 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(GetEmptyInputTensorIds, GetEmptyTensorIdsFixture)
+TEST_CASE_FIXTURE(GetEmptyTensorIdsFixture, "GetEmptyInputTensorIds")
 {
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
     std::vector<int32_t> expectedIds = { };
     std::vector<int32_t> inputTensorIds = TfLiteParserImpl::GetInputTensorIds(model, 0, 0);
-    BOOST_CHECK_EQUAL_COLLECTIONS(expectedIds.begin(), expectedIds.end(),
-                                  inputTensorIds.begin(), inputTensorIds.end());
+    CHECK(std::equal(expectedIds.begin(), expectedIds.end(),
+                     inputTensorIds.begin(), inputTensorIds.end()));
 }
 
-BOOST_FIXTURE_TEST_CASE(GetEmptyOutputTensorIds, GetEmptyTensorIdsFixture)
+TEST_CASE_FIXTURE(GetEmptyTensorIdsFixture, "GetEmptyOutputTensorIds")
 {
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
     std::vector<int32_t> expectedIds = { };
     std::vector<int32_t> outputTensorIds = TfLiteParserImpl::GetOutputTensorIds(model, 0, 0);
-    BOOST_CHECK_EQUAL_COLLECTIONS(expectedIds.begin(), expectedIds.end(),
-                                  outputTensorIds.begin(), outputTensorIds.end());
+    CHECK(std::equal(expectedIds.begin(), expectedIds.end(),
+                     outputTensorIds.begin(), outputTensorIds.end()));
 }
 
-BOOST_FIXTURE_TEST_CASE(GetInputTensorIds, GetInputOutputTensorIdsFixture)
+TEST_CASE_FIXTURE(GetInputOutputTensorIdsFixture, "GetInputTensorIds")
 {
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
     std::vector<int32_t> expectedInputIds = { 0, 1, 2 };
     std::vector<int32_t> inputTensorIds = TfLiteParserImpl::GetInputTensorIds(model, 0, 0);
-    BOOST_CHECK_EQUAL_COLLECTIONS(expectedInputIds.begin(), expectedInputIds.end(),
-                                  inputTensorIds.begin(), inputTensorIds.end());
+    CHECK(std::equal(expectedInputIds.begin(), expectedInputIds.end(),
+                     inputTensorIds.begin(), inputTensorIds.end()));
 }
 
-BOOST_FIXTURE_TEST_CASE(GetOutputTensorIds, GetInputOutputTensorIdsFixture)
+TEST_CASE_FIXTURE(GetInputOutputTensorIdsFixture, "GetOutputTensorIds")
 {
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
     std::vector<int32_t> expectedOutputIds = { 3 };
     std::vector<int32_t> outputTensorIds = TfLiteParserImpl::GetOutputTensorIds(model, 0, 0);
-    BOOST_CHECK_EQUAL_COLLECTIONS(expectedOutputIds.begin(), expectedOutputIds.end(),
-                                  outputTensorIds.begin(), outputTensorIds.end());
+    CHECK(std::equal(expectedOutputIds.begin(), expectedOutputIds.end(),
+                     outputTensorIds.begin(), outputTensorIds.end()));
 }
 
-BOOST_FIXTURE_TEST_CASE(GetInputTensorIdsNullModel, GetInputOutputTensorIdsFixture)
+TEST_CASE_FIXTURE(GetInputOutputTensorIdsFixture, "GetInputTensorIdsNullModel")
 {
-    BOOST_CHECK_THROW(TfLiteParserImpl::GetInputTensorIds(nullptr, 0, 0), armnn::ParseException);
+    CHECK_THROWS_AS(TfLiteParserImpl::GetInputTensorIds(nullptr, 0, 0), armnn::ParseException);
 }
 
-BOOST_FIXTURE_TEST_CASE(GetOutputTensorIdsNullModel, GetInputOutputTensorIdsFixture)
+TEST_CASE_FIXTURE(GetInputOutputTensorIdsFixture, "GetOutputTensorIdsNullModel")
 {
-    BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputTensorIds(nullptr, 0, 0), armnn::ParseException);
+    CHECK_THROWS_AS(TfLiteParserImpl::GetOutputTensorIds(nullptr, 0, 0), armnn::ParseException);
 }
 
-BOOST_FIXTURE_TEST_CASE(GetInputTensorIdsInvalidSubgraph, GetInputOutputTensorIdsFixture)
+TEST_CASE_FIXTURE(GetInputOutputTensorIdsFixture, "GetInputTensorIdsInvalidSubgraph")
 {
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
-    BOOST_CHECK_THROW(TfLiteParserImpl::GetInputTensorIds(model, 1, 0), armnn::ParseException);
+    CHECK_THROWS_AS(TfLiteParserImpl::GetInputTensorIds(model, 1, 0), armnn::ParseException);
 }
 
-BOOST_FIXTURE_TEST_CASE(GetOutputTensorIdsInvalidSubgraph, GetInputOutputTensorIdsFixture)
+TEST_CASE_FIXTURE(GetInputOutputTensorIdsFixture, "GetOutputTensorIdsInvalidSubgraph")
 {
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
-    BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputTensorIds(model, 1, 0), armnn::ParseException);
+    CHECK_THROWS_AS(TfLiteParserImpl::GetOutputTensorIds(model, 1, 0), armnn::ParseException);
 }
 
-BOOST_FIXTURE_TEST_CASE(GetInputTensorIdsInvalidOperator, GetInputOutputTensorIdsFixture)
+TEST_CASE_FIXTURE(GetInputOutputTensorIdsFixture, "GetInputTensorIdsInvalidOperator")
 {
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
-    BOOST_CHECK_THROW(TfLiteParserImpl::GetInputTensorIds(model, 0, 1), armnn::ParseException);
+    CHECK_THROWS_AS(TfLiteParserImpl::GetInputTensorIds(model, 0, 1), armnn::ParseException);
 }
 
-BOOST_FIXTURE_TEST_CASE(GetOutputTensorIdsInvalidOperator, GetInputOutputTensorIdsFixture)
+TEST_CASE_FIXTURE(GetInputOutputTensorIdsFixture, "GetOutputTensorIdsInvalidOperator")
 {
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
-    BOOST_CHECK_THROW(TfLiteParserImpl::GetOutputTensorIds(model, 0, 1), armnn::ParseException);
+    CHECK_THROWS_AS(TfLiteParserImpl::GetOutputTensorIds(model, 0, 1), armnn::ParseException);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
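
A note on the collection checks in this file: the four-iterator std::equal overload that replaces BOOST_CHECK_EQUAL_COLLECTIONS lives in <algorithm> (C++14), and on failure doctest can only report the whole expression as false rather than pointing at the first mismatching element. Below is a minimal standalone sketch of one way to keep element-wise diagnostics; the helper name CheckCollectionsEqual and the self-contained main are illustrative assumptions, not part of this patch.

    // Sketch: collection comparison with per-element failure output.
    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN   // standalone entry point for this sketch only
    #include <doctest/doctest.h>

    #include <cstddef>
    #include <cstdint>
    #include <vector>

    template <typename C1, typename C2>
    void CheckCollectionsEqual(const C1& expected, const C2& actual)
    {
        REQUIRE_EQ(expected.size(), actual.size());    // stop early if the sizes differ
        for (std::size_t i = 0; i < expected.size(); ++i)
        {
            CHECK_EQ(expected[i], actual[i]);          // failure output names the offending pair
        }
    }

    TEST_CASE("CheckCollectionsEqualSketch")
    {
        std::vector<int32_t> expectedIds    = { 0, 1, 2 };
        std::vector<int32_t> inputTensorIds = { 0, 1, 2 };
        CheckCollectionsEqual(expectedIds, inputTensorIds);
    }
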
diff --git a/src/armnnTfLiteParser/test/InputOutputTensorNames.cpp b/src/armnnTfLiteParser/test/InputOutputTensorNames.cpp
index d7a4371..97d9381 100644
--- a/src/armnnTfLiteParser/test/InputOutputTensorNames.cpp
+++ b/src/armnnTfLiteParser/test/InputOutputTensorNames.cpp
@@ -3,12 +3,11 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_InputOutputTensorNames")
+{
 struct EmptyNetworkFixture : public ParserFlatbuffersFixture
 {
     explicit EmptyNetworkFixture() {
@@ -21,12 +20,12 @@
     }
 };
 
-BOOST_FIXTURE_TEST_CASE(EmptyNetworkHasNoInputsAndOutputs, EmptyNetworkFixture)
+TEST_CASE_FIXTURE(EmptyNetworkFixture, "EmptyNetworkHasNoInputsAndOutputs")
 {
     Setup();
-    BOOST_TEST(m_Parser->GetSubgraphCount() == 1);
-    BOOST_TEST(m_Parser->GetSubgraphInputTensorNames(0).size() == 0);
-    BOOST_TEST(m_Parser->GetSubgraphOutputTensorNames(0).size() == 0);
+    CHECK(m_Parser->GetSubgraphCount() == 1);
+    CHECK(m_Parser->GetSubgraphInputTensorNames(0).size() == 0);
+    CHECK(m_Parser->GetSubgraphOutputTensorNames(0).size() == 0);
 }
 
 struct MissingTensorsFixture : public ParserFlatbuffersFixture
@@ -45,10 +44,10 @@
     }
 };
 
-BOOST_FIXTURE_TEST_CASE(MissingTensorsThrowException, MissingTensorsFixture)
+TEST_CASE_FIXTURE(MissingTensorsFixture, "MissingTensorsThrowException")
 {
     // this throws because it cannot do the input output tensor connections
-    BOOST_CHECK_THROW(Setup(), armnn::ParseException);
+    CHECK_THROWS_AS(Setup(), armnn::ParseException);
 }
 
 struct InvalidTensorsFixture : public ParserFlatbuffersFixture
@@ -78,11 +77,11 @@
     }
 };
 
-BOOST_FIXTURE_TEST_CASE(InvalidTensorsThrowException, InvalidTensorsFixture)
+TEST_CASE_FIXTURE(InvalidTensorsFixture, "InvalidTensorsThrowException")
 {
     // Tensor numDimensions must be less than or equal to MaxNumOfTensorDimensions
     static_assert(armnn::MaxNumOfTensorDimensions == 5, "Please update InvalidTensorsFixture");
-    BOOST_CHECK_THROW(Setup(), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(Setup(), armnn::InvalidArgumentException);
 }
 
 struct ValidTensorsFixture : public ParserFlatbuffersFixture
@@ -128,22 +127,22 @@
     }
 };
 
-BOOST_FIXTURE_TEST_CASE(GetValidInputOutputTensorNames, ValidTensorsFixture)
+TEST_CASE_FIXTURE(ValidTensorsFixture, "GetValidInputOutputTensorNames")
 {
     Setup();
-    BOOST_CHECK_EQUAL(m_Parser->GetSubgraphInputTensorNames(0).size(), 1u);
-    BOOST_CHECK_EQUAL(m_Parser->GetSubgraphOutputTensorNames(0).size(), 1u);
-    BOOST_CHECK_EQUAL(m_Parser->GetSubgraphInputTensorNames(0)[0], "In");
-    BOOST_CHECK_EQUAL(m_Parser->GetSubgraphOutputTensorNames(0)[0], "Out");
+    CHECK_EQ(m_Parser->GetSubgraphInputTensorNames(0).size(), 1u);
+    CHECK_EQ(m_Parser->GetSubgraphOutputTensorNames(0).size(), 1u);
+    CHECK_EQ(m_Parser->GetSubgraphInputTensorNames(0)[0], "In");
+    CHECK_EQ(m_Parser->GetSubgraphOutputTensorNames(0)[0], "Out");
 }
 
-BOOST_FIXTURE_TEST_CASE(ThrowIfSubgraphIdInvalidForInOutNames, ValidTensorsFixture)
+TEST_CASE_FIXTURE(ValidTensorsFixture, "ThrowIfSubgraphIdInvalidForInOutNames")
 {
     Setup();
 
     // these throw because of the invalid subgraph id
-    BOOST_CHECK_THROW(m_Parser->GetSubgraphInputTensorNames(1), armnn::ParseException);
-    BOOST_CHECK_THROW(m_Parser->GetSubgraphOutputTensorNames(1), armnn::ParseException);
+    CHECK_THROWS_AS(m_Parser->GetSubgraphInputTensorNames(1), armnn::ParseException);
+    CHECK_THROWS_AS(m_Parser->GetSubgraphOutputTensorNames(1), armnn::ParseException);
 }
 
 struct Rank0TensorFixture : public ParserFlatbuffersFixture
@@ -185,14 +184,14 @@
     }
 };
 
-BOOST_FIXTURE_TEST_CASE(Rank0Tensor, Rank0TensorFixture)
+TEST_CASE_FIXTURE(Rank0TensorFixture, "Rank0Tensor")
 {
     Setup();
-    BOOST_CHECK_EQUAL(m_Parser->GetSubgraphInputTensorNames(0).size(), 2u);
-    BOOST_CHECK_EQUAL(m_Parser->GetSubgraphOutputTensorNames(0).size(), 1u);
-    BOOST_CHECK_EQUAL(m_Parser->GetSubgraphInputTensorNames(0)[0], "In0");
-    BOOST_CHECK_EQUAL(m_Parser->GetSubgraphInputTensorNames(0)[1], "In1");
-    BOOST_CHECK_EQUAL(m_Parser->GetSubgraphOutputTensorNames(0)[0], "Out");
+    CHECK_EQ(m_Parser->GetSubgraphInputTensorNames(0).size(), 2u);
+    CHECK_EQ(m_Parser->GetSubgraphOutputTensorNames(0).size(), 1u);
+    CHECK_EQ(m_Parser->GetSubgraphInputTensorNames(0)[0], "In0");
+    CHECK_EQ(m_Parser->GetSubgraphInputTensorNames(0)[1], "In1");
+    CHECK_EQ(m_Parser->GetSubgraphOutputTensorNames(0)[0], "Out");
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/L2Normalization.cpp b/src/armnnTfLiteParser/test/L2Normalization.cpp
index 0dd5eef..f4eeaac 100644
--- a/src/armnnTfLiteParser/test/L2Normalization.cpp
+++ b/src/armnnTfLiteParser/test/L2Normalization.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_L2Normalization")
+{
 struct L2NormalizationFixture : public ParserFlatbuffersFixture
 {
     explicit L2NormalizationFixture(const std::string & inputOutputShape)
@@ -82,7 +81,7 @@
     L2NormalizationFixture4D() : L2NormalizationFixture("[ 1, 1, 4, 3 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseL2Normalization4D, L2NormalizationFixture4D)
+TEST_CASE_FIXTURE(L2NormalizationFixture4D, "ParseL2Normalization4D")
 {
   RunTest<4, armnn::DataType::Float32>(
       0,
@@ -113,7 +112,7 @@
     L2NormalizationSimpleFixture4D() : L2NormalizationFixture("[ 1, 1, 1, 4 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseL2NormalizationEps4D, L2NormalizationSimpleFixture4D)
+TEST_CASE_FIXTURE(L2NormalizationSimpleFixture4D, "ParseL2NormalizationEps4D")
 {
       RunTest<4, armnn::DataType::Float32>(
       0,
@@ -125,4 +124,4 @@
                           0.00000004f / CalcL2Norm({ 0.00000001f, 0.00000002f, 0.00000003f, 0.00000004f }) }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/LeakyRelu.cpp b/src/armnnTfLiteParser/test/LeakyRelu.cpp
index 471c014..20f95ab 100644
--- a/src/armnnTfLiteParser/test/LeakyRelu.cpp
+++ b/src/armnnTfLiteParser/test/LeakyRelu.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_LeakyRelu")
+{
 struct LeakyReluFixture : public ParserFlatbuffersFixture
 {
     explicit LeakyReluFixture()
@@ -69,11 +68,11 @@
     }
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseLeakyRelu, LeakyReluFixture)
+TEST_CASE_FIXTURE(LeakyReluFixture, "ParseLeakyRelu")
 {
     RunTest<2, armnn::DataType::Float32>(0,
                                          {{ "inputTensor",  { -0.1f, -0.2f, -0.3f, -0.4f, 0.1f, 0.2f, 0.3f }}},
                                          {{ "outputTensor", { -0.001f, -0.002f, -0.003f, -0.004f, 0.1f, 0.2f, 0.3f }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/LoadModel.cpp b/src/armnnTfLiteParser/test/LoadModel.cpp
index 1afb5f1..e09de68 100644
--- a/src/armnnTfLiteParser/test/LoadModel.cpp
+++ b/src/armnnTfLiteParser/test/LoadModel.cpp
@@ -2,7 +2,7 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
-#include <boost/test/unit_test.hpp>
+
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
@@ -13,8 +13,8 @@
 using SubgraphPtr = TfLiteParserImpl::SubgraphPtr;
 using OperatorPtr = TfLiteParserImpl::OperatorPtr;
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_LoadModel")
+{
 struct LoadModelFixture : public ParserFlatbuffersFixture
 {
     explicit LoadModelFixture()
@@ -137,53 +137,53 @@
                     const std::vector<tflite::BuiltinOperator>& opcodes,
                     size_t subgraphs, const std::string desc, size_t buffers)
     {
-        BOOST_CHECK(model);
-        BOOST_CHECK_EQUAL(version, model->version);
-        BOOST_CHECK_EQUAL(opcodeSize, model->operator_codes.size());
+        CHECK(model);
+        CHECK_EQ(version, model->version);
+        CHECK_EQ(opcodeSize, model->operator_codes.size());
         CheckBuiltinOperators(opcodes, model->operator_codes);
-        BOOST_CHECK_EQUAL(subgraphs, model->subgraphs.size());
-        BOOST_CHECK_EQUAL(desc, model->description);
-        BOOST_CHECK_EQUAL(buffers, model->buffers.size());
+        CHECK_EQ(subgraphs, model->subgraphs.size());
+        CHECK_EQ(desc, model->description);
+        CHECK_EQ(buffers, model->buffers.size());
     }
 
     void CheckBuiltinOperators(const std::vector<tflite::BuiltinOperator>& expectedOperators,
                                const std::vector<std::unique_ptr<tflite::OperatorCodeT>>& result)
     {
-        BOOST_CHECK_EQUAL(expectedOperators.size(), result.size());
+        CHECK_EQ(expectedOperators.size(), result.size());
         for (size_t i = 0; i < expectedOperators.size(); i++)
         {
-            BOOST_CHECK_EQUAL(expectedOperators[i], result[i]->builtin_code);
+            CHECK_EQ(expectedOperators[i], result[i]->builtin_code);
         }
     }
 
     void CheckSubgraph(const SubgraphPtr& subgraph, size_t tensors, const std::vector<int32_t>& inputs,
                        const std::vector<int32_t>& outputs, size_t operators, const std::string& name)
     {
-        BOOST_CHECK(subgraph);
-        BOOST_CHECK_EQUAL(tensors, subgraph->tensors.size());
-        BOOST_CHECK_EQUAL_COLLECTIONS(inputs.begin(), inputs.end(), subgraph->inputs.begin(), subgraph->inputs.end());
-        BOOST_CHECK_EQUAL_COLLECTIONS(outputs.begin(), outputs.end(),
-                                      subgraph->outputs.begin(), subgraph->outputs.end());
-        BOOST_CHECK_EQUAL(operators, subgraph->operators.size());
-        BOOST_CHECK_EQUAL(name, subgraph->name);
+        CHECK(subgraph);
+        CHECK_EQ(tensors, subgraph->tensors.size());
+        CHECK(std::equal(inputs.begin(), inputs.end(), subgraph->inputs.begin(), subgraph->inputs.end()));
+        CHECK(std::equal(outputs.begin(), outputs.end(),
+                         subgraph->outputs.begin(), subgraph->outputs.end()));
+        CHECK_EQ(operators, subgraph->operators.size());
+        CHECK_EQ(name, subgraph->name);
     }
 
     void CheckOperator(const OperatorPtr& operatorPtr, uint32_t opcode,  const std::vector<int32_t>& inputs,
                        const std::vector<int32_t>& outputs, tflite::BuiltinOptions optionType,
                        tflite::CustomOptionsFormat custom_options_format)
     {
-        BOOST_CHECK(operatorPtr);
-        BOOST_CHECK_EQUAL(opcode, operatorPtr->opcode_index);
-        BOOST_CHECK_EQUAL_COLLECTIONS(inputs.begin(), inputs.end(),
-                                      operatorPtr->inputs.begin(), operatorPtr->inputs.end());
-        BOOST_CHECK_EQUAL_COLLECTIONS(outputs.begin(), outputs.end(),
-                                      operatorPtr->outputs.begin(), operatorPtr->outputs.end());
-        BOOST_CHECK_EQUAL(optionType, operatorPtr->builtin_options.type);
-        BOOST_CHECK_EQUAL(custom_options_format, operatorPtr->custom_options_format);
+        CHECK(operatorPtr);
+        CHECK_EQ(opcode, operatorPtr->opcode_index);
+        CHECK(std::equal(inputs.begin(), inputs.end(),
+                         operatorPtr->inputs.begin(), operatorPtr->inputs.end()));
+        CHECK(std::equal(outputs.begin(), outputs.end(),
+                         operatorPtr->outputs.begin(), operatorPtr->outputs.end()));
+        CHECK_EQ(optionType, operatorPtr->builtin_options.type);
+        CHECK_EQ(custom_options_format, operatorPtr->custom_options_format);
     }
 };
 
-BOOST_FIXTURE_TEST_CASE(LoadModelFromBinary, LoadModelFixture)
+TEST_CASE_FIXTURE(LoadModelFixture, "LoadModelFromBinary")
 {
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromBinary(m_GraphBinary.data(),
                                                                              m_GraphBinary.size());
@@ -197,14 +197,14 @@
                   tflite::CustomOptionsFormat_FLEXBUFFERS);
 }
 
-BOOST_FIXTURE_TEST_CASE(LoadModelFromFile, LoadModelFixture)
+TEST_CASE_FIXTURE(LoadModelFixture, "LoadModelFromFile")
 {
     using namespace fs;
     fs::path fname = armnnUtils::Filesystem::NamedTempFile("Armnn-tfLite-LoadModelFromFile-TempFile.csv");
     bool saved = flatbuffers::SaveFile(fname.c_str(),
                                        reinterpret_cast<char *>(m_GraphBinary.data()),
                                        m_GraphBinary.size(), true);
-    BOOST_CHECK_MESSAGE(saved, "Cannot save test file");
+    CHECK_MESSAGE(saved, "Cannot save test file");
 
     TfLiteParserImpl::ModelPtr model = TfLiteParserImpl::LoadModelFromFile(fname.c_str());
     CheckModel(model, 3, 2, { tflite::BuiltinOperator_AVERAGE_POOL_2D, tflite::BuiltinOperator_CONV_2D },
@@ -218,26 +218,26 @@
     remove(fname);
 }
 
-BOOST_AUTO_TEST_CASE(LoadNullBinary)
+TEST_CASE("LoadNullBinary")
 {
-    BOOST_CHECK_THROW(TfLiteParserImpl::LoadModelFromBinary(nullptr, 0), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(TfLiteParserImpl::LoadModelFromBinary(nullptr, 0), armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(LoadInvalidBinary)
+TEST_CASE("LoadInvalidBinary")
 {
     std::string testData = "invalid data";
-    BOOST_CHECK_THROW(TfLiteParserImpl::LoadModelFromBinary(reinterpret_cast<const uint8_t*>(&testData),
+    CHECK_THROWS_AS(TfLiteParserImpl::LoadModelFromBinary(reinterpret_cast<const uint8_t*>(&testData),
                                                         testData.length()), armnn::ParseException);
 }
 
-BOOST_AUTO_TEST_CASE(LoadFileNotFound)
+TEST_CASE("LoadFileNotFound")
 {
-    BOOST_CHECK_THROW(TfLiteParserImpl::LoadModelFromFile("invalidfile.tflite"), armnn::FileNotFoundException);
+    CHECK_THROWS_AS(TfLiteParserImpl::LoadModelFromFile("invalidfile.tflite"), armnn::FileNotFoundException);
 }
 
-BOOST_AUTO_TEST_CASE(LoadNullPtrFile)
+TEST_CASE("LoadNullPtrFile")
 {
-    BOOST_CHECK_THROW(TfLiteParserImpl::LoadModelFromFile(nullptr), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(TfLiteParserImpl::LoadModelFromFile(nullptr), armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
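
CHECK_MESSAGE, used above for the SaveFile result, takes the condition first and a message that is reported only when the check fails; REQUIRE_MESSAGE behaves the same but also stops the test case. A small standalone sketch, with a placeholder saved flag instead of real file I/O:

    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN   // standalone entry point for this sketch only
    #include <doctest/doctest.h>

    #include <string>

    TEST_CASE("CheckMessageSketch")
    {
        bool saved = true;                          // stands in for a real I/O result
        std::string detail = "Cannot save test file";

        CHECK_MESSAGE(saved, detail);               // 'detail' is printed only if the check fails
        REQUIRE_MESSAGE(saved, detail);             // a failing REQUIRE_* ends the test case here
    }
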
diff --git a/src/armnnTfLiteParser/test/LoadScopeDynamicTensor.cpp b/src/armnnTfLiteParser/test/LoadScopeDynamicTensor.cpp
index 89a6640..a6e9c88 100644
--- a/src/armnnTfLiteParser/test/LoadScopeDynamicTensor.cpp
+++ b/src/armnnTfLiteParser/test/LoadScopeDynamicTensor.cpp
@@ -8,10 +8,8 @@
 
 #include <string>
 
-#include <boost/test/unit_test.hpp>
-
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_LoadScopeDynamicTensor")
+{
 struct LoadScopeDynamicTensorFixture : public ParserFlatbuffersFixture
 {
     explicit LoadScopeDynamicTensorFixture(const std::string& shape0,
@@ -144,7 +142,7 @@
     LoadScopeDynamicTensor2Fixture() : LoadScopeDynamicTensorFixture("[ 1, 3, 3, 2 ]", "[ ]", "[ 1, 1, 1, 2 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(LoadScopeDynamicTensor0, LoadScopeDynamicTensor0Fixture)
+TEST_CASE_FIXTURE(LoadScopeDynamicTensor0Fixture, "LoadScopeDynamicTensor0")
 {
     RunTest<4, armnn::DataType::Float32, armnn::DataType::Float32>(
         0,
@@ -153,7 +151,7 @@
         true);
 }
 
-BOOST_FIXTURE_TEST_CASE(LoadScopeDynamicTensor1, LoadScopeDynamicTensor1Fixture)
+TEST_CASE_FIXTURE(LoadScopeDynamicTensor1Fixture, "LoadScopeDynamicTensor1")
 {
     RunTest<4, armnn::DataType::Float32, armnn::DataType::Float32>(
         0,
@@ -162,7 +160,7 @@
         true);
 }
 
-BOOST_FIXTURE_TEST_CASE(LoadScopeDynamicTensor2, LoadScopeDynamicTensor2Fixture)
+TEST_CASE_FIXTURE(LoadScopeDynamicTensor2Fixture, "LoadScopeDynamicTensor2")
 {
   RunTest<4, armnn::DataType::Float32, armnn::DataType::Float32>(
         0,
@@ -340,7 +338,7 @@
                                                                                               "[ 1, 2, 3, 2 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(LoadScopeDynamicTensorBroadcasting3D, LoadScopeDynamicTensorBroadcasting3DFixture)
+TEST_CASE_FIXTURE(LoadScopeDynamicTensorBroadcasting3DFixture, "LoadScopeDynamicTensorBroadcasting3D")
 {
     RunTest<4, armnn::DataType::Float32, armnn::DataType::Float32>(
         0,
@@ -352,7 +350,7 @@
         true);
 }
 
-BOOST_FIXTURE_TEST_CASE(LoadScopeDynamicTensorBroadcasting2D, LoadScopeDynamicTensorBroadcasting2DFixture)
+TEST_CASE_FIXTURE(LoadScopeDynamicTensorBroadcasting2DFixture, "LoadScopeDynamicTensorBroadcasting2D")
 {
     RunTest<4, armnn::DataType::Float32, armnn::DataType::Float32>(
         0,
@@ -364,7 +362,7 @@
         true);
 }
 
-BOOST_FIXTURE_TEST_CASE(LoadScopeDynamicTensorBroadcasting1D, LoadScopeDynamicTensorBroadcasting1DFixture)
+TEST_CASE_FIXTURE(LoadScopeDynamicTensorBroadcasting1DFixture, "LoadScopeDynamicTensorBroadcasting1D")
 {
     RunTest<4, armnn::DataType::Float32, armnn::DataType::Float32>(
         0,
@@ -376,4 +374,4 @@
         true);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/MaxPool2D.cpp b/src/armnnTfLiteParser/test/MaxPool2D.cpp
index 8cbef97..bcafac6 100644
--- a/src/armnnTfLiteParser/test/MaxPool2D.cpp
+++ b/src/armnnTfLiteParser/test/MaxPool2D.cpp
@@ -2,12 +2,12 @@
 // Copyright © 2017 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
-#include <boost/test/unit_test.hpp>
+
 #include "armnnTfLiteParser/ITfLiteParser.hpp"
 #include "ParserFlatbuffersFixture.hpp"
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_MaxPool2D")
+{
 struct MaxPool2DFixture : public ParserFlatbuffersFixture
 {
     explicit MaxPool2DFixture(std::string inputdim, std::string outputdim, std::string dataType)
@@ -96,25 +96,25 @@
     MaxPoolLiteFixtureUint2DOutput() : MaxPool2DFixture("[ 1, 4, 4, 1 ]", "[ 1, 2, 2, 1 ]", "UINT8") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(MaxPoolLiteUint1DOutput, MaxPoolLiteFixtureUint1DOutput)
+TEST_CASE_FIXTURE(MaxPoolLiteFixtureUint1DOutput, "MaxPoolLiteUint1DOutput")
 {
     RunTest<4, armnn::DataType::QAsymmU8>(0, { 2, 3, 5, 2 }, { 5 });
 }
 
-BOOST_FIXTURE_TEST_CASE(MaxPoolLiteFloat1DOutput, MaxPoolLiteFixtureFloat1DOutput)
+TEST_CASE_FIXTURE(MaxPoolLiteFixtureFloat1DOutput, "MaxPoolLiteFloat1DOutput")
 {
     RunTest<4, armnn::DataType::Float32>(0, { 2.0f, 3.0f, 5.0f, 2.0f },  { 5.0f });
 }
 
-BOOST_FIXTURE_TEST_CASE(MaxPoolLiteUint2DOutput, MaxPoolLiteFixtureUint2DOutput)
+TEST_CASE_FIXTURE(MaxPoolLiteFixtureUint2DOutput, "MaxPoolLiteUint2DOutput")
 {
     RunTest<4, armnn::DataType::QAsymmU8>(
         0, { 1, 2, 2, 3, 5, 6, 7, 8, 3, 2, 1, 0, 1, 2, 3, 4 }, { 6, 8, 3, 4 });
 }
 
-BOOST_FIXTURE_TEST_CASE(MaxPoolIncorrectDataTypeError, MaxPoolLiteFixtureFloat1DOutput)
+TEST_CASE_FIXTURE(MaxPoolLiteFixtureFloat1DOutput, "MaxPoolIncorrectDataTypeError")
 {
-    BOOST_CHECK_THROW((RunTest<4, armnn::DataType::QAsymmU8>(0, { 2, 3, 5, 2 }, { 5 })), armnn::Exception);
+    CHECK_THROWS_AS((RunTest<4, armnn::DataType::QAsymmU8>(0, { 2, 3, 5, 2 }, { 5 })), armnn::Exception);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Maximum.cpp b/src/armnnTfLiteParser/test/Maximum.cpp
index 56a1ecf..caf1c70 100644
--- a/src/armnnTfLiteParser/test/Maximum.cpp
+++ b/src/armnnTfLiteParser/test/Maximum.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Maximum")
+{
 struct MaximumFixture : public ParserFlatbuffersFixture
 {
     explicit MaximumFixture(const std::string & inputShape1,
@@ -90,7 +89,7 @@
                                           "[ 1, 2, 2, 3 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseMaximum4D4D, MaximumFixture4D4D)
+TEST_CASE_FIXTURE(MaximumFixture4D4D, "ParseMaximum4D4D")
 {
   RunTest<4, armnn::DataType::Float32>(
       0,
@@ -115,7 +114,7 @@
                                                    "[ 1, 2, 2, 3 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseMaximumBroadcast4D4D, MaximumBroadcastFixture4D4D)
+TEST_CASE_FIXTURE(MaximumBroadcastFixture4D4D, "ParseMaximumBroadcast4D4D")
 {
   RunTest<4, armnn::DataType::Float32>(
       0,
@@ -135,7 +134,7 @@
                                                    "[ 1, 2, 2, 3 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseMaximumBroadcast4D1D, MaximumBroadcastFixture4D1D)
+TEST_CASE_FIXTURE(MaximumBroadcastFixture4D1D, "ParseMaximumBroadcast4D1D")
 {
   RunTest<4, armnn::DataType::Float32>(
       0,
@@ -157,7 +156,7 @@
                                                    "[ 1, 2, 2, 3 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseMaximumBroadcast1D4D, MaximumBroadcastFixture1D4D)
+TEST_CASE_FIXTURE(MaximumBroadcastFixture1D4D, "ParseMaximumBroadcast1D4D")
 {
   RunTest<4, armnn::DataType::Float32>(
       0,
@@ -172,4 +171,4 @@
                           9.0f, 10.0f, 11.0f }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Mean.cpp b/src/armnnTfLiteParser/test/Mean.cpp
index 3f0fdf1..9351182 100644
--- a/src/armnnTfLiteParser/test/Mean.cpp
+++ b/src/armnnTfLiteParser/test/Mean.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Mean")
+{
 struct MeanNoReduceFixture : public ParserFlatbuffersFixture
 {
     explicit MeanNoReduceFixture(const std::string & inputShape,
@@ -89,10 +88,10 @@
     SimpleMeanNoReduceFixture() : MeanNoReduceFixture("[ 2, 2 ]", "[ 1, 1 ]", "[ 0 ]", "[ ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseMeanNoReduce, SimpleMeanNoReduceFixture)
+TEST_CASE_FIXTURE(SimpleMeanNoReduceFixture, "ParseMeanNoReduce")
 {
     RunTest<2, armnn::DataType::Float32>(0, {{ "inputTensor", { 1.0f, 1.0f, 2.0f, 2.0f } } },
                                             {{ "outputTensor", { 1.5f } } });
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Minimum.cpp b/src/armnnTfLiteParser/test/Minimum.cpp
index 8c6db68..7aec638 100644
--- a/src/armnnTfLiteParser/test/Minimum.cpp
+++ b/src/armnnTfLiteParser/test/Minimum.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Minimum")
+{
 struct MinimumFixture : public ParserFlatbuffersFixture
 {
     explicit MinimumFixture(const std::string & inputShape1,
@@ -90,7 +89,7 @@
                                         "[ 1, 2, 2, 3 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseMinimum4D, MinimumFixture4D)
+TEST_CASE_FIXTURE(MinimumFixture4D, "ParseMinimum4D")
 {
   RunTest<4, armnn::DataType::Float32>(
       0,
@@ -115,7 +114,7 @@
                                                  "[ 1, 2, 2, 3 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseMinimumBroadcast4D, MinimumBroadcastFixture4D)
+TEST_CASE_FIXTURE(MinimumBroadcastFixture4D, "ParseMinimumBroadcast4D")
 {
   RunTest<4, armnn::DataType::Float32>(
       0,
@@ -136,7 +135,7 @@
                                                    "[ 1, 2, 2, 3 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseMinimumBroadcast4D1D, MinimumBroadcastFixture4D1D)
+TEST_CASE_FIXTURE(MinimumBroadcastFixture4D1D, "ParseMinimumBroadcast4D1D")
 {
   RunTest<4, armnn::DataType::Float32>(
       0,
@@ -158,7 +157,7 @@
                                                    "[ 1, 2, 2, 3 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseMinimumBroadcast1D4D, MinimumBroadcastFixture1D4D)
+TEST_CASE_FIXTURE(MinimumBroadcastFixture1D4D, "ParseMinimumBroadcast1D4D")
 {
   RunTest<4, armnn::DataType::Float32>(
       0,
@@ -242,7 +241,7 @@
     }
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseMinimumBroadcast2D0D, MinimumBroadcastFixture2D0D)
+TEST_CASE_FIXTURE(MinimumBroadcastFixture2D0D, "ParseMinimumBroadcast2D0D")
 {
     RunTest<2, armnn::DataType::Float32>(
             0,
@@ -250,4 +249,4 @@
             {{"output", { 1.0f, 2.0f }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Multiplication.cpp b/src/armnnTfLiteParser/test/Multiplication.cpp
index 329649a..6075627 100644
--- a/src/armnnTfLiteParser/test/Multiplication.cpp
+++ b/src/armnnTfLiteParser/test/Multiplication.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Multiplication")
+{
 struct MultiplicationFixture : public ParserFlatbuffersFixture
 {
     explicit MultiplicationFixture(const std::string & inputShape1,
@@ -92,7 +91,7 @@
     SimpleMultiplicationFixture() : MultiplicationFixture("[ 1, 2, 2, 3 ]", "[ 1, 2, 2, 3 ]", "[ 1, 2, 2, 3 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseMultiplication, SimpleMultiplicationFixture)
+TEST_CASE_FIXTURE(SimpleMultiplicationFixture, "ParseMultiplication")
 {
     using armnn::DataType;
     RunTest<4, DataType::Float32>(0, {{ "inputTensor1", { 0.0f,  1.0f,  2.0f,
@@ -114,7 +113,7 @@
     MultiplicationBroadcastFixture4D1D() : MultiplicationFixture("[ 1, 2, 2, 3 ]", "[ 1 ]", "[ 1, 2, 2, 3 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseMultiplicationBroadcast4D1D, MultiplicationBroadcastFixture4D1D)
+TEST_CASE_FIXTURE(MultiplicationBroadcastFixture4D1D, "ParseMultiplicationBroadcast4D1D")
 {
     using armnn::DataType;
     RunTest<4, DataType::Float32>(0, {{ "inputTensor1", { 0.0f,  1.0f,  2.0f,
@@ -133,7 +132,7 @@
     MultiplicationBroadcastFixture1D4D() : MultiplicationFixture("[ 1 ]", "[ 1, 2, 2, 3 ]", "[ 1, 2, 2, 3 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseMultiplicationBroadcast1D4D, MultiplicationBroadcastFixture1D4D)
+TEST_CASE_FIXTURE(MultiplicationBroadcastFixture1D4D, "ParseMultiplicationBroadcast1D4D")
 {
     using armnn::DataType;
     RunTest<4, DataType::Float32>(0, {{ "inputTensor1", { 3.0f } },
@@ -147,4 +146,4 @@
                                                          27.0f, 30.0f, 33.0f } } });
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp b/src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp
index e616158..395038d 100644
--- a/src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp
+++ b/src/armnnTfLiteParser/test/OutputShapeOfSqueeze.cpp
@@ -3,11 +3,15 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "../TfLiteParser.hpp"
 #include <iostream>
 #include <string>
 
+#include <doctest/doctest.h>
+
+TEST_SUITE("TensorflowLiteParser_OutputShapeOfSqueeze")
+{
+
 struct TfLiteParserFixture
 {
 
@@ -19,41 +23,38 @@
 
 };
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser);
-
-
-BOOST_FIXTURE_TEST_CASE( EmptySqueezeDims_OutputWithAllDimensionsSqueezed, TfLiteParserFixture )
+TEST_CASE_FIXTURE(TfLiteParserFixture, "EmptySqueezeDims_OutputWithAllDimensionsSqueezed")
 {
 
     std::vector<uint32_t> squeezeDims = {  };
 
     armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, m_InputShape, armnn::DataType::Float32);
     armnn::TensorInfo outputTensorInfo = m_Parser.OutputShapeOfSqueeze(squeezeDims, inputTensorInfo);
-    BOOST_TEST(outputTensorInfo.GetNumElements() == 4);
-    BOOST_TEST(outputTensorInfo.GetNumDimensions() == 2);
-    BOOST_TEST((outputTensorInfo.GetShape() == armnn::TensorShape({ 2, 2 })));
+    CHECK(outputTensorInfo.GetNumElements() == 4);
+    CHECK(outputTensorInfo.GetNumDimensions() == 2);
+    CHECK((outputTensorInfo.GetShape() == armnn::TensorShape({ 2, 2 })));
 };
 
-BOOST_FIXTURE_TEST_CASE( SqueezeDimsNotIncludingSizeOneDimensions_NoDimensionsSqueezedInOutput, TfLiteParserFixture )
+TEST_CASE_FIXTURE(TfLiteParserFixture, "SqueezeDimsNotIncludingSizeOneDimensions_NoDimensionsSqueezedInOutput")
 {
     std::vector<uint32_t> squeezeDims = { 1, 2 };
 
     armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, m_InputShape, armnn::DataType::Float32);
     armnn::TensorInfo outputTensorInfo = m_Parser.OutputShapeOfSqueeze(squeezeDims, inputTensorInfo);
-    BOOST_TEST(outputTensorInfo.GetNumElements() == 4);
-    BOOST_TEST(outputTensorInfo.GetNumDimensions() == 4);
-    BOOST_TEST((outputTensorInfo.GetShape() == armnn::TensorShape({ 1, 2, 2, 1 })));
+    CHECK(outputTensorInfo.GetNumElements() == 4);
+    CHECK(outputTensorInfo.GetNumDimensions() == 4);
+    CHECK((outputTensorInfo.GetShape() == armnn::TensorShape({ 1, 2, 2, 1 })));
 };
 
-BOOST_FIXTURE_TEST_CASE( SqueezeDimsRangePartial_OutputWithDimensionsWithinRangeSqueezed, TfLiteParserFixture )
+TEST_CASE_FIXTURE(TfLiteParserFixture, "SqueezeDimsRangePartial_OutputWithDimensionsWithinRangeSqueezed")
 {
     std::vector<uint32_t> squeezeDims = { 1, 3 };
 
     armnn::TensorInfo inputTensorInfo = armnn::TensorInfo(4, m_InputShape, armnn::DataType::Float32);
     armnn::TensorInfo outputTensorInfo = m_Parser.OutputShapeOfSqueeze(squeezeDims, inputTensorInfo);
-    BOOST_TEST(outputTensorInfo.GetNumElements() == 4);
-    BOOST_TEST(outputTensorInfo.GetNumDimensions() == 3);
-    BOOST_TEST((outputTensorInfo.GetShape() == armnn::TensorShape({ 1, 2, 2 })));
+    CHECK(outputTensorInfo.GetNumElements() == 4);
+    CHECK(outputTensorInfo.GetNumDimensions() == 3);
+    CHECK((outputTensorInfo.GetShape() == armnn::TensorShape({ 1, 2, 2 })));
 };
 
-BOOST_AUTO_TEST_SUITE_END();
\ No newline at end of file
+}
\ No newline at end of file
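
This file shows the overall shape of the migration in one place: the suite becomes a braced TEST_SUITE block, fixture cases name the fixture type directly, and the fixture's members are visible inside the test body. A minimal standalone sketch of that structure with placeholder names (ExampleSuite, ExampleFixture) rather than ArmNN types:

    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN   // standalone entry point for this sketch only
    #include <doctest/doctest.h>

    #include <stdexcept>

    TEST_SUITE("ExampleSuite")
    {

    struct ExampleFixture
    {
        int m_Value = 42;   // fixture members are accessed directly in the test body
    };

    TEST_CASE_FIXTURE(ExampleFixture, "MemberIsVisible")
    {
        CHECK_EQ(m_Value, 42);
    }

    TEST_CASE("ThrowsAreCheckedByType")
    {
        auto mustThrow = []() { throw std::runtime_error("boom"); };
        CHECK_THROWS_AS(mustThrow(), std::runtime_error);
    }

    }
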
diff --git a/src/armnnTfLiteParser/test/Pack.cpp b/src/armnnTfLiteParser/test/Pack.cpp
index 011312f..4aff8fe 100644
--- a/src/armnnTfLiteParser/test/Pack.cpp
+++ b/src/armnnTfLiteParser/test/Pack.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Pack")
+{
 struct PackFixture : public ParserFlatbuffersFixture
 {
     explicit PackFixture(const std::string & inputShape,
@@ -103,7 +102,7 @@
                                       "3") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParsePack, SimplePackFixture)
+TEST_CASE_FIXTURE(SimplePackFixture, "ParsePack")
 {
     RunTest<4, armnn::DataType::Float32>(
     0,
@@ -150,4 +149,4 @@
                          18, 36 } } });
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Pad.cpp b/src/armnnTfLiteParser/test/Pad.cpp
index aab1536..1ac0627 100644
--- a/src/armnnTfLiteParser/test/Pad.cpp
+++ b/src/armnnTfLiteParser/test/Pad.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Pad")
+{
 struct PadFixture : public ParserFlatbuffersFixture
 {
     explicit PadFixture(const std::string& inputShape,
@@ -93,7 +92,7 @@
                                     "[  1,0,0,0, 1,0,0,0, 2,0,0,0, 2,0,0,0 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParsePad, SimplePadFixture)
+TEST_CASE_FIXTURE(SimplePadFixture, "ParsePad")
 {
     RunTest<2, armnn::DataType::Float32>
         (0,
@@ -111,7 +110,7 @@
                                   "UINT8", "-2.0", "3") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParsePadUint8, Uint8PadFixture)
+TEST_CASE_FIXTURE(Uint8PadFixture, "ParsePadUint8")
 {
     RunTest<2, armnn::DataType::QAsymmU8>
         (0,
@@ -129,7 +128,7 @@
                                     "INT8", "-2.0", "3") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParsePadInt8, Int8PadFixture)
+TEST_CASE_FIXTURE(Int8PadFixture, "ParsePadInt8")
 {
     RunTest<2, armnn::DataType::QAsymmS8>
         (0,
@@ -140,4 +139,4 @@
                               3, 3, 3, 3, 3, 3, 3 }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
index 196af19..b0bfdfc 100644
--- a/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
+++ b/src/armnnTfLiteParser/test/ParserFlatbuffersFixture.hpp
@@ -20,6 +20,7 @@
 #include <test/TensorHelpers.hpp>
 
 #include <fmt/format.h>
+#include <doctest/doctest.h>
 
 #include "flatbuffers/idl.h"
 #include "flatbuffers/util.h"
@@ -207,22 +208,22 @@
                       const std::vector<float>& min, const std::vector<float>& max,
                       const std::vector<float>& scale, const std::vector<int64_t>& zeroPoint)
     {
-        BOOST_CHECK(tensors);
-        BOOST_CHECK_EQUAL(shapeSize, tensors->shape.size());
-        BOOST_CHECK_EQUAL_COLLECTIONS(shape.begin(), shape.end(), tensors->shape.begin(), tensors->shape.end());
-        BOOST_CHECK_EQUAL(tensorType, tensors->type);
-        BOOST_CHECK_EQUAL(buffer, tensors->buffer);
-        BOOST_CHECK_EQUAL(name, tensors->name);
-        BOOST_CHECK(tensors->quantization);
-        BOOST_CHECK_EQUAL_COLLECTIONS(min.begin(), min.end(), tensors->quantization.get()->min.begin(),
-                                      tensors->quantization.get()->min.end());
-        BOOST_CHECK_EQUAL_COLLECTIONS(max.begin(), max.end(), tensors->quantization.get()->max.begin(),
-                                      tensors->quantization.get()->max.end());
-        BOOST_CHECK_EQUAL_COLLECTIONS(scale.begin(), scale.end(), tensors->quantization.get()->scale.begin(),
-                                      tensors->quantization.get()->scale.end());
-        BOOST_CHECK_EQUAL_COLLECTIONS(zeroPoint.begin(), zeroPoint.end(),
+        CHECK(tensors);
+        CHECK_EQ(shapeSize, tensors->shape.size());
+        CHECK(std::equal(shape.begin(), shape.end(), tensors->shape.begin(), tensors->shape.end()));
+        CHECK_EQ(tensorType, tensors->type);
+        CHECK_EQ(buffer, tensors->buffer);
+        CHECK_EQ(name, tensors->name);
+        CHECK(tensors->quantization);
+        CHECK(std::equal(min.begin(), min.end(), tensors->quantization.get()->min.begin(),
+                                      tensors->quantization.get()->min.end()));
+        CHECK(std::equal(max.begin(), max.end(), tensors->quantization.get()->max.begin(),
+                                      tensors->quantization.get()->max.end()));
+        CHECK(std::equal(scale.begin(), scale.end(), tensors->quantization.get()->scale.begin(),
+                                      tensors->quantization.get()->scale.end()));
+        CHECK(std::equal(zeroPoint.begin(), zeroPoint.end(),
                                       tensors->quantization.get()->zero_point.begin(),
-                                      tensors->quantization.get()->zero_point.end());
+                                      tensors->quantization.get()->zero_point.end()));
     }
 
 private:
@@ -302,7 +303,7 @@
 
         // Check that output tensors have correct number of dimensions (NumOutputDimensions specified in test)
         auto outputNumDimensions = outputTensorInfo.GetNumDimensions();
-        BOOST_CHECK_MESSAGE((outputNumDimensions == NumOutputDimensions),
+        CHECK_MESSAGE((outputNumDimensions == NumOutputDimensions),
             fmt::format("Number of dimensions expected {}, but got {} for output layer {}",
                         NumOutputDimensions,
                         outputNumDimensions,
@@ -324,7 +325,7 @@
         auto result = CompareTensors(outputExpected, outputStorage[it.first],
                                      bindingInfo.second.GetShape(), bindingInfo.second.GetShape(),
                                      false, isDynamic);
-        BOOST_TEST(result.m_Result, result.m_Message.str());
+        CHECK_MESSAGE(result.m_Result, result.m_Message.str());
     }
 }
 
@@ -368,7 +369,7 @@
         {
             for (unsigned int i = 0; i < out.size(); ++i)
             {
-                BOOST_TEST(it.second[i] == out[i], boost::test_tools::tolerance(0.000001f));
+                CHECK(doctest::Approx(it.second[i]).epsilon(0.000001f) == out[i]);
             }
         }
     }
@@ -404,7 +405,7 @@
 
         // Check that output tensors have correct number of dimensions (NumOutputDimensions specified in test)
         auto outputNumDimensions = outputTensorInfo.GetNumDimensions();
-        BOOST_CHECK_MESSAGE((outputNumDimensions == NumOutputDimensions),
+        CHECK_MESSAGE((outputNumDimensions == NumOutputDimensions),
             fmt::format("Number of dimensions expected {}, but got {} for output layer {}",
                         NumOutputDimensions,
                         outputNumDimensions,
@@ -425,6 +426,6 @@
         auto outputExpected = it.second;
         auto result = CompareTensors(outputExpected, outputStorage[it.first],
                                      bindingInfo.second.GetShape(), bindingInfo.second.GetShape(), false);
-        BOOST_TEST(result.m_Result, result.m_Message.str());
+        CHECK_MESSAGE(result.m_Result, result.m_Message.str());
     }
 }
\ No newline at end of file
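
On the tolerance change above: doctest::Approx compares floating-point values with a relative tolerance set through .epsilon(), which takes over the role boost::test_tools::tolerance played. A standalone sketch with illustrative numbers:

    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN   // standalone entry point for this sketch only
    #include <doctest/doctest.h>

    TEST_CASE("ApproxToleranceSketch")
    {
        float expected = 2.0f / 3.0f;

        // Passes: 0.6667f is within 0.1% of 2/3.
        CHECK(doctest::Approx(expected).epsilon(0.001) == 0.6667f);

        // The comparison fails with a far tighter tolerance, so CHECK_FALSE passes.
        CHECK_FALSE(doctest::Approx(expected).epsilon(0.000001) == 0.6667f);
    }
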
diff --git a/src/armnnTfLiteParser/test/Prelu.cpp b/src/armnnTfLiteParser/test/Prelu.cpp
index 83c4088..6c70ff6 100644
--- a/src/armnnTfLiteParser/test/Prelu.cpp
+++ b/src/armnnTfLiteParser/test/Prelu.cpp
@@ -3,14 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Prelu")
+{
 struct PreluFixture : public ParserFlatbuffersFixture
 {
     explicit PreluFixture(const std::string& inputShape,
@@ -356,7 +356,7 @@
                                                "\"data\": [ 0, 0, 128, 62 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SimplePrelu, SimplePreluFixture)
+TEST_CASE_FIXTURE(SimplePreluFixture, "SimplePrelu")
 {
   RunTest<2, armnn::DataType::Float32>(
       0,
@@ -364,7 +364,7 @@
       {{"output", { -3.5f, 2.f, 0.f, 1.f, -1.25f, 14.f }}});
 }
 
-BOOST_FIXTURE_TEST_CASE(PreluConstAlpha, PreluConstAlphaFixture)
+TEST_CASE_FIXTURE(PreluConstAlphaFixture, "PreluConstAlpha")
 {
   RunTest<3, armnn::DataType::Float32>(
       0,
@@ -372,7 +372,7 @@
       {{"output", { -3.5f, 2.f, 0.f, 1.f, -1.25f, 14.f }}});
 }
 
-BOOST_FIXTURE_TEST_CASE(PreluBroadcastAlpha, PreluBroadcastAlphaFixture)
+TEST_CASE_FIXTURE(PreluBroadcastAlphaFixture, "PreluBroadcastAlpha")
 {
   RunTest<4, armnn::DataType::Float32>(
       0,
@@ -380,7 +380,7 @@
       {{"output", { -3.5f, 2.f, 0.f, 1.f, -1.25f, 14.f }}});
 }
 
-BOOST_FIXTURE_TEST_CASE(PreluDynamicTensor, PreluDynamicTensorFixture)
+TEST_CASE_FIXTURE(PreluDynamicTensorFixture, "PreluDynamicTensor")
 {
   RunTest<2, armnn::DataType::Float32, armnn::DataType::Float32>(
       0,
@@ -389,7 +389,7 @@
       true);
 }
 
-BOOST_FIXTURE_TEST_CASE(PreluNetwork, PreluNetworkFixture)
+TEST_CASE_FIXTURE(PreluNetworkFixture, "PreluNetwork")
 {
   RunTest<3, armnn::DataType::Float32>(
       0,
@@ -397,4 +397,4 @@
       {{"output", { -21.f, 12.f, 0.f, 6.f, -7.5f, 84.f }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Quantize.cpp b/src/armnnTfLiteParser/test/Quantize.cpp
index ca5e6d5..c7c936e 100644
--- a/src/armnnTfLiteParser/test/Quantize.cpp
+++ b/src/armnnTfLiteParser/test/Quantize.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Quantize")
+{
     struct QuantizeFixture : public ParserFlatbuffersFixture
     {
         explicit QuantizeFixture(const std::string & inputShape,
@@ -80,7 +79,7 @@
                                                          "UINT8") {}
     };
 
-    BOOST_FIXTURE_TEST_CASE(SimpleQuantizeQAsymm8, SimpleQuantizeFixtureQAsymm8)
+    TEST_CASE_FIXTURE(SimpleQuantizeFixtureQAsymm8, "SimpleQuantizeQAsymm8")
     {
         RunTest<2, armnn::DataType::Float32, armnn::DataType::QuantisedAsymm8>(
                 0,
@@ -95,7 +94,7 @@
                                                          "INT16") {}
     };
 
-    BOOST_FIXTURE_TEST_CASE(SimpleQuantizeQsymm16, SimpleQuantizeFixtureQSymm16)
+    TEST_CASE_FIXTURE(SimpleQuantizeFixtureQSymm16, "SimpleQuantizeQsymm16")
     {
         RunTest<2, armnn::DataType::Float32, armnn::DataType::QuantisedSymm16>(
                 0,
@@ -110,7 +109,7 @@
                                                          "INT8") {}
     };
 
-    BOOST_FIXTURE_TEST_CASE(SimpleQuantizeQSymmS8, SimpleQuantizeFixtureQSymmS8)
+    TEST_CASE_FIXTURE(SimpleQuantizeFixtureQSymmS8, "SimpleQuantizeQSymmS8")
     {
         RunTest<2, armnn::DataType::Float32, armnn::DataType::QSymmS8>(
                 0,
@@ -118,4 +117,4 @@
                 {{"outputTensor", { 0,    1,    5,    127,    -128,   -1 }}});
     }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
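
For the fixture-based cases in this file, note the argument order: BOOST_FIXTURE_TEST_CASE takes the test name first and the fixture second, while TEST_CASE_FIXTURE takes the fixture type first and a quoted test name second. A minimal sketch with a placeholder fixture (not an ArmNN type):

    //   BOOST_FIXTURE_TEST_CASE(TestName, FixtureType)   // Boost.Test: name first
    //   TEST_CASE_FIXTURE(FixtureType, "TestName")       // doctest: fixture type first
    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN   // standalone entry point for this sketch only
    #include <doctest/doctest.h>

    struct PlaceholderFixture
    {
        float m_Scale = 0.5f;
    };

    TEST_CASE_FIXTURE(PlaceholderFixture, "FixtureTypeFirstNameSecond")
    {
        CHECK_EQ(m_Scale * 2.0f, 1.0f);
    }
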
diff --git a/src/armnnTfLiteParser/test/Reduce.cpp b/src/armnnTfLiteParser/test/Reduce.cpp
index c2a22f0..cde9d09 100644
--- a/src/armnnTfLiteParser/test/Reduce.cpp
+++ b/src/armnnTfLiteParser/test/Reduce.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Reduce")
+{
 struct ReduceMaxFixture : public ParserFlatbuffersFixture
 {
     explicit ReduceMaxFixture(const std::string& inputShape,
@@ -93,7 +92,7 @@
     SimpleReduceMaxFixture() : ReduceMaxFixture("[ 1, 1, 2, 3 ]", "[ 1, 1, 1, 3 ]", "[ 1 ]", "[  2,0,0,0 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseReduceMax, SimpleReduceMaxFixture)
+TEST_CASE_FIXTURE(SimpleReduceMaxFixture, "ParseReduceMax")
 {
     RunTest<4, armnn::DataType::Float32, armnn::DataType::Float32>
         (0, {{ "inputTensor",  { 1001.0f, 11.0f,   1003.0f,
@@ -182,7 +181,7 @@
     SimpleReduceMinFixture() : ReduceMinFixture("[ 1, 1, 2, 3 ]", "[ 1, 1, 1, 3 ]", "[ 1 ]", "[ 2, 0, 0, 0 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseReduceMin, SimpleReduceMinFixture)
+TEST_CASE_FIXTURE(SimpleReduceMinFixture, "ParseReduceMin")
 {
     RunTest<4, armnn::DataType::Float32, armnn::DataType::Float32>
         (0, {{ "inputTensor",  { 1001.0f, 11.0f,   1003.0f,
@@ -190,4 +189,4 @@
             {{ "outputTensor", { 10.0f, 11.0f, 12.0f } } });
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Reshape.cpp b/src/armnnTfLiteParser/test/Reshape.cpp
index 025612f..0824a27 100644
--- a/src/armnnTfLiteParser/test/Reshape.cpp
+++ b/src/armnnTfLiteParser/test/Reshape.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Reshape")
+{
 struct ReshapeFixture : public ParserFlatbuffersFixture
 {
     explicit ReshapeFixture(const std::string& inputShape,
@@ -83,13 +82,13 @@
     ReshapeFixtureWithReshapeDims() : ReshapeFixture("[ 1, 9 ]", "[ 3, 3 ]", "[ 3, 3 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDims, ReshapeFixtureWithReshapeDims)
+TEST_CASE_FIXTURE(ReshapeFixtureWithReshapeDims, "ParseReshapeWithReshapeDims")
 {
     SetupSingleInputSingleOutput("inputTensor", "outputTensor");
     RunTest<2, armnn::DataType::QAsymmU8>(0,
                                                  { 1, 2, 3, 4, 5, 6, 7, 8, 9 },
                                                  { 1, 2, 3, 4, 5, 6, 7, 8, 9 });
-    BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+    CHECK((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
                 == armnn::TensorShape({3,3})));
 }
 
@@ -98,13 +97,13 @@
     ReshapeFixtureWithReshapeDimsFlatten() : ReshapeFixture("[ 3, 3 ]", "[ 9 ]", "[ -1 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDimsFlatten, ReshapeFixtureWithReshapeDimsFlatten)
+TEST_CASE_FIXTURE(ReshapeFixtureWithReshapeDimsFlatten, "ParseReshapeWithReshapeDimsFlatten")
 {
     SetupSingleInputSingleOutput("inputTensor", "outputTensor");
     RunTest<1, armnn::DataType::QAsymmU8>(0,
                                                  { 1, 2, 3, 4, 5, 6, 7, 8, 9 },
                                                  { 1, 2, 3, 4, 5, 6, 7, 8, 9 });
-    BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+    CHECK((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
                 == armnn::TensorShape({9})));
 }
 
@@ -113,13 +112,13 @@
     ReshapeFixtureWithReshapeDimsFlattenTwoDims() : ReshapeFixture("[ 3, 2, 3 ]", "[ 2, 9 ]", "[ 2, -1 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDimsFlattenTwoDims, ReshapeFixtureWithReshapeDimsFlattenTwoDims)
+TEST_CASE_FIXTURE(ReshapeFixtureWithReshapeDimsFlattenTwoDims, "ParseReshapeWithReshapeDimsFlattenTwoDims")
 {
     SetupSingleInputSingleOutput("inputTensor", "outputTensor");
     RunTest<2, armnn::DataType::QAsymmU8>(0,
                                                  { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 },
                                                  { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 });
-    BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+    CHECK((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
                 == armnn::TensorShape({2,9})));
 }
 
@@ -128,13 +127,13 @@
     ReshapeFixtureWithReshapeDimsFlattenOneDim() : ReshapeFixture("[ 2, 9 ]", "[ 2, 3, 3 ]", "[ 2, -1, 3 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseReshapeWithReshapeDimsFlattenOneDim, ReshapeFixtureWithReshapeDimsFlattenOneDim)
+TEST_CASE_FIXTURE(ReshapeFixtureWithReshapeDimsFlattenOneDim, "ParseReshapeWithReshapeDimsFlattenOneDim")
 {
     SetupSingleInputSingleOutput("inputTensor", "outputTensor");
     RunTest<3, armnn::DataType::QAsymmU8>(0,
                                                  { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 },
                                                  { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 });
-    BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+    CHECK((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
                 == armnn::TensorShape({2,3,3})));
 }
 
@@ -145,7 +144,7 @@
                                                                          "[ 2, -1, 3 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(DynParseReshapeWithReshapeDimsFlattenOneDim, DynamicReshapeFixtureWithReshapeDimsFlattenOneDim)
+TEST_CASE_FIXTURE(DynamicReshapeFixtureWithReshapeDimsFlattenOneDim, "DynParseReshapeWithReshapeDimsFlattenOneDim")
 {
     SetupSingleInputSingleOutput("inputTensor", "outputTensor");
      RunTest<3,
@@ -156,4 +155,4 @@
                                    true);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/ResizeBilinear.cpp b/src/armnnTfLiteParser/test/ResizeBilinear.cpp
index 8af5612..dce9e1d 100644
--- a/src/armnnTfLiteParser/test/ResizeBilinear.cpp
+++ b/src/armnnTfLiteParser/test/ResizeBilinear.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_ResizeBilinear")
+{
 struct ResizeBilinearFixture : public ParserFlatbuffersFixture
 {
     explicit ResizeBilinearFixture(const std::string & inputShape,
@@ -98,7 +97,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseResizeBilinear, SimpleResizeBilinearFixture)
+TEST_CASE_FIXTURE(SimpleResizeBilinearFixture, "ParseResizeBilinear")
 {
     RunTest<4, armnn::DataType::Float32>(
                 0,
@@ -113,4 +112,4 @@
                 );
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/ResizeNearestNeighbor.cpp b/src/armnnTfLiteParser/test/ResizeNearestNeighbor.cpp
index 7add5f2..948f4fe 100644
--- a/src/armnnTfLiteParser/test/ResizeNearestNeighbor.cpp
+++ b/src/armnnTfLiteParser/test/ResizeNearestNeighbor.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_ResizeNearestNeighbor")
+{
 struct ResizeNearestNeighborFixture : public ParserFlatbuffersFixture
 {
     explicit ResizeNearestNeighborFixture(const std::string & inputShape,
@@ -98,7 +97,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseResizeNearestNeighbor, SimpleResizeNearestNeighborFixture)
+TEST_CASE_FIXTURE(SimpleResizeNearestNeighborFixture, "ParseResizeNearestNeighbor")
 {
     RunTest<4, armnn::DataType::Float32>(
                 0,
@@ -106,4 +105,4 @@
                 {{"OutputTensor", {  1.0f }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Slice.cpp b/src/armnnTfLiteParser/test/Slice.cpp
index b94a983..80dff73 100644
--- a/src/armnnTfLiteParser/test/Slice.cpp
+++ b/src/armnnTfLiteParser/test/Slice.cpp
@@ -3,12 +3,11 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Slice")
+{
 struct SliceFixture : public ParserFlatbuffersFixture
 {
     explicit SliceFixture(const std::string & inputShape,
@@ -124,14 +123,14 @@
                                            "[ 1, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SliceSingleDim, SliceFixtureSingleDim)
+TEST_CASE_FIXTURE(SliceFixtureSingleDim, "SliceSingleDim")
 {
     RunTest<3, armnn::DataType::Float32>(
       0,
       {{"inputTensor", { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 }}},
       {{"outputTensor", { 3, 3, 3 }}});
 
-    BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+    CHECK((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
                 == armnn::TensorShape({1,1,3})));
 }
 
@@ -143,14 +142,14 @@
                                       "[ 1, 0, 0, 0, 2, 0, 0, 0, 3, 0, 0, 0 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SliceD123, SliceFixtureD123)
+TEST_CASE_FIXTURE(SliceFixtureD123, "SliceD123")
 {
     RunTest<3, armnn::DataType::Float32>(
         0,
         {{"inputTensor", { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 }}},
         {{"outputTensor", { 3, 3, 3, 4, 4, 4 }}});
 
-    BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+    CHECK((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
                 == armnn::TensorShape({1,2,3})));
 }
 
@@ -162,14 +161,14 @@
                                       "[ 2, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SliceD213, SliceFixtureD213)
+TEST_CASE_FIXTURE(SliceFixtureD213, "SliceD213")
 {
     RunTest<3, armnn::DataType::Float32>(
         0,
         {{"inputTensor", { 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6 }}},
         {{"outputTensor", { 3, 3, 3, 5, 5, 5 }}});
 
-    BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+    CHECK((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
                 == armnn::TensorShape({2,1,3})));
 }
 
@@ -181,7 +180,7 @@
                                                 "[ 2, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(DynamicSliceD213, DynamicSliceFixtureD213)
+TEST_CASE_FIXTURE(DynamicSliceFixtureD213, "DynamicSliceD213")
 {
     RunTest<3, armnn::DataType::Float32, armnn::DataType::Float32>(
         0,
@@ -190,4 +189,4 @@
         true);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnnTfLiteParser/test/Softmax.cpp b/src/armnnTfLiteParser/test/Softmax.cpp
index c4d19f2..11a2a0d 100644
--- a/src/armnnTfLiteParser/test/Softmax.cpp
+++ b/src/armnnTfLiteParser/test/Softmax.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Softmax")
+{
 struct SoftmaxFixture : public ParserFlatbuffersFixture
 {
     explicit SoftmaxFixture()
@@ -69,9 +68,9 @@
     }
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseSoftmaxLite, SoftmaxFixture)
+TEST_CASE_FIXTURE(SoftmaxFixture, "ParseSoftmaxLite")
 {
     RunTest<2, armnn::DataType::QAsymmU8>(0, { 0, 0, 100, 0, 0, 0, 0 }, { 0, 0, 255, 0, 0, 0, 0 });
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/SpaceToBatchND.cpp b/src/armnnTfLiteParser/test/SpaceToBatchND.cpp
index 6ff4f53..b99713c 100644
--- a/src/armnnTfLiteParser/test/SpaceToBatchND.cpp
+++ b/src/armnnTfLiteParser/test/SpaceToBatchND.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_SpaceToBatchND")
+{
 struct SpaceToBatchNDFixture : public ParserFlatbuffersFixture
 {
     explicit SpaceToBatchNDFixture(const std::string & inputShape,
@@ -105,7 +104,7 @@
                                                               "[ 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SpaceToBatchNdSimpleTest, SpaceToBatchNDFixtureSimpleTest)
+TEST_CASE_FIXTURE(SpaceToBatchNDFixtureSimpleTest, "SpaceToBatchNdSimpleTest")
 {
     RunTest<4, armnn::DataType::Float32>
         (0,
@@ -128,7 +127,7 @@
                                                                             "[ 0,0,0,0, 0,0,0,0, 0,0,0,0, 0,0,0,0 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SpaceToBatchNdMultipleInputBatchesTest, SpaceToBatchNDFixtureMultipleInputBatchesTest)
+TEST_CASE_FIXTURE(SpaceToBatchNDFixtureMultipleInputBatchesTest, "SpaceToBatchNdMultipleInputBatchesTest")
 {
     RunTest<4, armnn::DataType::Float32>
         (0,
@@ -150,7 +149,7 @@
                                                                "[ 1,0,0,0, 0,0,0,0, 2,0,0,0, 0,0,0,0 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SpaceToBatchNdPaddingTest, SpaceToBatchNDFixturePaddingTest)
+TEST_CASE_FIXTURE(SpaceToBatchNDFixturePaddingTest, "SpaceToBatchNdPaddingTest")
 {
     RunTest<4, armnn::DataType::Float32>
         (0,
@@ -175,4 +174,4 @@
                                0.0f, 10.0f, }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Split.cpp b/src/armnnTfLiteParser/test/Split.cpp
index 5f23799..97f8f12 100644
--- a/src/armnnTfLiteParser/test/Split.cpp
+++ b/src/armnnTfLiteParser/test/Split.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Split")
+{
 struct SplitFixture : public ParserFlatbuffersFixture
 {
     explicit SplitFixture(const std::string& inputShape,
@@ -108,7 +107,7 @@
         {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseAxisOneSplitTwoFloat32, SimpleSplitFixtureFloat32)
+TEST_CASE_FIXTURE(SimpleSplitFixtureFloat32, "ParseAxisOneSplitTwoFloat32")
 {
 
     RunTest<4, armnn::DataType::Float32>(
@@ -126,7 +125,7 @@
         {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseAxisThreeSplitTwoFloat32, SimpleSplitAxisThreeFixtureFloat32)
+TEST_CASE_FIXTURE(SimpleSplitAxisThreeFixtureFloat32, "ParseAxisThreeSplitTwoFloat32")
 {
     RunTest<4, armnn::DataType::Float32>(
         0,
@@ -143,7 +142,7 @@
         {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SimpleSplit2DFloat32, SimpleSplit2DFixtureFloat32)
+TEST_CASE_FIXTURE(SimpleSplit2DFixtureFloat32, "SimpleSplit2DFloat32")
 {
     RunTest<2, armnn::DataType::Float32>(
         0,
@@ -159,7 +158,7 @@
         {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SimpleSplit3DFloat32, SimpleSplit3DFixtureFloat32)
+TEST_CASE_FIXTURE(SimpleSplit3DFixtureFloat32, "SimpleSplit3DFloat32")
 {
     RunTest<3, armnn::DataType::Float32>(
         0,
@@ -176,7 +175,7 @@
         {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseAxisOneSplitTwoUint8, SimpleSplitFixtureUint8)
+TEST_CASE_FIXTURE(SimpleSplitFixtureUint8, "ParseAxisOneSplitTwoUint8")
 {
 
     RunTest<4, armnn::DataType::QAsymmU8>(
@@ -194,7 +193,7 @@
         {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseAxisThreeSplitTwoUint8, SimpleSplitAxisThreeFixtureUint8)
+TEST_CASE_FIXTURE(SimpleSplitAxisThreeFixtureUint8, "ParseAxisThreeSplitTwoUint8")
 {
     RunTest<4, armnn::DataType::QAsymmU8>(
         0,
@@ -211,7 +210,7 @@
         {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SimpleSplit2DUint8, SimpleSplit2DFixtureUint8)
+TEST_CASE_FIXTURE(SimpleSplit2DFixtureUint8, "SimpleSplit2DUint8")
 {
     RunTest<2, armnn::DataType::QAsymmU8>(
             0,
@@ -227,7 +226,7 @@
         {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SimpleSplit3DUint8, SimpleSplit3DFixtureUint8)
+TEST_CASE_FIXTURE(SimpleSplit3DFixtureUint8, "SimpleSplit3DUint8")
 {
     RunTest<3, armnn::DataType::QAsymmU8>(
         0,
@@ -237,4 +236,4 @@
           {"outputTensor2", { 9, 10, 11, 12, 13, 14, 15, 16 } } } );
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnnTfLiteParser/test/SplitV.cpp b/src/armnnTfLiteParser/test/SplitV.cpp
index 9541114..51b75fa 100644
--- a/src/armnnTfLiteParser/test/SplitV.cpp
+++ b/src/armnnTfLiteParser/test/SplitV.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser")
+{
 struct SplitVFixture : public ParserFlatbuffersFixture
 {
     explicit SplitVFixture(const std::string& inputShape,
@@ -126,7 +125,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseAxisOneSplitVTwo, SimpleSplitVAxisOneFixture)
+TEST_CASE_FIXTURE(SimpleSplitVAxisOneFixture, "ParseAxisOneSplitVTwo")
 {
     RunTest<4, armnn::DataType::Float32>(
         0,
@@ -148,7 +147,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseAxisTwoSplitVTwo, SimpleSplitVAxisTwoFixture)
+TEST_CASE_FIXTURE(SimpleSplitVAxisTwoFixture, "ParseAxisTwoSplitVTwo")
 {
     RunTest<4, armnn::DataType::Float32>(
         0,
@@ -170,7 +169,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseAxisThreeSplitVTwo, SimpleSplitVAxisThreeFixture)
+TEST_CASE_FIXTURE(SimpleSplitVAxisThreeFixture, "ParseAxisThreeSplitVTwo")
 {
     RunTest<4, armnn::DataType::Float32>(
         0,
@@ -192,7 +191,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseAxisFourSplitVTwo, SimpleSplitVAxisFourFixture)
+TEST_CASE_FIXTURE(SimpleSplitVAxisFourFixture, "ParseAxisFourSplitVTwo")
 {
     RunTest<4, armnn::DataType::Float32>(
         0,
@@ -206,4 +205,4 @@
           {"outputTensor2", { 4.0f, 8.0f, 12.0f, 16.0f, 20.0f, 24.0f, 28.0f, 32.0f } } } );
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Squeeze.cpp b/src/armnnTfLiteParser/test/Squeeze.cpp
index 86a1966..da870fd 100644
--- a/src/armnnTfLiteParser/test/Squeeze.cpp
+++ b/src/armnnTfLiteParser/test/Squeeze.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Squeeze")
+{
 struct SqueezeFixture : public ParserFlatbuffersFixture
 {
     explicit SqueezeFixture(const std::string& inputShape,
@@ -82,11 +81,11 @@
     SqueezeFixtureWithSqueezeDims() : SqueezeFixture("[ 1, 2, 2, 1 ]", "[ 2, 2, 1 ]", "[ 0, 1, 2 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseSqueezeWithSqueezeDims, SqueezeFixtureWithSqueezeDims)
+TEST_CASE_FIXTURE(SqueezeFixtureWithSqueezeDims, "ParseSqueezeWithSqueezeDims")
 {
     SetupSingleInputSingleOutput("inputTensor", "outputTensor");
     RunTest<3, armnn::DataType::QAsymmU8>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
-    BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+    CHECK((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
         == armnn::TensorShape({2,2,1})));
 
 }
@@ -96,11 +95,11 @@
     SqueezeFixtureWithoutSqueezeDims() : SqueezeFixture("[ 1, 2, 2, 1 ]", "[ 2, 2 ]", "") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseSqueezeWithoutSqueezeDims, SqueezeFixtureWithoutSqueezeDims)
+TEST_CASE_FIXTURE(SqueezeFixtureWithoutSqueezeDims, "ParseSqueezeWithoutSqueezeDims")
 {
     SetupSingleInputSingleOutput("inputTensor", "outputTensor");
     RunTest<2, armnn::DataType::QAsymmU8>(0, { 1, 2, 3, 4 }, { 1, 2, 3, 4 });
-    BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+    CHECK((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
         == armnn::TensorShape({2,2})));
 }
 
@@ -109,10 +108,10 @@
     SqueezeFixtureWithInvalidInput() : SqueezeFixture("[ 1, 2, 2, 1, 2, 2 ]", "[ 1, 2, 2, 1, 2 ]", "[ ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseSqueezeInvalidInput, SqueezeFixtureWithInvalidInput)
+TEST_CASE_FIXTURE(SqueezeFixtureWithInvalidInput, "ParseSqueezeInvalidInput")
 {
     static_assert(armnn::MaxNumOfTensorDimensions == 5, "Please update SqueezeFixtureWithInvalidInput");
-    BOOST_CHECK_THROW((SetupSingleInputSingleOutput("inputTensor", "outputTensor")),
+    CHECK_THROWS_AS((SetupSingleInputSingleOutput("inputTensor", "outputTensor")),
                       armnn::InvalidArgumentException);
 }
 
@@ -123,9 +122,9 @@
                                                                 "[ 1, 2, 2, 2, 2 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseSqueezeInvalidSqueezeDims, SqueezeFixtureWithSqueezeDimsSizeInvalid)
+TEST_CASE_FIXTURE(SqueezeFixtureWithSqueezeDimsSizeInvalid, "ParseSqueezeInvalidSqueezeDims")
 {
-    BOOST_CHECK_THROW((SetupSingleInputSingleOutput("inputTensor", "outputTensor")), armnn::ParseException);
+    CHECK_THROWS_AS((SetupSingleInputSingleOutput("inputTensor", "outputTensor")), armnn::ParseException);
 }
 
 
@@ -136,10 +135,10 @@
                                                              "[ -2 , 2 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseSqueezeNegativeSqueezeDims, SqueezeFixtureWithNegativeSqueezeDims)
+TEST_CASE_FIXTURE(SqueezeFixtureWithNegativeSqueezeDims, "ParseSqueezeNegativeSqueezeDims")
 {
-    BOOST_CHECK_THROW((SetupSingleInputSingleOutput("inputTensor", "outputTensor")), armnn::ParseException);
+    CHECK_THROWS_AS((SetupSingleInputSingleOutput("inputTensor", "outputTensor")), armnn::ParseException);
 }
 
 
-BOOST_AUTO_TEST_SUITE_END()
+}
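
The negative Squeeze tests above show the exception-assertion side of the migration: BOOST_CHECK_THROW(expr, Exception) becomes CHECK_THROWS_AS(expr, Exception), and the corresponding no-throw form maps to CHECK_NOTHROW. A small standalone illustration, with a made-up ParseOrThrow helper that is not part of ArmNN:

    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
    #include <doctest/doctest.h>

    #include <stdexcept>

    // Made-up helper used only to exercise the exception assertions.
    void ParseOrThrow(bool valid)
    {
        if (!valid)
        {
            throw std::invalid_argument("invalid model");
        }
    }

    TEST_CASE("ExceptionAssertionSketch")
    {
        CHECK_NOTHROW(ParseOrThrow(true));                            // was BOOST_CHECK_NO_THROW
        CHECK_THROWS_AS(ParseOrThrow(false), std::invalid_argument);  // was BOOST_CHECK_THROW
    }
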
diff --git a/src/armnnTfLiteParser/test/StridedSlice.cpp b/src/armnnTfLiteParser/test/StridedSlice.cpp
index 91427a6..2951b88 100644
--- a/src/armnnTfLiteParser/test/StridedSlice.cpp
+++ b/src/armnnTfLiteParser/test/StridedSlice.cpp
@@ -3,14 +3,13 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_StridedSlice")
+{
 struct StridedSliceFixture : public ParserFlatbuffersFixture
 {
     explicit StridedSliceFixture(const std::string & inputShape,
@@ -115,7 +114,7 @@
                                                  ) {}
 };
 
-BOOST_FIXTURE_TEST_CASE(StridedSlice4D, StridedSlice4DFixture)
+TEST_CASE_FIXTURE(StridedSlice4DFixture, "StridedSlice4D")
 {
   RunTest<4, armnn::DataType::Float32>(
       0,
@@ -147,7 +146,7 @@
                                                         ) {}
 };
 
-BOOST_FIXTURE_TEST_CASE(StridedSlice4DReverse, StridedSlice4DReverseFixture)
+TEST_CASE_FIXTURE(StridedSlice4DReverseFixture, "StridedSlice4DReverse")
 {
   RunTest<4, armnn::DataType::Float32>(
       0,
@@ -170,7 +169,7 @@
                                                  ) {}
 };
 
-BOOST_FIXTURE_TEST_CASE(StridedSliceSimpleStride, StridedSliceSimpleStrideFixture)
+TEST_CASE_FIXTURE(StridedSliceSimpleStrideFixture, "StridedSliceSimpleStride")
 {
   RunTest<4, armnn::DataType::Float32>(
       0,
@@ -197,7 +196,7 @@
                                                  ) {}
 };
 
-BOOST_FIXTURE_TEST_CASE(StridedSliceSimpleRangeMask, StridedSliceSimpleRangeMaskFixture)
+TEST_CASE_FIXTURE(StridedSliceSimpleRangeMaskFixture, "StridedSliceSimpleRangeMask")
 {
   RunTest<4, armnn::DataType::Float32>(
       0,
@@ -214,4 +213,4 @@
                           5.0f, 5.0f, 5.0f, 6.0f, 6.0f, 6.0f }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Sub.cpp b/src/armnnTfLiteParser/test/Sub.cpp
index 2854d81..4e715ff 100644
--- a/src/armnnTfLiteParser/test/Sub.cpp
+++ b/src/armnnTfLiteParser/test/Sub.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Sub")
+{
 struct SubFixture : public ParserFlatbuffersFixture
 {
     explicit SubFixture(const std::string & inputShape1,
@@ -95,7 +94,7 @@
                                     "[ 1, 4 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(SimpleSub, SimpleSubFixture)
+TEST_CASE_FIXTURE(SimpleSubFixture, "SimpleSub")
 {
   RunTest<2, armnn::DataType::QAsymmU8>(
       0,
@@ -111,7 +110,7 @@
                                      "[  ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(DynamicSub, DynamicSubFixture)
+TEST_CASE_FIXTURE(DynamicSubFixture, "DynamicSub")
 {
     RunTest<2, armnn::DataType::QAsymmU8, armnn::DataType::QAsymmU8>(
         0,
@@ -121,4 +120,4 @@
         true);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Sum.cpp b/src/armnnTfLiteParser/test/Sum.cpp
index 177bcd5..09b20b6 100644
--- a/src/armnnTfLiteParser/test/Sum.cpp
+++ b/src/armnnTfLiteParser/test/Sum.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Sum")
+{
 struct SumFixture : public ParserFlatbuffersFixture
 {
     explicit SumFixture(const std::string& inputShape,
@@ -93,7 +92,7 @@
     SimpleSumFixture() : SumFixture("[ 1, 3, 2, 4 ]", "[ 1, 1, 1, 4 ]", "[ 2 ]", "[ 1, 0, 0, 0,  2, 0, 0, 0 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(ParseSum, SimpleSumFixture)
+TEST_CASE_FIXTURE(SimpleSumFixture, "ParseSum")
 {
     RunTest<4, armnn::DataType::Float32, armnn::DataType::Float32>
         (0, {{ "inputTensor", { 1.0f,   2.0f,   3.0f,   4.0f,
@@ -107,4 +106,4 @@
             {{ "outputTensor", { 666.0f, 888.0f, 1110.0f, 1332.0f } } });
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/TfLiteParser.cpp b/src/armnnTfLiteParser/test/TfLiteParser.cpp
index 36827c0..53fe4a3 100644
--- a/src/armnnTfLiteParser/test/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/test/TfLiteParser.cpp
@@ -3,18 +3,18 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
-BOOST_AUTO_TEST_CASE(ParseEmptyBinaryData)
+TEST_SUITE("TensorflowLiteParser")
+{
+TEST_CASE("ParseEmptyBinaryData")
 {
     ITfLiteParser::TfLiteParserOptions options;
     ITfLiteParserPtr m_Parser(ITfLiteParser::Create(armnn::Optional<ITfLiteParser::TfLiteParserOptions>(options)));
     // Should throw armnn::ParseException: Buffer doesn't conform to the expected Tensorflow Lite flatbuffers format.
-    BOOST_CHECK_THROW(m_Parser->CreateNetworkFromBinary({0}), armnn::ParseException);
+    CHECK_THROWS_AS(m_Parser->CreateNetworkFromBinary({0}), armnn::ParseException);
 }
 
 struct NoInputBindingsFixture : public ParserFlatbuffersFixture
@@ -32,10 +32,10 @@
     }
 };
 
-BOOST_FIXTURE_TEST_CASE( ParseBadInputBindings, NoInputBindingsFixture )
+TEST_CASE_FIXTURE(NoInputBindingsFixture, "ParseBadInputBindings")
 {
     // Should throw armnn::ParseException: No input binding found for subgraph:0 and name:inputTensor.
-    BOOST_CHECK_THROW( (RunTest<4, armnn::DataType::QAsymmU8>(0, { }, { 0 })), armnn::ParseException);
+    CHECK_THROWS_AS((RunTest<4, armnn::DataType::QAsymmU8>(0, { }, { 0 })), armnn::ParseException);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Transpose.cpp b/src/armnnTfLiteParser/test/Transpose.cpp
index b2f953e..5429e56 100644
--- a/src/armnnTfLiteParser/test/Transpose.cpp
+++ b/src/armnnTfLiteParser/test/Transpose.cpp
@@ -3,12 +3,11 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Transpose")
+{
 struct TransposeFixture : public ParserFlatbuffersFixture
 {
     explicit TransposeFixture(const std::string & inputShape,
@@ -118,14 +117,14 @@
                                                          "[ 2, 3, 2 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(TransposeWithPermuteData, TransposeFixtureWithPermuteData)
+TEST_CASE_FIXTURE(TransposeFixtureWithPermuteData, "TransposeWithPermuteData")
 {
     RunTest<3, armnn::DataType::Float32>(
       0,
       {{"inputTensor", { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 }}},
       {{"outputTensor", { 1, 4, 2, 5, 3, 6, 7, 10, 8, 11, 9, 12 }}});
 
-    BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+    CHECK((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
                 == armnn::TensorShape({2,3,2})));
 }
 
@@ -139,15 +138,15 @@
                                                             "[ 3, 2, 2 ]") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(TransposeWithoutPermuteDims, TransposeFixtureWithoutPermuteData)
+TEST_CASE_FIXTURE(TransposeFixtureWithoutPermuteData, "TransposeWithoutPermuteDims")
 {
     RunTest<3, armnn::DataType::Float32>(
         0,
         {{"inputTensor", { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12 }}},
         {{"outputTensor", { 1, 7, 4, 10, 2, 8, 5, 11, 3, 9, 6, 12 }}});
 
-    BOOST_TEST((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
+    CHECK((m_Parser->GetNetworkOutputBindingInfo(0, "outputTensor").second.GetShape()
                 == armnn::TensorShape({3,2,2})));
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/armnnTfLiteParser/test/TransposeConv.cpp b/src/armnnTfLiteParser/test/TransposeConv.cpp
index f990941..0f53e73 100644
--- a/src/armnnTfLiteParser/test/TransposeConv.cpp
+++ b/src/armnnTfLiteParser/test/TransposeConv.cpp
@@ -3,12 +3,11 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_TransposeConv")
+{
 struct TransposeConvFixture : public ParserFlatbuffersFixture
 {
     explicit TransposeConvFixture(const std::string& inputShape,
@@ -116,7 +115,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE( ParseSimpleTransposeConv, SimpleTransposeConvFixture )
+TEST_CASE_FIXTURE(SimpleTransposeConvFixture, "ParseSimpleTransposeConv")
 {
     RunTest<4, armnn::DataType::QAsymmU8>(
         0,
@@ -255,7 +254,7 @@
     {}
 };
 
-BOOST_FIXTURE_TEST_CASE( ParseSimpleTransposeConvWithBias, SimpleTransposeConvFixtureWithBias )
+TEST_CASE_FIXTURE(SimpleTransposeConvFixtureWithBias, "ParseSimpleTransposeConvWithBias")
 {
     RunTest<4, armnn::DataType::QAsymmU8>(
         0,
@@ -571,7 +570,7 @@
     }
 };
 
-BOOST_FIXTURE_TEST_CASE( ParseTransposeConvPerChannel, TransposeConvPerChannelFixture )
+TEST_CASE_FIXTURE(TransposeConvPerChannelFixture, "ParseTransposeConvPerChannel")
 {
     RunTest<4, armnn::DataType::QAsymmS8>(
         0,
@@ -601,4 +600,4 @@
         });
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Unpack.cpp b/src/armnnTfLiteParser/test/Unpack.cpp
index 4fcd74f..991352b 100644
--- a/src/armnnTfLiteParser/test/Unpack.cpp
+++ b/src/armnnTfLiteParser/test/Unpack.cpp
@@ -3,15 +3,14 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
 #include "ParserFlatbuffersFixture.hpp"
 #include "../TfLiteParser.hpp"
 
 #include <string>
 #include <iostream>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Unpack")
+{
 struct UnpackFixture : public ParserFlatbuffersFixture
 {
     explicit UnpackFixture(const std::string& inputShape,
@@ -110,7 +109,7 @@
     DefaultUnpackAxisZeroUint8Fixture() : UnpackFixture("[ 4, 1, 6 ]", 4, "[ 1, 6 ]", "0", "", "UINT8", "0.1", "0") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(UnpackAxisZeroNumIsDefaultNotSpecified, DefaultUnpackAxisZeroFixture)
+TEST_CASE_FIXTURE(DefaultUnpackAxisZeroFixture, "UnpackAxisZeroNumIsDefaultNotSpecified")
 {
     RunTest<2, armnn::DataType::Float32>(
         0,
@@ -124,7 +123,7 @@
           {"outputTensor4", { 19.0f, 20.0f, 21.0f, 22.0f, 23.0f, 24.0f }} });
 }
 
-BOOST_FIXTURE_TEST_CASE(UnpackAxisZeroNumIsDefaultNotSpecifiedUint8, DefaultUnpackAxisZeroUint8Fixture)
+TEST_CASE_FIXTURE(DefaultUnpackAxisZeroUint8Fixture, "UnpackAxisZeroNumIsDefaultNotSpecifiedUint8")
 {
     RunTest<2, armnn::DataType::QAsymmU8>(
         0,
@@ -148,7 +147,7 @@
     DefaultUnpackLastAxisUint8Fixture() : UnpackFixture("[ 4, 1, 6 ]", 6, "[ 4, 1 ]", "2", "6", "UINT8", "0.1", "0") {}
 };
 
-BOOST_FIXTURE_TEST_CASE(UnpackLastAxisNumSix, DefaultUnpackLastAxisFixture)
+TEST_CASE_FIXTURE(DefaultUnpackLastAxisFixture, "UnpackLastAxisNumSix")
 {
     RunTest<2, armnn::DataType::Float32>(
         0,
@@ -164,7 +163,7 @@
           {"outputTensor6", { 6.0f, 12.0f, 18.0f, 24.0f }} });
 }
 
-BOOST_FIXTURE_TEST_CASE(UnpackLastAxisNumSixUint8, DefaultUnpackLastAxisUint8Fixture) {
+TEST_CASE_FIXTURE(DefaultUnpackLastAxisUint8Fixture, "UnpackLastAxisNumSixUint8") {
     RunTest<2, armnn::DataType::QAsymmU8>(
         0,
         {{"inputTensor", { 1, 2, 3, 4, 5, 6,
@@ -179,4 +178,4 @@
          {"outputTensor6", { 60, 120, 180, 240 }}});
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnTfLiteParser/test/Unsupported.cpp b/src/armnnTfLiteParser/test/Unsupported.cpp
index b0ac2d6..69744f4 100644
--- a/src/armnnTfLiteParser/test/Unsupported.cpp
+++ b/src/armnnTfLiteParser/test/Unsupported.cpp
@@ -13,14 +13,12 @@
 
 #include <layers/StandInLayer.hpp>
 
-#include <boost/test/unit_test.hpp>
-
 #include <sstream>
 #include <string>
 #include <vector>
 
-BOOST_AUTO_TEST_SUITE(TensorflowLiteParser)
-
+TEST_SUITE("TensorflowLiteParser_Unsupported")
+{
 using namespace armnn;
 
 class StandInLayerVerifier : public LayerVisitorBase<VisitorThrowingPolicy>
@@ -41,27 +39,27 @@
                            const char*) override
     {
         unsigned int numInputs = armnn::numeric_cast<unsigned int>(m_InputInfos.size());
-        BOOST_CHECK(descriptor.m_NumInputs    == numInputs);
-        BOOST_CHECK(layer->GetNumInputSlots() == numInputs);
+        CHECK(descriptor.m_NumInputs    == numInputs);
+        CHECK(layer->GetNumInputSlots() == numInputs);
 
         unsigned int numOutputs = armnn::numeric_cast<unsigned int>(m_OutputInfos.size());
-        BOOST_CHECK(descriptor.m_NumOutputs    == numOutputs);
-        BOOST_CHECK(layer->GetNumOutputSlots() == numOutputs);
+        CHECK(descriptor.m_NumOutputs    == numOutputs);
+        CHECK(layer->GetNumOutputSlots() == numOutputs);
 
         const StandInLayer* standInLayer = PolymorphicDowncast<const StandInLayer*>(layer);
         for (unsigned int i = 0u; i < numInputs; ++i)
         {
             const OutputSlot* connectedSlot = standInLayer->GetInputSlot(i).GetConnectedOutputSlot();
-            BOOST_CHECK(connectedSlot != nullptr);
+            CHECK(connectedSlot != nullptr);
 
             const TensorInfo& inputInfo = connectedSlot->GetTensorInfo();
-            BOOST_CHECK(inputInfo == m_InputInfos[i]);
+            CHECK(inputInfo == m_InputInfos[i]);
         }
 
         for (unsigned int i = 0u; i < numOutputs; ++i)
         {
             const TensorInfo& outputInfo = layer->GetOutputSlot(i).GetTensorInfo();
-            BOOST_CHECK(outputInfo == m_OutputInfos[i]);
+            CHECK(outputInfo == m_OutputInfos[i]);
         }
     }
 
@@ -237,14 +235,14 @@
                              { TensorInfo({ 3, 3 }, DataType::Float32) }) {}
 };
 
-BOOST_FIXTURE_TEST_CASE(UnsupportedCustomOperator1Input1Output, DummyCustom1Input1OutputFixture)
+TEST_CASE_FIXTURE(DummyCustom1Input1OutputFixture, "UnsupportedCustomOperator1Input1Output")
 {
     RunTest();
 }
 
-BOOST_FIXTURE_TEST_CASE(UnsupportedCustomOperator2Inputs1Output, DummyCustom2Inputs1OutputFixture)
+TEST_CASE_FIXTURE(DummyCustom2Inputs1OutputFixture, "UnsupportedCustomOperator2Inputs1Output")
 {
     RunTest();
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnUtils/ParserPrototxtFixture.hpp b/src/armnnUtils/ParserPrototxtFixture.hpp
index 0ff7e59..08ac3ae 100644
--- a/src/armnnUtils/ParserPrototxtFixture.hpp
+++ b/src/armnnUtils/ParserPrototxtFixture.hpp
@@ -11,6 +11,7 @@
 #include <Network.hpp>
 #include <VerificationHelpers.hpp>
 
+#include <doctest/doctest.h>
 #include <fmt/format.h>
 
 #include <iomanip>
@@ -257,12 +258,12 @@
         if (std::is_same<T, uint8_t>::value)
         {
             auto result = CompareTensors(outputExpected, outputStorage[it.first], shape, shape, true);
-            BOOST_TEST(result.m_Result, result.m_Message.str());
+            CHECK_MESSAGE(result.m_Result, result.m_Message.str());
         }
         else
         {
             auto result = CompareTensors(outputExpected, outputStorage[it.first], shape, shape);
-            BOOST_TEST(result.m_Result, result.m_Message.str());
+            CHECK_MESSAGE(result.m_Result, result.m_Message.str());
         }
     }
 }
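
BOOST_TEST accepted an optional second argument that was reported on failure; plain CHECK does not, so these comparison-helper call sites move to CHECK_MESSAGE(condition, message). A minimal sketch of that shape, where CompareResult and ComparePair are hypothetical stand-ins for the CompareTensors-style helpers, not ArmNN types:

    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
    #include <doctest/doctest.h>

    #include <sstream>

    // Hypothetical stand-in for the result object returned by the comparison helpers.
    struct CompareResult
    {
        bool m_Result = false;
        std::ostringstream m_Message;
    };

    CompareResult ComparePair(int expected, int actual)
    {
        CompareResult result;
        result.m_Result = (expected == actual);
        result.m_Message << "expected " << expected << " but got " << actual;
        return result;
    }

    TEST_CASE("CheckMessageSketch")
    {
        auto result = ComparePair(3, 3);
        // was: BOOST_TEST(result.m_Result, result.m_Message.str());
        CHECK_MESSAGE(result.m_Result, result.m_Message.str());
    }
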
diff --git a/src/armnnUtils/test/FloatingPointComparisonTest.cpp b/src/armnnUtils/test/FloatingPointComparisonTest.cpp
index dab0deb..7b97404 100644
--- a/src/armnnUtils/test/FloatingPointComparisonTest.cpp
+++ b/src/armnnUtils/test/FloatingPointComparisonTest.cpp
@@ -5,133 +5,133 @@
 
 #include <armnnUtils/FloatingPointComparison.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 using namespace armnnUtils;
 
-BOOST_AUTO_TEST_SUITE(FloatingPointComparisonSuite)
-
-BOOST_AUTO_TEST_CASE(FloatingPointComparisonDefaultTolerance)
+TEST_SUITE("FloatingPointComparisonSuite")
+{
+TEST_CASE("FloatingPointComparisonDefaultTolerance")
 {
     // 1% range of 1.2 is 1.188 -> 1.212
     // Just below tolerance.
-    BOOST_TEST(!within_percentage_tolerance(1.2f, 1.17f));
+    CHECK(!within_percentage_tolerance(1.2f, 1.17f));
     // Just above tolerance.
-    BOOST_TEST(!within_percentage_tolerance(1.2f, 1.213f));
+    CHECK(!within_percentage_tolerance(1.2f, 1.213f));
     // Just inside the lower range.
-    BOOST_TEST(within_percentage_tolerance(1.2f, 1.189f));
+    CHECK(within_percentage_tolerance(1.2f, 1.189f));
     // Just inside the upper range.
-    BOOST_TEST(within_percentage_tolerance(1.2f, 1.210f));
+    CHECK(within_percentage_tolerance(1.2f, 1.210f));
     // Exact match
-    BOOST_TEST(within_percentage_tolerance(1.2f, 1.2f));
+    CHECK(within_percentage_tolerance(1.2f, 1.2f));
 
     // Negative value tests.
-    BOOST_TEST(!within_percentage_tolerance(-1.2f, -1.17f));
-    BOOST_TEST(!within_percentage_tolerance(-1.2f, -1.213f));
-    BOOST_TEST(within_percentage_tolerance(-1.2f, -1.189f));
-    BOOST_TEST(within_percentage_tolerance(-1.2f, -1.210f));
-    BOOST_TEST(within_percentage_tolerance(-1.2f, -1.2f));
+    CHECK(!within_percentage_tolerance(-1.2f, -1.17f));
+    CHECK(!within_percentage_tolerance(-1.2f, -1.213f));
+    CHECK(within_percentage_tolerance(-1.2f, -1.189f));
+    CHECK(within_percentage_tolerance(-1.2f, -1.210f));
+    CHECK(within_percentage_tolerance(-1.2f, -1.2f));
 
     // Negative & positive tests
-    BOOST_TEST(!within_percentage_tolerance(1.2f, -1.2f));
-    BOOST_TEST(!within_percentage_tolerance(-1.2f, 1.2f));
+    CHECK(!within_percentage_tolerance(1.2f, -1.2f));
+    CHECK(!within_percentage_tolerance(-1.2f, 1.2f));
 
     // Negative and positive test with large float values.
-    BOOST_TEST(!within_percentage_tolerance(3.3E+38f, -1.17549435e38f));
-    BOOST_TEST(!within_percentage_tolerance(-1.17549435e38f, 3.3E+38f));
+    CHECK(!within_percentage_tolerance(3.3E+38f, -1.17549435e38f));
+    CHECK(!within_percentage_tolerance(-1.17549435e38f, 3.3E+38f));
 
     // 1% range of 0.04 is 0.0396 -> 0.0404
     // Just below tolerance.
-    BOOST_TEST(!within_percentage_tolerance(0.04f, 0.039f));
+    CHECK(!within_percentage_tolerance(0.04f, 0.039f));
     // Just above tolerance.
-    BOOST_TEST(!within_percentage_tolerance(0.04f, 0.04041f));
+    CHECK(!within_percentage_tolerance(0.04f, 0.04041f));
     // Just inside the lower range.
-    BOOST_TEST(within_percentage_tolerance(0.04f, 0.0397f));
+    CHECK(within_percentage_tolerance(0.04f, 0.0397f));
     // Just inside the upper range.
-    BOOST_TEST(within_percentage_tolerance(0.04f, 0.04039f));
+    CHECK(within_percentage_tolerance(0.04f, 0.04039f));
     // Exact match
-    BOOST_TEST(within_percentage_tolerance(0.04f, 0.04f));
+    CHECK(within_percentage_tolerance(0.04f, 0.04f));
 }
 
-BOOST_AUTO_TEST_CASE(FloatingPointComparisonLargePositiveNumbersDefaultTolerance)
+TEST_CASE("FloatingPointComparisonLargePositiveNumbersDefaultTolerance")
 {
     // Just below tolerance.
-    BOOST_TEST(!within_percentage_tolerance(3.3E+38f, (3.3E+38f * 0.989f)));
+    CHECK(!within_percentage_tolerance(3.3E+38f, (3.3E+38f * 0.989f)));
     // Just above tolerance.
-    BOOST_TEST(!within_percentage_tolerance(3.3E+38f, (3.3E+38f * 1.011f)));
+    CHECK(!within_percentage_tolerance(3.3E+38f, (3.3E+38f * 1.011f)));
     // Just inside the lower range.
-    BOOST_TEST(within_percentage_tolerance(3.3E+38f, (3.3E+38f * 0.992f)));
+    CHECK(within_percentage_tolerance(3.3E+38f, (3.3E+38f * 0.992f)));
     // Just inside the upper range.
-    BOOST_TEST(within_percentage_tolerance(3.3E+38f, (3.3E+38f * 1.009f)));
+    CHECK(within_percentage_tolerance(3.3E+38f, (3.3E+38f * 1.009f)));
     // Exact match
-    BOOST_TEST(within_percentage_tolerance(3.3E+38f, 3.3E+38f));
+    CHECK(within_percentage_tolerance(3.3E+38f, 3.3E+38f));
 }
 
-BOOST_AUTO_TEST_CASE(FloatingPointComparisonLargeNegativeNumbersDefaultTolerance)
+TEST_CASE("FloatingPointComparisonLargeNegativeNumbersDefaultTolerance")
 {
     // Just below tolerance.
-    BOOST_TEST(!within_percentage_tolerance(-1.17549435e38f, (-1.17549435e38f * -1.009f)));
+    CHECK(!within_percentage_tolerance(-1.17549435e38f, (-1.17549435e38f * -1.009f)));
     // Just above tolerance.
-    BOOST_TEST(!within_percentage_tolerance(-1.17549435e38f, (-1.17549435e38f * 1.011f)));
+    CHECK(!within_percentage_tolerance(-1.17549435e38f, (-1.17549435e38f * 1.011f)));
     // Just inside the lower range.
-    BOOST_TEST(within_percentage_tolerance(-1.17549435e38f, -1.17549435e38f - (-1.17549435e38f * 0.0099f)));
+    CHECK(within_percentage_tolerance(-1.17549435e38f, -1.17549435e38f - (-1.17549435e38f * 0.0099f)));
     // Just inside the upper range.
-    BOOST_TEST(within_percentage_tolerance(-1.17549435e38f, -1.17549435e38f + (-1.17549435e38f * 0.0099f)));
+    CHECK(within_percentage_tolerance(-1.17549435e38f, -1.17549435e38f + (-1.17549435e38f * 0.0099f)));
     // Exact match
-    BOOST_TEST(within_percentage_tolerance(-1.17549435e38f, -1.17549435e38f));
+    CHECK(within_percentage_tolerance(-1.17549435e38f, -1.17549435e38f));
 }
 
-BOOST_AUTO_TEST_CASE(FloatingPointComparisonSpecifiedTolerance)
+TEST_CASE("FloatingPointComparisonSpecifiedTolerance")
 {
     // 2% range of 1.2 is 1.176 -> 1.224
     // Just below tolerance.
-    BOOST_TEST(!within_percentage_tolerance(1.2f, 1.175f, 2.0f));
+    CHECK(!within_percentage_tolerance(1.2f, 1.175f, 2.0f));
     // Just above tolerance.
-    BOOST_TEST(!within_percentage_tolerance(1.2f, 1.226f, 2.0f));
+    CHECK(!within_percentage_tolerance(1.2f, 1.226f, 2.0f));
     // Just inside the lower range.
-    BOOST_TEST(within_percentage_tolerance(1.2f, 1.18f, 2.0f));
+    CHECK(within_percentage_tolerance(1.2f, 1.18f, 2.0f));
     // Just inside the upper range.
-    BOOST_TEST(within_percentage_tolerance(1.2f, 1.22f, 2.0f));
+    CHECK(within_percentage_tolerance(1.2f, 1.22f, 2.0f));
     // Exact match.
-    BOOST_TEST(within_percentage_tolerance(1.2f, 1.2f, 2.0f));
+    CHECK(within_percentage_tolerance(1.2f, 1.2f, 2.0f));
 
     // 5% range of 6.2 is 5.89 -> 6.51
     // Just below tolerance.
-    BOOST_TEST(!within_percentage_tolerance(6.2f, 5.88f, 5.0f));
+    CHECK(!within_percentage_tolerance(6.2f, 5.88f, 5.0f));
     // Just above tolerance.
-    BOOST_TEST(!within_percentage_tolerance(6.2f, 6.52f, 5.0f));
+    CHECK(!within_percentage_tolerance(6.2f, 6.52f, 5.0f));
     // Just inside the lower range.
-    BOOST_TEST(within_percentage_tolerance(6.2f, 5.9f, 5.0f));
+    CHECK(within_percentage_tolerance(6.2f, 5.9f, 5.0f));
     // Just inside the upper range.
-    BOOST_TEST(within_percentage_tolerance(6.2f, 6.5f, 5.0f));
+    CHECK(within_percentage_tolerance(6.2f, 6.5f, 5.0f));
 
     // Larger tolerance (unlikely to be used).
-    BOOST_TEST(within_percentage_tolerance(10.0f, 9.01f, 10.0f));
-    BOOST_TEST(!within_percentage_tolerance(10.0f, 8.99f, 10.0f));
+    CHECK(within_percentage_tolerance(10.0f, 9.01f, 10.0f));
+    CHECK(!within_percentage_tolerance(10.0f, 8.99f, 10.0f));
 }
 
-BOOST_AUTO_TEST_CASE(FloatingPointComparisonLargePositiveNumbersSpecifiedTolerance)
+TEST_CASE("FloatingPointComparisonLargePositiveNumbersSpecifiedTolerance")
 {
     // Just below tolerance.
-    BOOST_TEST(!within_percentage_tolerance(3.3E+38f, (3.3E+38f * 0.979f), 2.0f));
+    CHECK(!within_percentage_tolerance(3.3E+38f, (3.3E+38f * 0.979f), 2.0f));
     // Just above tolerance.
-    BOOST_TEST(!within_percentage_tolerance(3.3E+38f, (3.3E+38f * 1.021f), 2.0f));
+    CHECK(!within_percentage_tolerance(3.3E+38f, (3.3E+38f * 1.021f), 2.0f));
     // Just inside the lower range.
-    BOOST_TEST(within_percentage_tolerance(3.3E+38f, (3.3E+38f * 0.982f), 2.0f));
+    CHECK(within_percentage_tolerance(3.3E+38f, (3.3E+38f * 0.982f), 2.0f));
     // Just inside the upper range.
-    BOOST_TEST(within_percentage_tolerance(3.3E+38f, (3.3E+38f * 1.019f), 2.0f));
+    CHECK(within_percentage_tolerance(3.3E+38f, (3.3E+38f * 1.019f), 2.0f));
 }
 
-BOOST_AUTO_TEST_CASE(FloatingPointComparisonLargeNegativeNumbersSpecifiedTolerance)
+TEST_CASE("FloatingPointComparisonLargeNegativeNumbersSpecifiedTolerance")
 {
     // Just below tolerance.
-    BOOST_TEST(!within_percentage_tolerance(-1.17549435e38f, (-1.17549435e38f * -1.019f), 2.0f));
+    CHECK(!within_percentage_tolerance(-1.17549435e38f, (-1.17549435e38f * -1.019f), 2.0f));
     // Just above tolerance.
-    BOOST_TEST(!within_percentage_tolerance(-1.17549435e38f, (-1.17549435e38f * 1.021f), 2.0f));
+    CHECK(!within_percentage_tolerance(-1.17549435e38f, (-1.17549435e38f * 1.021f), 2.0f));
     // Just inside the lower range.
-    BOOST_TEST(within_percentage_tolerance(-1.17549435e38f, -1.17549435e38f - (-1.17549435e38f * 0.0089f), 2.0f));
+    CHECK(within_percentage_tolerance(-1.17549435e38f, -1.17549435e38f - (-1.17549435e38f * 0.0089f), 2.0f));
     // Just inside the upper range.
-    BOOST_TEST(within_percentage_tolerance(-1.17549435e38f, -1.17549435e38f + (-1.17549435e38f * 0.0089f), 2.0f));
+    CHECK(within_percentage_tolerance(-1.17549435e38f, -1.17549435e38f + (-1.17549435e38f * 0.0089f), 2.0f));
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
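
The bounds quoted in the comments above come from value multiplied by (1 +/- tolerance/100): at the default 1% tolerance, 1.2 spans 1.2 x 0.99 = 1.188 to 1.2 x 1.01 = 1.212, and at 5%, 6.2 spans 5.89 to 6.51. A tiny illustrative calculation of those bounds (not the armnnUtils implementation):

    #include <cmath>
    #include <cstdio>

    // Illustrative only: the bounds implied by a percentage tolerance around a value.
    void PrintToleranceBounds(float value, float tolerancePercent)
    {
        const float delta = std::fabs(value) * tolerancePercent / 100.0f;
        std::printf("%g .. %g\n", value - delta, value + delta);
    }

    int main()
    {
        PrintToleranceBounds(1.2f, 1.0f); // ~1.188 .. 1.212 (default 1% tolerance)
        PrintToleranceBounds(6.2f, 5.0f); // ~5.89  .. 6.51
        return 0;
    }
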
diff --git a/src/armnnUtils/test/ParserHelperTest.cpp b/src/armnnUtils/test/ParserHelperTest.cpp
index dbf0673..2ed9bd1 100644
--- a/src/armnnUtils/test/ParserHelperTest.cpp
+++ b/src/armnnUtils/test/ParserHelperTest.cpp
@@ -8,15 +8,15 @@
 #include <armnn/Tensor.hpp>
 #include <armnn/Types.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 
 using namespace armnn;
 using namespace armnnUtils;
 
-BOOST_AUTO_TEST_SUITE(ParserHelperSuite)
-
-BOOST_AUTO_TEST_CASE(CalculateReducedOutputTensoInfoTest)
+TEST_SUITE("ParserHelperSuite")
+{
+TEST_CASE("CalculateReducedOutputTensoInfoTest")
 {
     bool keepDims = false;
 
@@ -29,8 +29,8 @@
 
     CalculateReducedOutputTensoInfo(inputTensorInfo, axisData1, keepDims, outputTensorInfo1);
 
-    BOOST_TEST(outputTensorInfo1.GetNumDimensions() == 1);
-    BOOST_TEST(outputTensorInfo1.GetShape()[0] == 1);
+    CHECK(outputTensorInfo1.GetNumDimensions() == 1);
+    CHECK(outputTensorInfo1.GetShape()[0] == 1);
 
     // Reducing dimension 0 results in a 3x4 size tensor (one dimension)
     std::set<unsigned int> axisData2 = { 0 };
@@ -38,8 +38,8 @@
 
     CalculateReducedOutputTensoInfo(inputTensorInfo, axisData2, keepDims, outputTensorInfo2);
 
-    BOOST_TEST(outputTensorInfo2.GetNumDimensions() == 1);
-    BOOST_TEST(outputTensorInfo2.GetShape()[0] == 12);
+    CHECK(outputTensorInfo2.GetNumDimensions() == 1);
+    CHECK(outputTensorInfo2.GetShape()[0] == 12);
 
     // Reducing dimensions 0,1 results in a 4 size tensor (one dimension)
     std::set<unsigned int> axisData3 = { 0, 1 };
@@ -47,8 +47,8 @@
 
     CalculateReducedOutputTensoInfo(inputTensorInfo, axisData3, keepDims, outputTensorInfo3);
 
-    BOOST_TEST(outputTensorInfo3.GetNumDimensions() == 1);
-    BOOST_TEST(outputTensorInfo3.GetShape()[0] == 4);
+    CHECK(outputTensorInfo3.GetNumDimensions() == 1);
+    CHECK(outputTensorInfo3.GetShape()[0] == 4);
 
     // Reducing dimension 0 results in a { 1, 3, 4 } dimension tensor
     keepDims = true;
@@ -58,10 +58,10 @@
 
     CalculateReducedOutputTensoInfo(inputTensorInfo, axisData4, keepDims, outputTensorInfo4);
 
-    BOOST_TEST(outputTensorInfo4.GetNumDimensions() == 3);
-    BOOST_TEST(outputTensorInfo4.GetShape()[0] == 1);
-    BOOST_TEST(outputTensorInfo4.GetShape()[1] == 3);
-    BOOST_TEST(outputTensorInfo4.GetShape()[2] == 4);
+    CHECK(outputTensorInfo4.GetNumDimensions() == 3);
+    CHECK(outputTensorInfo4.GetShape()[0] == 1);
+    CHECK(outputTensorInfo4.GetShape()[1] == 3);
+    CHECK(outputTensorInfo4.GetShape()[2] == 4);
 
     // Reducing dimension 1, 2 results in a { 2, 1, 1 } dimension tensor
     keepDims = true;
@@ -71,12 +71,12 @@
 
     CalculateReducedOutputTensoInfo(inputTensorInfo, axisData5,  keepDims, outputTensorInfo5);
 
-    BOOST_TEST(outputTensorInfo5.GetNumDimensions() == 3);
-    BOOST_TEST(outputTensorInfo5.GetShape()[0] == 2);
-    BOOST_TEST(outputTensorInfo5.GetShape()[1] == 1);
-    BOOST_TEST(outputTensorInfo5.GetShape()[2] == 1);
+    CHECK(outputTensorInfo5.GetNumDimensions() == 3);
+    CHECK(outputTensorInfo5.GetShape()[0] == 2);
+    CHECK(outputTensorInfo5.GetShape()[1] == 1);
+    CHECK(outputTensorInfo5.GetShape()[2] == 1);
 
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
 
diff --git a/src/armnnUtils/test/PrototxtConversionsTest.cpp b/src/armnnUtils/test/PrototxtConversionsTest.cpp
index d51c801..dc6b6a5 100644
--- a/src/armnnUtils/test/PrototxtConversionsTest.cpp
+++ b/src/armnnUtils/test/PrototxtConversionsTest.cpp
@@ -6,40 +6,40 @@
 #include <PrototxtConversions.hpp>
 #include "armnn/Tensor.hpp"
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(PrototxtConversions)
-
-BOOST_AUTO_TEST_CASE(ConvertInt32ToOctalStringTest)
+TEST_SUITE("PrototxtConversions")
+{
+TEST_CASE("ConvertInt32ToOctalStringTest")
 {
     using armnnUtils::ConvertInt32ToOctalString;
 
     std::string octalString = ConvertInt32ToOctalString(1);
-    BOOST_TEST(octalString.compare("\\\\001\\\\000\\\\000\\\\000"));
+    CHECK(octalString.compare("\\\\001\\\\000\\\\000\\\\000"));
 
     octalString = ConvertInt32ToOctalString(256);
-    BOOST_TEST(octalString.compare("\\\\000\\\\100\\\\000\\\\000"));
+    CHECK(octalString.compare("\\\\000\\\\100\\\\000\\\\000"));
 
     octalString = ConvertInt32ToOctalString(65536);
-    BOOST_TEST(octalString.compare("\\\\000\\\\000\\\\100\\\\000"));
+    CHECK(octalString.compare("\\\\000\\\\000\\\\100\\\\000"));
 
     octalString = ConvertInt32ToOctalString(16777216);
-    BOOST_TEST(octalString.compare("\\\\000\\\\000\\\\000\\\\100"));
+    CHECK(octalString.compare("\\\\000\\\\000\\\\000\\\\100"));
 
     octalString = ConvertInt32ToOctalString(-1);
-    BOOST_TEST(octalString.compare("\\\\377\\\\377\\\\377\\\\377"));
+    CHECK(octalString.compare("\\\\377\\\\377\\\\377\\\\377"));
 
     octalString = ConvertInt32ToOctalString(-256);
-    BOOST_TEST(octalString.compare("\\\\000\\\\377\\\\377\\\\377"));
+    CHECK(octalString.compare("\\\\000\\\\377\\\\377\\\\377"));
 
     octalString = ConvertInt32ToOctalString(-65536);
-    BOOST_TEST(octalString.compare("\\\\000\\\\000\\\\377\\\\377"));
+    CHECK(octalString.compare("\\\\000\\\\000\\\\377\\\\377"));
 
     octalString = ConvertInt32ToOctalString(-16777216);
-    BOOST_TEST(octalString.compare("\\\\000\\\\000\\\\000\\\\377"));
+    CHECK(octalString.compare("\\\\000\\\\000\\\\000\\\\377"));
 }
 
-BOOST_AUTO_TEST_CASE(ConvertTensorShapeToStringTest)
+TEST_CASE("ConvertTensorShapeToStringTest")
 {
     using armnnUtils::ConvertTensorShapeToString;
     using armnn::TensorShape;
@@ -51,13 +51,13 @@
     };
 
     auto output_string = createAndConvert({5});
-    BOOST_TEST(output_string.compare(
+    CHECK(output_string.compare(
         "dim {\n"
         "size: 5\n"
         "}"));
 
     output_string = createAndConvert({4, 5});
-    BOOST_TEST(output_string.compare(
+    CHECK(output_string.compare(
         "dim {\n"
             "size: 4\n"
         "}\n"
@@ -67,7 +67,7 @@
         ));
 
     output_string = createAndConvert({3, 4, 5});
-    BOOST_TEST(output_string.compare(
+    CHECK(output_string.compare(
         "dim {\n"
             "size: 3\n"
         "}\n"
@@ -80,7 +80,7 @@
         ));
 
     output_string = createAndConvert({2, 3, 4, 5});
-    BOOST_TEST(output_string.compare(
+    CHECK(output_string.compare(
         "dim {\n"
             "size: 2\n"
         "}\n"
@@ -96,7 +96,7 @@
         ));
 
     output_string = createAndConvert({1, 2, 3, 4, 5});
-    BOOST_TEST(output_string.compare(
+    CHECK(output_string.compare(
         "dim {\n"
             "size: 1\n"
         "}\n"
@@ -115,7 +115,7 @@
         ));
 
     output_string = createAndConvert({0xffffffff, 0xffffffff});
-    BOOST_TEST(output_string.compare(
+    CHECK(output_string.compare(
         "dim {\n"
             "size: 4294967295\n"
         "}\n"
@@ -125,7 +125,7 @@
         ));
 
     output_string = createAndConvert({1, 0});
-    BOOST_TEST(output_string.compare(
+    CHECK(output_string.compare(
         "dim {\n"
             "size: 1\n"
         "}\n"
@@ -135,4 +135,4 @@
         ));
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnUtils/test/QuantizeHelperTest.cpp b/src/armnnUtils/test/QuantizeHelperTest.cpp
index 410fdfa..6df6d80 100644
--- a/src/armnnUtils/test/QuantizeHelperTest.cpp
+++ b/src/armnnUtils/test/QuantizeHelperTest.cpp
@@ -6,12 +6,12 @@
 #include <QuantizeHelper.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <vector>
 
-BOOST_AUTO_TEST_SUITE(QuantizeHelper)
-
+TEST_SUITE("QuantizeHelper")
+{
 namespace
 {
 
@@ -24,23 +24,23 @@
 
 } // anonymous namespace
 
-BOOST_AUTO_TEST_CASE(IsFloatIterFuncTest)
+TEST_CASE("IsFloatIterFuncTest")
 {
     std::vector<float> fArray;
-    BOOST_TEST(IsFloatIterFunc(fArray.begin()) == true);
-    BOOST_TEST(IsFloatIterFunc(fArray.cbegin()) == true);
+    CHECK(IsFloatIterFunc(fArray.begin()) == true);
+    CHECK(IsFloatIterFunc(fArray.cbegin()) == true);
 
     std::vector<double> dArray;
-    BOOST_TEST(IsFloatIterFunc(dArray.begin()) == true);
+    CHECK(IsFloatIterFunc(dArray.begin()) == true);
 
     std::vector<int> iArray;
-    BOOST_TEST(IsFloatIterFunc(iArray.begin()) == false);
+    CHECK(IsFloatIterFunc(iArray.begin()) == false);
 
     float floats[5];
-    BOOST_TEST(IsFloatIterFunc(&floats[0]) == true);
+    CHECK(IsFloatIterFunc(&floats[0]) == true);
 
     int ints[5];
-    BOOST_TEST(IsFloatIterFunc(&ints[0]) == false);
+    CHECK(IsFloatIterFunc(&ints[0]) == false);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/armnnUtils/test/TensorUtilsTest.cpp b/src/armnnUtils/test/TensorUtilsTest.cpp
index d24740b..6d5f719 100644
--- a/src/armnnUtils/test/TensorUtilsTest.cpp
+++ b/src/armnnUtils/test/TensorUtilsTest.cpp
@@ -7,131 +7,131 @@
 
 #include <armnnUtils/TensorUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 using namespace armnn;
 using namespace armnnUtils;
 
-BOOST_AUTO_TEST_SUITE(TensorUtilsSuite)
-
-BOOST_AUTO_TEST_CASE(ExpandDimsAxis0Test)
+TEST_SUITE("TensorUtilsSuite")
+{
+TEST_CASE("ExpandDimsAxis0Test")
 {
     armnn::TensorShape inputShape({ 2, 3, 4 });
 
     // Expand dimension 0
     armnn::TensorShape outputShape = ExpandDims(inputShape, 0);
-    BOOST_TEST(outputShape.GetNumDimensions() == 4);
-    BOOST_TEST(outputShape[0] == 1);
-    BOOST_TEST(outputShape[1] == 2);
-    BOOST_TEST(outputShape[2] == 3);
-    BOOST_TEST(outputShape[3] == 4);
+    CHECK(outputShape.GetNumDimensions() == 4);
+    CHECK(outputShape[0] == 1);
+    CHECK(outputShape[1] == 2);
+    CHECK(outputShape[2] == 3);
+    CHECK(outputShape[3] == 4);
 }
 
-BOOST_AUTO_TEST_CASE(ExpandDimsAxis1Test)
+TEST_CASE("ExpandDimsAxis1Test")
 {
     armnn::TensorShape inputShape({ 2, 3, 4 });
 
     // Expand dimension 1
     armnn::TensorShape outputShape = ExpandDims(inputShape, 1);
-    BOOST_TEST(outputShape.GetNumDimensions() == 4);
-    BOOST_TEST(outputShape[0] == 2);
-    BOOST_TEST(outputShape[1] == 1);
-    BOOST_TEST(outputShape[2] == 3);
-    BOOST_TEST(outputShape[3] == 4);
+    CHECK(outputShape.GetNumDimensions() == 4);
+    CHECK(outputShape[0] == 2);
+    CHECK(outputShape[1] == 1);
+    CHECK(outputShape[2] == 3);
+    CHECK(outputShape[3] == 4);
 }
 
-BOOST_AUTO_TEST_CASE(ExpandDimsAxis2Test)
+TEST_CASE("ExpandDimsAxis2Test")
 {
     armnn::TensorShape inputShape({ 2, 3, 4 });
 
     // Expand dimension 2
     armnn::TensorShape outputShape = ExpandDims(inputShape, 2);
-    BOOST_TEST(outputShape.GetNumDimensions() == 4);
-    BOOST_TEST(outputShape[0] == 2);
-    BOOST_TEST(outputShape[1] == 3);
-    BOOST_TEST(outputShape[2] == 1);
-    BOOST_TEST(outputShape[3] == 4);
+    CHECK(outputShape.GetNumDimensions() == 4);
+    CHECK(outputShape[0] == 2);
+    CHECK(outputShape[1] == 3);
+    CHECK(outputShape[2] == 1);
+    CHECK(outputShape[3] == 4);
 }
 
-BOOST_AUTO_TEST_CASE(ExpandDimsAxis3Test)
+TEST_CASE("ExpandDimsAxis3Test")
 {
     armnn::TensorShape inputShape({ 2, 3, 4 });
 
     // Expand dimension 3
     armnn::TensorShape outputShape = ExpandDims(inputShape, 3);
-    BOOST_TEST(outputShape.GetNumDimensions() == 4);
-    BOOST_TEST(outputShape[0] == 2);
-    BOOST_TEST(outputShape[1] == 3);
-    BOOST_TEST(outputShape[2] == 4);
-    BOOST_TEST(outputShape[3] == 1);
+    CHECK(outputShape.GetNumDimensions() == 4);
+    CHECK(outputShape[0] == 2);
+    CHECK(outputShape[1] == 3);
+    CHECK(outputShape[2] == 4);
+    CHECK(outputShape[3] == 1);
 }
 
-BOOST_AUTO_TEST_CASE(ExpandDimsNegativeAxis1Test)
+TEST_CASE("ExpandDimsNegativeAxis1Test")
 {
     armnn::TensorShape inputShape({ 2, 3, 4 });
 
     // Expand dimension -1
     armnn::TensorShape outputShape = ExpandDims(inputShape, -1);
-    BOOST_TEST(outputShape.GetNumDimensions() == 4);
-    BOOST_TEST(outputShape[0] == 2);
-    BOOST_TEST(outputShape[1] == 3);
-    BOOST_TEST(outputShape[2] == 4);
-    BOOST_TEST(outputShape[3] == 1);
+    CHECK(outputShape.GetNumDimensions() == 4);
+    CHECK(outputShape[0] == 2);
+    CHECK(outputShape[1] == 3);
+    CHECK(outputShape[2] == 4);
+    CHECK(outputShape[3] == 1);
 }
 
-BOOST_AUTO_TEST_CASE(ExpandDimsNegativeAxis2Test)
+TEST_CASE("ExpandDimsNegativeAxis2Test")
 {
     armnn::TensorShape inputShape({ 2, 3, 4 });
 
     // Expand dimension -2
     armnn::TensorShape outputShape = ExpandDims(inputShape, -2);
-    BOOST_TEST(outputShape.GetNumDimensions() == 4);
-    BOOST_TEST(outputShape[0] == 2);
-    BOOST_TEST(outputShape[1] == 3);
-    BOOST_TEST(outputShape[2] == 1);
-    BOOST_TEST(outputShape[3] == 4);
+    CHECK(outputShape.GetNumDimensions() == 4);
+    CHECK(outputShape[0] == 2);
+    CHECK(outputShape[1] == 3);
+    CHECK(outputShape[2] == 1);
+    CHECK(outputShape[3] == 4);
 }
 
-BOOST_AUTO_TEST_CASE(ExpandDimsNegativeAxis3Test)
+TEST_CASE("ExpandDimsNegativeAxis3Test")
 {
     armnn::TensorShape inputShape({ 2, 3, 4 });
 
     // Expand dimension -3
     armnn::TensorShape outputShape = ExpandDims(inputShape, -3);
-    BOOST_TEST(outputShape.GetNumDimensions() == 4);
-    BOOST_TEST(outputShape[0] == 2);
-    BOOST_TEST(outputShape[1] == 1);
-    BOOST_TEST(outputShape[2] == 3);
-    BOOST_TEST(outputShape[3] == 4);
+    CHECK(outputShape.GetNumDimensions() == 4);
+    CHECK(outputShape[0] == 2);
+    CHECK(outputShape[1] == 1);
+    CHECK(outputShape[2] == 3);
+    CHECK(outputShape[3] == 4);
 }
 
-BOOST_AUTO_TEST_CASE(ExpandDimsNegativeAxis4Test)
+TEST_CASE("ExpandDimsNegativeAxis4Test")
 {
     armnn::TensorShape inputShape({ 2, 3, 4 });
 
     // Expand dimension -4
     armnn::TensorShape outputShape = ExpandDims(inputShape, -4);
-    BOOST_TEST(outputShape.GetNumDimensions() == 4);
-    BOOST_TEST(outputShape[0] == 1);
-    BOOST_TEST(outputShape[1] == 2);
-    BOOST_TEST(outputShape[2] == 3);
-    BOOST_TEST(outputShape[3] == 4);
+    CHECK(outputShape.GetNumDimensions() == 4);
+    CHECK(outputShape[0] == 1);
+    CHECK(outputShape[1] == 2);
+    CHECK(outputShape[2] == 3);
+    CHECK(outputShape[3] == 4);
 }
 
-BOOST_AUTO_TEST_CASE(ExpandDimsInvalidAxisTest)
+TEST_CASE("ExpandDimsInvalidAxisTest")
 {
     armnn::TensorShape inputShape({ 2, 3, 4 });
 
     // Invalid expand dimension 4
-    BOOST_CHECK_THROW(ExpandDims(inputShape, 4), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(ExpandDims(inputShape, 4), armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(ExpandDimsInvalidNegativeAxisTest)
+TEST_CASE("ExpandDimsInvalidNegativeAxisTest")
 {
     armnn::TensorShape inputShape({ 2, 3, 4 });
 
     // Invalid expand dimension -5
-    BOOST_CHECK_THROW(ExpandDims(inputShape, -5), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(ExpandDims(inputShape, -5), armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
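
The negative-axis cases above follow the usual convention: for an input of rank r, a negative axis a resolves to r + 1 + a before the size-1 dimension is inserted, so -1 on a rank-3 shape behaves like axis 3 and -4 like axis 0, while -5 falls outside the valid range and throws. A small sketch of that mapping under this assumption (not the ExpandDims implementation itself):

    #include <cassert>

    // Illustrative mapping from a possibly negative expand-dims axis to its
    // insertion position for an input of the given rank (not ArmNN code).
    unsigned int ResolveExpandDimsAxis(int axis, unsigned int rank)
    {
        const int resolved = axis < 0 ? static_cast<int>(rank) + 1 + axis : axis;
        assert(resolved >= 0 && resolved <= static_cast<int>(rank));
        return static_cast<unsigned int>(resolved);
    }

    int main()
    {
        assert(ResolveExpandDimsAxis(-1, 3) == 3); // { 2, 3, 4 } -> { 2, 3, 4, 1 }
        assert(ResolveExpandDimsAxis(-4, 3) == 0); // { 2, 3, 4 } -> { 1, 2, 3, 4 }
        assert(ResolveExpandDimsAxis( 2, 3) == 2); // { 2, 3, 4 } -> { 2, 3, 1, 4 }
        return 0;
    }
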
diff --git a/src/armnnUtils/test/TransformIteratorTest.cpp b/src/armnnUtils/test/TransformIteratorTest.cpp
index c44e454..2151153 100644
--- a/src/armnnUtils/test/TransformIteratorTest.cpp
+++ b/src/armnnUtils/test/TransformIteratorTest.cpp
@@ -5,13 +5,14 @@
 
 #include <armnn/utility/TransformIterator.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
+#include <vector>
 #include <iostream>
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(TransformIteratorSuite)
-
+TEST_SUITE("TransformIteratorSuite")
+{
 namespace
 {
 
@@ -25,7 +26,7 @@
     return val + "a";
 }
 
-BOOST_AUTO_TEST_CASE(TransformIteratorTest)
+TEST_CASE("TransformIteratorTest")
 {
     struct WrapperTestClass
     {
@@ -63,14 +64,14 @@
 
     for(auto val : wrapperStringClass)
     {
-        BOOST_CHECK(val != "e");
+        CHECK(val != "e");
         i++;
     }
 
     i = 1;
     for(auto val : wrapperTestClass)
     {
-        BOOST_CHECK(val == square(i));
+        CHECK(val == square(i));
         i++;
     }
 
@@ -78,7 +79,7 @@
     // Check original vector is unchanged
     for(auto val : wrapperTestClass.m_Vec)
     {
-        BOOST_CHECK(val == i);
+        CHECK(val == i);
         i++;
     }
 
@@ -92,11 +93,11 @@
     i = 1;
     for(auto val : transformedVec)
     {
-        BOOST_CHECK(val == square(i));
+        CHECK(val == square(i));
         i++;
     }
 }
 
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/aclCommon/test/ArmComputeTensorUtilsTests.cpp b/src/backends/aclCommon/test/ArmComputeTensorUtilsTests.cpp
index 4ab7488..fa933a0 100644
--- a/src/backends/aclCommon/test/ArmComputeTensorUtilsTests.cpp
+++ b/src/backends/aclCommon/test/ArmComputeTensorUtilsTests.cpp
@@ -5,13 +5,13 @@
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 using namespace armnn::armcomputetensorutils;
 
-BOOST_AUTO_TEST_SUITE(ArmComputeTensorUtils)
-
-BOOST_AUTO_TEST_CASE(BuildArmComputeTensorInfoTest)
+TEST_SUITE("ArmComputeTensorUtils")
+{
+TEST_CASE("BuildArmComputeTensorInfoTest")
 {
 
     const armnn::TensorShape tensorShape = { 1, 2, 3, 4 };
@@ -26,21 +26,21 @@
     const arm_compute::TensorInfo aclTensorInfo0 = BuildArmComputeTensorInfo(tensorInfo0);
 
     const arm_compute::TensorShape& aclTensorShape = aclTensorInfo0.tensor_shape();
-    BOOST_CHECK(aclTensorShape.num_dimensions() == tensorShape.GetNumDimensions());
+    CHECK(aclTensorShape.num_dimensions() == tensorShape.GetNumDimensions());
     for(unsigned int i = 0u; i < tensorShape.GetNumDimensions(); ++i)
     {
         // NOTE: arm_compute tensor dimensions are stored in the opposite order
-        BOOST_CHECK(aclTensorShape[i] == tensorShape[tensorShape.GetNumDimensions() - i - 1]);
+        CHECK(aclTensorShape[i] == tensorShape[tensorShape.GetNumDimensions() - i - 1]);
     }
 
-    BOOST_CHECK(aclTensorInfo0.data_type() == arm_compute::DataType::QASYMM8);
-    BOOST_CHECK(aclTensorInfo0.quantization_info().scale()[0] == quantScale);
+    CHECK(aclTensorInfo0.data_type() == arm_compute::DataType::QASYMM8);
+    CHECK(aclTensorInfo0.quantization_info().scale()[0] == quantScale);
 
     // Tensor info with per-axis quantization
     const armnn::TensorInfo tensorInfo1(tensorShape, dataType, quantScales, 0);
     const arm_compute::TensorInfo aclTensorInfo1 = BuildArmComputeTensorInfo(tensorInfo1);
 
-    BOOST_CHECK(aclTensorInfo1.quantization_info().scale() == quantScales);
+    CHECK(aclTensorInfo1.quantization_info().scale() == quantScales);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp b/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp
index 0a30907..bdae998 100644
--- a/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp
+++ b/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp
@@ -19,6 +19,8 @@
 #include <neon/NeonTensorHandle.hpp>
 #endif
 
+#include <doctest/doctest.h>
+
 using namespace armnn;
 
 namespace
@@ -92,23 +94,23 @@
     auto workload2 = MakeAndCheckWorkload<CopyMemGenericWorkload>(*layer2, refFactory);
 
     MemCopyQueueDescriptor queueDescriptor1 = workload1->GetData();
-    BOOST_TEST(queueDescriptor1.m_Inputs.size() == 1);
-    BOOST_TEST(queueDescriptor1.m_Outputs.size() == 1);
+    CHECK(queueDescriptor1.m_Inputs.size() == 1);
+    CHECK(queueDescriptor1.m_Outputs.size() == 1);
     auto inputHandle1  = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor1.m_Inputs[0]);
     auto outputHandle1 = PolymorphicDowncast<IComputeTensorHandle*>(queueDescriptor1.m_Outputs[0]);
-    BOOST_TEST((inputHandle1->GetTensorInfo() == TensorInfo({2, 3}, DataType::Float32)));
+    CHECK((inputHandle1->GetTensorInfo() == TensorInfo({2, 3}, DataType::Float32)));
     auto result = CompareTensorHandleShape<IComputeTensorHandle>(outputHandle1, {2, 3});
-    BOOST_TEST(result.m_Result, result.m_Message.str());
+    CHECK_MESSAGE(result.m_Result, result.m_Message.str());
 
 
     MemCopyQueueDescriptor queueDescriptor2 = workload2->GetData();
-    BOOST_TEST(queueDescriptor2.m_Inputs.size() == 1);
-    BOOST_TEST(queueDescriptor2.m_Outputs.size() == 1);
+    CHECK(queueDescriptor2.m_Inputs.size() == 1);
+    CHECK(queueDescriptor2.m_Outputs.size() == 1);
     auto inputHandle2  = PolymorphicDowncast<IComputeTensorHandle*>(queueDescriptor2.m_Inputs[0]);
     auto outputHandle2 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor2.m_Outputs[0]);
     result = CompareTensorHandleShape<IComputeTensorHandle>(inputHandle2, {2, 3});
-    BOOST_TEST(result.m_Result, result.m_Message.str());
-    BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({2, 3}, DataType::Float32)));
+    CHECK_MESSAGE(result.m_Result, result.m_Message.str());
+    CHECK((outputHandle2->GetTensorInfo() == TensorInfo({2, 3}, DataType::Float32)));
 }
 
 } //namespace
diff --git a/src/backends/aclCommon/test/MemCopyTests.cpp b/src/backends/aclCommon/test/MemCopyTests.cpp
index 7612cbf..1325503 100644
--- a/src/backends/aclCommon/test/MemCopyTests.cpp
+++ b/src/backends/aclCommon/test/MemCopyTests.cpp
@@ -15,71 +15,67 @@
 #include <neon/test/NeonWorkloadFactoryHelper.hpp>
 #endif
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(MemCopyCommon)
-
-BOOST_AUTO_TEST_CASE(AclTypeConversions)
+TEST_SUITE("MemCopyCommon")
+{
+TEST_CASE("AclTypeConversions")
 {
     arm_compute::Strides strides(1, 2, 3, 4);
     armnn::TensorShape convertedStrides = armnn::armcomputetensorutils::GetStrides(strides);
 
-    BOOST_TEST(convertedStrides[0] == 4);
-    BOOST_TEST(convertedStrides[1] == 3);
-    BOOST_TEST(convertedStrides[2] == 2);
-    BOOST_TEST(convertedStrides[3] == 1);
+    CHECK(convertedStrides[0] == 4);
+    CHECK(convertedStrides[1] == 3);
+    CHECK(convertedStrides[2] == 2);
+    CHECK(convertedStrides[3] == 1);
 
     arm_compute::TensorShape shape(5, 6, 7, 8);
     armnn::TensorShape convertedshape = armnn::armcomputetensorutils::GetShape(shape);
 
-    BOOST_TEST(convertedshape[0] == 8);
-    BOOST_TEST(convertedshape[1] == 7);
-    BOOST_TEST(convertedshape[2] == 6);
-    BOOST_TEST(convertedshape[3] == 5);
+    CHECK(convertedshape[0] == 8);
+    CHECK(convertedshape[1] == 7);
+    CHECK(convertedshape[2] == 6);
+    CHECK(convertedshape[3] == 5);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
 
 #if defined(ARMCOMPUTECL_ENABLED) && defined(ARMCOMPUTENEON_ENABLED)
 
-BOOST_FIXTURE_TEST_SUITE(MemCopyClNeon, ClContextControlFixture)
-
-BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndGpu)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CopyBetweenNeonAndGpu")
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::NeonWorkloadFactory, armnn::ClWorkloadFactory, armnn::DataType::Float32>(false);
     auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
                                      result.m_ActualShape, result.m_ExpectedShape);
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndNeon)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CopyBetweenGpuAndNeon")
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::ClWorkloadFactory, armnn::NeonWorkloadFactory, armnn::DataType::Float32>(false);
     auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
                                      result.m_ActualShape, result.m_ExpectedShape);
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndGpuWithSubtensors)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CopyBetweenNeonAndGpuWithSubtensors")
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::NeonWorkloadFactory, armnn::ClWorkloadFactory, armnn::DataType::Float32>(true);
     auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
                                      result.m_ActualShape, result.m_ExpectedShape);
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndNeonWithSubtensors)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CopyBetweenGpuAndNeonWithSubtensors")
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::ClWorkloadFactory, armnn::NeonWorkloadFactory, armnn::DataType::Float32>(true);
     auto predResult = CompareTensors(result.m_ActualData, result.m_ExpectedData,
                                      result.m_ActualShape, result.m_ExpectedShape);
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_SUITE_END()
-
 #endif
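BOOST_FIXTURE_TEST_SUITE applied ClContextControlFixture to every case of the old MemCopyClNeon suite; doctest has no per-suite fixture, so each case above names the fixture explicitly through TEST_CASE_FIXTURE and the suite-level grouping is dropped. Each test gets a fresh fixture instance, and fixture members are reachable directly from the test body, as in Boost. A minimal sketch with a hypothetical fixture:

    #include <doctest/doctest.h>

    struct ExampleFixture            // stands in for ClContextControlFixture
    {
        int m_Value = 42;            // set up in the constructor, torn down in the destructor
    };

    TEST_CASE_FIXTURE(ExampleFixture, "UsesTheFixture")
    {
        CHECK(m_Value == 42);        // members of the fixture are in scope here
    }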
diff --git a/src/backends/backendsCommon/test/BackendIdTests.cpp b/src/backends/backendsCommon/test/BackendIdTests.cpp
index e11c13e..de6bef4 100644
--- a/src/backends/backendsCommon/test/BackendIdTests.cpp
+++ b/src/backends/backendsCommon/test/BackendIdTests.cpp
@@ -6,23 +6,23 @@
 #include <armnn/BackendId.hpp>
 #include <armnn/Types.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(BackendIdTests)
-
-BOOST_AUTO_TEST_CASE(CreateBackendIdFromCompute)
+TEST_SUITE("BackendIdTests")
+{
+TEST_CASE("CreateBackendIdFromCompute")
 {
     BackendId fromCompute{Compute::GpuAcc};
-    BOOST_TEST(fromCompute.Get() == GetComputeDeviceAsCString(Compute::GpuAcc));
+    CHECK(fromCompute.Get() == GetComputeDeviceAsCString(Compute::GpuAcc));
 }
 
-BOOST_AUTO_TEST_CASE(CreateBackendIdVectorFromCompute)
+TEST_CASE("CreateBackendIdVectorFromCompute")
 {
     std::vector<BackendId> fromComputes = {Compute::GpuAcc, Compute::CpuRef};
-    BOOST_TEST(fromComputes[0].Get() == GetComputeDeviceAsCString(Compute::GpuAcc));
-    BOOST_TEST(fromComputes[1].Get() == GetComputeDeviceAsCString(Compute::CpuRef));
+    CHECK(fromComputes[0].Get() == GetComputeDeviceAsCString(Compute::GpuAcc));
+    CHECK(fromComputes[1].Get() == GetComputeDeviceAsCString(Compute::CpuRef));
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/backendsCommon/test/BackendProfilingTests.cpp b/src/backends/backendsCommon/test/BackendProfilingTests.cpp
index 91399b4..62c06fe 100644
--- a/src/backends/backendsCommon/test/BackendProfilingTests.cpp
+++ b/src/backends/backendsCommon/test/BackendProfilingTests.cpp
@@ -21,7 +21,7 @@
 #include <armnn/Logging.hpp>
 #include <armnn/profiling/ISendTimelinePacket.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 #include <vector>
 
 #include <cstdint>
@@ -111,9 +111,9 @@
     return {packetId, dataLength, uniqueData};
 }
 
-BOOST_AUTO_TEST_SUITE(BackendProfilingTestSuite)
-
-BOOST_AUTO_TEST_CASE(BackendProfilingCounterRegisterMockBackendTest)
+TEST_SUITE("BackendProfilingTestSuite")
+{
+TEST_CASE("BackendProfilingCounterRegisterMockBackendTest")
 {
     // Reset the profiling service to the uninitialized state
     armnn::IRuntime::CreationOptions options;
@@ -133,17 +133,17 @@
     // Check if the MockBackends 3 dummy counters {0, 1, 2-5 (four cores)} are registered
     armnn::BackendId mockId = armnn::MockBackendId();
     const armnn::profiling::ICounterMappings& counterMap = GetProfilingService(&runtime).GetCounterMappings();
-    BOOST_CHECK(counterMap.GetGlobalId(0, mockId) == 5 + shiftedId);
-    BOOST_CHECK(counterMap.GetGlobalId(1, mockId) == 6 + shiftedId);
-    BOOST_CHECK(counterMap.GetGlobalId(2, mockId) == 7 + shiftedId);
-    BOOST_CHECK(counterMap.GetGlobalId(3, mockId) == 8 + shiftedId);
-    BOOST_CHECK(counterMap.GetGlobalId(4, mockId) == 9 + shiftedId);
-    BOOST_CHECK(counterMap.GetGlobalId(5, mockId) == 10 + shiftedId);
+    CHECK(counterMap.GetGlobalId(0, mockId) == 5 + shiftedId);
+    CHECK(counterMap.GetGlobalId(1, mockId) == 6 + shiftedId);
+    CHECK(counterMap.GetGlobalId(2, mockId) == 7 + shiftedId);
+    CHECK(counterMap.GetGlobalId(3, mockId) == 8 + shiftedId);
+    CHECK(counterMap.GetGlobalId(4, mockId) == 9 + shiftedId);
+    CHECK(counterMap.GetGlobalId(5, mockId) == 10 + shiftedId);
     options.m_ProfilingOptions.m_EnableProfiling = false;
     GetProfilingService(&runtime).ResetExternalProfilingOptions(options.m_ProfilingOptions, true);
 }
 
-BOOST_AUTO_TEST_CASE(TestBackendCounters)
+TEST_CASE("TestBackendCounters")
 {
     Holder holder;
     arm::pipe::PacketVersionResolver packetVersionResolver;
@@ -220,18 +220,18 @@
     periodicCounterCapture.Stop();
 
     std::set<armnn::BackendId> activeIds = holder.GetCaptureData().GetActiveBackends();
-    BOOST_CHECK(activeIds.size() == 1);
-    BOOST_CHECK(activeIds.find(gpuAccId) != activeIds.end());
+    CHECK(activeIds.size() == 1);
+    CHECK((activeIds.find(gpuAccId) != activeIds.end()));
 
     std::vector<Timestamp> recievedTimestamp = sendCounterPacket.GetTimestamps();
 
-    BOOST_CHECK(recievedTimestamp[0].timestamp == period);
-    BOOST_CHECK(recievedTimestamp.size() == 1);
-    BOOST_CHECK(recievedTimestamp[0].counterValues.size() == gpuCounters.size());
+    CHECK(recievedTimestamp[0].timestamp == period);
+    CHECK(recievedTimestamp.size() == 1);
+    CHECK(recievedTimestamp[0].counterValues.size() == gpuCounters.size());
     for (unsigned long i=0; i< gpuCounters.size(); ++i)
     {
-        BOOST_CHECK(recievedTimestamp[0].counterValues[i].counterId == gpuCounters[i]);
-        BOOST_CHECK(recievedTimestamp[0].counterValues[i].counterValue == i + 1u);
+        CHECK(recievedTimestamp[0].counterValues[i].counterId == gpuCounters[i]);
+        CHECK(recievedTimestamp[0].counterValues[i].counterValue == i + 1u);
     }
     sendCounterPacket.ClearTimestamps();
 
@@ -240,18 +240,18 @@
     periodicCounterCapture.Stop();
 
     activeIds = holder.GetCaptureData().GetActiveBackends();
-    BOOST_CHECK(activeIds.size() == 1);
-    BOOST_CHECK(activeIds.find(cpuAccId) != activeIds.end());
+    CHECK(activeIds.size() == 1);
+    CHECK((activeIds.find(cpuAccId) != activeIds.end()));
 
     recievedTimestamp = sendCounterPacket.GetTimestamps();
 
-    BOOST_CHECK(recievedTimestamp[0].timestamp == period);
-    BOOST_CHECK(recievedTimestamp.size() == 1);
-    BOOST_CHECK(recievedTimestamp[0].counterValues.size() == cpuCounters.size());
+    CHECK(recievedTimestamp[0].timestamp == period);
+    CHECK(recievedTimestamp.size() == 1);
+    CHECK(recievedTimestamp[0].counterValues.size() == cpuCounters.size());
     for (unsigned long i=0; i< cpuCounters.size(); ++i)
     {
-        BOOST_CHECK(recievedTimestamp[0].counterValues[i].counterId == cpuCounters[i]);
-        BOOST_CHECK(recievedTimestamp[0].counterValues[i].counterValue == i + 1u);
+        CHECK(recievedTimestamp[0].counterValues[i].counterId == cpuCounters[i]);
+        CHECK(recievedTimestamp[0].counterValues[i].counterValue == i + 1u);
     }
     sendCounterPacket.ClearTimestamps();
 
@@ -262,28 +262,27 @@
     periodicCounterCapture.Stop();
 
     activeIds = holder.GetCaptureData().GetActiveBackends();
-    BOOST_CHECK(activeIds.size() == 2);
-    BOOST_CHECK(activeIds.find(cpuAccId) != activeIds.end());
-    BOOST_CHECK(activeIds.find(gpuAccId) != activeIds.end());
+    CHECK(activeIds.size() == 2);
+    CHECK((activeIds.find(cpuAccId) != activeIds.end()));
+    CHECK((activeIds.find(gpuAccId) != activeIds.end()));
 
     recievedTimestamp = sendCounterPacket.GetTimestamps();
+    CHECK(recievedTimestamp[0].timestamp == period);
+    CHECK(recievedTimestamp[1].timestamp == period);
 
-    BOOST_CHECK(recievedTimestamp[0].timestamp == period);
-    BOOST_CHECK(recievedTimestamp[1].timestamp == period);
+    CHECK(recievedTimestamp.size() == 2);
+    CHECK(recievedTimestamp[0].counterValues.size() == 2);
+    CHECK(recievedTimestamp[1].counterValues.size() == gpuCounters.size());
 
-    BOOST_CHECK(recievedTimestamp.size() == 2);
-    BOOST_CHECK(recievedTimestamp[0].counterValues.size() == 2);
-    BOOST_CHECK(recievedTimestamp[1].counterValues.size() == gpuCounters.size());
-
-    BOOST_CHECK(recievedTimestamp[0].counterValues[0].counterId == cpuCounters[0]);
-    BOOST_CHECK(recievedTimestamp[0].counterValues[0].counterValue == 1u);
-    BOOST_CHECK(recievedTimestamp[0].counterValues[1].counterId == cpuCounters[1]);
-    BOOST_CHECK(recievedTimestamp[0].counterValues[1].counterValue == 2u);
+    CHECK(recievedTimestamp[0].counterValues[0].counterId == cpuCounters[0]);
+    CHECK(recievedTimestamp[0].counterValues[0].counterValue == 1u);
+    CHECK(recievedTimestamp[0].counterValues[1].counterId == cpuCounters[1]);
+    CHECK(recievedTimestamp[0].counterValues[1].counterValue == 2u);
 
     for (unsigned long i=0; i< gpuCounters.size(); ++i)
     {
-        BOOST_CHECK(recievedTimestamp[1].counterValues[i].counterId == gpuCounters[i]);
-        BOOST_CHECK(recievedTimestamp[1].counterValues[i].counterValue == i + 1u);
+        CHECK(recievedTimestamp[1].counterValues[i].counterId == gpuCounters[i]);
+        CHECK(recievedTimestamp[1].counterValues[i].counterValue == i + 1u);
     }
 
     sendCounterPacket.ClearTimestamps();
@@ -297,24 +297,24 @@
     periodicCounterCapture.Stop();
 
     activeIds = holder.GetCaptureData().GetActiveBackends();
-    BOOST_CHECK(activeIds.size() == 2);
-    BOOST_CHECK(activeIds.find(cpuAccId) != activeIds.end());
-    BOOST_CHECK(activeIds.find(gpuAccId) != activeIds.end());
+    CHECK(activeIds.size() == 2);
+    CHECK((activeIds.find(cpuAccId) != activeIds.end()));
+    CHECK((activeIds.find(gpuAccId) != activeIds.end()));
 
     recievedTimestamp = sendCounterPacket.GetTimestamps();
 
-    BOOST_CHECK(recievedTimestamp[0].counterValues.size() == cpuCounters.size());
+    CHECK(recievedTimestamp[0].counterValues.size() == cpuCounters.size());
     for (unsigned long i=0; i< cpuCounters.size(); ++i)
     {
-        BOOST_CHECK(recievedTimestamp[0].counterValues[i].counterId == cpuCounters[i]);
-        BOOST_CHECK(recievedTimestamp[0].counterValues[i].counterValue == i + 1u);
+        CHECK(recievedTimestamp[0].counterValues[i].counterId == cpuCounters[i]);
+        CHECK(recievedTimestamp[0].counterValues[i].counterValue == i + 1u);
     }
 
-    BOOST_CHECK(recievedTimestamp[1].counterValues.size() == gpuCounters.size());
+    CHECK(recievedTimestamp[1].counterValues.size() == gpuCounters.size());
     for (unsigned long i=0; i< gpuCounters.size(); ++i)
     {
-        BOOST_CHECK(recievedTimestamp[1].counterValues[i].counterId == gpuCounters[i]);
-        BOOST_CHECK(recievedTimestamp[1].counterValues[i].counterValue == i + 1u);
+        CHECK(recievedTimestamp[1].counterValues[i].counterId == gpuCounters[i]);
+        CHECK(recievedTimestamp[1].counterValues[i].counterValue == i + 1u);
     }
     sendCounterPacket.ClearTimestamps();
 
@@ -326,27 +326,27 @@
     periodicCounterCapture.Stop();
 
     activeIds = holder.GetCaptureData().GetActiveBackends();
-    BOOST_CHECK(activeIds.size() == 2);
-    BOOST_CHECK(activeIds.find(cpuAccId) != activeIds.end());
-    BOOST_CHECK(activeIds.find(gpuAccId) != activeIds.end());
+    CHECK(activeIds.size() == 2);
+    CHECK((activeIds.find(cpuAccId) != activeIds.end()));
+    CHECK((activeIds.find(gpuAccId) != activeIds.end()));
 
     recievedTimestamp = sendCounterPacket.GetTimestamps();
 
-    BOOST_CHECK(recievedTimestamp.size() == 2);
+    CHECK(recievedTimestamp.size() == 2);
 
-    BOOST_CHECK(recievedTimestamp[0].counterValues.size() == 2);
+    CHECK(recievedTimestamp[0].counterValues.size() == 2);
 
-    BOOST_CHECK(recievedTimestamp[0].counterValues[0].counterId == cpuCounters[0]);
-    BOOST_CHECK(recievedTimestamp[0].counterValues[0].counterValue == 1u);
-    BOOST_CHECK(recievedTimestamp[0].counterValues[1].counterId == cpuCounters[2]);
-    BOOST_CHECK(recievedTimestamp[0].counterValues[1].counterValue == 3u);
+    CHECK(recievedTimestamp[0].counterValues[0].counterId == cpuCounters[0]);
+    CHECK(recievedTimestamp[0].counterValues[0].counterValue == 1u);
+    CHECK(recievedTimestamp[0].counterValues[1].counterId == cpuCounters[2]);
+    CHECK(recievedTimestamp[0].counterValues[1].counterValue == 3u);
 
-    BOOST_CHECK(recievedTimestamp[1].counterValues.size() == 2);
+    CHECK(recievedTimestamp[1].counterValues.size() == 2);
 
-    BOOST_CHECK(recievedTimestamp[1].counterValues[0].counterId == gpuCounters[0]);
-    BOOST_CHECK(recievedTimestamp[1].counterValues[0].counterValue == 1u);
-    BOOST_CHECK(recievedTimestamp[1].counterValues[1].counterId == gpuCounters[1]);
-    BOOST_CHECK(recievedTimestamp[1].counterValues[1].counterValue == 2u);
+    CHECK(recievedTimestamp[1].counterValues[0].counterId == gpuCounters[0]);
+    CHECK(recievedTimestamp[1].counterValues[0].counterValue == 1u);
+    CHECK(recievedTimestamp[1].counterValues[1].counterId == gpuCounters[1]);
+    CHECK(recievedTimestamp[1].counterValues[1].counterValue == 2u);
 
     sendCounterPacket.ClearTimestamps();
 
@@ -355,10 +355,10 @@
     periodicCounterCapture.Stop();
 
     activeIds = holder.GetCaptureData().GetActiveBackends();
-    BOOST_CHECK(activeIds.size() == 0);
+    CHECK(activeIds.size() == 0);
 
     recievedTimestamp = sendCounterPacket.GetTimestamps();
-    BOOST_CHECK(recievedTimestamp.size() == 0);
+    CHECK(recievedTimestamp.size() == 0);
 
     sendCounterPacket.ClearTimestamps();
 
@@ -367,13 +367,13 @@
     periodicCounterCapture.Stop();
 
     activeIds = holder.GetCaptureData().GetActiveBackends();
-    BOOST_CHECK(activeIds.size() == 0);
+    CHECK(activeIds.size() == 0);
 
     recievedTimestamp = sendCounterPacket.GetTimestamps();
-    BOOST_CHECK(recievedTimestamp.size() == 0);
+    CHECK(recievedTimestamp.size() == 0);
 }
 
-BOOST_AUTO_TEST_CASE(TestBackendCounterLogging)
+TEST_CASE("TestBackendCounterLogging")
 {
     std::stringstream ss;
 
@@ -450,10 +450,10 @@
     periodicCounterCapture.Stop();
     SetLogFilter(armnn::LogSeverity::Fatal);
 
-    BOOST_CHECK(ss.str().find("ActivateCounters example test error") != std::string::npos);
+    CHECK(ss.str().find("ActivateCounters example test error") != std::string::npos);
 }
 
-BOOST_AUTO_TEST_CASE(BackendProfilingContextGetSendTimelinePacket)
+TEST_CASE("BackendProfilingContextGetSendTimelinePacket")
 {
     // Reset the profiling service to the uninitialized state
     armnn::IRuntime::CreationOptions options;
@@ -467,10 +467,10 @@
     armnn::MockBackendProfilingService mockProfilingService = armnn::MockBackendProfilingService::Instance();
     armnn::MockBackendProfilingContext *mockBackEndProfilingContext = mockProfilingService.GetContext();
     // Check that there is a valid context set.
-    BOOST_CHECK(mockBackEndProfilingContext);
+    CHECK(mockBackEndProfilingContext);
     armnn::IBackendInternal::IBackendProfilingPtr& backendProfilingIface =
         mockBackEndProfilingContext->GetBackendProfiling();
-    BOOST_CHECK(backendProfilingIface);
+    CHECK(backendProfilingIface);
 
     // Now for the meat of the test. We're just going to send a random packet and make sure there
     // are no exceptions or errors. The sending of packets is already tested in SendTimelinePacketTests.
@@ -486,7 +486,7 @@
     profilingService.ResetExternalProfilingOptions(options.m_ProfilingOptions, true);
 }
 
-BOOST_AUTO_TEST_CASE(GetProfilingGuidGenerator)
+TEST_CASE("GetProfilingGuidGenerator")
 {
     // Reset the profiling service to the uninitialized state
     armnn::IRuntime::CreationOptions options;
@@ -498,19 +498,19 @@
     armnn::MockBackendProfilingService mockProfilingService = armnn::MockBackendProfilingService::Instance();
     armnn::MockBackendProfilingContext *mockBackEndProfilingContext = mockProfilingService.GetContext();
     // Check that there is a valid context set.
-    BOOST_CHECK(mockBackEndProfilingContext);
+    CHECK(mockBackEndProfilingContext);
     armnn::IBackendInternal::IBackendProfilingPtr& backendProfilingIface =
         mockBackEndProfilingContext->GetBackendProfiling();
-    BOOST_CHECK(backendProfilingIface);
+    CHECK(backendProfilingIface);
 
     // Get the Guid generator and check the getting two Guid's results in the second being greater than the first.
     armnn::profiling::IProfilingGuidGenerator& guidGenerator = backendProfilingIface->GetProfilingGuidGenerator();
     const armnn::profiling::ProfilingDynamicGuid& firstGuid = guidGenerator.NextGuid();
     const armnn::profiling::ProfilingDynamicGuid& secondGuid = guidGenerator.NextGuid();
-    BOOST_CHECK(secondGuid > firstGuid);
+    CHECK(secondGuid > firstGuid);
 
     // Reset the profiling servie after the test.
     options.m_ProfilingOptions.m_EnableProfiling = false;
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
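The extra parentheses in checks such as CHECK((activeIds.find(gpuAccId) != activeIds.end())) look deliberate: wrapping the comparison makes doctest evaluate it as a single bool instead of decomposing the left-hand operand, which avoids template noise and warnings that iterator comparisons can otherwise trigger. A short sketch of both forms:

    #include <doctest/doctest.h>
    #include <set>

    TEST_CASE("ParenthesisedCheckSketch")
    {
        std::set<int> ids{1, 2, 3};
        CHECK(ids.size() == 3u);              // decomposed: both operands are reported on failure
        CHECK((ids.find(2) != ids.end()));    // extra parentheses collapse the expression to one bool
    }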
diff --git a/src/backends/backendsCommon/test/BackendRegistryTests.cpp b/src/backends/backendsCommon/test/BackendRegistryTests.cpp
index ce8acbb..d9c19d6 100644
--- a/src/backends/backendsCommon/test/BackendRegistryTests.cpp
+++ b/src/backends/backendsCommon/test/BackendRegistryTests.cpp
@@ -9,7 +9,7 @@
 #include <armnn/backends/IBackendInternal.hpp>
 #include <reference/RefBackend.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 namespace
 {
@@ -33,20 +33,20 @@
 
 }
 
-BOOST_AUTO_TEST_SUITE(BackendRegistryTests)
-
-BOOST_AUTO_TEST_CASE(SwapRegistry)
+TEST_SUITE("BackendRegistryTests")
+{
+TEST_CASE("SwapRegistry")
 {
     using namespace armnn;
     auto nFactories = BackendRegistryInstance().Size();
     {
         SwapRegistryStorage helper;
-        BOOST_TEST(BackendRegistryInstance().Size() == 0);
+        CHECK(BackendRegistryInstance().Size() == 0);
     }
-    BOOST_TEST(BackendRegistryInstance().Size() == nFactories);
+    CHECK(BackendRegistryInstance().Size() == nFactories);
 }
 
-BOOST_AUTO_TEST_CASE(TestRegistryHelper)
+TEST_CASE("TestRegistryHelper")
 {
     using namespace armnn;
     SwapRegistryStorage helper;
@@ -64,19 +64,19 @@
     );
 
     // sanity check: the factory has not been called yet
-    BOOST_TEST(called == false);
+    CHECK(called == false);
 
     auto factoryFunction = BackendRegistryInstance().GetFactory("HelloWorld");
 
     // sanity check: the factory still not called
-    BOOST_TEST(called == false);
+    CHECK(called == false);
 
     factoryFunction();
-    BOOST_TEST(called == true);
+    CHECK(called == true);
     BackendRegistryInstance().Deregister("HelloWorld");
 }
 
-BOOST_AUTO_TEST_CASE(TestDirectCallToRegistry)
+TEST_CASE("TestDirectCallToRegistry")
 {
     using namespace armnn;
     SwapRegistryStorage helper;
@@ -92,15 +92,15 @@
     );
 
     // sanity check: the factory has not been called yet
-    BOOST_TEST(called == false);
+    CHECK(called == false);
 
     auto factoryFunction = BackendRegistryInstance().GetFactory("HelloWorld");
 
     // sanity check: the factory still not called
-    BOOST_TEST(called == false);
+    CHECK(called == false);
 
     factoryFunction();
-    BOOST_TEST(called == true);
+    CHECK(called == true);
     BackendRegistryInstance().Deregister("HelloWorld");
 }
 
@@ -111,7 +111,7 @@
 // of Neon not being detected the exception is raised and so the backend is not added to the supportedBackends
 // list
 
-BOOST_AUTO_TEST_CASE(ThrowBackendUnavailableException)
+TEST_CASE("ThrowBackendUnavailableException")
 {
     using namespace armnn;
 
@@ -142,9 +142,9 @@
     catch (const BackendUnavailableException& e)
     {
         // Caught
-        BOOST_CHECK_EQUAL(e.what(), exceptionMessage);
-        BOOST_TEST_MESSAGE("ThrowBackendUnavailableExceptionImpl: BackendUnavailableException caught.");
+        CHECK_EQ(e.what(), exceptionMessage);
+        MESSAGE("ThrowBackendUnavailableExceptionImpl: BackendUnavailableException caught.");
     }
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
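BOOST_CHECK_EQUAL maps to CHECK_EQ and BOOST_TEST_MESSAGE to MESSAGE in the catch block above; MESSAGE is a plain log entry rather than an assertion, so it only adds to the output and never affects the pass/fail result. A brief sketch of the pair:

    #include <doctest/doctest.h>
    #include <string>

    TEST_CASE("CheckEqAndMessageSketch")
    {
        const std::string what = "BackendUnavailableException caught.";
        CHECK_EQ(what, "BackendUnavailableException caught.");
        MESSAGE("reached the catch block");   // informational; does not fail the test
    }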
diff --git a/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp
index d1be409..254b3c2 100644
--- a/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/BatchToSpaceNdEndToEndTestImpl.hpp
@@ -10,7 +10,7 @@
 
 #include <backendsCommon/test/CommonTestUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <vector>
 
@@ -62,7 +62,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateBatchToSpaceNdNetwork<ArmnnType>(inputShape, outputShape, blockShape, crops, dataLayout);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     // Creates structures for input & output.
     std::vector<T> inputData{ 1, 2, 3, 4 };
@@ -93,7 +93,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateBatchToSpaceNdNetwork<ArmnnType>(inputShape, outputShape, blockShape, crops, dataLayout);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     // Creates structures for input & output.
     std::vector<T> inputData{
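BOOST_TEST_CHECKPOINT was only a progress marker for Boost's failure reporting; doctest has no direct counterpart, so this change either drops the call or replaces it with CHECK(net), which additionally asserts that the freshly built INetworkPtr is non-null. A hedged sketch of the replacement:

    // inside a test body; INetwork::Create() returns an INetworkPtr (a smart pointer)
    armnn::INetworkPtr net = armnn::INetwork::Create();
    CHECK(net);   // was BOOST_TEST_CHECKPOINT("create a network"); now also verifies the pointer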
diff --git a/src/backends/backendsCommon/test/CMakeLists.txt b/src/backends/backendsCommon/test/CMakeLists.txt
index 6313bd5..d0c95c5 100644
--- a/src/backends/backendsCommon/test/CMakeLists.txt
+++ b/src/backends/backendsCommon/test/CMakeLists.txt
@@ -189,6 +189,7 @@
 target_include_directories(armnnBackendsCommonUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/backends)
 target_include_directories(armnnBackendsCommonUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/profiling)
 target_include_directories(armnnBackendsCommonUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/profiling/common/include)
+target_include_directories(armnnBackendsCommonUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/third-party)
 
 # Dummy shared object for testing.
 # This is a simple library used to test the utility functions that will be used to handle the shared objects.
diff --git a/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp
index c705f87..40e3fd6 100644
--- a/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ComparisonEndToEndTestImpl.hpp
@@ -12,7 +12,7 @@
 
 #include <armnn/utility/NumericCast.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <vector>
 
@@ -61,7 +61,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateComparisonNetwork<ArmnnInType>(inputShapes, outputShape, operation);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     const std::vector<TInput> input0({ 1, 1, 1, 1,  5, 5, 5, 5,
                                        3, 3, 3, 3,  4, 4, 4, 4 });
@@ -89,8 +89,6 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateComparisonNetwork<ArmnnInType>(inputShapes, outputShape, operation);
 
-    BOOST_TEST_CHECKPOINT("create a network");
-
     const std::vector<TInput> input0({ 1, 2, 3, 1, 0, 6,
                                        7, 8, 9, 10, 11, 12 });
 
diff --git a/src/backends/backendsCommon/test/CompatibilityTests.cpp b/src/backends/backendsCommon/test/CompatibilityTests.cpp
index ed00088..4abab27 100644
--- a/src/backends/backendsCommon/test/CompatibilityTests.cpp
+++ b/src/backends/backendsCommon/test/CompatibilityTests.cpp
@@ -3,8 +3,6 @@
 // SPDX-License-Identifier: MIT
 //
 
-#include <boost/test/unit_test.hpp>
-
 #include <cl/ClBackend.hpp>
 #include <neon/NeonBackend.hpp>
 #include <reference/RefBackend.hpp>
@@ -12,192 +10,195 @@
 
 #include <Network.hpp>
 
+#include <doctest/doctest.h>
+
 #include <vector>
 #include <string>
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(BackendsCompatibility, * boost::unit_test::disabled())
-
 #if defined(ARMCOMPUTENEON_ENABLED)
-BOOST_AUTO_TEST_CASE(Neon_Cl_DirectCompatibility_Test)
-{
-    auto neonBackend = std::make_unique<NeonBackend>();
-    auto clBackend = std::make_unique<ClBackend>();
-
-    TensorHandleFactoryRegistry registry;
-    neonBackend->RegisterTensorHandleFactories(registry);
-    clBackend->RegisterTensorHandleFactories(registry);
-
-    const BackendId& neonBackendId = neonBackend->GetId();
-    const BackendId& clBackendId = clBackend->GetId();
-
-    BackendsMap backends;
-    backends[neonBackendId] = std::move(neonBackend);
-    backends[clBackendId] = std::move(clBackend);
-
-    armnn::Graph graph;
-
-    armnn::InputLayer* const inputLayer = graph.AddLayer<armnn::InputLayer>(0, "input");
-
-    inputLayer->SetBackendId(neonBackendId);
-
-    armnn::SoftmaxDescriptor smDesc;
-    armnn::SoftmaxLayer* const softmaxLayer1 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax1");
-    softmaxLayer1->SetBackendId(clBackendId);
-
-    armnn::SoftmaxLayer* const softmaxLayer2 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax2");
-    softmaxLayer2->SetBackendId(neonBackendId);
-
-    armnn::SoftmaxLayer* const softmaxLayer3 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax3");
-    softmaxLayer3->SetBackendId(clBackendId);
-
-    armnn::SoftmaxLayer* const softmaxLayer4 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax4");
-    softmaxLayer4->SetBackendId(neonBackendId);
-
-    armnn::OutputLayer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "output");
-    outputLayer->SetBackendId(clBackendId);
-
-    inputLayer->GetOutputSlot(0).Connect(softmaxLayer1->GetInputSlot(0));
-    softmaxLayer1->GetOutputSlot(0).Connect(softmaxLayer2->GetInputSlot(0));
-    softmaxLayer2->GetOutputSlot(0).Connect(softmaxLayer3->GetInputSlot(0));
-    softmaxLayer3->GetOutputSlot(0).Connect(softmaxLayer4->GetInputSlot(0));
-    softmaxLayer4->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
-
-    graph.TopologicalSort();
-
-    std::vector<std::string> errors;
-    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, errors);
-
-    BOOST_TEST(result.m_Error == false);
-    BOOST_TEST(result.m_Warning == false);
-
-    OutputSlot& inputLayerOut = inputLayer->GetOutputSlot(0);
-    OutputSlot& softmaxLayer1Out = softmaxLayer1->GetOutputSlot(0);
-    OutputSlot& softmaxLayer2Out = softmaxLayer2->GetOutputSlot(0);
-    OutputSlot& softmaxLayer3Out = softmaxLayer3->GetOutputSlot(0);
-    OutputSlot& softmaxLayer4Out = softmaxLayer4->GetOutputSlot(0);
-
-    // Check that the correct factory was selected
-    BOOST_TEST(inputLayerOut.GetTensorHandleFactoryId()    == "Arm/Cl/TensorHandleFactory");
-    BOOST_TEST(softmaxLayer1Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
-    BOOST_TEST(softmaxLayer2Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
-    BOOST_TEST(softmaxLayer3Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
-    BOOST_TEST(softmaxLayer4Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
-
-    // Check that the correct strategy was selected
-    BOOST_TEST((inputLayerOut.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
-    BOOST_TEST((softmaxLayer1Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
-    BOOST_TEST((softmaxLayer2Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
-    BOOST_TEST((softmaxLayer3Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
-    BOOST_TEST((softmaxLayer4Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
-
-    graph.AddCompatibilityLayers(backends, registry);
-
-    // Test for copy layers
-    int copyCount= 0;
-    graph.ForEachLayer([&copyCount](Layer* layer)
-    {
-        if (layer->GetType() == LayerType::MemCopy)
-        {
-            copyCount++;
-        }
-    });
-    BOOST_TEST(copyCount == 0);
-
-    // Test for import layers
-    int importCount= 0;
-    graph.ForEachLayer([&importCount](Layer *layer)
-    {
-        if (layer->GetType() == LayerType::MemImport)
-        {
-            importCount++;
-        }
-    });
-    BOOST_TEST(importCount == 0);
-}
+// Disabled test suite (was * boost::unit_test::disabled() under Boost); kept commented out for reference.
+//TEST_SUITE("BackendsCompatibility")
+//TEST_CASE("Neon_Cl_DirectCompatibility_Test")
+//{
+//    auto neonBackend = std::make_unique<NeonBackend>();
+//    auto clBackend = std::make_unique<ClBackend>();
+//
+//    TensorHandleFactoryRegistry registry;
+//    neonBackend->RegisterTensorHandleFactories(registry);
+//    clBackend->RegisterTensorHandleFactories(registry);
+//
+//    const BackendId& neonBackendId = neonBackend->GetId();
+//    const BackendId& clBackendId = clBackend->GetId();
+//
+//    BackendsMap backends;
+//    backends[neonBackendId] = std::move(neonBackend);
+//    backends[clBackendId] = std::move(clBackend);
+//
+//    armnn::Graph graph;
+//
+//    armnn::InputLayer* const inputLayer = graph.AddLayer<armnn::InputLayer>(0, "input");
+//
+//    inputLayer->SetBackendId(neonBackendId);
+//
+//    armnn::SoftmaxDescriptor smDesc;
+//    armnn::SoftmaxLayer* const softmaxLayer1 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax1");
+//    softmaxLayer1->SetBackendId(clBackendId);
+//
+//    armnn::SoftmaxLayer* const softmaxLayer2 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax2");
+//    softmaxLayer2->SetBackendId(neonBackendId);
+//
+//    armnn::SoftmaxLayer* const softmaxLayer3 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax3");
+//    softmaxLayer3->SetBackendId(clBackendId);
+//
+//    armnn::SoftmaxLayer* const softmaxLayer4 = graph.AddLayer<armnn::SoftmaxLayer>(smDesc, "softmax4");
+//    softmaxLayer4->SetBackendId(neonBackendId);
+//
+//    armnn::OutputLayer* const outputLayer = graph.AddLayer<armnn::OutputLayer>(0, "output");
+//    outputLayer->SetBackendId(clBackendId);
+//
+//    inputLayer->GetOutputSlot(0).Connect(softmaxLayer1->GetInputSlot(0));
+//    softmaxLayer1->GetOutputSlot(0).Connect(softmaxLayer2->GetInputSlot(0));
+//    softmaxLayer2->GetOutputSlot(0).Connect(softmaxLayer3->GetInputSlot(0));
+//    softmaxLayer3->GetOutputSlot(0).Connect(softmaxLayer4->GetInputSlot(0));
+//    softmaxLayer4->GetOutputSlot(0).Connect(outputLayer->GetInputSlot(0));
+//
+//    graph.TopologicalSort();
+//
+//    std::vector<std::string> errors;
+//    auto result = SelectTensorHandleStrategy(graph, backends, registry, true, errors);
+//
+//    CHECK(result.m_Error == false);
+//    CHECK(result.m_Warning == false);
+//
+//    OutputSlot& inputLayerOut = inputLayer->GetOutputSlot(0);
+//    OutputSlot& softmaxLayer1Out = softmaxLayer1->GetOutputSlot(0);
+//    OutputSlot& softmaxLayer2Out = softmaxLayer2->GetOutputSlot(0);
+//    OutputSlot& softmaxLayer3Out = softmaxLayer3->GetOutputSlot(0);
+//    OutputSlot& softmaxLayer4Out = softmaxLayer4->GetOutputSlot(0);
+//
+//    // Check that the correct factory was selected
+//    CHECK(inputLayerOut.GetTensorHandleFactoryId()    == "Arm/Cl/TensorHandleFactory");
+//    CHECK(softmaxLayer1Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
+//    CHECK(softmaxLayer2Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
+//    CHECK(softmaxLayer3Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
+//    CHECK(softmaxLayer4Out.GetTensorHandleFactoryId() == "Arm/Cl/TensorHandleFactory");
+//
+//    // Check that the correct strategy was selected
+//    CHECK((inputLayerOut.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
+//    CHECK((softmaxLayer1Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
+//    CHECK((softmaxLayer2Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
+//    CHECK((softmaxLayer3Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
+//    CHECK((softmaxLayer4Out.GetEdgeStrategyForConnection(0) == EdgeStrategy::DirectCompatibility));
+//
+//    graph.AddCompatibilityLayers(backends, registry);
+//
+//    // Test for copy layers
+//    int copyCount= 0;
+//    graph.ForEachLayer([&copyCount](Layer* layer)
+//    {
+//        if (layer->GetType() == LayerType::MemCopy)
+//        {
+//            copyCount++;
+//        }
+//    });
+//    CHECK(copyCount == 0);
+//
+//    // Test for import layers
+//    int importCount= 0;
+//    graph.ForEachLayer([&importCount](Layer *layer)
+//    {
+//        if (layer->GetType() == LayerType::MemImport)
+//        {
+//            importCount++;
+//        }
+//    });
+//    CHECK(importCount == 0);
+//}
+//
+//}
 #endif
-BOOST_AUTO_TEST_SUITE_END()
 
-BOOST_AUTO_TEST_SUITE(BackendCapability)
-
+TEST_SUITE("BackendCapability")
+{
 #if defined(ARMNNREF_ENABLED)
 
-BOOST_AUTO_TEST_CASE(Ref_Backends_Capability_Test)
+TEST_CASE("Ref_Backends_Capability_Test")
 {
     auto refBackend  = std::make_unique<RefBackend>();
     auto refCapabilities = refBackend->GetCapabilities();
 
-    BOOST_CHECK(armnn::HasCapability("NonConstWeights", refCapabilities));
-    BOOST_CHECK(armnn::HasCapability("AsyncExecution", refCapabilities));
+    CHECK(armnn::HasCapability("NonConstWeights", refCapabilities));
+    CHECK(armnn::HasCapability("AsyncExecution", refCapabilities));
 
     armnn::BackendOptions::BackendOption nonConstWeights{"NonConstWeights", true};
     armnn::BackendOptions::BackendOption AsyncExecution{"AsyncExecution", true};
 
-    BOOST_CHECK(armnn::HasCapability(nonConstWeights, refCapabilities));
-    BOOST_CHECK(armnn::HasCapability(AsyncExecution, refCapabilities));
+    CHECK(armnn::HasCapability(nonConstWeights, refCapabilities));
+    CHECK(armnn::HasCapability(AsyncExecution, refCapabilities));
 }
 
-BOOST_AUTO_TEST_CASE(Ref_Backends_Unkown_Capability_Test)
+TEST_CASE("Ref_Backends_Unkown_Capability_Test")
 {
     auto refBackend  = std::make_unique<RefBackend>();
     auto refCapabilities = refBackend->GetCapabilities();
 
     armnn::BackendOptions::BackendOption AsyncExecutionFalse{"AsyncExecution", false};
-    BOOST_CHECK(!armnn::HasCapability(AsyncExecutionFalse, refCapabilities));
+    CHECK(!armnn::HasCapability(AsyncExecutionFalse, refCapabilities));
 
     armnn::BackendOptions::BackendOption AsyncExecutionInt{"AsyncExecution", 50};
-    BOOST_CHECK(!armnn::HasCapability(AsyncExecutionFalse, refCapabilities));
+    CHECK(!armnn::HasCapability(AsyncExecutionInt, refCapabilities));
 
     armnn::BackendOptions::BackendOption AsyncExecutionFloat{"AsyncExecution", 0.0f};
-    BOOST_CHECK(!armnn::HasCapability(AsyncExecutionFloat, refCapabilities));
+    CHECK(!armnn::HasCapability(AsyncExecutionFloat, refCapabilities));
 
     armnn::BackendOptions::BackendOption AsyncExecutionString{"AsyncExecution", "true"};
-    BOOST_CHECK(!armnn::HasCapability(AsyncExecutionString, refCapabilities));
+    CHECK(!armnn::HasCapability(AsyncExecutionString, refCapabilities));
 
-    BOOST_CHECK(!armnn::HasCapability("Telekinesis", refCapabilities));
+    CHECK(!armnn::HasCapability("Telekinesis", refCapabilities));
     armnn::BackendOptions::BackendOption unkownCapability{"Telekinesis", true};
-    BOOST_CHECK(!armnn::HasCapability(unkownCapability, refCapabilities));
+    CHECK(!armnn::HasCapability(unkownCapability, refCapabilities));
 }
 
 #endif
 
 #if defined(ARMCOMPUTENEON_ENABLED)
 
-BOOST_AUTO_TEST_CASE(Neon_Backends_Capability_Test)
+TEST_CASE("Neon_Backends_Capability_Test")
 {
     auto neonBackend = std::make_unique<NeonBackend>();
     auto neonCapabilities = neonBackend->GetCapabilities();
 
-    BOOST_CHECK(armnn::HasCapability("NonConstWeights", neonCapabilities));
-    BOOST_CHECK(armnn::HasCapability("AsyncExecution", neonCapabilities));
+    CHECK(armnn::HasCapability("NonConstWeights", neonCapabilities));
+    CHECK(armnn::HasCapability("AsyncExecution", neonCapabilities));
 
     armnn::BackendOptions::BackendOption nonConstWeights{"NonConstWeights", false};
     armnn::BackendOptions::BackendOption AsyncExecution{"AsyncExecution", false};
 
-    BOOST_CHECK(armnn::HasCapability(nonConstWeights, neonCapabilities));
-    BOOST_CHECK(armnn::HasCapability(AsyncExecution, neonCapabilities));
+    CHECK(armnn::HasCapability(nonConstWeights, neonCapabilities));
+    CHECK(armnn::HasCapability(AsyncExecution, neonCapabilities));
 }
 
 #endif
 
 #if defined(ARMCOMPUTECL_ENABLED)
 
-BOOST_AUTO_TEST_CASE(Cl_Backends_Capability_Test)
+TEST_CASE("Cl_Backends_Capability_Test")
 {
     auto clBackend = std::make_unique<ClBackend>();
     auto clCapabilities = clBackend->GetCapabilities();
 
-    BOOST_CHECK(armnn::HasCapability("NonConstWeights", clCapabilities));
-    BOOST_CHECK(armnn::HasCapability("AsyncExecution", clCapabilities));
+    CHECK(armnn::HasCapability("NonConstWeights", clCapabilities));
+    CHECK(armnn::HasCapability("AsyncExecution", clCapabilities));
 
     armnn::BackendOptions::BackendOption nonConstWeights{"NonConstWeights", false};
     armnn::BackendOptions::BackendOption AsyncExecution{"AsyncExecution", false};
 
-    BOOST_CHECK(armnn::HasCapability(nonConstWeights, clCapabilities));
-    BOOST_CHECK(armnn::HasCapability(AsyncExecution, clCapabilities));
+    CHECK(armnn::HasCapability(nonConstWeights, clCapabilities));
+    CHECK(armnn::HasCapability(AsyncExecution, clCapabilities));
 }
 
 #endif
 
-BOOST_AUTO_TEST_SUITE_END()
+}
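As the comment above notes, the Neon/Cl direct-compatibility test is carried over as a commented-out block rather than ported. Should it need to come back, doctest's skip decorator keeps the body compiling while reporting the case as skipped; a hedged sketch of that alternative (not what this patch does):

    #include <doctest/doctest.h>

    TEST_CASE("Neon_Cl_DirectCompatibility_Test" * doctest::skip(true))
    {
        // body compiles but is reported as skipped rather than executed
    }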
diff --git a/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp
index 58a1f39..5b2f33f 100644
--- a/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ConcatEndToEndTestImpl.hpp
@@ -12,7 +12,7 @@
 
 #include <armnn/utility/NumericCast.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <vector>
 
@@ -64,7 +64,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateConcatNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     // Creates structures for input & output.
     std::vector<T> inputData{
@@ -128,8 +128,6 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateConcatNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
 
-    BOOST_TEST_CHECKPOINT("create a network");
-
     // Creates structures for input & output.
     std::vector<T> inputData{
         1, 2,
@@ -192,8 +190,6 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateConcatNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
 
-    BOOST_TEST_CHECKPOINT("create a network");
-
     // Creates structures for input & output.
     std::vector<T> inputData{
         1, 2,
@@ -255,8 +251,6 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateConcatNetwork<ArmnnType>(inputShapes, outputShape, concatAxis);
 
-    BOOST_TEST_CHECKPOINT("create a network");
-
     // Creates structures for input & output.
     std::vector<T> inputData{
         1, 2,
diff --git a/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp b/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp
index 2dd5298..ea99729 100644
--- a/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp
+++ b/src/backends/backendsCommon/test/DefaultAsyncExecuteTest.cpp
@@ -8,19 +8,21 @@
 #include <backendsCommon/TensorHandle.hpp>
 #include <backendsCommon/Workload.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(WorkloadAsyncExecuteTests)
 
 namespace
 {
 
+TEST_SUITE("WorkloadAsyncExecuteTests")
+{
+
 struct Workload0 : BaseWorkload<ElementwiseUnaryQueueDescriptor>
 {
     Workload0(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info)
-            : BaseWorkload(descriptor, info)
+        : BaseWorkload(descriptor, info)
     {
     }
 
@@ -33,7 +35,9 @@
         int* inVals = static_cast<int*>(m_Data.m_Inputs[0][0].Map());
         int* outVals = static_cast<int*>(m_Data.m_Outputs[0][0].Map());
 
-        for (unsigned int i = 0; i < m_Data.m_Inputs[0][0].GetShape().GetNumElements(); ++i)
+        for (unsigned int i = 0;
+             i < m_Data.m_Inputs[0][0].GetShape().GetNumElements();
+             ++i)
         {
             outVals[i] = inVals[i] * outVals[i];
             inVals[i] = outVals[i];
@@ -45,7 +49,9 @@
         int* inVals = static_cast<int*>(desc.m_Inputs[0][0].Map());
         int* outVals = static_cast<int*>(desc.m_Outputs[0][0].Map());
 
-        for (unsigned int i = 0; i < desc.m_Inputs[0][0].GetShape().GetNumElements(); ++i)
+        for (unsigned int i = 0;
+             i < desc.m_Inputs[0][0].GetShape().GetNumElements();
+             ++i)
         {
             outVals[i] = inVals[i] + outVals[i];
             inVals[i] = outVals[i];
@@ -61,7 +67,7 @@
 struct Workload1 : BaseWorkload<ElementwiseUnaryQueueDescriptor>
 {
     Workload1(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info)
-            : BaseWorkload(descriptor, info)
+        : BaseWorkload(descriptor, info)
     {
     }
 
@@ -70,7 +76,9 @@
         int* inVals = static_cast<int*>(m_Data.m_Inputs[0][0].Map());
         int* outVals = static_cast<int*>(m_Data.m_Outputs[0][0].Map());
 
-        for (unsigned int i = 0; i < m_Data.m_Inputs[0][0].GetShape().GetNumElements(); ++i)
+        for (unsigned int i = 0;
+             i < m_Data.m_Inputs[0][0].GetShape().GetNumElements();
+             ++i)
         {
             outVals[i] = inVals[i] * outVals[i];
             inVals[i] = outVals[i];
@@ -83,7 +91,9 @@
     int* actualOutput = static_cast<int*>(tensorHandle->Map());
 
     bool allValuesCorrect = true;
-    for (unsigned int i = 0; i < tensorHandle->GetShape().GetNumElements(); ++i)
+    for (unsigned int i = 0;
+         i < tensorHandle->GetShape().GetNumElements();
+         ++i)
     {
         if (actualOutput[i] != expectedValue)
         {
@@ -91,7 +101,7 @@
         }
     }
 
-    BOOST_CHECK(allValuesCorrect);
+    CHECK(allValuesCorrect);
 }
 
 template<typename Workload>
@@ -108,7 +118,7 @@
     return std::make_unique<Workload>(elementwiseUnaryQueueDescriptor, workloadInfo);
 }
 
-BOOST_AUTO_TEST_CASE(TestAsyncExecute)
+TEST_CASE("TestAsyncExecute")
 {
     TensorInfo info({5}, DataType::Signed32);
 
@@ -145,7 +155,7 @@
     ValidateTensor(&workload0.get()->GetQueueDescriptor()->m_Inputs[0][0], expectedExecuteval);
 }
 
-BOOST_AUTO_TEST_CASE(TestDefaultAsyncExecute)
+TEST_CASE("TestDefaultAsyncExecute")
 {
     TensorInfo info({5}, DataType::Signed32);
 
@@ -179,7 +189,7 @@
     ValidateTensor(workingMemDescriptor.m_Inputs[0], expectedExecuteval);
 }
 
-BOOST_AUTO_TEST_CASE(TestDefaultAsyncExeuteWithThreads)
+TEST_CASE("TestDefaultAsyncExeuteWithThreads")
 {
     // Use a large vector so the threads have a chance to interact
     unsigned int vecSize = 1000;
@@ -243,6 +253,6 @@
     ValidateTensor(workingMemDescriptor2.m_Inputs[0], expectedExecuteval2);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
 
 }
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp
index e624159..a5e2fac 100644
--- a/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/DequantizeEndToEndTestImpl.hpp
@@ -10,6 +10,8 @@
 #include <armnn/INetwork.hpp>
 #include <ResolveType.hpp>
 
+#include <doctest/doctest.h>
+
 namespace
 {
 
@@ -45,7 +47,7 @@
     // Builds up the structure of the network
     armnn::INetworkPtr net = CreateDequantizeNetwork<T>(inputInfo, outputInfo);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     std::map<int, std::vector<T>> inputTensorData = { { 0, input } };
     std::map<int, std::vector<float>> expectedOutputData = { { 0, expectedOutput } };
diff --git a/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp
index 5a42550..a566964 100644
--- a/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/DetectionPostProcessEndToEndTestImpl.hpp
@@ -10,6 +10,8 @@
 #include <armnn/INetwork.hpp>
 #include <ResolveType.hpp>
 
+#include <doctest/doctest.h>
+
 namespace{
 
 template<typename T>
@@ -89,7 +91,7 @@
     armnn::INetworkPtr net = CreateDetectionPostProcessNetwork<T>(boxEncodingsInfo, scoresInfo,
                                                                   anchorsInfo, anchors, useRegularNms);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     std::map<int, std::vector<T>> inputTensorData = {{ 0, boxEncodings }, { 1, scores }};
     std::map<int, std::vector<float>> expectedOutputData = {{ 0, expectedDetectionBoxes },
diff --git a/src/backends/backendsCommon/test/DynamicBackendTests.cpp b/src/backends/backendsCommon/test/DynamicBackendTests.cpp
index b1c8234..669ce60 100644
--- a/src/backends/backendsCommon/test/DynamicBackendTests.cpp
+++ b/src/backends/backendsCommon/test/DynamicBackendTests.cpp
@@ -7,13 +7,14 @@
 
 #include <test/UnitTests.hpp>
 
-BOOST_AUTO_TEST_SUITE(DynamicBackendTests)
+#include <doctest/doctest.h>
 
+TEST_SUITE("DynamicBackendTests")
+{
 ARMNN_SIMPLE_TEST_CASE(OpenCloseHandle, OpenCloseHandleTestImpl);
 ARMNN_SIMPLE_TEST_CASE(CloseInvalidHandle, CloseInvalidHandleTestImpl);
 ARMNN_SIMPLE_TEST_CASE(OpenEmptyFileName, OpenEmptyFileNameTestImpl);
 ARMNN_SIMPLE_TEST_CASE(OpenNotExistingFile, OpenNotExistingFileTestImpl);
-ARMNN_SIMPLE_TEST_CASE(OpenNotSharedObjectFile, OpenNotSharedObjectTestImpl);
 ARMNN_SIMPLE_TEST_CASE(GetValidEntryPoint, GetValidEntryPointTestImpl);
 ARMNN_SIMPLE_TEST_CASE(GetNameMangledEntryPoint, GetNameMangledEntryPointTestImpl);
 ARMNN_SIMPLE_TEST_CASE(GetNoExternEntryPoint, GetNoExternEntryPointTestImpl);
@@ -21,7 +22,6 @@
 
 ARMNN_SIMPLE_TEST_CASE(BackendVersioning, BackendVersioningTestImpl);
 
-
 ARMNN_SIMPLE_TEST_CASE(CreateDynamicBackendObjectInvalidHandle,
                        CreateDynamicBackendObjectInvalidHandleTestImpl);
 ARMNN_SIMPLE_TEST_CASE(CreateDynamicBackendObjectInvalidInterface1,
@@ -39,11 +39,12 @@
 ARMNN_SIMPLE_TEST_CASE(CreateDynamicBackendObjectInvalidInterface7,
                        CreateDynamicBackendObjectInvalidInterface7TestImpl);
 
+ARMNN_SIMPLE_TEST_CASE(OpenNotSharedObjectFile, OpenNotSharedObjectTestImpl);
+ARMNN_SIMPLE_TEST_CASE(GetSharedObjects, GetSharedObjectsTestImpl);
+
 ARMNN_SIMPLE_TEST_CASE(GetBackendPaths, GetBackendPathsTestImpl)
 ARMNN_SIMPLE_TEST_CASE(GetBackendPathsOverride, GetBackendPathsOverrideTestImpl)
 
-ARMNN_SIMPLE_TEST_CASE(GetSharedObjects, GetSharedObjectsTestImpl);
-
 ARMNN_SIMPLE_TEST_CASE(CreateDynamicBackends, CreateDynamicBackendsTestImpl);
 ARMNN_SIMPLE_TEST_CASE(CreateDynamicBackendsNoPaths, CreateDynamicBackendsNoPathsTestImpl);
 ARMNN_SIMPLE_TEST_CASE(CreateDynamicBackendsAllInvalid, CreateDynamicBackendsAllInvalidTestImpl);
@@ -76,4 +77,4 @@
 ARMNN_SIMPLE_TEST_CASE(SampleDynamicBackendEndToEnd, SampleDynamicBackendEndToEndTestImpl);
 #endif
 
-BOOST_AUTO_TEST_SUITE_END()
+}
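The header changes below replace Boost's master_test_suite() argument handling in GetBasePath with a new GetUnitTestExecutablePath helper: the base path is now taken from the directory of the running executable (GetModuleFileNameA on Windows, readlink on /proc/self/exe elsewhere) and falls back to DYNAMIC_BACKEND_BUILD_DIR, so the '-- --dynamic-backend-build-dir <path>' command-line hook disappears. One hedged caveat on the POSIX branch: readlink does not null-terminate its output, so a path that exactly filled the buffer would leave it unterminated. A defensive variant (an illustrative sketch, not part of this patch) sizes the result from the return value:

    // drop-in for the body of the POSIX GetUnitTestExecutablePath() below
    char buffer[PATH_MAX] = "";
    ssize_t len = readlink("/proc/self/exe", buffer, sizeof(buffer) - 1);
    if (len > 0)
    {
        buffer[len] = '\0';                     // readlink never appends a terminator itself
        return fs::path(buffer).parent_path();  // implicit conversion to std::string, as in the patch
    }
    return "";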
diff --git a/src/backends/backendsCommon/test/DynamicBackendTests.hpp b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
index a4f1613..53ff5ca 100644
--- a/src/backends/backendsCommon/test/DynamicBackendTests.hpp
+++ b/src/backends/backendsCommon/test/DynamicBackendTests.hpp
@@ -18,7 +18,11 @@
 #include <string>
 #include <memory>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
+
+#if defined(_MSC_VER)
+#include <Windows.h>
+#endif
 
 #if !defined(DYNAMIC_BACKEND_BUILD_DIR)
 #define DYNAMIC_BACKEND_BUILD_DIR fs::path("./")
@@ -106,50 +110,43 @@
     FactoryStorage m_TempStorage;
 };
 
+#if defined(_MSC_VER)
+std::string GetUnitTestExecutablePath()
+{
+    char buffer[MAX_PATH] = "";
+    GetModuleFileNameA(NULL, buffer, MAX_PATH);
+    fs::path executablePath(buffer);
+    return executablePath.parent_path();
+}
+
+#else
+std::string GetUnitTestExecutablePath()
+{
+    char buffer[PATH_MAX] = "";
+    if (readlink("/proc/self/exe", buffer, PATH_MAX) != -1)
+    {
+        fs::path executablePath(buffer);
+        return executablePath.parent_path();
+    }
+    return "";
+}
+#endif
+
 std::string GetBasePath(const std::string& basePath)
 {
     using namespace fs;
     // What we're looking for here is the location of the UnitTests executable.
-    // In the normal build environment there are a series of files and
-    // directories created by cmake. If the executable has been relocated they
-    // may not be there. The search hierarchy is:
-    // * User specified --dynamic-backend-build-dir
-    // * Compile time value of DYNAMIC_BACKEND_BUILD_DIR.
-    // * Arg0 location.
-    // * Fall back value of current directory.
-    path programLocation = DYNAMIC_BACKEND_BUILD_DIR;
-    // Look for the specific argument --dynamic-backend-build-dir?
-    if (boost::unit_test::framework::master_test_suite().argc == 3)
+    // Prefer the running executable's directory, falling back to DYNAMIC_BACKEND_BUILD_DIR if it does not exist.
+    path programLocation = GetUnitTestExecutablePath();
+    if (!exists(programLocation))
     {
-        // Boost custom arguments begin after a '--' on the command line.
-        if (g_TestDirCLI.compare(boost::unit_test::framework::master_test_suite().argv[1]) == 0)
-        {
-            // Then the next argument is the path.
-            programLocation = boost::unit_test::framework::master_test_suite().argv[2];
-        }
+        programLocation = DYNAMIC_BACKEND_BUILD_DIR;
     }
-    else
-    {
-        // Start by checking if DYNAMIC_BACKEND_BUILD_DIR value exist.
-        if (!exists(programLocation))
-        {
-            // That doesn't exist try looking at arg[0].
-            path arg0Path(boost::unit_test::framework::master_test_suite().argv[0]);
-            arg0Path.remove_filename();
-            path arg0SharedObjectPath(arg0Path);
-            arg0SharedObjectPath.append(basePath);
-            if (exists(arg0SharedObjectPath))
-            {
-                // Yeah arg0 worked.
-                programLocation = arg0Path;
-            }
-        }
-    }
+
     // This is the base path from the build where the test libraries were built.
     path sharedObjectPath = programLocation.append(basePath);
-    BOOST_REQUIRE_MESSAGE(exists(sharedObjectPath), "Base path for shared objects does not exist: " +
-                          sharedObjectPath.string() + "\nTo specify the root of this base path on the " +
-                          "command line add: \'-- --dynamic-backend-build-dir <path>\'");
+    REQUIRE_MESSAGE(exists(sharedObjectPath),
+                    "Base path for shared objects does not exist: " + sharedObjectPath.string());
     return sharedObjectPath.string();
 }
 
@@ -192,7 +189,7 @@
 
     path directoryPath(directory);
     path fileNamePath = directoryPath.append(fileName);
-    BOOST_CHECK(exists(fileNamePath));
+    CHECK(exists(fileNamePath));
 
     return fileNamePath.string();
 }
@@ -205,8 +202,8 @@
     std::string sharedObjectFilePath = GetTestFilePath(testSubDirectory, g_TestSharedObjectFileName);
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_NO_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
-    BOOST_TEST((sharedObjectHandle != nullptr));
+    CHECK_NOTHROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
+    CHECK((sharedObjectHandle != nullptr));
 
     DynamicBackendUtils::CloseHandle(sharedObjectHandle);
 }
@@ -224,8 +221,8 @@
     using namespace armnn;
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(""), RuntimeException);
-    BOOST_TEST((sharedObjectHandle == nullptr));
+    CHECK_THROWS_AS(sharedObjectHandle = DynamicBackendUtils::OpenHandle(""), RuntimeException);
+    CHECK((sharedObjectHandle == nullptr));
 }
 
 void OpenNotExistingFileTestImpl()
@@ -233,8 +230,8 @@
     using namespace armnn;
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle("NotExistingFileName"), RuntimeException);
-    BOOST_TEST((sharedObjectHandle == nullptr));
+    CHECK_THROWS_AS(sharedObjectHandle = DynamicBackendUtils::OpenHandle("NotExistingFileName"), RuntimeException);
+    CHECK((sharedObjectHandle == nullptr));
 }
 
 void OpenNotSharedObjectTestImpl()
@@ -245,8 +242,8 @@
     std::string notSharedObjectFilePath = GetTestFilePath(testSubDirectory, g_TestNoSharedObjectFileName);
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(notSharedObjectFilePath), RuntimeException);
-    BOOST_TEST((sharedObjectHandle == nullptr));
+    CHECK_THROWS_AS(sharedObjectHandle = DynamicBackendUtils::OpenHandle(notSharedObjectFilePath), RuntimeException);
+    CHECK((sharedObjectHandle == nullptr));
 }
 
 void GetValidEntryPointTestImpl()
@@ -257,15 +254,15 @@
     std::string sharedObjectFilePath = GetTestFilePath(testSubDirectory, g_TestSharedObjectFileName);
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_NO_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
-    BOOST_TEST((sharedObjectHandle != nullptr));
+    CHECK_NOTHROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
+    CHECK((sharedObjectHandle != nullptr));
 
     using TestFunctionType = int(*)(int);
     TestFunctionType testFunctionPointer = nullptr;
-    BOOST_CHECK_NO_THROW(testFunctionPointer = DynamicBackendUtils::GetEntryPoint<TestFunctionType>(sharedObjectHandle,
+    CHECK_NOTHROW(testFunctionPointer = DynamicBackendUtils::GetEntryPoint<TestFunctionType>(sharedObjectHandle,
                                                                                                     "TestFunction1"));
-    BOOST_TEST((testFunctionPointer != nullptr));
-    BOOST_TEST(testFunctionPointer(7) == 7);
+    CHECK((testFunctionPointer != nullptr));
+    CHECK(testFunctionPointer(7) == 7);
 
     DynamicBackendUtils::CloseHandle(sharedObjectHandle);
 }
@@ -278,15 +275,15 @@
     std::string sharedObjectFilePath = GetTestFilePath(testSubDirectory, g_TestSharedObjectFileName);
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_NO_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
-    BOOST_TEST((sharedObjectHandle != nullptr));
+    CHECK_NOTHROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
+    CHECK((sharedObjectHandle != nullptr));
 
     using TestFunctionType = int(*)(int);
     TestFunctionType testFunctionPointer = nullptr;
-    BOOST_CHECK_THROW(testFunctionPointer = DynamicBackendUtils::GetEntryPoint<TestFunctionType>(sharedObjectHandle,
+    CHECK_THROWS_AS(testFunctionPointer = DynamicBackendUtils::GetEntryPoint<TestFunctionType>(sharedObjectHandle,
                                                                                                  "TestFunction2"),
                       RuntimeException);
-    BOOST_TEST((testFunctionPointer == nullptr));
+    CHECK((testFunctionPointer == nullptr));
 
     DynamicBackendUtils::CloseHandle(sharedObjectHandle);
 }
@@ -299,15 +296,15 @@
     std::string sharedObjectFilePath = GetTestFilePath(testSubDirectory, g_TestSharedObjectFileName);
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_NO_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
-    BOOST_TEST((sharedObjectHandle != nullptr));
+    CHECK_NOTHROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
+    CHECK((sharedObjectHandle != nullptr));
 
     using TestFunctionType = int(*)(int);
     TestFunctionType testFunctionPointer = nullptr;
-    BOOST_CHECK_THROW(testFunctionPointer = DynamicBackendUtils::GetEntryPoint<TestFunctionType>(sharedObjectHandle,
+    CHECK_THROWS_AS(testFunctionPointer = DynamicBackendUtils::GetEntryPoint<TestFunctionType>(sharedObjectHandle,
                                                                                                  "TestFunction3"),
                       RuntimeException);
-    BOOST_TEST((testFunctionPointer == nullptr));
+    CHECK((testFunctionPointer == nullptr));
 
     DynamicBackendUtils::CloseHandle(sharedObjectHandle);
 }
@@ -320,15 +317,15 @@
     std::string sharedObjectFilePath = GetTestFilePath(testSubDirectory, g_TestSharedObjectFileName);
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_NO_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
-    BOOST_TEST((sharedObjectHandle != nullptr));
+    CHECK_NOTHROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
+    CHECK((sharedObjectHandle != nullptr));
 
     using TestFunctionType = int(*)(int);
     TestFunctionType testFunctionPointer = nullptr;
-    BOOST_CHECK_THROW(testFunctionPointer = DynamicBackendUtils::GetEntryPoint<TestFunctionType>(sharedObjectHandle,
+    CHECK_THROWS_AS(testFunctionPointer = DynamicBackendUtils::GetEntryPoint<TestFunctionType>(sharedObjectHandle,
                                                                                                  "TestFunction4"),
                       RuntimeException);
-    BOOST_TEST((testFunctionPointer == nullptr));
+    CHECK((testFunctionPointer == nullptr));
 
     DynamicBackendUtils::CloseHandle(sharedObjectHandle);
 }
@@ -342,36 +339,36 @@
 
     // Same backend and backend API versions are compatible with the backend API
     BackendVersion sameBackendVersion{ 2, 4 };
-    BOOST_TEST(sameBackendVersion == backendApiVersion);
-    BOOST_TEST(sameBackendVersion <= backendApiVersion);
-    BOOST_TEST(TestDynamicBackendUtils::IsBackendCompatibleTest(backendApiVersion, sameBackendVersion) == true);
+    CHECK(sameBackendVersion == backendApiVersion);
+    CHECK(sameBackendVersion <= backendApiVersion);
+    CHECK(TestDynamicBackendUtils::IsBackendCompatibleTest(backendApiVersion, sameBackendVersion) == true);
 
     // Backend versions that differ from the backend API version by major revision are not compatible
     // with the backend API
     BackendVersion laterMajorBackendVersion{ 3, 4 };
-    BOOST_TEST(!(laterMajorBackendVersion == backendApiVersion));
-    BOOST_TEST(!(laterMajorBackendVersion <= backendApiVersion));
-    BOOST_TEST(TestDynamicBackendUtils::IsBackendCompatibleTest(backendApiVersion, laterMajorBackendVersion) == false);
+    CHECK(!(laterMajorBackendVersion == backendApiVersion));
+    CHECK(!(laterMajorBackendVersion <= backendApiVersion));
+    CHECK(TestDynamicBackendUtils::IsBackendCompatibleTest(backendApiVersion, laterMajorBackendVersion) == false);
 
     BackendVersion earlierMajorBackendVersion{ 1, 4 };
-    BOOST_TEST(!(earlierMajorBackendVersion == backendApiVersion));
-    BOOST_TEST(earlierMajorBackendVersion <= backendApiVersion);
-    BOOST_TEST(TestDynamicBackendUtils::IsBackendCompatibleTest(backendApiVersion,
+    CHECK(!(earlierMajorBackendVersion == backendApiVersion));
+    CHECK(earlierMajorBackendVersion <= backendApiVersion);
+    CHECK(TestDynamicBackendUtils::IsBackendCompatibleTest(backendApiVersion,
                                                                 earlierMajorBackendVersion) == false);
 
     // Backend versions with the same major revision but later minor revision than
     // the backend API version are not compatible with the backend API
     BackendVersion laterMinorBackendVersion{ 2, 5 };
-    BOOST_TEST(!(laterMinorBackendVersion == backendApiVersion));
-    BOOST_TEST(!(laterMinorBackendVersion <= backendApiVersion));
-    BOOST_TEST(TestDynamicBackendUtils::IsBackendCompatibleTest(backendApiVersion, laterMinorBackendVersion) == false);
+    CHECK(!(laterMinorBackendVersion == backendApiVersion));
+    CHECK(!(laterMinorBackendVersion <= backendApiVersion));
+    CHECK(TestDynamicBackendUtils::IsBackendCompatibleTest(backendApiVersion, laterMinorBackendVersion) == false);
 
     // Backend versions with the same major revision but earlier minor revision than
     // the backend API version are compatible with the backend API
     BackendVersion earlierMinorBackendVersion{ 2, 3 };
-    BOOST_TEST(!(earlierMinorBackendVersion == backendApiVersion));
-    BOOST_TEST(earlierMinorBackendVersion <= backendApiVersion);
-    BOOST_TEST(TestDynamicBackendUtils::IsBackendCompatibleTest(backendApiVersion, earlierMinorBackendVersion) == true);
+    CHECK(!(earlierMinorBackendVersion == backendApiVersion));
+    CHECK(earlierMinorBackendVersion <= backendApiVersion);
+    CHECK(TestDynamicBackendUtils::IsBackendCompatibleTest(backendApiVersion, earlierMinorBackendVersion) == true);
 }
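
A practical detail behind the version checks above: doctest's CHECK decomposes a single binary comparison such as a == b or a <= b and prints both operands on failure, provided the comparison operators exist and, for readable output, an operator<< is available (otherwise the operands appear as {?}). A minimal sketch with an invented Version type, not ArmNN's BackendVersion:

    #include <doctest/doctest.h>

    #include <ostream>

    struct Version
    {
        int majorRev;
        int minorRev;
    };

    bool operator==(const Version& a, const Version& b)
    {
        return a.majorRev == b.majorRev && a.minorRev == b.minorRev;
    }

    bool operator<=(const Version& a, const Version& b)
    {
        return a.majorRev < b.majorRev || (a.majorRev == b.majorRev && a.minorRev <= b.minorRev);
    }

    // Lets doctest print the operands instead of "{?}" when a comparison fails.
    std::ostream& operator<<(std::ostream& os, const Version& v)
    {
        return os << v.majorRev << "." << v.minorRev;
    }

    TEST_CASE("VersionComparisonDecomposition")
    {
        Version api{ 2, 4 };
        Version earlierMinor{ 2, 3 };
        CHECK(earlierMinor <= api);    // on failure doctest reports both operands, e.g. "2.3 <= 2.4"
        CHECK(!(earlierMinor == api)); // a negated expression is reduced to one bool before decomposition
    }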
 
 #if defined(ARMNNREF_ENABLED)
@@ -387,41 +384,41 @@
     std::string testSubDirectory = GetTestSubDirectory(g_TestDynamicBackendSubDir);
 
     // We expect this path to exists so we can load a valid dynamic backend.
-    BOOST_CHECK_MESSAGE(fs::exists(testSubDirectory),
+    CHECK_MESSAGE(fs::exists(testSubDirectory),
                        "Base path for shared objects does not exist: " + testSubDirectory);
 
     std::string sharedObjectFilePath = GetTestFilePath(testSubDirectory, g_TestValidTestDynamicBackendFileName);
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_NO_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
-    BOOST_TEST((sharedObjectHandle != nullptr));
+    CHECK_NOTHROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
+    CHECK((sharedObjectHandle != nullptr));
 
     DynamicBackendPtr dynamicBackend;
-    BOOST_CHECK_NO_THROW(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)));
-    BOOST_TEST((dynamicBackend != nullptr));
+    CHECK_NOTHROW(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)));
+    CHECK((dynamicBackend != nullptr));
 
     BackendId dynamicBackendId;
-    BOOST_CHECK_NO_THROW(dynamicBackendId = dynamicBackend->GetBackendId());
-    BOOST_TEST((dynamicBackendId == "ValidTestDynamicBackend"));
+    CHECK_NOTHROW(dynamicBackendId = dynamicBackend->GetBackendId());
+    CHECK((dynamicBackendId == "ValidTestDynamicBackend"));
 
     BackendVersion dynamicBackendVersion;
-    BOOST_CHECK_NO_THROW(dynamicBackendVersion = dynamicBackend->GetBackendVersion());
-    BOOST_TEST((dynamicBackendVersion == IBackendInternal::GetApiVersion()));
+    CHECK_NOTHROW(dynamicBackendVersion = dynamicBackend->GetBackendVersion());
+    CHECK((dynamicBackendVersion == IBackendInternal::GetApiVersion()));
 
     IBackendInternalUniquePtr dynamicBackendInstance1;
-    BOOST_CHECK_NO_THROW(dynamicBackendInstance1 = dynamicBackend->GetBackend());
-    BOOST_TEST((dynamicBackendInstance1 != nullptr));
+    CHECK_NOTHROW(dynamicBackendInstance1 = dynamicBackend->GetBackend());
+    CHECK((dynamicBackendInstance1 != nullptr));
 
     BackendRegistry::FactoryFunction dynamicBackendFactoryFunction = nullptr;
-    BOOST_CHECK_NO_THROW(dynamicBackendFactoryFunction = dynamicBackend->GetFactoryFunction());
-    BOOST_TEST((dynamicBackendFactoryFunction != nullptr));
+    CHECK_NOTHROW(dynamicBackendFactoryFunction = dynamicBackend->GetFactoryFunction());
+    CHECK((dynamicBackendFactoryFunction != nullptr));
 
     IBackendInternalUniquePtr dynamicBackendInstance2;
-    BOOST_CHECK_NO_THROW(dynamicBackendInstance2 = dynamicBackendFactoryFunction());
-    BOOST_TEST((dynamicBackendInstance2 != nullptr));
+    CHECK_NOTHROW(dynamicBackendInstance2 = dynamicBackendFactoryFunction());
+    CHECK((dynamicBackendInstance2 != nullptr));
 
-    BOOST_TEST((dynamicBackendInstance1->GetId() == "ValidTestDynamicBackend"));
-    BOOST_TEST((dynamicBackendInstance2->GetId() == "ValidTestDynamicBackend"));
+    CHECK((dynamicBackendInstance1->GetId() == "ValidTestDynamicBackend"));
+    CHECK((dynamicBackendInstance2->GetId() == "ValidTestDynamicBackend"));
 }
 #endif
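
BOOST_CHECK_MESSAGE carries over as CHECK_MESSAGE(condition, message); the message is attached to the report only when the condition fails. A short sketch assuming only the standard <filesystem> header; the directory name is made up:

    #include <doctest/doctest.h>

    #include <filesystem>
    #include <string>

    namespace fs = std::filesystem;

    TEST_CASE("CheckMessageMapsFromBoostCheckMessage")
    {
        std::string testSubDirectory = "testSharedObject/"; // made-up path for the sketch
        // The message argument is streamed, so string concatenation works as in the converted test.
        CHECK_MESSAGE(fs::exists(testSubDirectory),
                      "Base path for shared objects does not exist: " + testSubDirectory);
    }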
 
@@ -433,8 +430,8 @@
 
     void* sharedObjectHandle = nullptr;
     DynamicBackendPtr dynamicBackend;
-    BOOST_CHECK_THROW(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)), InvalidArgumentException);
-    BOOST_TEST((dynamicBackend == nullptr));
+    CHECK_THROWS_AS(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)), InvalidArgumentException);
+    CHECK((dynamicBackend == nullptr));
 }
 
 void CreateDynamicBackendObjectInvalidInterface1TestImpl()
@@ -448,12 +445,12 @@
     std::string sharedObjectFilePath = GetTestFilePath(testSubDirectory, g_TestInvalidTestDynamicBackend1FileName);
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_NO_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
-    BOOST_TEST((sharedObjectHandle != nullptr));
+    CHECK_NOTHROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
+    CHECK((sharedObjectHandle != nullptr));
 
     DynamicBackendPtr dynamicBackend;
-    BOOST_CHECK_THROW(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)), RuntimeException);
-    BOOST_TEST((dynamicBackend == nullptr));
+    CHECK_THROWS_AS(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)), RuntimeException);
+    CHECK((dynamicBackend == nullptr));
 }
 
 void CreateDynamicBackendObjectInvalidInterface2TestImpl()
@@ -468,12 +465,12 @@
     std::string sharedObjectFilePath = GetTestFilePath(testSubDirectory, g_TestInvalidTestDynamicBackend2FileName);
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_NO_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
-    BOOST_TEST((sharedObjectHandle != nullptr));
+    CHECK_NOTHROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
+    CHECK((sharedObjectHandle != nullptr));
 
     DynamicBackendPtr dynamicBackend;
-    BOOST_CHECK_THROW(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)), RuntimeException);
-    BOOST_TEST((dynamicBackend == nullptr));
+    CHECK_THROWS_AS(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)), RuntimeException);
+    CHECK((dynamicBackend == nullptr));
 }
 
 void CreateDynamicBackendObjectInvalidInterface3TestImpl()
@@ -488,12 +485,12 @@
     std::string sharedObjectFilePath = GetTestFilePath(testSubDirectory, g_TestInvalidTestDynamicBackend3FileName);
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_NO_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
-    BOOST_TEST((sharedObjectHandle != nullptr));
+    CHECK_NOTHROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
+    CHECK((sharedObjectHandle != nullptr));
 
     DynamicBackendPtr dynamicBackend;
-    BOOST_CHECK_THROW(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)), RuntimeException);
-    BOOST_TEST((dynamicBackend == nullptr));
+    CHECK_THROWS_AS(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)), RuntimeException);
+    CHECK((dynamicBackend == nullptr));
 }
 
 void CreateDynamicBackendObjectInvalidInterface4TestImpl()
@@ -508,12 +505,12 @@
     std::string sharedObjectFilePath = GetTestFilePath(testSubDirectory, g_TestInvalidTestDynamicBackend4FileName);
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_NO_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
-    BOOST_TEST((sharedObjectHandle != nullptr));
+    CHECK_NOTHROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
+    CHECK((sharedObjectHandle != nullptr));
 
     DynamicBackendPtr dynamicBackend;
-    BOOST_CHECK_THROW(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)), RuntimeException);
-    BOOST_TEST((dynamicBackend == nullptr));
+    CHECK_THROWS_AS(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)), RuntimeException);
+    CHECK((dynamicBackend == nullptr));
 }
 
 void CreateDynamicBackendObjectInvalidInterface5TestImpl()
@@ -529,12 +526,12 @@
     std::string sharedObjectFilePath = GetTestFilePath(testSubDirectory, g_TestInvalidTestDynamicBackend5FileName);
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_NO_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
-    BOOST_TEST((sharedObjectHandle != nullptr));
+    CHECK_NOTHROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
+    CHECK((sharedObjectHandle != nullptr));
 
     DynamicBackendPtr dynamicBackend;
-    BOOST_CHECK_THROW(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)), RuntimeException);
-    BOOST_TEST((dynamicBackend == nullptr));
+    CHECK_THROWS_AS(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)), RuntimeException);
+    CHECK((dynamicBackend == nullptr));
 }
 
 void CreateDynamicBackendObjectInvalidInterface6TestImpl()
@@ -550,32 +547,32 @@
     std::string sharedObjectFilePath = GetTestFilePath(testSubDirectory, g_TestInvalidTestDynamicBackend6FileName);
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_NO_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
-    BOOST_TEST((sharedObjectHandle != nullptr));
+    CHECK_NOTHROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
+    CHECK((sharedObjectHandle != nullptr));
 
     DynamicBackendPtr dynamicBackend;
-    BOOST_CHECK_NO_THROW(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)));
-    BOOST_TEST((dynamicBackend != nullptr));
+    CHECK_NOTHROW(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)));
+    CHECK((dynamicBackend != nullptr));
 
     BackendId dynamicBackendId;
-    BOOST_CHECK_NO_THROW(dynamicBackendId = dynamicBackend->GetBackendId());
-    BOOST_TEST((dynamicBackendId == "InvalidTestDynamicBackend"));
+    CHECK_NOTHROW(dynamicBackendId = dynamicBackend->GetBackendId());
+    CHECK((dynamicBackendId == "InvalidTestDynamicBackend"));
 
     BackendVersion dynamicBackendVersion;
-    BOOST_CHECK_NO_THROW(dynamicBackendVersion = dynamicBackend->GetBackendVersion());
-    BOOST_TEST((dynamicBackendVersion == BackendVersion({ 1, 0 })));
+    CHECK_NOTHROW(dynamicBackendVersion = dynamicBackend->GetBackendVersion());
+    CHECK((dynamicBackendVersion == BackendVersion({ 1, 0 })));
 
     IBackendInternalUniquePtr dynamicBackendInstance1;
-    BOOST_CHECK_THROW(dynamicBackendInstance1 = dynamicBackend->GetBackend(), RuntimeException);
-    BOOST_TEST((dynamicBackendInstance1 == nullptr));
+    CHECK_THROWS_AS(dynamicBackendInstance1 = dynamicBackend->GetBackend(), RuntimeException);
+    CHECK((dynamicBackendInstance1 == nullptr));
 
     BackendRegistry::FactoryFunction dynamicBackendFactoryFunction = nullptr;
-    BOOST_CHECK_NO_THROW(dynamicBackendFactoryFunction = dynamicBackend->GetFactoryFunction());
-    BOOST_TEST((dynamicBackendFactoryFunction != nullptr));
+    CHECK_NOTHROW(dynamicBackendFactoryFunction = dynamicBackend->GetFactoryFunction());
+    CHECK((dynamicBackendFactoryFunction != nullptr));
 
     IBackendInternalUniquePtr dynamicBackendInstance2;
-    BOOST_CHECK_THROW(dynamicBackendInstance2 = dynamicBackendFactoryFunction(), RuntimeException);
-    BOOST_TEST((dynamicBackendInstance2 == nullptr));
+    CHECK_THROWS_AS(dynamicBackendInstance2 = dynamicBackendFactoryFunction(), RuntimeException);
+    CHECK((dynamicBackendInstance2 == nullptr));
 }
 
 void CreateDynamicBackendObjectInvalidInterface7TestImpl()
@@ -591,12 +588,12 @@
     std::string sharedObjectFilePath = GetTestFilePath(testSubDirectory, g_TestInvalidTestDynamicBackend7FileName);
 
     void* sharedObjectHandle = nullptr;
-    BOOST_CHECK_NO_THROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
-    BOOST_TEST((sharedObjectHandle != nullptr));
+    CHECK_NOTHROW(sharedObjectHandle = DynamicBackendUtils::OpenHandle(sharedObjectFilePath));
+    CHECK((sharedObjectHandle != nullptr));
 
     DynamicBackendPtr dynamicBackend;
-    BOOST_CHECK_THROW(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)), RuntimeException);
-    BOOST_TEST((dynamicBackend == nullptr));
+    CHECK_THROWS_AS(dynamicBackend.reset(new DynamicBackend(sharedObjectHandle)), RuntimeException);
+    CHECK((dynamicBackend == nullptr));
 }
 
 void GetBackendPathsTestImpl()
@@ -616,67 +613,67 @@
     std::string subDir3 = GetTestSubDirectory(g_TestDynamicBackendsSubDir3);
     std::string subDir4 = GetTestSubDirectory(g_TestDynamicBackendsSubDir4);
 
-    BOOST_CHECK(exists(subDir1));
-    BOOST_CHECK(exists(subDir2));
-    BOOST_CHECK(exists(subDir3));
-    BOOST_CHECK(!exists(subDir4));
+    CHECK(exists(subDir1));
+    CHECK(exists(subDir2));
+    CHECK(exists(subDir3));
+    CHECK(!exists(subDir4));
 
     // No path
-    BOOST_TEST(TestDynamicBackendUtils::GetBackendPathsImplTest("").empty());
+    CHECK(TestDynamicBackendUtils::GetBackendPathsImplTest("").empty());
 
     // Malformed path
     std::string malformedDir(subDir1 + "/" + subDir1);
-    BOOST_TEST(TestDynamicBackendUtils::GetBackendPathsImplTest(malformedDir).size()==0);
+    CHECK(TestDynamicBackendUtils::GetBackendPathsImplTest(malformedDir).size() == 0);
 
     // Single valid path
     std::vector<std::string> DynamicBackendPaths2 = TestDynamicBackendUtils::GetBackendPathsImplTest(subDir1);
-    BOOST_TEST(DynamicBackendPaths2.size() == 1);
-    BOOST_TEST(DynamicBackendPaths2[0] == subDir1);
+    CHECK(DynamicBackendPaths2.size() == 1);
+    CHECK(DynamicBackendPaths2[0] == subDir1);
 
     // Multiple equal and valid paths
     std::string multipleEqualDirs(subDir1 + ":" + subDir1);
     std::vector<std::string> DynamicBackendPaths3 = TestDynamicBackendUtils::GetBackendPathsImplTest(multipleEqualDirs);
-    BOOST_TEST(DynamicBackendPaths3.size() == 1);
-    BOOST_TEST(DynamicBackendPaths3[0] == subDir1);
+    CHECK(DynamicBackendPaths3.size() == 1);
+    CHECK(DynamicBackendPaths3[0] == subDir1);
 
     // Multiple empty paths
-    BOOST_TEST(TestDynamicBackendUtils::GetBackendPathsImplTest(":::").empty());
+    CHECK(TestDynamicBackendUtils::GetBackendPathsImplTest(":::").empty());
 
     // Multiple valid paths
     std::string multipleValidPaths(subDir1 + ":" + subDir2 + ":" + subDir3);
     std::vector<std::string> DynamicBackendPaths5 =
         TestDynamicBackendUtils::GetBackendPathsImplTest(multipleValidPaths);
-    BOOST_TEST(DynamicBackendPaths5.size() == 3);
-    BOOST_TEST(DynamicBackendPaths5[0] == subDir1);
-    BOOST_TEST(DynamicBackendPaths5[1] == subDir2);
-    BOOST_TEST(DynamicBackendPaths5[2] == subDir3);
+    CHECK(DynamicBackendPaths5.size() == 3);
+    CHECK(DynamicBackendPaths5[0] == subDir1);
+    CHECK(DynamicBackendPaths5[1] == subDir2);
+    CHECK(DynamicBackendPaths5[2] == subDir3);
 
     // Valid among empty paths
     std::string validAmongEmptyDirs("::" + subDir1 + ":");
     std::vector<std::string> DynamicBackendPaths6 =
         TestDynamicBackendUtils::GetBackendPathsImplTest(validAmongEmptyDirs);
-    BOOST_TEST(DynamicBackendPaths6.size() == 1);
-    BOOST_TEST(DynamicBackendPaths6[0] == subDir1);
+    CHECK(DynamicBackendPaths6.size() == 1);
+    CHECK(DynamicBackendPaths6[0] == subDir1);
 
     // Invalid among empty paths
     std::string invalidAmongEmptyDirs(":" + subDir4 + "::");
-    BOOST_TEST(TestDynamicBackendUtils::GetBackendPathsImplTest(invalidAmongEmptyDirs).empty());
+    CHECK(TestDynamicBackendUtils::GetBackendPathsImplTest(invalidAmongEmptyDirs).empty());
 
     // Valid, invalid and empty paths
     std::string validInvalidEmptyDirs(subDir1 + ":" + subDir4 + ":");
     std::vector<std::string> DynamicBackendPaths8 =
         TestDynamicBackendUtils::GetBackendPathsImplTest(validInvalidEmptyDirs);
-    BOOST_TEST(DynamicBackendPaths8.size() == 1);
-    BOOST_TEST(DynamicBackendPaths8[0] == subDir1);
+    CHECK(DynamicBackendPaths8.size() == 1);
+    CHECK(DynamicBackendPaths8[0] == subDir1);
 
     // Mix of duplicates of valid, invalid and empty paths
     std::string duplicateValidInvalidEmptyDirs(validInvalidEmptyDirs + ":" + validInvalidEmptyDirs + ":" +
                                                subDir2 + ":" + subDir2);
     std::vector<std::string> DynamicBackendPaths9 =
         TestDynamicBackendUtils::GetBackendPathsImplTest(duplicateValidInvalidEmptyDirs);
-    BOOST_TEST(DynamicBackendPaths9.size() == 2);
-    BOOST_TEST(DynamicBackendPaths9[0] == subDir1);
-    BOOST_TEST(DynamicBackendPaths9[1] == subDir2);
+    CHECK(DynamicBackendPaths9.size() == 2);
+    CHECK(DynamicBackendPaths9[0] == subDir1);
+    CHECK(DynamicBackendPaths9[1] == subDir2);
 }
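
GetBackendPathsTestImpl stays as one flat body above, which is a faithful port of the Boost version. doctest also offers SUBCASE for this style of multi-scenario test; below is a hedged sketch of how the path-parsing cases could be grouped, with an invented SplitPaths helper standing in for GetBackendPathsImplTest (the real implementation also checks that each path exists and removes duplicates):

    #include <doctest/doctest.h>

    #include <sstream>
    #include <string>
    #include <vector>

    // Invented stand-in: splits a colon-separated list and drops empty entries.
    std::vector<std::string> SplitPaths(const std::string& paths)
    {
        std::vector<std::string> result;
        std::stringstream stream(paths);
        std::string path;
        while (std::getline(stream, path, ':'))
        {
            if (!path.empty())
            {
                result.push_back(path);
            }
        }
        return result;
    }

    TEST_CASE("BackendPathParsing")
    {
        SUBCASE("Empty input yields no paths")
        {
            CHECK(SplitPaths("").empty());
            CHECK(SplitPaths(":::").empty());
        }
        SUBCASE("Valid path among empty entries is kept")
        {
            std::vector<std::string> parsed = SplitPaths("::/tmp/backends:");
            CHECK(parsed.size() == 1u);
            CHECK(parsed[0] == "/tmp/backends");
        }
    }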
 
 void GetBackendPathsOverrideTestImpl()
@@ -687,17 +684,17 @@
     std::string subDir1 = GetTestSubDirectory(g_TestDynamicBackendsSubDir1);
     std::string subDir4 = GetTestSubDirectory(g_TestDynamicBackendsSubDir4);
 
-    BOOST_CHECK(exists(subDir1));
-    BOOST_CHECK(!exists(subDir4));
+    CHECK(exists(subDir1));
+    CHECK(!exists(subDir4));
 
     // Override with valid path
     std::vector<std::string> validResult = DynamicBackendUtils::GetBackendPaths(subDir1);
-    BOOST_TEST(validResult.size() == 1);
-    BOOST_TEST(validResult[0] == subDir1);
+    CHECK(validResult.size() == 1);
+    CHECK(validResult[0] == subDir1);
 
     // Override with invalid path
     std::vector<std::string> invalidResult = DynamicBackendUtils::GetBackendPaths(subDir4);
-    BOOST_TEST(invalidResult.empty());
+    CHECK(invalidResult.empty());
 }
 
 void GetSharedObjectsTestImpl()
@@ -753,10 +750,10 @@
     std::string testDynamicBackendsSubDir2 = GetTestSubDirectory(g_TestDynamicBackendsSubDir2);
     std::string testDynamicBackendsSubDir3 = GetTestSubDirectory(g_TestDynamicBackendsSubDir3);
     std::string testDynamicBackendsSubDir4 = GetTestSubDirectory(g_TestDynamicBackendsSubDir4);
-    BOOST_CHECK(exists(testDynamicBackendsSubDir1));
-    BOOST_CHECK(exists(testDynamicBackendsSubDir2));
-    BOOST_CHECK(exists(testDynamicBackendsSubDir3));
-    BOOST_CHECK(!exists(testDynamicBackendsSubDir4));
+    CHECK(exists(testDynamicBackendsSubDir1));
+    CHECK(exists(testDynamicBackendsSubDir2));
+    CHECK(exists(testDynamicBackendsSubDir3));
+    CHECK(!exists(testDynamicBackendsSubDir4));
 
     std::vector<std::string> backendPaths
     {
@@ -779,16 +776,16 @@
         path(testDynamicBackendsSubDir2 + "Arm_GpuAcc_backend.so")          // Duplicates on different paths are allowed
     };
 
-    BOOST_TEST(sharedObjects.size() == expectedSharedObjects.size());
-    BOOST_TEST(fs::equivalent(path(sharedObjects[0]), expectedSharedObjects[0]));
-    BOOST_TEST(fs::equivalent(path(sharedObjects[1]), expectedSharedObjects[1]));
-    BOOST_TEST(fs::equivalent(path(sharedObjects[2]), expectedSharedObjects[2]));
-    BOOST_TEST(fs::equivalent(path(sharedObjects[3]), expectedSharedObjects[3]));
-    BOOST_TEST(fs::equivalent(path(sharedObjects[4]), expectedSharedObjects[4]));
-    BOOST_TEST(fs::equivalent(path(sharedObjects[5]), expectedSharedObjects[5]));
-    BOOST_TEST(fs::equivalent(path(sharedObjects[6]), expectedSharedObjects[6]));
-    BOOST_TEST(fs::equivalent(path(sharedObjects[7]), expectedSharedObjects[7]));
-    BOOST_TEST(fs::equivalent(path(sharedObjects[8]), expectedSharedObjects[8]));
+    CHECK(sharedObjects.size() == expectedSharedObjects.size());
+    CHECK(fs::equivalent(path(sharedObjects[0]), expectedSharedObjects[0]));
+    CHECK(fs::equivalent(path(sharedObjects[1]), expectedSharedObjects[1]));
+    CHECK(fs::equivalent(path(sharedObjects[2]), expectedSharedObjects[2]));
+    CHECK(fs::equivalent(path(sharedObjects[3]), expectedSharedObjects[3]));
+    CHECK(fs::equivalent(path(sharedObjects[4]), expectedSharedObjects[4]));
+    CHECK(fs::equivalent(path(sharedObjects[5]), expectedSharedObjects[5]));
+    CHECK(fs::equivalent(path(sharedObjects[6]), expectedSharedObjects[6]));
+    CHECK(fs::equivalent(path(sharedObjects[7]), expectedSharedObjects[7]));
+    CHECK(fs::equivalent(path(sharedObjects[8]), expectedSharedObjects[8]));
 }
 
 void CreateDynamicBackendsTestImpl()
@@ -824,10 +821,10 @@
     std::string testDynamicBackendsSubDir6 = GetTestSubDirectory(g_TestDynamicBackendsSubDir6);
     std::string testDynamicBackendsSubDir7 = GetTestSubDirectory(g_TestDynamicBackendsSubDir7);
     std::string testDynamicBackendsSubDir8 = GetTestSubDirectory(g_TestDynamicBackendsSubDir8);
-    BOOST_CHECK(exists(testDynamicBackendsSubDir5));
-    BOOST_CHECK(exists(testDynamicBackendsSubDir6));
-    BOOST_CHECK(exists(testDynamicBackendsSubDir7));
-    BOOST_CHECK(!exists(testDynamicBackendsSubDir8));
+    CHECK(exists(testDynamicBackendsSubDir5));
+    CHECK(exists(testDynamicBackendsSubDir6));
+    CHECK(exists(testDynamicBackendsSubDir7));
+    CHECK(!exists(testDynamicBackendsSubDir8));
 
     std::vector<std::string> backendPaths
     {
@@ -839,19 +836,19 @@
     std::vector<std::string> sharedObjects = DynamicBackendUtils::GetSharedObjects(backendPaths);
     std::vector<DynamicBackendPtr> dynamicBackends = DynamicBackendUtils::CreateDynamicBackends(sharedObjects);
 
-    BOOST_TEST(dynamicBackends.size() == 5);
-    BOOST_TEST((dynamicBackends[0] != nullptr));
-    BOOST_TEST((dynamicBackends[1] != nullptr));
-    BOOST_TEST((dynamicBackends[2] != nullptr));
-    BOOST_TEST((dynamicBackends[3] != nullptr));
-    BOOST_TEST((dynamicBackends[4] != nullptr));
+    CHECK(dynamicBackends.size() == 5);
+    CHECK((dynamicBackends[0] != nullptr));
+    CHECK((dynamicBackends[1] != nullptr));
+    CHECK((dynamicBackends[2] != nullptr));
+    CHECK((dynamicBackends[3] != nullptr));
+    CHECK((dynamicBackends[4] != nullptr));
 
     // Duplicates are allowed here, they will be skipped later during the backend registration
-    BOOST_TEST((dynamicBackends[0]->GetBackendId() == "TestValid2"));
-    BOOST_TEST((dynamicBackends[1]->GetBackendId() == "TestValid3"));
-    BOOST_TEST((dynamicBackends[2]->GetBackendId() == "TestValid2")); // From duplicate Arm_TestValid2_backend.so
-    BOOST_TEST((dynamicBackends[3]->GetBackendId() == "TestValid2")); // From Arm_TestValid4_backend.so
-    BOOST_TEST((dynamicBackends[4]->GetBackendId() == "TestValid5"));
+    CHECK((dynamicBackends[0]->GetBackendId() == "TestValid2"));
+    CHECK((dynamicBackends[1]->GetBackendId() == "TestValid3"));
+    CHECK((dynamicBackends[2]->GetBackendId() == "TestValid2")); // From duplicate Arm_TestValid2_backend.so
+    CHECK((dynamicBackends[3]->GetBackendId() == "TestValid2")); // From Arm_TestValid4_backend.so
+    CHECK((dynamicBackends[4]->GetBackendId() == "TestValid5"));
 }
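
The Boost tests used non-fatal BOOST_TEST assertions throughout, and the conversion keeps that behaviour with CHECK, including on pointers that are dereferenced on the next line. doctest's REQUIRE aborts the current test case on failure and is the safer choice for such guards; a sketch with an invented FakeBackend type:

    #include <doctest/doctest.h>

    #include <memory>
    #include <string>

    struct FakeBackend
    {
        std::string GetId() const { return "TestValid2"; }
    };

    TEST_CASE("RequireGuardsSubsequentDereferences")
    {
        auto factory = []() { return std::make_unique<FakeBackend>(); }; // invented for the sketch
        std::unique_ptr<FakeBackend> backend = factory();

        // REQUIRE stops the test case here if the pointer is null,
        // so the dereference below can never run on a null pointer.
        REQUIRE(backend != nullptr);
        CHECK(backend->GetId() == "TestValid2");
    }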
 
 void CreateDynamicBackendsNoPathsTestImpl()
@@ -860,7 +857,7 @@
 
     std::vector<DynamicBackendPtr> dynamicBackends = DynamicBackendUtils::CreateDynamicBackends({});
 
-    BOOST_TEST(dynamicBackends.empty());
+    CHECK(dynamicBackends.empty());
 }
 
 void CreateDynamicBackendsAllInvalidTestImpl()
@@ -875,7 +872,7 @@
     };
     std::vector<DynamicBackendPtr> dynamicBackends = DynamicBackendUtils::CreateDynamicBackends(sharedObjects);
 
-    BOOST_TEST(dynamicBackends.empty());
+    CHECK(dynamicBackends.empty());
 }
 
 void CreateDynamicBackendsMixedTypesTestImpl()
@@ -885,8 +882,8 @@
 
     std::string testDynamicBackendsSubDir5 = GetTestSubDirectory(g_TestDynamicBackendsSubDir5);
     std::string testDynamicBackendsSubDir6 = GetTestSubDirectory(g_TestDynamicBackendsSubDir6);
-    BOOST_CHECK(exists(testDynamicBackendsSubDir5));
-    BOOST_CHECK(exists(testDynamicBackendsSubDir6));
+    CHECK(exists(testDynamicBackendsSubDir5));
+    CHECK(exists(testDynamicBackendsSubDir6));
 
     std::string testValidBackend2FilePath = GetTestFilePath(testDynamicBackendsSubDir5,
                                                             g_TestValidBackend2FileName);
@@ -894,9 +891,9 @@
                                                               g_TestInvalidBackend8FileName);
     std::string testInvalidBackend9FilePath = GetTestFilePath(testDynamicBackendsSubDir6,
                                                               g_TestInvalidBackend9FileName);
-    BOOST_CHECK(exists(testValidBackend2FilePath));
-    BOOST_CHECK(exists(testInvalidBackend8FilePath));
-    BOOST_CHECK(exists(testInvalidBackend9FilePath));
+    CHECK(exists(testValidBackend2FilePath));
+    CHECK(exists(testInvalidBackend8FilePath));
+    CHECK(exists(testInvalidBackend9FilePath));
 
     std::vector<std::string> sharedObjects
     {
@@ -907,9 +904,9 @@
     };
     std::vector<DynamicBackendPtr> dynamicBackends = DynamicBackendUtils::CreateDynamicBackends(sharedObjects);
 
-    BOOST_TEST(dynamicBackends.size() == 1);
-    BOOST_TEST((dynamicBackends[0] != nullptr));
-    BOOST_TEST((dynamicBackends[0]->GetBackendId() == "TestValid2"));
+    CHECK(dynamicBackends.size() == 1);
+    CHECK((dynamicBackends[0] != nullptr));
+    CHECK((dynamicBackends[0]->GetBackendId() == "TestValid2"));
 }
 
 #if defined(ARMNNREF_ENABLED)
@@ -922,42 +919,42 @@
 
     // Dummy registry used for testing
     BackendRegistry backendRegistry;
-    BOOST_TEST(backendRegistry.Size() == 0);
+    CHECK(backendRegistry.Size() == 0);
 
     std::string testDynamicBackendsSubDir5 = GetTestSubDirectory(g_TestDynamicBackendsSubDir5);
-    BOOST_CHECK(exists(testDynamicBackendsSubDir5));
+    CHECK(exists(testDynamicBackendsSubDir5));
 
     std::string testValidBackend2FilePath = GetTestFilePath(testDynamicBackendsSubDir5, g_TestValidBackend2FileName);
-    BOOST_CHECK(exists(testValidBackend2FilePath));
+    CHECK(exists(testValidBackend2FilePath));
 
     std::vector<std::string> sharedObjects{ testValidBackend2FilePath };
     std::vector<DynamicBackendPtr> dynamicBackends = TestDynamicBackendUtils::CreateDynamicBackends(sharedObjects);
 
-    BOOST_TEST(dynamicBackends.size() == 1);
-    BOOST_TEST((dynamicBackends[0] != nullptr));
+    CHECK(dynamicBackends.size() == 1);
+    CHECK((dynamicBackends[0] != nullptr));
 
     BackendId dynamicBackendId = dynamicBackends[0]->GetBackendId();
-    BOOST_TEST((dynamicBackendId == "TestValid2"));
+    CHECK((dynamicBackendId == "TestValid2"));
 
     BackendVersion dynamicBackendVersion = dynamicBackends[0]->GetBackendVersion();
-    BOOST_TEST(TestDynamicBackendUtils::IsBackendCompatible(dynamicBackendVersion));
+    CHECK(TestDynamicBackendUtils::IsBackendCompatible(dynamicBackendVersion));
 
     BackendIdSet registeredBackendIds = TestDynamicBackendUtils::RegisterDynamicBackendsImplTest(backendRegistry,
                                                                                                  dynamicBackends);
-    BOOST_TEST(backendRegistry.Size() == 1);
-    BOOST_TEST(registeredBackendIds.size() == 1);
+    CHECK(backendRegistry.Size() == 1);
+    CHECK(registeredBackendIds.size() == 1);
 
     BackendIdSet backendIds = backendRegistry.GetBackendIds();
-    BOOST_TEST(backendIds.size() == 1);
-    BOOST_TEST((backendIds.find(dynamicBackendId) != backendIds.end()));
-    BOOST_TEST((registeredBackendIds.find(dynamicBackendId) != registeredBackendIds.end()));
+    CHECK(backendIds.size() == 1);
+    CHECK((backendIds.find(dynamicBackendId) != backendIds.end()));
+    CHECK((registeredBackendIds.find(dynamicBackendId) != registeredBackendIds.end()));
 
     auto dynamicBackendFactoryFunction = backendRegistry.GetFactory(dynamicBackendId);
-    BOOST_TEST((dynamicBackendFactoryFunction != nullptr));
+    CHECK((dynamicBackendFactoryFunction != nullptr));
 
     IBackendInternalUniquePtr dynamicBackend = dynamicBackendFactoryFunction();
-    BOOST_TEST((dynamicBackend != nullptr));
-    BOOST_TEST((dynamicBackend->GetId() == dynamicBackendId));
+    CHECK((dynamicBackend != nullptr));
+    CHECK((dynamicBackend->GetId() == dynamicBackendId));
 }
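
A note on the doubled parentheses used throughout these assertions, for example CHECK((backendIds.find(dynamicBackendId) != backendIds.end())). With BOOST_TEST they were needed for operands that have no streaming operator; under doctest they remain harmless, simply collapsing the expression to a single bool before the macro sees it, which skips operand decomposition. Both spellings below are valid; the data is made up:

    #include <doctest/doctest.h>

    #include <set>
    #include <string>

    TEST_CASE("ParenthesisedChecksSkipDecomposition")
    {
        std::set<std::string> backendIds{ "TestValid2", "TestValid3" };

        // Decomposed form: on failure doctest reports the two iterators only as "{?}",
        // because std::set iterators have no streaming operator.
        CHECK(backendIds.find("TestValid2") != backendIds.end());

        // Parenthesised form: evaluated to a plain bool first, as in the converted tests.
        CHECK((backendIds.find("TestValid3") != backendIds.end()));
    }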
 
 void RegisterMultipleDynamicBackendsTestImpl()
@@ -969,15 +966,15 @@
 
     std::string testDynamicBackendsSubDir5 = GetTestSubDirectory(g_TestDynamicBackendsSubDir5);
     std::string testDynamicBackendsSubDir6 = GetTestSubDirectory(g_TestDynamicBackendsSubDir6);
-    BOOST_CHECK(exists(testDynamicBackendsSubDir5));
-    BOOST_CHECK(exists(testDynamicBackendsSubDir6));
+    CHECK(exists(testDynamicBackendsSubDir5));
+    CHECK(exists(testDynamicBackendsSubDir6));
 
     std::string testValidBackend2FilePath = GetTestFilePath(testDynamicBackendsSubDir5, g_TestValidBackend2FileName);
     std::string testValidBackend3FilePath = GetTestFilePath(testDynamicBackendsSubDir5, g_TestValidBackend3FileName);
     std::string testValidBackend5FilePath = GetTestFilePath(testDynamicBackendsSubDir6, g_TestValidBackend5FileName);
-    BOOST_CHECK(exists(testValidBackend2FilePath));
-    BOOST_CHECK(exists(testValidBackend3FilePath));
-    BOOST_CHECK(exists(testValidBackend5FilePath));
+    CHECK(exists(testValidBackend2FilePath));
+    CHECK(exists(testValidBackend3FilePath));
+    CHECK(exists(testValidBackend5FilePath));
 
     std::vector<std::string> sharedObjects
     {
@@ -987,52 +984,52 @@
     };
     std::vector<DynamicBackendPtr> dynamicBackends = TestDynamicBackendUtils::CreateDynamicBackends(sharedObjects);
 
-    BOOST_TEST(dynamicBackends.size() == 3);
-    BOOST_TEST((dynamicBackends[0] != nullptr));
-    BOOST_TEST((dynamicBackends[1] != nullptr));
-    BOOST_TEST((dynamicBackends[2] != nullptr));
+    CHECK(dynamicBackends.size() == 3);
+    CHECK((dynamicBackends[0] != nullptr));
+    CHECK((dynamicBackends[1] != nullptr));
+    CHECK((dynamicBackends[2] != nullptr));
 
     BackendId dynamicBackendId1 = dynamicBackends[0]->GetBackendId();
     BackendId dynamicBackendId2 = dynamicBackends[1]->GetBackendId();
     BackendId dynamicBackendId3 = dynamicBackends[2]->GetBackendId();
-    BOOST_TEST((dynamicBackendId1 == "TestValid2"));
-    BOOST_TEST((dynamicBackendId2 == "TestValid3"));
-    BOOST_TEST((dynamicBackendId3 == "TestValid5"));
+    CHECK((dynamicBackendId1 == "TestValid2"));
+    CHECK((dynamicBackendId2 == "TestValid3"));
+    CHECK((dynamicBackendId3 == "TestValid5"));
 
     for (size_t i = 0; i < dynamicBackends.size(); i++)
     {
         BackendVersion dynamicBackendVersion = dynamicBackends[i]->GetBackendVersion();
-        BOOST_TEST(TestDynamicBackendUtils::IsBackendCompatible(dynamicBackendVersion));
+        CHECK(TestDynamicBackendUtils::IsBackendCompatible(dynamicBackendVersion));
     }
 
     // Dummy registry used for testing
     BackendRegistry backendRegistry;
-    BOOST_TEST(backendRegistry.Size() == 0);
+    CHECK(backendRegistry.Size() == 0);
 
     BackendIdSet registeredBackendIds = TestDynamicBackendUtils::RegisterDynamicBackendsImplTest(backendRegistry,
                                                                                                  dynamicBackends);
-    BOOST_TEST(backendRegistry.Size() == 3);
-    BOOST_TEST(registeredBackendIds.size() == 3);
+    CHECK(backendRegistry.Size() == 3);
+    CHECK(registeredBackendIds.size() == 3);
 
     BackendIdSet backendIds = backendRegistry.GetBackendIds();
-    BOOST_TEST(backendIds.size() == 3);
-    BOOST_TEST((backendIds.find(dynamicBackendId1) != backendIds.end()));
-    BOOST_TEST((backendIds.find(dynamicBackendId2) != backendIds.end()));
-    BOOST_TEST((backendIds.find(dynamicBackendId3) != backendIds.end()));
-    BOOST_TEST((registeredBackendIds.find(dynamicBackendId1) != registeredBackendIds.end()));
-    BOOST_TEST((registeredBackendIds.find(dynamicBackendId2) != registeredBackendIds.end()));
-    BOOST_TEST((registeredBackendIds.find(dynamicBackendId3) != registeredBackendIds.end()));
+    CHECK(backendIds.size() == 3);
+    CHECK((backendIds.find(dynamicBackendId1) != backendIds.end()));
+    CHECK((backendIds.find(dynamicBackendId2) != backendIds.end()));
+    CHECK((backendIds.find(dynamicBackendId3) != backendIds.end()));
+    CHECK((registeredBackendIds.find(dynamicBackendId1) != registeredBackendIds.end()));
+    CHECK((registeredBackendIds.find(dynamicBackendId2) != registeredBackendIds.end()));
+    CHECK((registeredBackendIds.find(dynamicBackendId3) != registeredBackendIds.end()));
 
     for (size_t i = 0; i < dynamicBackends.size(); i++)
     {
         BackendId dynamicBackendId = dynamicBackends[i]->GetBackendId();
 
         auto dynamicBackendFactoryFunction = backendRegistry.GetFactory(dynamicBackendId);
-        BOOST_TEST((dynamicBackendFactoryFunction != nullptr));
+        CHECK((dynamicBackendFactoryFunction != nullptr));
 
         IBackendInternalUniquePtr dynamicBackend = dynamicBackendFactoryFunction();
-        BOOST_TEST((dynamicBackend != nullptr));
-        BOOST_TEST((dynamicBackend->GetId() == dynamicBackendId));
+        CHECK((dynamicBackend != nullptr));
+        CHECK((dynamicBackend->GetId() == dynamicBackendId));
     }
 }
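
Several of the converted tests assert inside a for loop, as above. When such an assertion fails, the report shows the failing values but not which iteration produced them; doctest's CAPTURE (or INFO) attaches named variables to any assertion that fails in the enclosing scope. A small sketch with made-up data:

    #include <doctest/doctest.h>

    #include <string>
    #include <vector>

    TEST_CASE("CaptureLoopIndexOnFailure")
    {
        std::vector<std::string> backendIds{ "TestValid2", "TestValid3", "TestValid5" }; // made-up data

        for (size_t i = 0; i < backendIds.size(); i++)
        {
            CAPTURE(i);              // reported alongside any failing assertion below
            CAPTURE(backendIds[i]);
            CHECK(!backendIds[i].empty());
        }
    }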
 
@@ -1076,11 +1073,11 @@
     std::string testDynamicBackendsSubDir7 = GetTestSubDirectory(g_TestDynamicBackendsSubDir7);
     std::string testDynamicBackendsSubDir8 = GetTestSubDirectory(g_TestDynamicBackendsSubDir8);
     std::string testDynamicBackendsSubDir9 = GetTestSubDirectory(g_TestDynamicBackendsSubDir9);
-    BOOST_CHECK(exists(testDynamicBackendsSubDir5));
-    BOOST_CHECK(exists(testDynamicBackendsSubDir6));
-    BOOST_CHECK(exists(testDynamicBackendsSubDir7));
-    BOOST_CHECK(!exists(testDynamicBackendsSubDir8));
-    BOOST_CHECK(exists(testDynamicBackendsSubDir9));
+    CHECK(exists(testDynamicBackendsSubDir5));
+    CHECK(exists(testDynamicBackendsSubDir6));
+    CHECK(exists(testDynamicBackendsSubDir7));
+    CHECK(!exists(testDynamicBackendsSubDir8));
+    CHECK(exists(testDynamicBackendsSubDir9));
 
     std::string testValidBackend2FilePath    = GetTestFilePath(testDynamicBackendsSubDir5, g_TestValidBackend2FileName);
     std::string testValidBackend3FilePath    = GetTestFilePath(testDynamicBackendsSubDir5, g_TestValidBackend3FileName);
@@ -1095,15 +1092,15 @@
                                                                g_TestInvalidBackend10FileName);
     std::string testInvalidBackend11FilePath = GetTestFilePath(testDynamicBackendsSubDir9,
                                                                g_TestInvalidBackend11FileName);
-    BOOST_CHECK(exists(testValidBackend2FilePath));
-    BOOST_CHECK(exists(testValidBackend3FilePath));
-    BOOST_CHECK(exists(testValidBackend2DupFilePath));
-    BOOST_CHECK(exists(testValidBackend4FilePath));
-    BOOST_CHECK(exists(testValidBackend5FilePath));
-    BOOST_CHECK(exists(testInvalidBackend8FilePath));
-    BOOST_CHECK(exists(testInvalidBackend9FilePath));
-    BOOST_CHECK(exists(testInvalidBackend10FilePath));
-    BOOST_CHECK(exists(testInvalidBackend11FilePath));
+    CHECK(exists(testValidBackend2FilePath));
+    CHECK(exists(testValidBackend3FilePath));
+    CHECK(exists(testValidBackend2DupFilePath));
+    CHECK(exists(testValidBackend4FilePath));
+    CHECK(exists(testValidBackend5FilePath));
+    CHECK(exists(testInvalidBackend8FilePath));
+    CHECK(exists(testInvalidBackend9FilePath));
+    CHECK(exists(testInvalidBackend10FilePath));
+    CHECK(exists(testInvalidBackend11FilePath));
 
     std::vector<std::string> sharedObjects
     {
@@ -1120,14 +1117,14 @@
     };
     std::vector<DynamicBackendPtr> dynamicBackends = TestDynamicBackendUtils::CreateDynamicBackends(sharedObjects);
 
-    BOOST_TEST(dynamicBackends.size() == 7);
-    BOOST_TEST((dynamicBackends[0] != nullptr));
-    BOOST_TEST((dynamicBackends[1] != nullptr));
-    BOOST_TEST((dynamicBackends[2] != nullptr));
-    BOOST_TEST((dynamicBackends[3] != nullptr));
-    BOOST_TEST((dynamicBackends[4] != nullptr));
-    BOOST_TEST((dynamicBackends[5] != nullptr));
-    BOOST_TEST((dynamicBackends[6] != nullptr));
+    CHECK(dynamicBackends.size() == 7);
+    CHECK((dynamicBackends[0] != nullptr));
+    CHECK((dynamicBackends[1] != nullptr));
+    CHECK((dynamicBackends[2] != nullptr));
+    CHECK((dynamicBackends[3] != nullptr));
+    CHECK((dynamicBackends[4] != nullptr));
+    CHECK((dynamicBackends[5] != nullptr));
+    CHECK((dynamicBackends[6] != nullptr));
 
     BackendId dynamicBackendId1 = dynamicBackends[0]->GetBackendId();
     BackendId dynamicBackendId2 = dynamicBackends[1]->GetBackendId();
@@ -1136,23 +1133,23 @@
     BackendId dynamicBackendId5 = dynamicBackends[4]->GetBackendId();
     BackendId dynamicBackendId6 = dynamicBackends[5]->GetBackendId();
     BackendId dynamicBackendId7 = dynamicBackends[6]->GetBackendId();
-    BOOST_TEST((dynamicBackendId1 == "TestValid2"));
-    BOOST_TEST((dynamicBackendId2 == "TestValid3"));
-    BOOST_TEST((dynamicBackendId3 == "TestValid2")); // From duplicate Arm_TestValid2_backend.so
-    BOOST_TEST((dynamicBackendId4 == "TestValid2")); // From Arm_TestValid4_backend.so
-    BOOST_TEST((dynamicBackendId5 == "TestValid5"));
-    BOOST_TEST((dynamicBackendId6 == ""));
-    BOOST_TEST((dynamicBackendId7 == "Unknown"));
+    CHECK((dynamicBackendId1 == "TestValid2"));
+    CHECK((dynamicBackendId2 == "TestValid3"));
+    CHECK((dynamicBackendId3 == "TestValid2")); // From duplicate Arm_TestValid2_backend.so
+    CHECK((dynamicBackendId4 == "TestValid2")); // From Arm_TestValid4_backend.so
+    CHECK((dynamicBackendId5 == "TestValid5"));
+    CHECK((dynamicBackendId6 == ""));
+    CHECK((dynamicBackendId7 == "Unknown"));
 
     for (size_t i = 0; i < dynamicBackends.size(); i++)
     {
         BackendVersion dynamicBackendVersion = dynamicBackends[i]->GetBackendVersion();
-        BOOST_TEST(TestDynamicBackendUtils::IsBackendCompatible(dynamicBackendVersion));
+        CHECK(TestDynamicBackendUtils::IsBackendCompatible(dynamicBackendVersion));
     }
 
     // Dummy registry used for testing
     BackendRegistry backendRegistry;
-    BOOST_TEST(backendRegistry.Size() == 0);
+    CHECK(backendRegistry.Size() == 0);
 
     std::vector<BackendId> expectedRegisteredbackendIds
     {
@@ -1163,22 +1160,22 @@
 
     BackendIdSet registeredBackendIds = TestDynamicBackendUtils::RegisterDynamicBackendsImplTest(backendRegistry,
                                                                                                  dynamicBackends);
-    BOOST_TEST(backendRegistry.Size() == expectedRegisteredbackendIds.size());
-    BOOST_TEST(registeredBackendIds.size() == expectedRegisteredbackendIds.size());
+    CHECK(backendRegistry.Size() == expectedRegisteredbackendIds.size());
+    CHECK(registeredBackendIds.size() == expectedRegisteredbackendIds.size());
 
     BackendIdSet backendIds = backendRegistry.GetBackendIds();
-    BOOST_TEST(backendIds.size() == expectedRegisteredbackendIds.size());
+    CHECK(backendIds.size() == expectedRegisteredbackendIds.size());
     for (const BackendId& expectedRegisteredbackendId : expectedRegisteredbackendIds)
     {
-        BOOST_TEST((backendIds.find(expectedRegisteredbackendId) != backendIds.end()));
-        BOOST_TEST((registeredBackendIds.find(expectedRegisteredbackendId) != registeredBackendIds.end()));
+        CHECK((backendIds.find(expectedRegisteredbackendId) != backendIds.end()));
+        CHECK((registeredBackendIds.find(expectedRegisteredbackendId) != registeredBackendIds.end()));
 
         auto dynamicBackendFactoryFunction = backendRegistry.GetFactory(expectedRegisteredbackendId);
-        BOOST_TEST((dynamicBackendFactoryFunction != nullptr));
+        CHECK((dynamicBackendFactoryFunction != nullptr));
 
         IBackendInternalUniquePtr dynamicBackend = dynamicBackendFactoryFunction();
-        BOOST_TEST((dynamicBackend != nullptr));
-        BOOST_TEST((dynamicBackend->GetId() == expectedRegisteredbackendId));
+        CHECK((dynamicBackend != nullptr));
+        CHECK((dynamicBackend->GetId() == expectedRegisteredbackendId));
     }
 }
 #endif
@@ -1200,14 +1197,14 @@
     // Arm_TestInvalid11_backend.so -> not valid (invalid backend id)
 
     std::string testDynamicBackendsSubDir9 = GetTestSubDirectory(g_TestDynamicBackendsSubDir9);
-    BOOST_CHECK(exists(testDynamicBackendsSubDir9));
+    CHECK(exists(testDynamicBackendsSubDir9));
 
     std::string testInvalidBackend10FilePath = GetTestFilePath(testDynamicBackendsSubDir9,
                                                                g_TestInvalidBackend10FileName);
     std::string testInvalidBackend11FilePath = GetTestFilePath(testDynamicBackendsSubDir9,
                                                                g_TestInvalidBackend11FileName);
-    BOOST_CHECK(exists(testInvalidBackend10FilePath));
-    BOOST_CHECK(exists(testInvalidBackend11FilePath));
+    CHECK(exists(testInvalidBackend10FilePath));
+    CHECK(exists(testInvalidBackend11FilePath));
 
     std::vector<std::string> sharedObjects
     {
@@ -1217,30 +1214,30 @@
     };
     std::vector<DynamicBackendPtr> dynamicBackends = TestDynamicBackendUtils::CreateDynamicBackends(sharedObjects);
 
-    BOOST_TEST(dynamicBackends.size() == 2);
-    BOOST_TEST((dynamicBackends[0] != nullptr));
-    BOOST_TEST((dynamicBackends[1] != nullptr));
+    CHECK(dynamicBackends.size() == 2);
+    CHECK((dynamicBackends[0] != nullptr));
+    CHECK((dynamicBackends[1] != nullptr));
 
     BackendId dynamicBackendId1 = dynamicBackends[0]->GetBackendId();
     BackendId dynamicBackendId2 = dynamicBackends[1]->GetBackendId();
-    BOOST_TEST((dynamicBackendId1 == ""));
-    BOOST_TEST((dynamicBackendId2 == "Unknown"));
+    CHECK((dynamicBackendId1 == ""));
+    CHECK((dynamicBackendId2 == "Unknown"));
 
     for (size_t i = 0; i < dynamicBackends.size(); i++)
     {
         BackendVersion dynamicBackendVersion = dynamicBackends[i]->GetBackendVersion();
-        BOOST_TEST(TestDynamicBackendUtils::IsBackendCompatible(dynamicBackendVersion));
+        CHECK(TestDynamicBackendUtils::IsBackendCompatible(dynamicBackendVersion));
     }
 
     // Dummy registry used for testing
     BackendRegistry backendRegistry;
-    BOOST_TEST(backendRegistry.Size() == 0);
+    CHECK(backendRegistry.Size() == 0);
 
     // Check that no dynamic backend got registered
     BackendIdSet registeredBackendIds = TestDynamicBackendUtils::RegisterDynamicBackendsImplTest(backendRegistry,
                                                                                                  dynamicBackends);
-    BOOST_TEST(backendRegistry.Size() == 0);
-    BOOST_TEST(registeredBackendIds.empty());
+    CHECK(backendRegistry.Size() == 0);
+    CHECK(registeredBackendIds.empty());
 }
 
 #if !defined(ARMNN_DYNAMIC_BACKEND_ENABLED)
@@ -1253,16 +1250,16 @@
     TestBackendRegistry testBackendRegistry;
 
     const BackendRegistry& backendRegistry = BackendRegistryInstance();
-    BOOST_TEST(backendRegistry.Size() == 0);
+    CHECK(backendRegistry.Size() == 0);
 
     IRuntime::CreationOptions creationOptions;
     IRuntimePtr runtime = IRuntime::Create(creationOptions);
 
     const DeviceSpec& deviceSpec = *PolymorphicDowncast<const DeviceSpec*>(&runtime->GetDeviceSpec());
     BackendIdSet supportedBackendIds = deviceSpec.GetSupportedBackends();
-    BOOST_TEST(supportedBackendIds.empty());
+    CHECK(supportedBackendIds.empty());
 
-    BOOST_TEST(backendRegistry.Size() == 0);
+    CHECK(backendRegistry.Size() == 0);
 }
 
 #endif
@@ -1277,7 +1274,7 @@
 
     // This directory contains valid and invalid backends
     std::string testDynamicBackendsSubDir5 = GetTestSubDirectory(g_TestDynamicBackendsSubDir5);
-    BOOST_CHECK(exists(testDynamicBackendsSubDir5));
+    CHECK(exists(testDynamicBackendsSubDir5));
 
     // Using the path override in CreationOptions to load some test dynamic backends
     IRuntime::CreationOptions creationOptions;
@@ -1291,20 +1288,20 @@
     };
 
     const BackendRegistry& backendRegistry = BackendRegistryInstance();
-    BOOST_TEST(backendRegistry.Size() == expectedRegisteredbackendIds.size());
+    CHECK(backendRegistry.Size() == expectedRegisteredbackendIds.size());
 
     BackendIdSet backendIds = backendRegistry.GetBackendIds();
     for (const BackendId& expectedRegisteredbackendId : expectedRegisteredbackendIds)
     {
-        BOOST_TEST((backendIds.find(expectedRegisteredbackendId) != backendIds.end()));
+        CHECK((backendIds.find(expectedRegisteredbackendId) != backendIds.end()));
     }
 
     const DeviceSpec& deviceSpec = *PolymorphicDowncast<const DeviceSpec*>(&runtime->GetDeviceSpec());
     BackendIdSet supportedBackendIds = deviceSpec.GetSupportedBackends();
-    BOOST_TEST(supportedBackendIds.size() == expectedRegisteredbackendIds.size());
+    CHECK(supportedBackendIds.size() == expectedRegisteredbackendIds.size());
     for (const BackendId& expectedRegisteredbackendId : expectedRegisteredbackendIds)
     {
-        BOOST_TEST((supportedBackendIds.find(expectedRegisteredbackendId) != supportedBackendIds.end()));
+        CHECK((supportedBackendIds.find(expectedRegisteredbackendId) != supportedBackendIds.end()));
     }
 }
 
@@ -1318,7 +1315,7 @@
 
     // This directory contains valid, invalid and duplicate backends
     std::string testDynamicBackendsSubDir6 = GetTestSubDirectory(g_TestDynamicBackendsSubDir6);
-    BOOST_CHECK(exists(testDynamicBackendsSubDir6));
+    CHECK(exists(testDynamicBackendsSubDir6));
 
     // Using the path override in CreationOptions to load some test dynamic backends
     IRuntime::CreationOptions creationOptions;
@@ -1332,20 +1329,20 @@
     };
 
     const BackendRegistry& backendRegistry = BackendRegistryInstance();
-    BOOST_TEST(backendRegistry.Size() == expectedRegisteredbackendIds.size());
+    CHECK(backendRegistry.Size() == expectedRegisteredbackendIds.size());
 
     BackendIdSet backendIds = backendRegistry.GetBackendIds();
     for (const BackendId& expectedRegisteredbackendId : expectedRegisteredbackendIds)
     {
-        BOOST_TEST((backendIds.find(expectedRegisteredbackendId) != backendIds.end()));
+        CHECK((backendIds.find(expectedRegisteredbackendId) != backendIds.end()));
     }
 
     const DeviceSpec& deviceSpec = *PolymorphicDowncast<const DeviceSpec*>(&runtime->GetDeviceSpec());
     BackendIdSet supportedBackendIds = deviceSpec.GetSupportedBackends();
-    BOOST_TEST(supportedBackendIds.size() == expectedRegisteredbackendIds.size());
+    CHECK(supportedBackendIds.size() == expectedRegisteredbackendIds.size());
     for (const BackendId& expectedRegisteredbackendId : expectedRegisteredbackendIds)
     {
-        BOOST_TEST((supportedBackendIds.find(expectedRegisteredbackendId) != supportedBackendIds.end()));
+        CHECK((supportedBackendIds.find(expectedRegisteredbackendId) != supportedBackendIds.end()));
     }
 }
 
@@ -1359,7 +1356,7 @@
 
     // This directory contains only invalid backends
     std::string testDynamicBackendsSubDir9 = GetTestSubDirectory(g_TestDynamicBackendsSubDir9);
-    BOOST_CHECK(exists(testDynamicBackendsSubDir9));
+    CHECK(exists(testDynamicBackendsSubDir9));
 
     // Using the path override in CreationOptions to load some test dynamic backends
     IRuntime::CreationOptions creationOptions;
@@ -1367,11 +1364,11 @@
     IRuntimePtr runtime = IRuntime::Create(creationOptions);
 
     const BackendRegistry& backendRegistry = BackendRegistryInstance();
-    BOOST_TEST(backendRegistry.Size() == 0);
+    CHECK(backendRegistry.Size() == 0);
 
     const DeviceSpec& deviceSpec = *PolymorphicDowncast<const DeviceSpec*>(&runtime->GetDeviceSpec());
     BackendIdSet supportedBackendIds = deviceSpec.GetSupportedBackends();
-    BOOST_TEST(supportedBackendIds.empty());
+    CHECK(supportedBackendIds.empty());
 }
 
 void RuntimeInvalidOverridePathTestImpl()
@@ -1387,11 +1384,11 @@
     IRuntimePtr runtime = IRuntime::Create(creationOptions);
 
     const BackendRegistry& backendRegistry = BackendRegistryInstance();
-    BOOST_TEST(backendRegistry.Size() == 0);
+    CHECK(backendRegistry.Size() == 0);
 
     const DeviceSpec& deviceSpec = *PolymorphicDowncast<const DeviceSpec*>(&runtime->GetDeviceSpec());
     BackendIdSet supportedBackendIds = deviceSpec.GetSupportedBackends();
-    BOOST_TEST(supportedBackendIds.empty());
+    CHECK(supportedBackendIds.empty());
 }
 
 #if defined(ARMNNREF_ENABLED)
@@ -1410,12 +1407,12 @@
     std::string dynamicBackendsBaseDir = GetDynamicBackendsBasePath();
     std::string referenceDynamicBackendSubDir = GetTestSubDirectory(dynamicBackendsBaseDir,
                                                                     g_ReferenceDynamicBackendSubDir);
-    BOOST_CHECK(exists(referenceDynamicBackendSubDir));
+    CHECK(exists(referenceDynamicBackendSubDir));
 
     // Check that the reference dynamic backend file exists
     std::string referenceBackendFilePath = GetTestFilePath(referenceDynamicBackendSubDir,
                                                            g_ReferenceBackendFileName);
-    BOOST_CHECK(exists(referenceBackendFilePath));
+    CHECK(exists(referenceBackendFilePath));
 
     // Using the path override in CreationOptions to load the reference dynamic backend
     IRuntime::CreationOptions creationOptions;
@@ -1423,28 +1420,28 @@
     IRuntimePtr runtime = IRuntime::Create(creationOptions);
 
     const BackendRegistry& backendRegistry = BackendRegistryInstance();
-    BOOST_TEST(backendRegistry.Size() == 1);
+    CHECK(backendRegistry.Size() == 1);
 
     BackendIdSet backendIds = backendRegistry.GetBackendIds();
-    BOOST_TEST((backendIds.find("CpuRef") != backendIds.end()));
+    CHECK((backendIds.find("CpuRef") != backendIds.end()));
 
     const DeviceSpec& deviceSpec = *PolymorphicDowncast<const DeviceSpec*>(&runtime->GetDeviceSpec());
     BackendIdSet supportedBackendIds = deviceSpec.GetSupportedBackends();
-    BOOST_TEST(supportedBackendIds.size() == 1);
-    BOOST_TEST((supportedBackendIds.find("CpuRef") != supportedBackendIds.end()));
+    CHECK(supportedBackendIds.size() == 1);
+    CHECK((supportedBackendIds.find("CpuRef") != supportedBackendIds.end()));
 
     // Get the factory function
     auto referenceDynamicBackendFactoryFunction = backendRegistry.GetFactory("CpuRef");
-    BOOST_TEST((referenceDynamicBackendFactoryFunction != nullptr));
+    CHECK((referenceDynamicBackendFactoryFunction != nullptr));
 
     // Use the factory function to create an instance of the reference backend
     IBackendInternalUniquePtr referenceDynamicBackend = referenceDynamicBackendFactoryFunction();
-    BOOST_TEST((referenceDynamicBackend != nullptr));
-    BOOST_TEST((referenceDynamicBackend->GetId() == "CpuRef"));
+    CHECK((referenceDynamicBackend != nullptr));
+    CHECK((referenceDynamicBackend->GetId() == "CpuRef"));
 
     // Test the backend instance by querying the layer support
     IBackendInternal::ILayerSupportSharedPtr referenceLayerSupport = referenceDynamicBackend->GetLayerSupport();
-    BOOST_TEST((referenceLayerSupport != nullptr));
+    CHECK((referenceLayerSupport != nullptr));
 
     TensorShape inputShape {  1, 16, 16, 16 };
     TensorShape outputShape{  1, 16, 16, 16 };
@@ -1459,11 +1456,11 @@
                                                             convolution2dDescriptor,
                                                             weightInfo,
                                                             EmptyOptional());
-    BOOST_TEST(referenceConvolution2dSupported);
+    CHECK(referenceConvolution2dSupported);
 
     // Test the backend instance by creating a workload
     IBackendInternal::IWorkloadFactoryPtr referenceWorkloadFactory = referenceDynamicBackend->CreateWorkloadFactory();
-    BOOST_TEST((referenceWorkloadFactory != nullptr));
+    CHECK((referenceWorkloadFactory != nullptr));
 
     // Create dummy settings for the workload
     Convolution2dQueueDescriptor convolution2dQueueDescriptor;
@@ -1478,8 +1475,8 @@
 
     // Create a convolution workload with the dummy settings
     auto workload = referenceWorkloadFactory->CreateConvolution2d(convolution2dQueueDescriptor, workloadInfo);
-    BOOST_TEST((workload != nullptr));
-    BOOST_TEST(workload.get() == PolymorphicDowncast<RefConvolution2dWorkload*>(workload.get()));
+    CHECK((workload != nullptr));
+    CHECK(workload.get() == PolymorphicDowncast<RefConvolution2dWorkload*>(workload.get()));
 }
 
 #endif
@@ -1498,7 +1495,7 @@
                               "Ensure a DYNAMIC_BACKEND_PATHS was set at compile time to the location of "
                               "libArm_SampleDynamic_backend.so. "
                               "To disable this test recompile with: -DSAMPLE_DYNAMIC_BACKEND_ENABLED=0";
-        BOOST_FAIL(message);
+        FAIL(message);
     }
 }
 
@@ -1509,25 +1506,25 @@
     IRuntime::CreationOptions creationOptions;
     IRuntimePtr runtime = IRuntime::Create(creationOptions);
     const BackendRegistry& backendRegistry = BackendRegistryInstance();
-    BOOST_TEST(backendRegistry.Size() >= 1);
+    CHECK(backendRegistry.Size() >= 1);
     CheckSampleDynamicBackendLoaded();
     const DeviceSpec& deviceSpec = *PolymorphicDowncast<const DeviceSpec*>(&runtime->GetDeviceSpec());
     BackendIdSet supportedBackendIds = deviceSpec.GetSupportedBackends();
-    BOOST_TEST(supportedBackendIds.size()>= 1);
-    BOOST_TEST((supportedBackendIds.find("SampleDynamic") != supportedBackendIds.end()));
+    CHECK(supportedBackendIds.size() >= 1);
+    CHECK((supportedBackendIds.find("SampleDynamic") != supportedBackendIds.end()));
 
     // Get the factory function
     auto sampleDynamicBackendFactoryFunction = backendRegistry.GetFactory("SampleDynamic");
-    BOOST_TEST((sampleDynamicBackendFactoryFunction != nullptr));
+    CHECK((sampleDynamicBackendFactoryFunction != nullptr));
 
     // Use the factory function to create an instance of the dynamic backend
     IBackendInternalUniquePtr sampleDynamicBackend = sampleDynamicBackendFactoryFunction();
-    BOOST_TEST((sampleDynamicBackend != nullptr));
-    BOOST_TEST((sampleDynamicBackend->GetId() == "SampleDynamic"));
+    CHECK((sampleDynamicBackend != nullptr));
+    CHECK((sampleDynamicBackend->GetId() == "SampleDynamic"));
 
     // Test the backend instance by querying the layer support
     IBackendInternal::ILayerSupportSharedPtr sampleLayerSupport = sampleDynamicBackend->GetLayerSupport();
-    BOOST_TEST((sampleLayerSupport != nullptr));
+    CHECK((sampleLayerSupport != nullptr));
 
     TensorShape inputShape {  1, 16, 16, 16 };
     TensorShape outputShape{  1, 16, 16, 16 };
@@ -1542,11 +1539,11 @@
                                                          convolution2dDescriptor,
                                                          weightInfo,
                                                          EmptyOptional());
-    BOOST_TEST(!sampleConvolution2dSupported);
+    CHECK(!sampleConvolution2dSupported);
 
     // Test the backend instance by creating a workload
     IBackendInternal::IWorkloadFactoryPtr sampleWorkloadFactory = sampleDynamicBackend->CreateWorkloadFactory();
-    BOOST_TEST((sampleWorkloadFactory != nullptr));
+    CHECK((sampleWorkloadFactory != nullptr));
 
     // Create dummy settings for the workload
     AdditionQueueDescriptor additionQueueDescriptor;
@@ -1558,7 +1555,7 @@
 
     // Create an addition workload
     auto workload = sampleWorkloadFactory->CreateAddition(additionQueueDescriptor, workloadInfo);
-    BOOST_TEST((workload != nullptr));
+    CHECK((workload != nullptr));
 }
 
 void SampleDynamicBackendEndToEndTestImpl()
@@ -1611,6 +1608,6 @@
     runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
 
     // Checks the results.
-    BOOST_TEST(outputData == expectedOutputData);
+    CHECK(outputData == expectedOutputData);
 }
 #endif
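The hunks above map Boost's exception and failure assertions one-to-one onto doctest: BOOST_CHECK_NO_THROW becomes CHECK_NOTHROW, BOOST_CHECK_THROW becomes CHECK_THROWS_AS, and BOOST_FAIL becomes FAIL. As a minimal standalone sketch of those macros (illustrative only, not part of this change; assumes doctest 2.x):

    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
    #include <doctest/doctest.h>

    #include <stdexcept>

    TEST_CASE("ExceptionMacroMapping")
    {
        // CHECK_NOTHROW passes when the expression completes without throwing.
        CHECK_NOTHROW([]{ /* no-op */ }());

        // CHECK_THROWS_AS passes only when the expected exception type is thrown.
        const auto thrower = []() { throw std::runtime_error("boom"); };
        CHECK_THROWS_AS(thrower(), std::runtime_error);

        // FAIL records a hard failure and ends the current test case,
        // which matches how BOOST_FAIL is used above.
        const bool backendAvailable = true;
        if (!backendAvailable)
        {
            FAIL("required dynamic backend is not available");
        }
    }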
diff --git a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
index 5fedaa2..f958613 100644
--- a/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ElementwiseUnaryEndToEndTestImpl.hpp
@@ -12,7 +12,7 @@
 
 #include <armnn/utility/NumericCast.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <vector>
 
@@ -61,7 +61,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateElementwiseUnaryNetwork<ArmnnInType>(inputShape, outputShape, operation, qScale, qOffset);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     const std::vector<float> input({ 1, -1, 1, 1,  5, -5, 5, 5,
                                        -3, 3, 3, 3,  4, 4, -4, 4 });
diff --git a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
index a5fe8c6..2d268f8 100644
--- a/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/EndToEndTestImpl.hpp
@@ -14,7 +14,7 @@
 #include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <vector>
 
@@ -169,7 +169,7 @@
         std::vector<TOutput> out = outputStorage.at(it.first);
         for (unsigned int i = 0; i < out.size(); ++i)
         {
-            BOOST_CHECK_MESSAGE(Compare<ArmnnOType>(it.second[i], out[i], tolerance) == true,
+            CHECK_MESSAGE(Compare<ArmnnOType>(it.second[i], out[i], tolerance) == true,
                     "Actual output: " << out[i] << ". Expected output:" << it.second[i]);
 
         }
@@ -203,7 +203,7 @@
 
     // Optimize the network
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
-    BOOST_CHECK(optNet);
+    CHECK(optNet);
 
     // Loads it into the runtime.
     NetworkId netId;
@@ -238,7 +238,7 @@
     runtime->GetProfiler(netId)->EnableProfiling(true);
 
     // Do the inference and expect it to fail with a ImportMemoryException
-    BOOST_CHECK_THROW(runtime->EnqueueWorkload(netId, inputTensors, outputTensors), MemoryImportException);
+    CHECK_THROWS_AS(runtime->EnqueueWorkload(netId, inputTensors, outputTensors), MemoryImportException);
 }
 
 inline void ExportNonAlignedOutputPointerTest(std::vector<BackendId> backends)
@@ -268,7 +268,7 @@
 
     // Optimize the network
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
-    BOOST_CHECK(optNet);
+    CHECK(optNet);
 
     // Loads it into the runtime.
     NetworkId netId;
@@ -304,11 +304,11 @@
     if (backends[0] == Compute::CpuAcc)
     {
         // For CpuAcc the NeonTensorHandle will throw its own exception on misaligned memory
-        BOOST_CHECK_THROW(runtime->EnqueueWorkload(netId, inputTensors, outputTensors), MemoryImportException);
+        CHECK_THROWS_AS(runtime->EnqueueWorkload(netId, inputTensors, outputTensors), MemoryImportException);
     }
     else
     {
-        BOOST_CHECK_THROW(runtime->EnqueueWorkload(netId, inputTensors, outputTensors), MemoryExportException);
+        CHECK_THROWS_AS(runtime->EnqueueWorkload(netId, inputTensors, outputTensors), MemoryExportException);
     }
 }
 
@@ -339,7 +339,7 @@
 
     // Optimize the network
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
-    BOOST_CHECK(optNet);
+    CHECK(optNet);
 
     // Loads it into the runtime.
     NetworkId netId;
@@ -383,18 +383,18 @@
 
     // Contains ActivationWorkload
     std::size_t found = dump.find("ActivationWorkload");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contains SyncMemGeneric
     found = dump.find("SyncMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Does not contain CopyMemGeneric
     found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found == std::string::npos);
+    CHECK(found == std::string::npos);
 
     // Check output is as expected
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
 }
 
 inline void ImportOnlyWorkload(std::vector<BackendId> backends)
@@ -424,17 +424,17 @@
     // optimize the network
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
 
-    BOOST_TEST_CHECKPOINT("Load Network");
+    INFO("Load Network");
     // Load it into the runtime. It should pass.
     NetworkId netId;
     std::string ignoredErrorMessage;
 
     INetworkProperties networkProperties(false, MemorySource::Malloc, MemorySource::Undefined);
 
-    BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet),ignoredErrorMessage, networkProperties)
+    CHECK(runtime->LoadNetwork(netId, std::move(optNet),ignoredErrorMessage, networkProperties)
                == Status::Success);
 
-    BOOST_TEST_CHECKPOINT("Generate Data");
+    INFO("Generate Data");
     // Creates structures for input & output
     std::vector<float> inputData
     {
@@ -448,7 +448,7 @@
          1.0f, 4.0f, 9.0f, 16.0f
     };
 
-    BOOST_TEST_CHECKPOINT("Create Network");
+    INFO("Create Network");
     InputTensors inputTensors
     {
         {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())},
@@ -458,15 +458,14 @@
         {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
     };
 
-    BOOST_TEST_CHECKPOINT("Get Profiler");
-
+    INFO("Get Profiler");
     runtime->GetProfiler(netId)->EnableProfiling(true);
 
-    BOOST_TEST_CHECKPOINT("Run Inference");
+    INFO("Run Inference");
     // Do the inference
     runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
 
-    BOOST_TEST_CHECKPOINT("Print Profiler");
+    INFO("Print Profiler");
     // Retrieve the Profiler.Print() output to get the workload execution
     ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
     std::stringstream ss;
@@ -474,17 +473,17 @@
     std::string dump = ss.str();
 
     // Check there are no SyncMemGeneric workloads as we didn't export
-    BOOST_TEST_CHECKPOINT("Find SyncMemGeneric");
+    INFO("Find SyncMemGeneric");
     int count = SubStringCounter(dump, "SyncMemGeneric");
-    BOOST_TEST(count == 0);
+    CHECK(count == 0);
 
     // Should only be 1 CopyMemGeneric for the output as we imported
-    BOOST_TEST_CHECKPOINT("Find CopyMemGeneric");
+    INFO("Find CopyMemGeneric");
     count = SubStringCounter(dump, "CopyMemGeneric");
-    BOOST_TEST(count == 1);
+    CHECK(count == 1);
 
     // Check the output is correct
-    BOOST_CHECK_EQUAL_COLLECTIONS(outputData.begin(), outputData.end(), expectedOutput.begin(), expectedOutput.end());
+    CHECK(std::equal(outputData.begin(), outputData.end(), expectedOutput.begin(), expectedOutput.end()));
 }
 
 inline void ExportOnlyWorkload(std::vector<BackendId> backends)
@@ -514,15 +513,15 @@
     // optimize the network
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
 
-    BOOST_TEST_CHECKPOINT("Load Network");
+    INFO("Load Network");
     // Load it into the runtime. It should pass.
     NetworkId netId;
     std::string ignoredErrorMessage;
     INetworkProperties networkProperties(false, MemorySource::Undefined, MemorySource::Malloc);
-    BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet),ignoredErrorMessage, networkProperties)
+    CHECK(runtime->LoadNetwork(netId, std::move(optNet),ignoredErrorMessage, networkProperties)
                == Status::Success);
 
-    BOOST_TEST_CHECKPOINT("Generate Data");
+    INFO("Generate Data");
     // Creates structures for input & output
     std::vector<float> inputData
     {
@@ -536,7 +535,7 @@
          1.0f, 4.0f, 9.0f, 16.0f
     };
 
-    BOOST_TEST_CHECKPOINT("Create Network");
+    INFO("Create Network");
     InputTensors inputTensors
     {
         {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())},
@@ -546,15 +545,14 @@
         {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
     };
 
-    BOOST_TEST_CHECKPOINT("Get Profiler");
-
+    INFO("Get Profiler");
     runtime->GetProfiler(netId)->EnableProfiling(true);
 
-    BOOST_TEST_CHECKPOINT("Run Inference");
+    INFO("Run Inference");
     // Do the inference
     runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
 
-    BOOST_TEST_CHECKPOINT("Print Profiler");
+    INFO("Print Profiler");
     // Retrieve the Profiler.Print() output to get the workload execution
     ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
     std::stringstream ss;
@@ -562,17 +560,17 @@
     std::string dump = ss.str();
 
     // Check there is a SyncMemGeneric workload as we exported
-    BOOST_TEST_CHECKPOINT("Find SyncMemGeneric");
+    INFO("Find SyncMemGeneric");
     int count = SubStringCounter(dump, "SyncMemGeneric");
-    BOOST_TEST(count == 1);
+    CHECK(count == 1);
 
     // Should be 1 CopyMemGeneric for the output as we did not import
-    BOOST_TEST_CHECKPOINT("Find CopyMemGeneric");
+    INFO("Find CopyMemGeneric");
     count = SubStringCounter(dump, "CopyMemGeneric");
-    BOOST_TEST(count == 1);
+    CHECK(count == 1);
 
     // Check the output is correct
-    BOOST_CHECK_EQUAL_COLLECTIONS(outputData.begin(), outputData.end(), expectedOutput.begin(), expectedOutput.end());
+    CHECK(std::equal(outputData.begin(), outputData.end(), expectedOutput.begin(), expectedOutput.end()));
 }
 
 inline void ImportAndExportWorkload(std::vector<BackendId> backends)
@@ -601,17 +599,17 @@
 
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
 
-    BOOST_TEST_CHECKPOINT("Load Network");
+    INFO("Load Network");
     // Load it into the runtime. It should pass.
     NetworkId netId;
     std::string ignoredErrorMessage;
 
     INetworkProperties networkProperties(false, MemorySource::Malloc, MemorySource::Malloc);
 
-    BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet),ignoredErrorMessage, networkProperties)
+    CHECK(runtime->LoadNetwork(netId, std::move(optNet),ignoredErrorMessage, networkProperties)
                == Status::Success);
 
-    BOOST_TEST_CHECKPOINT("Generate Data");
+    INFO("Generate Data");
     // Creates structures for input & output
     std::vector<float> inputData
     {
@@ -625,7 +623,7 @@
          1.0f, 4.0f, 9.0f, 16.0f
     };
 
-    BOOST_TEST_CHECKPOINT("Create Network");
+    INFO("Create Network");
     InputTensors inputTensors
     {
         {0,armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData.data())},
@@ -635,15 +633,14 @@
         {0,armnn::Tensor(runtime->GetOutputTensorInfo(netId, 0), outputData.data())}
     };
 
-    BOOST_TEST_CHECKPOINT("Get Profiler");
-
+    INFO("Get Profiler");
     runtime->GetProfiler(netId)->EnableProfiling(true);
 
-    BOOST_TEST_CHECKPOINT("Run Inference");
+    INFO("Run Inference");
     // Do the inference
     runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
 
-    BOOST_TEST_CHECKPOINT("Print Profiler");
+    INFO("Print Profiler");
     // Retrieve the Profiler.Print() output to get the workload execution
     ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
     std::stringstream ss;
@@ -651,17 +648,17 @@
     std::string dump = ss.str();
 
     // Check there is a SyncMemGeneric workload as we exported
-    BOOST_TEST_CHECKPOINT("Find SyncMemGeneric");
+    INFO("Find SyncMemGeneric");
     int count = SubStringCounter(dump, "SyncMemGeneric");
-    BOOST_TEST(count == 1);
+    CHECK(count == 1);
 
     // Shouldn't be any CopyMemGeneric workloads
-    BOOST_TEST_CHECKPOINT("Find CopyMemGeneric");
+    INFO("Find CopyMemGeneric");
     count = SubStringCounter(dump, "CopyMemGeneric");
-    BOOST_TEST(count == 0);
+    CHECK(count == 0);
 
     // Check the output is correct
-    BOOST_CHECK_EQUAL_COLLECTIONS(outputData.begin(), outputData.end(), expectedOutput.begin(), expectedOutput.end());
+    CHECK(std::equal(outputData.begin(), outputData.end(), expectedOutput.begin(), expectedOutput.end()));
 }
 
 inline void ExportOutputWithSeveralOutputSlotConnectionsTest(std::vector<BackendId> backends)
@@ -753,19 +750,19 @@
         found = dump.find("ClActivationWorkload");
     }
 
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
     // Does not contain SyncMemGeneric
     found = dump.find("SyncMemGeneric");
-    BOOST_TEST(found == std::string::npos);
+    CHECK(found == std::string::npos);
     // Contains CopyMemGeneric
     found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Check that the outputs are correct
-    BOOST_CHECK_EQUAL_COLLECTIONS(outputData0.begin(), outputData0.end(),
-                                  expectedOutput.begin(), expectedOutput.end());
-    BOOST_CHECK_EQUAL_COLLECTIONS(outputData1.begin(), outputData1.end(),
-                                  expectedOutput.begin(), expectedOutput.end());
+    CHECK(std::equal(outputData0.begin(), outputData0.end(),
+                     expectedOutput.begin(), expectedOutput.end()));
+    CHECK(std::equal(outputData1.begin(), outputData1.end(),
+                     expectedOutput.begin(), expectedOutput.end()));
 }
 
 inline void StridedSliceInvalidSliceEndToEndTest(std::vector<BackendId> backends)
@@ -801,7 +798,7 @@
     stridedSlice->GetOutputSlot(0).SetTensorInfo(TensorInfo({ 3 }, DataType::Float32));
 
     // Attempt to optimize the network and check that the correct exception is thrown
-    BOOST_CHECK_THROW(Optimize(*net, backends, runtime->GetDeviceSpec()), armnn::LayerValidationException);
+    CHECK_THROWS_AS(Optimize(*net, backends, runtime->GetDeviceSpec()), armnn::LayerValidationException);
 }
 
 } // anonymous namespace
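doctest has no direct counterpart to BOOST_CHECK_EQUAL_COLLECTIONS, so the hunks above fall back to CHECK(std::equal(...)), which reports a single pass/fail rather than the first mismatching element, and BOOST_TEST_CHECKPOINT becomes INFO, whose message is only printed if a later assertion in the same scope fails. A minimal standalone sketch of both patterns (illustrative only; assumes C++14 for the four-iterator std::equal overload):

    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
    #include <doctest/doctest.h>

    #include <algorithm>
    #include <vector>

    TEST_CASE("CollectionAndCheckpointMapping")
    {
        const std::vector<float> expected { 1.0f, 4.0f, 9.0f, 16.0f };
        const std::vector<float> actual   { 1.0f, 4.0f, 9.0f, 16.0f };

        // Lazy checkpoint: shown in the output only if an assertion below fails.
        INFO("Compare results");

        // The four-iterator overload also fails on a length mismatch.
        CHECK(std::equal(actual.begin(), actual.end(),
                         expected.begin(), expected.end()));
    }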
diff --git a/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp
index 5d7601b..2a4ccb6 100644
--- a/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/FillEndToEndTestImpl.hpp
@@ -12,6 +12,8 @@
 
 #include <ResolveType.hpp>
 
+#include <doctest/doctest.h>
+
 namespace
 {
 
@@ -55,7 +57,7 @@
 
     armnn::INetworkPtr network = CreateFillNetwork(inputInfo, outputInfo, descriptor);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(network);
 
     std::map<int, std::vector<int32_t>> inputTensorData    = {{ 0, inputData }};
     std::map<int, std::vector<T>> expectedOutputTensorData = {{ 0, expectedOutputData }};
diff --git a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
index 5a618c3..923d6f3 100644
--- a/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/FullyConnectedEndToEndTestImpl.hpp
@@ -12,7 +12,7 @@
 
 #include <armnn/utility/NumericCast.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <vector>
 
@@ -83,7 +83,7 @@
                                                                             weightsTensorInfo,
                                                                             descriptor);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(network);
 
     std::map<int, std::vector<T>> inputTensorData    = {{ 0, inputData }, {1, weightsData}};
     std::map<int, std::vector<T>> expectedOutputTensorData = {{ 0, expectedOutputData }};
diff --git a/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp
index 82f9451..431ef31 100644
--- a/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/GatherEndToEndTestImpl.hpp
@@ -10,6 +10,8 @@
 #include <armnn/INetwork.hpp>
 #include <ResolveType.hpp>
 
+#include <doctest/doctest.h>
+
 namespace{
 
 armnn::INetworkPtr CreateGatherNetwork(const armnn::TensorInfo& paramsInfo,
@@ -59,7 +61,7 @@
     // Builds up the structure of the network
     armnn::INetworkPtr net = CreateGatherNetwork(paramsInfo, indicesInfo, outputInfo, indicesData);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     std::map<int, std::vector<T>> inputTensorData = {{ 0, paramsData }};
     std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput }};
@@ -115,8 +117,6 @@
     // Builds up the structure of the network
     armnn::INetworkPtr net = CreateGatherNetwork(paramsInfo, indicesInfo, outputInfo, indicesData);
 
-    BOOST_TEST_CHECKPOINT("create a network");
-
     std::map<int, std::vector<T>> inputTensorData = {{ 0, paramsData }};
     std::map<int, std::vector<T>> expectedOutputData = {{ 0, expectedOutput }};
 
diff --git a/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp
index a3dd88c..d758137 100644
--- a/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/InstanceNormalizationEndToEndTestImpl.cpp
@@ -16,7 +16,7 @@
 
 #include <test/TestUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 namespace
 {
@@ -82,7 +82,7 @@
                                                                             beta,
                                                                             eps);
 
-    BOOST_TEST_CHECKPOINT("Create a network");
+    CHECK(net);
 
     std::map<int, std::vector<float>> inputTensorData = { { 0, inputData } };
     std::map<int, std::vector<float>> expectedOutputTensorData = { { 0, expectedOutputData } };
diff --git a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
index 4240bb1..5a05ee1 100644
--- a/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
+++ b/src/backends/backendsCommon/test/IsLayerSupportedTestImpl.hpp
@@ -12,6 +12,8 @@
 
 #include <armnn/utility/IgnoreUnused.hpp>
 
+#include <doctest/doctest.h>
+
 namespace
 {
 armnn::Graph dummyGraph;
@@ -756,7 +758,7 @@
         try
         {
             bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() != nullptr;
-            BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
+            CHECK_MESSAGE(retVal, layerName << errorMsg);
             return retVal;
         }
         catch(const armnn::InvalidArgumentException& e)
@@ -768,13 +770,13 @@
         catch(const std::exception& e)
         {
             errorMsg = e.what();
-            BOOST_TEST_ERROR(layerName << ": " << errorMsg);
+            FAIL(layerName << ": " << errorMsg);
             return false;
         }
         catch(...)
         {
             errorMsg = "Unexpected error while testing support for ";
-            BOOST_TEST_ERROR(errorMsg << layerName);
+            FAIL(errorMsg << layerName);
             return false;
         }
     }
@@ -784,7 +786,7 @@
         try
         {
             bool retVal = LayerPolicy::MakeDummyWorkload(factory, numIn, numOut).get() == nullptr;
-            BOOST_CHECK_MESSAGE(retVal, layerName << errorMsg);
+            CHECK_MESSAGE(retVal, layerName << errorMsg);
             return retVal;
         }
         // These two exceptions are ok: For workloads that are partially supported, attempting to instantiate them
@@ -803,13 +805,13 @@
         catch(const std::exception& e)
         {
             errorMsg = e.what();
-            BOOST_TEST_ERROR(layerName << ": " << errorMsg);
+            FAIL(layerName << ": " << errorMsg);
             return false;
         }
         catch(...)
         {
             errorMsg = "Unexpected error while testing support for ";
-            BOOST_TEST_ERROR(errorMsg << layerName);
+            FAIL(errorMsg << layerName);
             return false;
         }
     }
@@ -871,7 +873,7 @@
     std::stringstream ss;
     ss << LayerPolicy::NameStr << " layer type mismatches expected layer type value.";
     bool v = Type == layer.m_Layer->GetType();
-    BOOST_CHECK_MESSAGE(v, ss.str());
+    CHECK_MESSAGE(v, ss.str());
     return v;
 }
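BOOST_CHECK_MESSAGE and BOOST_TEST_ERROR map onto CHECK_MESSAGE and FAIL above; both doctest macros accept a streamed (operator<<) message, as this file already relies on. Note that FAIL ends the current test case, whereas FAIL_CHECK would be the non-fatal form closer to BOOST_TEST_ERROR; here the difference is moot because each FAIL is followed by a return. A minimal standalone sketch (illustrative only):

    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
    #include <doctest/doctest.h>

    #include <string>

    TEST_CASE("StreamedMessageMapping")
    {
        const std::string layerName = "Convolution2d";
        const bool workloadCreated  = true;

        // Non-fatal check with a streamed message, like BOOST_CHECK_MESSAGE.
        CHECK_MESSAGE(workloadCreated, layerName << ": workload was not created");

        // Hard failure with a streamed message; ends this test case immediately.
        if (!workloadCreated)
        {
            FAIL(layerName << ": unexpected error while testing support");
        }
    }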
 
diff --git a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
index a2206f7..92c8e14 100644
--- a/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
+++ b/src/backends/backendsCommon/test/JsonPrinterTestImpl.cpp
@@ -12,11 +12,12 @@
 #include <armnn/IRuntime.hpp>
 #include <armnn/INetwork.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <sstream>
 #include <stack>
 #include <string>
+#include <algorithm>
 
 inline bool AreMatchingPair(const char opening, const char closing)
 {
@@ -67,7 +68,7 @@
             }
             catch (std::invalid_argument const&)
             {
-                BOOST_FAIL("Could not convert measurements to double: " + numberString);
+                FAIL("Could not convert measurements to double: " + numberString);
             }
 
             numberString.clear();
@@ -82,7 +83,7 @@
             }
             catch (std::invalid_argument const&)
             {
-                BOOST_FAIL("Could not convert measurements to double: " + numberString);
+                FAIL("Could not convert measurements to double: " + numberString);
             }
             numberString.clear();
         }
@@ -120,7 +121,7 @@
 {
     using namespace armnn;
 
-    BOOST_CHECK(!backends.empty());
+    CHECK(!backends.empty());
 
     ProfilerManager& profilerManager = armnn::ProfilerManager::GetInstance();
 
@@ -160,12 +161,12 @@
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec());
     if(!optNet)
     {
-        BOOST_FAIL("Error occurred during Optimization, Optimize() returned nullptr.");
+        FAIL("Error occurred during Optimization, Optimize() returned nullptr.");
     }
     // load it into the runtime
     NetworkId netId;
     auto error = runtime->LoadNetwork(netId, std::move(optNet));
-    BOOST_TEST(error == Status::Success);
+    CHECK(error == Status::Success);
 
     // create structures for input & output
     std::vector<uint8_t> inputData
@@ -202,7 +203,7 @@
 {
     // ensure all measurements are greater than zero
     std::vector<double> measurementsVector = ExtractMeasurements(result);
-    BOOST_CHECK(!measurementsVector.empty());
+    CHECK(!measurementsVector.empty());
 
     // check sections contain raw and unit tags
     // first ensure Parenthesis are balanced
@@ -219,12 +220,12 @@
                 sectionVector.erase(sectionVector.begin() + static_cast<int>(i));
             }
         }
-        BOOST_CHECK(!sectionVector.empty());
+        CHECK(!sectionVector.empty());
 
-        BOOST_CHECK(std::all_of(sectionVector.begin(), sectionVector.end(),
+        CHECK(std::all_of(sectionVector.begin(), sectionVector.end(),
                                 [](std::string i) { return (i.find("\"raw\":") != std::string::npos); }));
 
-        BOOST_CHECK(std::all_of(sectionVector.begin(), sectionVector.end(),
+        CHECK(std::all_of(sectionVector.begin(), sectionVector.end(),
                                 [](std::string i) { return (i.find("\"unit\":") != std::string::npos); }));
     }
 
@@ -235,11 +236,11 @@
     result.erase(std::remove_if (result.begin(),result.end(),
                                  [](char c) { return c == '\t'; }), result.end());
 
-    BOOST_CHECK(result.find("ArmNN") != std::string::npos);
-    BOOST_CHECK(result.find("inference_measurements") != std::string::npos);
+    CHECK(result.find("ArmNN") != std::string::npos);
+    CHECK(result.find("inference_measurements") != std::string::npos);
 
     // ensure no spare parenthesis present in print output
-    BOOST_CHECK(AreParenthesesMatching(result));
+    CHECK(AreParenthesesMatching(result));
 }
 
 void RunSoftmaxProfilerJsonPrinterTest(const std::vector<armnn::BackendId>& backends)
@@ -253,11 +254,11 @@
     const armnn::BackendId& firstBackend = backends.at(0);
     if (firstBackend == armnn::Compute::GpuAcc)
     {
-        BOOST_CHECK(result.find("OpenClKernelTimer/: softmax_layer_max_shift_exp_sum_quantized_serial GWS[,,]")
+        CHECK(result.find("OpenClKernelTimer/: softmax_layer_max_shift_exp_sum_quantized_serial GWS[,,]")
                     != std::string::npos);
     }
     else if (firstBackend == armnn::Compute::CpuAcc)
     {
-        BOOST_CHECK(result.find("NeonKernelTimer/: CpuLogitsDLogSoftmaxKernel_#") != std::string::npos);
+        CHECK(result.find("NeonKernelTimer/: CpuLogitsDLogSoftmaxKernel_#") != std::string::npos);
     }
 }
diff --git a/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp b/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
index 0ca4b0a..579be51 100644
--- a/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
+++ b/src/backends/backendsCommon/test/LayerReleaseConstantDataTest.cpp
@@ -10,7 +10,7 @@
 #include <backendsCommon/TensorHandle.hpp>
 #include <backendsCommon/WorkloadData.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <utility>
 
@@ -23,9 +23,9 @@
 // Checks weights and biases before the method called and after.
 /////////////////////////////////////////////////////////////////////////////////////////////
 
-BOOST_AUTO_TEST_SUITE(LayerReleaseConstantDataTest)
-
-BOOST_AUTO_TEST_CASE(ReleaseBatchNormalizationLayerConstantDataTest)
+TEST_SUITE("LayerReleaseConstantDataTest")
+{
+TEST_CASE("ReleaseBatchNormalizationLayerConstantDataTest")
 {
     Graph graph;
 
@@ -54,24 +54,24 @@
     Connect(layer, output, tensorInfo);
 
     // check the constants that they are not NULL
-    BOOST_CHECK(layer->m_Mean != nullptr);
-    BOOST_CHECK(layer->m_Variance != nullptr);
-    BOOST_CHECK(layer->m_Beta != nullptr);
-    BOOST_CHECK(layer->m_Gamma != nullptr);
+    CHECK(layer->m_Mean != nullptr);
+    CHECK(layer->m_Variance != nullptr);
+    CHECK(layer->m_Beta != nullptr);
+    CHECK(layer->m_Gamma != nullptr);
 
     // free up the constants..
     layer->ReleaseConstantData();
 
     // check the constants that they are NULL now
-    BOOST_CHECK(layer->m_Mean == nullptr);
-    BOOST_CHECK(layer->m_Variance == nullptr);
-    BOOST_CHECK(layer->m_Beta == nullptr);
-    BOOST_CHECK(layer->m_Gamma == nullptr);
+    CHECK(layer->m_Mean == nullptr);
+    CHECK(layer->m_Variance == nullptr);
+    CHECK(layer->m_Beta == nullptr);
+    CHECK(layer->m_Gamma == nullptr);
 
  }
 
 
- BOOST_AUTO_TEST_CASE(ReleaseConvolution2dLayerConstantDataTest)
+ TEST_CASE("ReleaseConvolution2dLayerConstantDataTest")
  {
      Graph graph;
 
@@ -104,18 +104,18 @@
      Connect(layer, output, TensorInfo({2, 2, 2, 10}, armnn::DataType::Float32));
 
      // check the constants that they are not NULL
-     BOOST_CHECK(layer->m_Weight != nullptr);
-     BOOST_CHECK(layer->m_Bias != nullptr);
+     CHECK(layer->m_Weight != nullptr);
+     CHECK(layer->m_Bias != nullptr);
 
      // free up the constants..
      layer->ReleaseConstantData();
 
      // check the constants that they are NULL now
-     BOOST_CHECK(layer->m_Weight == nullptr);
-     BOOST_CHECK(layer->m_Bias == nullptr);
+     CHECK(layer->m_Weight == nullptr);
+     CHECK(layer->m_Bias == nullptr);
 }
 
-BOOST_AUTO_TEST_CASE(ReleaseDepthwiseConvolution2dLayerConstantDataTest)
+TEST_CASE("ReleaseDepthwiseConvolution2dLayerConstantDataTest")
 {
     Graph graph;
 
@@ -145,18 +145,18 @@
     Connect(layer, output, TensorInfo({2, 9, 2, 10}, armnn::DataType::Float32));
 
     // check the constants that they are not NULL
-    BOOST_CHECK(layer->m_Weight != nullptr);
-    BOOST_CHECK(layer->m_Bias != nullptr);
+    CHECK(layer->m_Weight != nullptr);
+    CHECK(layer->m_Bias != nullptr);
 
     // free up the constants..
     layer->ReleaseConstantData();
 
     // check the constants that they are NULL now
-    BOOST_CHECK(layer->m_Weight == nullptr);
-    BOOST_CHECK(layer->m_Bias == nullptr);
+    CHECK(layer->m_Weight == nullptr);
+    CHECK(layer->m_Bias == nullptr);
 }
 
-BOOST_AUTO_TEST_CASE(ReleaseFullyConnectedLayerConstantDataTest)
+TEST_CASE("ReleaseFullyConnectedLayerConstantDataTest")
 {
     Graph graph;
 
@@ -186,16 +186,16 @@
     Connect(layer, output, TensorInfo({3, 7}, DataType::QAsymmU8, outputQScale));
 
     // check the constants that they are not NULL
-    BOOST_CHECK(layer->m_Weight != nullptr);
-    BOOST_CHECK(layer->m_Bias != nullptr);
+    CHECK(layer->m_Weight != nullptr);
+    CHECK(layer->m_Bias != nullptr);
 
     // free up the constants..
     layer->ReleaseConstantData();
 
     // check the constants that they are NULL now
-    BOOST_CHECK(layer->m_Weight == nullptr);
-    BOOST_CHECK(layer->m_Bias == nullptr);
+    CHECK(layer->m_Weight == nullptr);
+    CHECK(layer->m_Bias == nullptr);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
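Structurally, BOOST_AUTO_TEST_SUITE(Name) / BOOST_AUTO_TEST_SUITE_END() become doctest's TEST_SUITE("Name") { ... } block, which is why the converted files above now end with a bare closing brace. A minimal standalone sketch of that shape, echoing this file's not-null / release / null pattern (names are illustrative only):

    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
    #include <doctest/doctest.h>

    #include <memory>

    TEST_SUITE("ExampleReleaseConstantDataSuite")
    {

    TEST_CASE("ReleaseExampleConstantData")
    {
        // Stand-in for a layer's constant tensor handle.
        std::unique_ptr<int> weight = std::make_unique<int>(42);
        CHECK(weight != nullptr);

        // "Release" the constant data.
        weight.reset();
        CHECK(weight == nullptr);
    }

    } // closes TEST_SUITE, replacing BOOST_AUTO_TEST_SUITE_END()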
 
diff --git a/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp
index f1e6242..1f7f578 100644
--- a/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/LogSoftmaxEndToEndTestImpl.cpp
@@ -10,7 +10,7 @@
 
 #include <test/TestUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 namespace {
 
@@ -60,7 +60,7 @@
                                                                  beta,
                                                                  axis);
 
-    BOOST_TEST_CHECKPOINT("Create a network");
+    CHECK(net);
 
     std::map<int, std::vector<float>> inputTensorData = { {0, inputData} };
     std::map<int, std::vector<float>> expectedOutputTensorData = { {0, expectedOutputData} };
diff --git a/src/backends/backendsCommon/test/OptimizationViewsTests.cpp b/src/backends/backendsCommon/test/OptimizationViewsTests.cpp
index b472a03..246cb50 100644
--- a/src/backends/backendsCommon/test/OptimizationViewsTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizationViewsTests.cpp
@@ -14,8 +14,7 @@
 #include <SubgraphView.hpp>
 #include <SubgraphViewSelector.hpp>
 
-#include <boost/test/unit_test.hpp>
-
+#include <doctest/doctest.h>
 
 using namespace armnn;
 
@@ -28,31 +27,31 @@
         {
             case LayerType::Input:
                 ++m_inputLayerCount;
-                BOOST_TEST((layer->GetName() == std::string("inLayer0") ||
+                CHECK((layer->GetName() == std::string("inLayer0") ||
                             layer->GetName() == std::string("inLayer1")));
                 break;
             // The Addition layer should become a PreCompiled Layer after Optimisation
             case LayerType::PreCompiled:
                 ++m_addLayerCount;
-                BOOST_TEST(layer->GetName() == "pre-compiled");
+                CHECK(std::string(layer->GetName()) == "pre-compiled");
                 break;
             case LayerType::Output:
                 ++m_outputLayerCount;
-                BOOST_TEST(layer->GetName() == "outLayer");
+                CHECK(std::string(layer->GetName()) == "outLayer");
                 break;
             default:
                 //Fail for anything else
-                BOOST_TEST(false);
+                CHECK(false);
         }
     }
-    BOOST_TEST(m_inputLayerCount == 2);
-    BOOST_TEST(m_outputLayerCount == 1);
-    BOOST_TEST(m_addLayerCount == 1);
+    CHECK(m_inputLayerCount == 2);
+    CHECK(m_outputLayerCount == 1);
+    CHECK(m_addLayerCount == 1);
 }
 
-BOOST_AUTO_TEST_SUITE(OptimizationViewsTestSuite)
-
-BOOST_AUTO_TEST_CASE(OptimizedViewsSubgraphLayerCount)
+TEST_SUITE("OptimizationViewsTestSuite")
+{
+TEST_CASE("OptimizedViewsSubgraphLayerCount")
 {
     OptimizationViews view;
     // Construct a graph with 3 layers
@@ -117,10 +116,10 @@
             CreateOutputsFrom({convLayer2}),
             {convLayer1, convLayer2, substitutionpreCompiledLayer});
 
-    BOOST_CHECK(view.Validate(*originalSubgraph));
+    CHECK(view.Validate(*originalSubgraph));
 }
 
-BOOST_AUTO_TEST_CASE(OptimizedViewsSubgraphLayerCountFailValidate)
+TEST_CASE("OptimizedViewsSubgraphLayerCountFailValidate")
 {
     OptimizationViews view;
     // Construct a graph with 3 layers
@@ -180,10 +179,10 @@
                                    {convLayer1, convLayer2, substitutionpreCompiledLayer});
 
     // Validate should fail as convLayer1 is not counted
-    BOOST_CHECK(!view.Validate(*originalSubgraph));
+    CHECK(!view.Validate(*originalSubgraph));
 }
 
-BOOST_AUTO_TEST_CASE(OptimizeViewsValidateDeviceMockBackend)
+TEST_CASE("OptimizeViewsValidateDeviceMockBackend")
 {
     // build up the structure of the network
     armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -209,11 +208,11 @@
 
     std::vector<armnn::BackendId> backends = { MockBackend().GetIdStatic() };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
-    BOOST_CHECK(optNet);
+    CHECK(optNet);
 
     // Check the optimised graph
     armnn::Graph& graph = GetGraphForTesting(optNet.get());
     CheckLayers(graph);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
index f7ebf1a..6c76da6 100644
--- a/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizeSubgraphViewTests.cpp
@@ -12,8 +12,7 @@
 
 #include <armnn/BackendRegistry.hpp>
 
-#include <boost/test/unit_test.hpp>
-
+#include <doctest/doctest.h>
 #include <unordered_map>
 
 using namespace armnn;
@@ -64,7 +63,7 @@
                      LayerBindingId inputId = 0)
 {
     Layer* const inputLayer = graph.AddLayer<InputLayer>(inputId, layerName.c_str());
-    BOOST_TEST(inputLayer);
+    CHECK(inputLayer);
     inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
     return inputLayer;
 }
@@ -74,7 +73,7 @@
                       const std::string& layerName)
 {
     Layer* const outputLayer = graph.AddLayer<OutputLayer>(0, layerName.c_str());
-    BOOST_TEST(outputLayer);
+    CHECK(outputLayer);
     return outputLayer;
 }
 
@@ -88,7 +87,7 @@
                                         const TensorInfo& outputInfo)
 {
     Convolution2dLayer* const convLayer = graph.AddLayer<Convolution2dLayer>(convolutionDescriptor, layerName.c_str());
-    BOOST_TEST(convLayer);
+    CHECK(convLayer);
     SetWeightAndBias(convLayer, weightInfo, biasInfo);
     convLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
     layersInGraph.insert(std::make_pair(convLayer->GetName(), convLayer));
@@ -103,7 +102,7 @@
                                 const TensorInfo& outputInfo)
 {
     Pooling2dLayer* const poolingLayer = graph.AddLayer<Pooling2dLayer>(poolingDescriptor, layerName.c_str());
-    BOOST_TEST(poolingLayer);
+    CHECK(poolingLayer);
     poolingLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
     layersInGraph.insert(std::make_pair(poolingLayer->GetName(), poolingLayer));
     return poolingLayer;
@@ -116,7 +115,7 @@
                                  const TensorInfo& outputInfo)
 {
     AdditionLayer* const additionLayer = graph.AddLayer<AdditionLayer>(layerName.c_str());
-    BOOST_TEST(additionLayer);
+    CHECK(additionLayer);
     additionLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
     layersInGraph.insert(std::make_pair(additionLayer->GetName(), additionLayer));
     return additionLayer;
@@ -140,23 +139,23 @@
     const SubgraphView::OutputSlots& replacementSubgraphOutputSlots = replacementSubgraph.GetOutputSlots();
     const SubgraphView::Layers&      replacementSubgraphLayers      = replacementSubgraph.GetLayers();
 
-    BOOST_TEST(substitutableSubgraphInputSlots.size()  == expectedSubstitutableSubgraphSize.m_NumInputSlots);
-    BOOST_TEST(substitutableSubgraphOutputSlots.size() == expectedSubstitutableSubgraphSize.m_NumOutputSlots);
-    BOOST_TEST(substitutableSubgraphLayers.size()      == expectedSubstitutableSubgraphSize.m_NumLayers);
+    CHECK(substitutableSubgraphInputSlots.size()  == expectedSubstitutableSubgraphSize.m_NumInputSlots);
+    CHECK(substitutableSubgraphOutputSlots.size() == expectedSubstitutableSubgraphSize.m_NumOutputSlots);
+    CHECK(substitutableSubgraphLayers.size()      == expectedSubstitutableSubgraphSize.m_NumLayers);
 
-    BOOST_TEST(AreEqual(substitutableSubgraphInputSlots,  expectedSubstitutableInputSlots));
-    BOOST_TEST(AreEqual(substitutableSubgraphOutputSlots, expectedSubstitutableOutputSlots));
-    BOOST_TEST(AreEqual(substitutableSubgraphLayers,      expectedSubstitutableLayers));
+    CHECK(AreEqual(substitutableSubgraphInputSlots,  expectedSubstitutableInputSlots));
+    CHECK(AreEqual(substitutableSubgraphOutputSlots, expectedSubstitutableOutputSlots));
+    CHECK(AreEqual(substitutableSubgraphLayers,      expectedSubstitutableLayers));
 
-    BOOST_TEST(replacementSubgraphInputSlots.size()  == expectedReplacementSubgraphSize.m_NumInputSlots);
-    BOOST_TEST(replacementSubgraphOutputSlots.size() == expectedReplacementSubgraphSize.m_NumOutputSlots);
-    BOOST_TEST(replacementSubgraphLayers.size()      == expectedReplacementSubgraphSize.m_NumLayers);
+    CHECK(replacementSubgraphInputSlots.size()  == expectedReplacementSubgraphSize.m_NumInputSlots);
+    CHECK(replacementSubgraphOutputSlots.size() == expectedReplacementSubgraphSize.m_NumOutputSlots);
+    CHECK(replacementSubgraphLayers.size()      == expectedReplacementSubgraphSize.m_NumLayers);
 
-    BOOST_TEST(!AreEqual(replacementSubgraphInputSlots,  expectedSubstitutableInputSlots));
-    BOOST_TEST(!AreEqual(replacementSubgraphOutputSlots, expectedSubstitutableOutputSlots));
-    BOOST_TEST(!AreEqual(replacementSubgraphLayers,      expectedSubstitutableLayers));
+    CHECK(!AreEqual(replacementSubgraphInputSlots,  expectedSubstitutableInputSlots));
+    CHECK(!AreEqual(replacementSubgraphOutputSlots, expectedSubstitutableOutputSlots));
+    CHECK(!AreEqual(replacementSubgraphLayers,      expectedSubstitutableLayers));
 
-    BOOST_TEST(std::all_of(replacementSubgraphLayers.begin(),
+    CHECK(std::all_of(replacementSubgraphLayers.begin(),
                            replacementSubgraphLayers.end(),
                            [](const Layer* layer)
     {
@@ -175,13 +174,13 @@
     const SubgraphView::OutputSlots& failedSubgraphOutputSlots = failedSubgraph.GetOutputSlots();
     const SubgraphView::Layers&      failedSubgraphLayers      = failedSubgraph.GetLayers();
 
-    BOOST_TEST(failedSubgraphInputSlots.size()  == expectedFailedSubgraphSize.m_NumInputSlots);
-    BOOST_TEST(failedSubgraphOutputSlots.size() == expectedFailedSubgraphSize.m_NumOutputSlots);
-    BOOST_TEST(failedSubgraphLayers.size()      == expectedFailedSubgraphSize.m_NumLayers);
+    CHECK(failedSubgraphInputSlots.size()  == expectedFailedSubgraphSize.m_NumInputSlots);
+    CHECK(failedSubgraphOutputSlots.size() == expectedFailedSubgraphSize.m_NumOutputSlots);
+    CHECK(failedSubgraphLayers.size()      == expectedFailedSubgraphSize.m_NumLayers);
 
-    BOOST_TEST(AreEqual(failedSubgraphInputSlots,  expectedFailedInputSlots));
-    BOOST_TEST(AreEqual(failedSubgraphOutputSlots, expectedFailedOutputSlots));
-    BOOST_TEST(AreEqual(failedSubgraphLayers,      expectedFailedLayers));
+    CHECK(AreEqual(failedSubgraphInputSlots,  expectedFailedInputSlots));
+    CHECK(AreEqual(failedSubgraphOutputSlots, expectedFailedOutputSlots));
+    CHECK(AreEqual(failedSubgraphLayers,      expectedFailedLayers));
 }
 
 // Convenience function to check that the given untouched subgraph matches the specified expected values
@@ -195,13 +194,13 @@
     const SubgraphView::OutputSlots& untouchedSubgraphOutputSlots = untouchedSubgraph.GetOutputSlots();
     const SubgraphView::Layers&      untouchedSubgraphLayers      = untouchedSubgraph.GetLayers();
 
-    BOOST_TEST(untouchedSubgraphInputSlots.size()  == expectedUntouchedSubgraphSize.m_NumInputSlots);
-    BOOST_TEST(untouchedSubgraphOutputSlots.size() == expectedUntouchedSubgraphSize.m_NumOutputSlots);
-    BOOST_TEST(untouchedSubgraphLayers.size()      == expectedUntouchedSubgraphSize.m_NumLayers);
+    CHECK(untouchedSubgraphInputSlots.size()  == expectedUntouchedSubgraphSize.m_NumInputSlots);
+    CHECK(untouchedSubgraphOutputSlots.size() == expectedUntouchedSubgraphSize.m_NumOutputSlots);
+    CHECK(untouchedSubgraphLayers.size()      == expectedUntouchedSubgraphSize.m_NumLayers);
 
-    BOOST_TEST(AreEqual(untouchedSubgraphInputSlots,  expectedUntouchedInputSlots));
-    BOOST_TEST(AreEqual(untouchedSubgraphOutputSlots, expectedUntouchedOutputSlots));
-    BOOST_TEST(AreEqual(untouchedSubgraphLayers,      expectedUntouchedLayers));
+    CHECK(AreEqual(untouchedSubgraphInputSlots,  expectedUntouchedInputSlots));
+    CHECK(AreEqual(untouchedSubgraphOutputSlots, expectedUntouchedOutputSlots));
+    CHECK(AreEqual(untouchedSubgraphLayers,      expectedUntouchedLayers));
 }
 
 // Creates a subgraph containing only a single unsupported layer (only convolutions are unsupported by the mock backend)
@@ -551,28 +550,28 @@
 
     // Create an unsupported subgraph
     SubgraphView::SubgraphViewPtr subgraphPtr = BuildFullyUnsupportedSubgraph1(graph, layersInGraph);
-    BOOST_TEST((subgraphPtr != nullptr));
+    CHECK((subgraphPtr != nullptr));
 
     const SubgraphView::InputSlots&  subgraphInputSlots  = subgraphPtr->GetInputSlots();
     const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots();
     const SubgraphView::Layers&      subgraphLayers      = subgraphPtr->GetLayers();
 
-    BOOST_TEST(subgraphInputSlots.size()  == 1);
-    BOOST_TEST(subgraphOutputSlots.size() == 1);
-    BOOST_TEST(subgraphLayers.size()      == 1);
+    CHECK(subgraphInputSlots.size()  == 1);
+    CHECK(subgraphOutputSlots.size() == 1);
+    CHECK(subgraphLayers.size()      == 1);
 
-    BOOST_TEST(Contains(layersInGraph, "pooling layer"));
+    CHECK(Contains(layersInGraph, "pooling layer"));
 
     // Create a mock backend object
     MockBackendInitialiser initialiser; // Register the Mock Backend
     auto backendObjPtr = CreateBackendObject(MockBackendId());
-    BOOST_TEST((backendObjPtr != nullptr));
+    CHECK((backendObjPtr != nullptr));
 
     // Optimize the subgraph
     OptimizationViews optimizationViews;
 
     // Check that the optimization is carried out correctly, but no optimization is performed
-    BOOST_CHECK_NO_THROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
+    CHECK_NOTHROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
 
     // =======================================================================
     // The expected results are:
@@ -585,14 +584,14 @@
     // Check the substitutions
     // -----------------------
 
-    BOOST_TEST(optimizationViews.GetSubstitutions().empty());
+    CHECK(optimizationViews.GetSubstitutions().empty());
 
     // --------------------------
     // Check the failed subgraphs
     // --------------------------
 
     const OptimizationViews::Subgraphs& failedSubgraphs = optimizationViews.GetFailedSubgraphs();
-    BOOST_TEST(failedSubgraphs.size() == 1);
+    CHECK(failedSubgraphs.size() == 1);
 
     CheckFailedSubgraph(failedSubgraphs.at(0),
                         { subgraphInputSlots.size(), subgraphOutputSlots.size(), subgraphLayers.size() },
@@ -604,7 +603,7 @@
     // Check the untouched subgraphs
     // -----------------------------
 
-    BOOST_TEST(optimizationViews.GetUntouchedSubgraphs().empty());
+    CHECK(optimizationViews.GetUntouchedSubgraphs().empty());
 }
 
 // The input subgraph contains only unsupported layers (only convolutions are unsupported by the mock backend)
@@ -615,30 +614,30 @@
 
     // Create an unsupported subgraph
     SubgraphView::SubgraphViewPtr subgraphPtr = BuildFullyUnsupportedSubgraph2(graph, layersInGraph);
-    BOOST_TEST((subgraphPtr != nullptr));
+    CHECK((subgraphPtr != nullptr));
 
     const SubgraphView::InputSlots&  subgraphInputSlots  = subgraphPtr->GetInputSlots();
     const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots();
     const SubgraphView::Layers&      subgraphLayers      = subgraphPtr->GetLayers();
 
-    BOOST_TEST(subgraphInputSlots.size()  == 1);
-    BOOST_TEST(subgraphOutputSlots.size() == 1);
-    BOOST_TEST(subgraphLayers.size()      == 3);
+    CHECK(subgraphInputSlots.size()  == 1);
+    CHECK(subgraphOutputSlots.size() == 1);
+    CHECK(subgraphLayers.size()      == 3);
 
-    BOOST_TEST(Contains(layersInGraph, "pooling1 layer"));
-    BOOST_TEST(Contains(layersInGraph, "pooling2 layer"));
-    BOOST_TEST(Contains(layersInGraph, "pooling3 layer"));
+    CHECK(Contains(layersInGraph, "pooling1 layer"));
+    CHECK(Contains(layersInGraph, "pooling2 layer"));
+    CHECK(Contains(layersInGraph, "pooling3 layer"));
 
     // Create a mock backend object
     MockBackendInitialiser initialiser; // Register the Mock Backend
     auto backendObjPtr = CreateBackendObject(MockBackendId());
-    BOOST_TEST((backendObjPtr != nullptr));
+    CHECK((backendObjPtr != nullptr));
 
     // Optimize the subgraph
     OptimizationViews optimizationViews;
 
     // Check that the optimization is carried out correctly, but no optimization is performed
-    BOOST_CHECK_NO_THROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
+    CHECK_NOTHROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
 
     // =======================================================================
     // The expected results are:
@@ -651,18 +650,18 @@
     // Check the substitutions
     // -----------------------
 
-    BOOST_TEST(optimizationViews.GetSubstitutions().empty());
+    CHECK(optimizationViews.GetSubstitutions().empty());
 
     // --------------------------
     // Check the failed subgraphs
     // --------------------------
 
     const OptimizationViews::Subgraphs& failedSubgraphs = optimizationViews.GetFailedSubgraphs();
-    BOOST_TEST(failedSubgraphs.size() == 1);
+    CHECK(failedSubgraphs.size() == 1);
 
-    std::vector<Layer*> expectedFailedLayers{ layersInGraph.at("pooling1 layer"),
-                                              layersInGraph.at("pooling2 layer"),
-                                              layersInGraph.at("pooling3 layer") };
+    std::list<Layer*> expectedFailedLayers{ layersInGraph.at("pooling1 layer"),
+                                            layersInGraph.at("pooling2 layer"),
+                                            layersInGraph.at("pooling3 layer") };
 
     const SubgraphView& failedSubgraph = failedSubgraphs.at(0);
 
@@ -674,15 +673,15 @@
 
     const SubgraphView::Layers& failedSubgraphLayers = failedSubgraph.GetLayers();
 
-    BOOST_TEST(failedSubgraphLayers.front() + 0, expectedFailedLayers.at(0));
-    BOOST_TEST(failedSubgraphLayers.front() + 1, expectedFailedLayers.at(1));
-    BOOST_TEST(failedSubgraphLayers.front() + 2, expectedFailedLayers.at(2));
+    CHECK_EQ(failedSubgraphLayers.front() + 0, expectedFailedLayers.front() + 0);
+    CHECK_EQ(failedSubgraphLayers.front() + 1, expectedFailedLayers.front() + 1);
+    CHECK_EQ(failedSubgraphLayers.front() + 2, expectedFailedLayers.front() + 2);
 
     // -----------------------------
     // Check the untouched subgraphs
     // -----------------------------
 
-    BOOST_TEST(optimizationViews.GetUntouchedSubgraphs().empty());
+    CHECK(optimizationViews.GetUntouchedSubgraphs().empty());
 }
 
 // A simple case with only one layer (convolution) to optimize, supported by the mock backend
@@ -693,28 +692,28 @@
 
     // Create a fully optimizable subgraph
     SubgraphViewSelector::SubgraphViewPtr subgraphPtr = BuildFullyOptimizableSubgraph1(graph, layersInGraph);
-    BOOST_TEST((subgraphPtr != nullptr));
+    CHECK((subgraphPtr != nullptr));
 
     const SubgraphView::InputSlots&  subgraphInputSlots  = subgraphPtr->GetInputSlots();
     const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots();
     const SubgraphView::Layers&      subgraphLayers      = subgraphPtr->GetLayers();
 
-    BOOST_TEST(subgraphInputSlots.size()  == 1);
-    BOOST_TEST(subgraphOutputSlots.size() == 1);
-    BOOST_TEST(subgraphLayers.size()      == 1);
+    CHECK(subgraphInputSlots.size()  == 1);
+    CHECK(subgraphOutputSlots.size() == 1);
+    CHECK(subgraphLayers.size()      == 1);
 
-    BOOST_TEST(Contains(layersInGraph, "conv layer"));
+    CHECK(Contains(layersInGraph, "conv layer"));
 
     // Create a mock backend object
     MockBackendInitialiser initialiser; // Register the Mock Backend
     auto backendObjPtr = CreateBackendObject(MockBackendId());
-    BOOST_TEST((backendObjPtr != nullptr));
+    CHECK((backendObjPtr != nullptr));
 
     // Optimize the subgraph
     OptimizationViews optimizationViews;
 
     // Check that the optimization is carried out correctly
-    BOOST_CHECK_NO_THROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
+    CHECK_NOTHROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
 
     // ===========================================================================================
     // The expected results are:
@@ -728,7 +727,7 @@
     // -----------------------
 
     const OptimizationViews::Substitutions& substitutions = optimizationViews.GetSubstitutions();
-    BOOST_TEST(substitutions.size() == 1);
+    CHECK(substitutions.size() == 1);
 
     CheckSubstitution(substitutions.at(0),
                       { subgraphInputSlots.size(), subgraphOutputSlots.size(), subgraphLayers.size() },
@@ -741,13 +740,13 @@
     // Check the failed subgraphs
     // --------------------------
 
-    BOOST_TEST(optimizationViews.GetFailedSubgraphs().empty());
+    CHECK(optimizationViews.GetFailedSubgraphs().empty());
 
     // -----------------------------
     // Check the untouched subgraphs
     // -----------------------------
 
-    BOOST_TEST(optimizationViews.GetUntouchedSubgraphs().empty());
+    CHECK(optimizationViews.GetUntouchedSubgraphs().empty());
 }
 
 // A case with five layers (all convolutions) to optimize, all supported by the mock backend
@@ -758,32 +757,32 @@
 
     // Create a fully optimizable subgraph
     SubgraphViewSelector::SubgraphViewPtr subgraphPtr = BuildFullyOptimizableSubgraph2(graph, layersInGraph);
-    BOOST_TEST((subgraphPtr != nullptr));
+    CHECK((subgraphPtr != nullptr));
 
     const SubgraphView::InputSlots&  subgraphInputSlots  = subgraphPtr->GetInputSlots();
     const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots();
     const SubgraphView::Layers&      subgraphLayers      = subgraphPtr->GetLayers();
 
-    BOOST_TEST(subgraphPtr->GetInputSlots().size()  == 1);
-    BOOST_TEST(subgraphPtr->GetOutputSlots().size() == 1);
-    BOOST_TEST(subgraphPtr->GetLayers().size()      == 5);
+    CHECK(subgraphPtr->GetInputSlots().size()  == 1);
+    CHECK(subgraphPtr->GetOutputSlots().size() == 1);
+    CHECK(subgraphPtr->GetLayers().size()      == 5);
 
-    BOOST_TEST(Contains(layersInGraph, "conv1 layer"));
-    BOOST_TEST(Contains(layersInGraph, "conv2 layer"));
-    BOOST_TEST(Contains(layersInGraph, "conv3 layer"));
-    BOOST_TEST(Contains(layersInGraph, "conv4 layer"));
-    BOOST_TEST(Contains(layersInGraph, "conv5 layer"));
+    CHECK(Contains(layersInGraph, "conv1 layer"));
+    CHECK(Contains(layersInGraph, "conv2 layer"));
+    CHECK(Contains(layersInGraph, "conv3 layer"));
+    CHECK(Contains(layersInGraph, "conv4 layer"));
+    CHECK(Contains(layersInGraph, "conv5 layer"));
 
     // Create a mock backend object
     MockBackendInitialiser initialiser; // Register the Mock Backend
     auto backendObjPtr = CreateBackendObject(MockBackendId());
-    BOOST_TEST((backendObjPtr != nullptr));
+    CHECK((backendObjPtr != nullptr));
 
     // Optimize the subgraph
     OptimizationViews optimizationViews;
 
     // Check that the optimization is carried out correctly
-    BOOST_CHECK_NO_THROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
+    CHECK_NOTHROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
 
     // ===========================================================================================
     // The expected results are:
@@ -797,7 +796,7 @@
     // -----------------------
 
     const OptimizationViews::Substitutions& substitutions = optimizationViews.GetSubstitutions();
-    BOOST_TEST(substitutions.size() == 1);
+    CHECK(substitutions.size() == 1);
 
     std::list<Layer*> expectedSubstitutableLayers{ layersInGraph.at("conv1 layer"),
                                                    layersInGraph.at("conv2 layer"),
@@ -816,23 +815,23 @@
 
     const SubgraphView::Layers& substitutableSubgraphLayers = substitution.m_SubstitutableSubgraph.GetLayers();
 
-    BOOST_TEST(substitutableSubgraphLayers.front() + 0, expectedSubstitutableLayers.front() + 0);
-    BOOST_TEST(substitutableSubgraphLayers.front() + 1, expectedSubstitutableLayers.front() + 1);
-    BOOST_TEST(substitutableSubgraphLayers.front() + 2, expectedSubstitutableLayers.front() + 2);
-    BOOST_TEST(substitutableSubgraphLayers.front() + 3, expectedSubstitutableLayers.front() + 3);
-    BOOST_TEST(substitutableSubgraphLayers.front() + 4, expectedSubstitutableLayers.front() + 4);
+    auto expectedLayerIt = expectedSubstitutableLayers.begin();
+    for (Layer* substitutableLayer : substitutableSubgraphLayers)
+    {
+        CHECK_EQ(substitutableLayer, *expectedLayerIt++);
+    }
 
     // --------------------------
     // Check the failed subgraphs
     // --------------------------
 
-    BOOST_TEST(optimizationViews.GetFailedSubgraphs().empty());
+    CHECK(optimizationViews.GetFailedSubgraphs().empty());
 
     // -----------------------------
     // Check the untouched subgraphs
     // -----------------------------
 
-    BOOST_TEST(optimizationViews.GetUntouchedSubgraphs().empty());
+    CHECK(optimizationViews.GetUntouchedSubgraphs().empty());
 }
 
 // The input subgraph contains both supported and unsupported layers
@@ -844,32 +843,32 @@
 
     // Create a partially supported subgraph
     SubgraphViewSelector::SubgraphViewPtr subgraphPtr = BuildPartiallySupportedSubgraph(graph, layersInGraph);
-    BOOST_TEST((subgraphPtr != nullptr));
+    CHECK((subgraphPtr != nullptr));
 
     const SubgraphView::InputSlots&  subgraphInputSlots  = subgraphPtr->GetInputSlots();
     const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots();
     const SubgraphView::Layers&      subgraphLayers      = subgraphPtr->GetLayers();
 
-    BOOST_TEST(subgraphInputSlots.size()  == 1);
-    BOOST_TEST(subgraphOutputSlots.size() == 1);
-    BOOST_TEST(subgraphLayers.size()      == 5);
+    CHECK(subgraphInputSlots.size()  == 1);
+    CHECK(subgraphOutputSlots.size() == 1);
+    CHECK(subgraphLayers.size()      == 5);
 
-    BOOST_TEST(Contains(layersInGraph, "conv1 layer"));
-    BOOST_TEST(Contains(layersInGraph, "pooling1 layer"));
-    BOOST_TEST(Contains(layersInGraph, "pooling2 layer"));
-    BOOST_TEST(Contains(layersInGraph, "conv2 layer"));
-    BOOST_TEST(Contains(layersInGraph, "pooling3 layer"));
+    CHECK(Contains(layersInGraph, "conv1 layer"));
+    CHECK(Contains(layersInGraph, "pooling1 layer"));
+    CHECK(Contains(layersInGraph, "pooling2 layer"));
+    CHECK(Contains(layersInGraph, "conv2 layer"));
+    CHECK(Contains(layersInGraph, "pooling3 layer"));
 
     // Create a mock backend object
     MockBackendInitialiser initialiser; // Register the Mock Backend
     auto backendObjPtr = CreateBackendObject(MockBackendId());
-    BOOST_TEST((backendObjPtr != nullptr));
+    CHECK((backendObjPtr != nullptr));
 
     // Optimize the subgraph
     OptimizationViews optimizationViews;
 
     // Check that the optimization is carried out correctly
-    BOOST_CHECK_NO_THROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
+    CHECK_NOTHROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
 
     // ========================================================================
     // The expected results are:
@@ -883,7 +882,7 @@
     // -----------------------
 
     OptimizationViews::Substitutions substitutions = optimizationViews.GetSubstitutions();
-    BOOST_TEST(substitutions.size() == 2);
+    CHECK(substitutions.size() == 2);
     // Sort into a consistent order
     std::sort(substitutions.begin(), substitutions.end(), [](auto s1, auto s2) {
         return strcmp(s1.m_SubstitutableSubgraph.GetLayers().front()->GetName(),
@@ -925,7 +924,7 @@
     // --------------------------
 
     OptimizationViews::Subgraphs failedSubgraphs = optimizationViews.GetFailedSubgraphs();
-    BOOST_TEST(failedSubgraphs.size() == 2);
+    CHECK(failedSubgraphs.size() == 2);
     // Sort into a consistent order
     std::sort(failedSubgraphs.begin(), failedSubgraphs.end(), [](auto s1, auto s2) {
         return strcmp(s1.GetLayers().front()->GetName(), s2.GetLayers().front()->GetName()) < 0;
@@ -963,7 +962,7 @@
     // Check the untouched subgraphs
     // -----------------------------
 
-    BOOST_TEST(optimizationViews.GetUntouchedSubgraphs().empty());
+    CHECK(optimizationViews.GetUntouchedSubgraphs().empty());
 }
 
 // The input subgraph contains only unoptimizable layers ("unoptimizable" is added to the layer's name)
@@ -974,28 +973,28 @@
 
     // Create a fully unoptimizable subgraph
     SubgraphViewSelector::SubgraphViewPtr subgraphPtr = BuildFullyUnoptimizableSubgraph1(graph, layersInGraph);
-    BOOST_TEST((subgraphPtr != nullptr));
+    CHECK((subgraphPtr != nullptr));
 
     const SubgraphView::InputSlots&  subgraphInputSlots  = subgraphPtr->GetInputSlots();
     const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots();
     const SubgraphView::Layers&      subgraphLayers      = subgraphPtr->GetLayers();
 
-    BOOST_TEST(subgraphInputSlots.size()  == 1);
-    BOOST_TEST(subgraphOutputSlots.size() == 1);
-    BOOST_TEST(subgraphLayers.size()      == 1);
+    CHECK(subgraphInputSlots.size()  == 1);
+    CHECK(subgraphOutputSlots.size() == 1);
+    CHECK(subgraphLayers.size()      == 1);
 
-    BOOST_TEST(Contains(layersInGraph, "conv layer unoptimizable"));
+    CHECK(Contains(layersInGraph, "conv layer unoptimizable"));
 
     // Create a mock backend object
     MockBackendInitialiser initialiser; // Register the Mock Backend
     auto backendObjPtr = CreateBackendObject(MockBackendId());
-    BOOST_TEST((backendObjPtr != nullptr));
+    CHECK((backendObjPtr != nullptr));
 
     // Optimize the subgraph
     OptimizationViews optimizationViews;
 
     // Check that the optimization is carried out correctly
-    BOOST_CHECK_NO_THROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
+    CHECK_NOTHROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
 
     // ============================================================================
     // The expected results are:
@@ -1008,20 +1007,20 @@
     // Check the substitutions
     // -----------------------
 
-    BOOST_TEST(optimizationViews.GetSubstitutions().empty());
+    CHECK(optimizationViews.GetSubstitutions().empty());
 
     // --------------------------
     // Check the failed subgraphs
     // --------------------------
 
-    BOOST_TEST(optimizationViews.GetFailedSubgraphs().empty());
+    CHECK(optimizationViews.GetFailedSubgraphs().empty());
 
     // -----------------------------
     // Check the untouched subgraphs
     // -----------------------------
 
     const OptimizationViews::Subgraphs& untouchedSubgraphs = optimizationViews.GetUntouchedSubgraphs();
-    BOOST_TEST(untouchedSubgraphs.size() == 1);
+    CHECK(untouchedSubgraphs.size() == 1);
 
     CheckUntouchedSubgraph(untouchedSubgraphs.at(0),
                            { subgraphInputSlots.size(), subgraphOutputSlots.size(), subgraphLayers.size() },
@@ -1038,32 +1037,32 @@
 
     // Create a partially optimizable subgraph
     SubgraphViewSelector::SubgraphViewPtr subgraphPtr = BuildPartiallyOptimizableSubgraph1(graph, layersInGraph);
-    BOOST_TEST((subgraphPtr != nullptr));
+    CHECK((subgraphPtr != nullptr));
 
     const SubgraphView::InputSlots&  subgraphInputSlots  = subgraphPtr->GetInputSlots();
     const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots();
     const SubgraphView::Layers&      subgraphLayers      = subgraphPtr->GetLayers();
 
-    BOOST_TEST(subgraphInputSlots.size()  == 1);
-    BOOST_TEST(subgraphOutputSlots.size() == 1);
-    BOOST_TEST(subgraphLayers.size()      == 5);
+    CHECK(subgraphInputSlots.size()  == 1);
+    CHECK(subgraphOutputSlots.size() == 1);
+    CHECK(subgraphLayers.size()      == 5);
 
-    BOOST_TEST(Contains(layersInGraph, "conv1 layer"));
-    BOOST_TEST(Contains(layersInGraph, "conv2 layer unoptimizable"));
-    BOOST_TEST(Contains(layersInGraph, "conv3 layer"));
-    BOOST_TEST(Contains(layersInGraph, "conv4 layer unoptimizable"));
-    BOOST_TEST(Contains(layersInGraph, "conv5 layer"));
+    CHECK(Contains(layersInGraph, "conv1 layer"));
+    CHECK(Contains(layersInGraph, "conv2 layer unoptimizable"));
+    CHECK(Contains(layersInGraph, "conv3 layer"));
+    CHECK(Contains(layersInGraph, "conv4 layer unoptimizable"));
+    CHECK(Contains(layersInGraph, "conv5 layer"));
 
     // Create a mock backend object
     MockBackendInitialiser initialiser; // Register the Mock Backend
     auto backendObjPtr = CreateBackendObject(MockBackendId());
-    BOOST_TEST((backendObjPtr != nullptr));
+    CHECK((backendObjPtr != nullptr));
 
     // Optimize the subgraph
     OptimizationViews optimizationViews;
 
     // Check that the optimization is carried out correctly
-    BOOST_CHECK_NO_THROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
+    CHECK_NOTHROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
 
     // ===============================================================================
     // The expected results are:
@@ -1077,7 +1076,7 @@
     // -----------------------
 
     OptimizationViews::Substitutions substitutions = optimizationViews.GetSubstitutions();
-    BOOST_TEST(substitutions.size() == 3);
+    CHECK(substitutions.size() == 3);
     // Sort into a consistent order
     std::sort(substitutions.begin(), substitutions.end(),
         [](auto s1, auto s2) { return strcmp(s1.m_SubstitutableSubgraph.GetLayers().front()->GetName(),
@@ -1122,14 +1121,14 @@
     // Check the failed subgraphs
     // --------------------------
 
-    BOOST_TEST(optimizationViews.GetFailedSubgraphs().empty());
+    CHECK(optimizationViews.GetFailedSubgraphs().empty());
 
     // -----------------------------
     // Check the untouched subgraphs
     // -----------------------------
 
     OptimizationViews::Subgraphs untouchedSubgraphs = optimizationViews.GetUntouchedSubgraphs();
-    BOOST_TEST(untouchedSubgraphs.size() == 2);
+    CHECK(untouchedSubgraphs.size() == 2);
     // Sort into a consistent order
     std::sort(untouchedSubgraphs.begin(), untouchedSubgraphs.end(), [](auto s1, auto s2) {
         return strcmp(s1.GetLayers().front()->GetName(), s2.GetLayers().front()->GetName()) < 0;
@@ -1172,31 +1171,31 @@
 
     // Create a partially optimizable subgraph
     SubgraphViewSelector::SubgraphViewPtr subgraphPtr = BuildPartiallyOptimizableSubgraph2(graph, layersInGraph);
-    BOOST_TEST((subgraphPtr != nullptr));
+    CHECK((subgraphPtr != nullptr));
 
     const SubgraphView::InputSlots&  subgraphInputSlots  = subgraphPtr->GetInputSlots();
     const SubgraphView::OutputSlots& subgraphOutputSlots = subgraphPtr->GetOutputSlots();
     const SubgraphView::Layers&      subgraphLayers      = subgraphPtr->GetLayers();
 
-    BOOST_TEST(subgraphInputSlots.size()  == 2);
-    BOOST_TEST(subgraphOutputSlots.size() == 1);
-    BOOST_TEST(subgraphLayers.size()      == 4);
+    CHECK(subgraphInputSlots.size()  == 2);
+    CHECK(subgraphOutputSlots.size() == 1);
+    CHECK(subgraphLayers.size()      == 4);
 
-    BOOST_TEST(Contains(layersInGraph, "conv1 layer"));
-    BOOST_TEST(Contains(layersInGraph, "conv2 layer unoptimizable"));
-    BOOST_TEST(Contains(layersInGraph, "conv3 layer"));
-    BOOST_TEST(Contains(layersInGraph, "add layer"));
+    CHECK(Contains(layersInGraph, "conv1 layer"));
+    CHECK(Contains(layersInGraph, "conv2 layer unoptimizable"));
+    CHECK(Contains(layersInGraph, "conv3 layer"));
+    CHECK(Contains(layersInGraph, "add layer"));
 
     // Create a mock backend object
     MockBackendInitialiser initialiser; // Register the Mock Backend
     auto backendObjPtr = CreateBackendObject(MockBackendId());
-    BOOST_TEST((backendObjPtr != nullptr));
+    CHECK((backendObjPtr != nullptr));
 
     // Optimize the subgraph
     OptimizationViews optimizationViews;
 
     // Check that the optimization is carried out correctly
-    BOOST_CHECK_NO_THROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
+    CHECK_NOTHROW(optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraphPtr));
 
     // ==============================================================================
     // The expected results are:
@@ -1210,7 +1209,7 @@
     // -----------------------
 
     const OptimizationViews::Substitutions& substitutions = optimizationViews.GetSubstitutions();
-    BOOST_TEST(substitutions.size() == 1);
+    CHECK(substitutions.size() == 1);
 
     ExpectedSubgraphSize expectedSubstitutableSubgraphSizes{ 2, 1, 3 };
     ExpectedSubgraphSize expectedReplacementSubgraphSizes{ 2, 1, 1 };
@@ -1241,14 +1240,14 @@
     // Check the failed subgraphs
     // --------------------------
 
-    BOOST_TEST(optimizationViews.GetFailedSubgraphs().empty());
+    CHECK(optimizationViews.GetFailedSubgraphs().empty());
 
     // -----------------------------
     // Check the untouched subgraphs
     // -----------------------------
 
     const OptimizationViews::Subgraphs& untouchedSubgraphs = optimizationViews.GetUntouchedSubgraphs();
-    BOOST_TEST(untouchedSubgraphs.size() == 1);
+    CHECK(untouchedSubgraphs.size() == 1);
 
     std::vector<ExpectedSubgraphSize> expectedUntouchedSubgraphSizes{ { 1, 1, 1 } };
     std::vector<SubgraphView::InputSlots> expectedUntouchedInputSlots
@@ -1276,15 +1275,15 @@
 
 } // Anonymous namespace
 
-BOOST_AUTO_TEST_SUITE(OptimizeSubGraph)
+TEST_SUITE("OptimizeSubGraph")
+{
+TEST_CASE("FullyUnsupportedSubgraph1")     { FullyUnsupporteSubgraphTestImpl1();      }
+TEST_CASE("FullyUnsupportedSubgraph2")     { FullyUnsupporteSubgraphTestImpl2();      }
+TEST_CASE("FullyOptimizableSubgraph1")     { FullyOptimizableSubgraphTestImpl1();     }
+TEST_CASE("FullyOptimizableSubgraph2")     { FullyOptimizableSubgraphTestImpl2();     }
+TEST_CASE("PartiallySupportedSubgraph")    { PartiallySupportedSubgraphTestImpl();    }
+TEST_CASE("FullyUnoptimizableSubgraph")    { FullyUnoptimizableSubgraphTestImpl1();   }
+TEST_CASE("PartiallyOptimizableSubgraph1") { PartiallyOptimizableSubgraphTestImpl1(); }
+TEST_CASE("PartiallyOptimizableSubgraph2") { PartiallyOptimizableSubgraphTestImpl2(); }
 
-BOOST_AUTO_TEST_CASE(FullyUnsupportedSubgraph1)     { FullyUnsupporteSubgraphTestImpl1();      }
-BOOST_AUTO_TEST_CASE(FullyUnsupportedSubgraph2)     { FullyUnsupporteSubgraphTestImpl2();      }
-BOOST_AUTO_TEST_CASE(FullyOptimizableSubgraph1)     { FullyOptimizableSubgraphTestImpl1();     }
-BOOST_AUTO_TEST_CASE(FullyOptimizableSubgraph2)     { FullyOptimizableSubgraphTestImpl2();     }
-BOOST_AUTO_TEST_CASE(PartiallySupportedSubgraph)    { PartiallySupportedSubgraphTestImpl();    }
-BOOST_AUTO_TEST_CASE(FullyUnoptimizableSubgraph)    { FullyUnoptimizableSubgraphTestImpl1();   }
-BOOST_AUTO_TEST_CASE(PartiallyOptimizableSubgraph1) { PartiallyOptimizableSubgraphTestImpl1(); }
-BOOST_AUTO_TEST_CASE(PartiallyOptimizableSubgraph2) { PartiallyOptimizableSubgraphTestImpl2(); }
-
-BOOST_AUTO_TEST_SUITE_END()
+}
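
For reference, the TEST_SUITE/TEST_CASE shape used in the conversion above can be exercised on its own; a minimal sketch, assuming only the bundled doctest header and its built-in main (the suite and case names below are illustrative, not ArmNN code):

    // Minimal doctest sketch of the suite/case and assertion macros used above.
    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
    #include <doctest/doctest.h>

    #include <stdexcept>
    #include <vector>

    TEST_SUITE("ExampleSuite")
    {
    TEST_CASE("BasicChecks")
    {
        std::vector<int> values{ 1, 2, 3 };
        CHECK(values.size() == 3u);                           // plain condition, as BOOST_TEST was used
        CHECK_NOTHROW(values.at(2));                          // expression must not throw
        CHECK_THROWS_AS(values.at(10), std::out_of_range);    // expression must throw this exact type
    }
    } // the closing brace ends the suite; no separate BOOST_AUTO_TEST_SUITE_END() counterpart is needed
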
diff --git a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
index 66d166f..2c74690 100644
--- a/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
+++ b/src/backends/backendsCommon/test/OptimizedNetworkTests.cpp
@@ -3,17 +3,16 @@
 // SPDX-License-Identifier: MIT
 //
 
-
 #include <Graph.hpp>
 #include <Network.hpp>
 
 #include <reference/RefWorkloadFactory.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(OptimizedNetwork)
-
-BOOST_AUTO_TEST_CASE(SerializeToDot)
+TEST_SUITE("OptimizedNetwork")
+{
+TEST_CASE("SerializeToDot")
 {
     // build up the structure of the network
     armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -62,10 +61,10 @@
         "    " << addId << " -> " << outputId << " [label=< [4] >];\n"
         "}\n";
 
-    BOOST_TEST(ss.str() == expected.str());
+    CHECK(ss.str() == expected.str());
 }
 
-BOOST_AUTO_TEST_CASE(OptimizeValidateDeviceNonSupportLayerNoFallback)
+TEST_CASE("OptimizeValidateDeviceNonSupportLayerNoFallback")
 {
     // build up the structure of the network
     armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -93,16 +92,16 @@
     try
     {
         Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
-        BOOST_FAIL("Should have thrown an exception.");
+        FAIL("Should have thrown an exception.");
     }
     catch (const armnn::InvalidArgumentException& e)
     {
         // Different exceptions are thrown on different backends
     }
-    BOOST_CHECK(errMessages.size() > 0);
+    CHECK(errMessages.size() > 0);
 }
 
-BOOST_AUTO_TEST_CASE(OptimizeValidateDeviceNonSupportLayerWithFallback)
+TEST_CASE("OptimizeValidateDeviceNonSupportLayerWithFallback")
 {
     // build up the structure of the network
     armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -126,7 +125,7 @@
 
     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc, armnn::Compute::CpuRef };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
-    BOOST_REQUIRE(optNet);
+    REQUIRE(optNet);
 
     armnn::Graph& graph = GetGraphForTesting(optNet.get());
     graph.AllocateDynamicBuffers();
@@ -139,19 +138,19 @@
 #if defined(ARMCOMPUTENEON_ENABLED)
         if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
         {
-            BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
+            CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
         }
         else if (layer->GetType() == armnn::LayerType::Normalization)
         {
-            BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
+            CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
         }
 #else
-        BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
+        CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
 #endif
     }
 }
 
-BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDevice)
+TEST_CASE("OptimizeValidateWorkloadsUndefinedComputeDevice")
 {
     const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);
 
@@ -213,16 +212,16 @@
     try
     {
         Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
-        BOOST_FAIL("Should have thrown an exception.");
+        FAIL("Should have thrown an exception.");
     }
     catch (const armnn::InvalidArgumentException& e)
     {
         // Different exceptions are thrown on different backends
     }
-    BOOST_CHECK(errMessages.size() > 0);
+    CHECK(errMessages.size() > 0);
 }
 
-BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsUndefinedComputeDeviceWithFallback)
+TEST_CASE("OptimizeValidateWorkloadsUndefinedComputeDeviceWithFallback")
 {
     const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);
 
@@ -281,7 +280,7 @@
     std::vector<armnn::BackendId> backends = { armnn::Compute::Undefined, armnn::Compute::CpuRef };
 
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
-    BOOST_CHECK(optNet);
+    CHECK(optNet);
 
     armnn::Graph& graph = GetGraphForTesting(optNet.get());
     graph.AllocateDynamicBuffers();
@@ -290,13 +289,13 @@
     armnn::RefWorkloadFactory fact;
     for (auto&& layer : graph)
     {
-        BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
-        BOOST_CHECK_NO_THROW(
+        CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
+        CHECK_NOTHROW(
             layer->CreateWorkload(fact));
     }
 }
 
-BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsDuplicateComputeDeviceWithFallback)
+TEST_CASE("OptimizeValidateWorkloadsDuplicateComputeDeviceWithFallback")
 {
     // build up the structure of the network
     armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -323,7 +322,7 @@
                                              armnn::Compute::CpuRef };
 
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
-    BOOST_REQUIRE(optNet);
+    REQUIRE(optNet);
 
     armnn::Graph& graph = GetGraphForTesting(optNet.get());
     graph.AllocateDynamicBuffers();
@@ -338,25 +337,25 @@
 #if defined(ARMCOMPUTENEON_ENABLED)
         if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
         {
-            BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
+            CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
         }
         else if (layer->GetType() == armnn::LayerType::Normalization)
         {
-            BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
+            CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
         }
 #elif defined(ARMCOMPUTECL_ENABLED)
         if (layer->GetType() == armnn::LayerType::Input || layer->GetType() == armnn::LayerType::Output)
         {
-            BOOST_CHECK(layer->GetBackendId() == armnn::Compute::GpuAcc);
+            CHECK(layer->GetBackendId() == armnn::Compute::GpuAcc);
         }
         else if (layer->GetType() == armnn::LayerType::Normalization)
         {
-            BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
+            CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
         }
 #else
-        BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
+        CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
 #endif
     }
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
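
The try/FAIL/catch blocks above are kept because different backends throw different exception types; where the type is known, CHECK_THROWS_AS is the shorter doctest form. A hedged sketch of both patterns, using standard exceptions rather than ArmNN types:

    #include <doctest/doctest.h>

    #include <stdexcept>

    // Illustrative helper: throws a known type or a backend-dependent one.
    static void ThrowSomething(bool knownType)
    {
        if (knownType)
        {
            throw std::invalid_argument("bad argument");
        }
        throw std::runtime_error("backend-specific failure");
    }

    TEST_CASE("ExceptionPatterns")
    {
        // Exact exception type expected: a single assertion is enough.
        CHECK_THROWS_AS(ThrowSomething(true), std::invalid_argument);

        // Type varies: fail only if nothing is thrown, mirroring the tests above.
        try
        {
            ThrowSomething(false);
            FAIL("Should have thrown an exception.");
        }
        catch (const std::runtime_error&)
        {
            // Different exceptions are thrown on different backends.
        }
    }
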
diff --git a/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp
index 0dc1e78..e11553d 100644
--- a/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/PreluEndToEndTestImpl.hpp
@@ -10,6 +10,8 @@
 
 #include <backendsCommon/test/CommonTestUtils.hpp>
 
+#include <doctest/doctest.h>
+
 namespace
 {
 template<typename armnn::DataType DataType>
@@ -56,7 +58,7 @@
 
     INetworkPtr net = CreatePreluNetwork<ArmnnType>(inputInfo, alphaInfo, outputInfo);
 
-    BOOST_TEST_CHECKPOINT("Create a network");
+    CHECK(net);
 
     std::map<int, std::vector<T>> inputTensorData          = { { 0, inputData }, { 1, alphaData} };
     std::map<int, std::vector<T>> expectedOutputTensorData = { { 0, expectedOutputData } };
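
BOOST_TEST_CHECKPOINT only logged progress; the CHECK(net) that replaces it here also asserts that network creation returned a non-null pointer. A minimal sketch with a plain std::unique_ptr standing in for armnn::INetworkPtr (an assumed stand-in, not the real type):

    #include <doctest/doctest.h>

    #include <memory>

    TEST_CASE("NullCheckAfterNetworkCreation")
    {
        // unique_ptr converts to bool via its explicit operator bool, so CHECK(net)
        // passes exactly when the pointer is non-null.
        std::unique_ptr<int> net = std::make_unique<int>(42);
        CHECK(net);
    }
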
diff --git a/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp
index 9949824..281bed1 100644
--- a/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp
@@ -11,7 +11,7 @@
 #include <armnn/INetwork.hpp>
 #include <armnn/LstmParams.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 namespace
 {
@@ -260,11 +260,11 @@
     constexpr int8_t toleranceInt8 = 1;
     for (unsigned int i = 0u; i < outputStateOutResult.size(); ++i)
     {
-        BOOST_TEST(IsCloseEnough(outputStateOutVector[i], outputStateOutResult[i], toleranceInt8));
+        CHECK(IsCloseEnough(outputStateOutVector[i], outputStateOutResult[i], toleranceInt8));
     }
 
     for (unsigned int i = 0u; i < outputResult.size(); ++i)
     {
-        BOOST_TEST(IsCloseEnough(outputVector[i], outputResult[i], toleranceInt8));
+        CHECK(IsCloseEnough(outputVector[i], outputResult[i], toleranceInt8));
     }
 }
\ No newline at end of file
diff --git a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
index c68051c..a2fadc7 100644
--- a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
@@ -17,7 +17,7 @@
 
 #include <test/TensorHelpers.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <type_traits>
 
@@ -196,8 +196,6 @@
     // Builds up the structure of the network
     armnn::INetworkPtr net = CreateQuantizedLstmNetwork(inputDesc.GetShape(), outputDesc.GetShape());
 
-    BOOST_TEST_CHECKPOINT("create a network");
-
     IRuntime::CreationOptions options;
     IRuntimePtr runtime(IRuntime::Create(options));
 
@@ -232,12 +230,12 @@
     constexpr int16_t toleranceInt16 = 2;
     for (unsigned int i = 0u; i < cellStateOutResult.size(); ++i)
     {
-        BOOST_CHECK(IsCloseEnough(cellStateOutVector[i], cellStateOutResult[i], toleranceInt16));
+        CHECK(IsCloseEnough(cellStateOutVector[i], cellStateOutResult[i], toleranceInt16));
     }
 
     constexpr uint8_t toleranceUint8 = 1;
     for (unsigned int i = 0u; i < outputStateOutResult.size(); ++i)
     {
-        BOOST_TEST(IsCloseEnough(outputStateOutVector[i], outputStateOutResult[i], toleranceUint8));
+        CHECK(IsCloseEnough(outputStateOutVector[i], outputStateOutResult[i], toleranceUint8));
     }
 }
diff --git a/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp
index a67bd46..461b3b9 100644
--- a/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/RankEndToEndTestImpl.hpp
@@ -12,6 +12,8 @@
 
 #include <ResolveType.hpp>
 
+#include <doctest/doctest.h>
+
 namespace
 {
 
@@ -50,7 +52,7 @@
 
     armnn::INetworkPtr network = CreateRankNetwork(inputInfo, outputInfo);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(network);
 
     std::map<int, std::vector<T>> inputTensorData   = {{ 0, inputData }};
     std::map<int, std::vector<int32_t>> expectedOutputTensorData = {{ 0, expectedOutputData }};
diff --git a/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp
index cde85ca..aa7af11 100644
--- a/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/ResizeEndToEndTestImpl.hpp
@@ -14,8 +14,6 @@
 
 #include <backendsCommon/test/CommonTestUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
-
 #include <map>
 #include <vector>
 
diff --git a/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp
index 6d1a7b0..4e5baad 100644
--- a/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/SpaceToDepthEndToEndTestImpl.cpp
@@ -16,7 +16,7 @@
 
 #include <test/TestUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 namespace
 {
@@ -81,7 +81,7 @@
             dataLayout,
             blockSize);
 
-    BOOST_TEST_CHECKPOINT("Create a network");
+    CHECK(net);
 
     std::map<int, std::vector<float>> inputTensorData = { { 0, inputData } };
     std::map<int, std::vector<float>> expectedOutputTensorData = { { 0, expectedOutputData } };
diff --git a/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp
index 257a81b..64e24e5 100644
--- a/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/SplitterEndToEndTestImpl.hpp
@@ -12,7 +12,7 @@
 
 #include <backendsCommon/test/CommonTestUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <vector>
 
@@ -86,7 +86,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateSplitterNetwork<ArmnnType>(inputShape, outputShapes, splitAxis, numSplit);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     // Creates structures for input & output.
     std::vector<T> inputData{ 1, 2, 3, 4 };
@@ -114,7 +114,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateSplitterNetwork<ArmnnType>(inputShape, outputShapes, splitAxis, numSplit);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     // Creates structures for input & output.
     std::vector<T> inputData{
@@ -149,7 +149,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateSplitterNetwork<ArmnnType>(inputShape, outputShapes, splitAxis, numSplit);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     // Creates structures for input & output.
     std::vector<T> inputData{
@@ -187,7 +187,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateSplitterNetwork<ArmnnType>(inputShape, outputShapes, splitAxis, numSplit);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     // Creates structures for input & output.
     std::vector<T> inputData{
@@ -235,7 +235,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateSplitterNetwork<ArmnnType>(inputShape, outputShapes, splitAxis, numSplit);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     // Creates structures for input & output.
     std::vector<T> inputData{
@@ -283,7 +283,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateSplitterNetwork<ArmnnType>(inputShape, outputShapes, splitAxis, numSplit);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     // Creates structures for input & output.
     std::vector<T> inputData{
@@ -323,7 +323,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateSplitterNetwork<ArmnnType>(inputShape, outputShapes, splitAxis, numSplit);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     // Creates structures for input & output.
     std::vector<T> inputData{
@@ -403,7 +403,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateSplitterNetwork<ArmnnType>(inputShape, outputShapes, splitAxis, numSplit);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     // Creates structures for input & output.
     std::vector<T> inputData{
@@ -483,7 +483,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateSplitterNetwork<ArmnnType>(inputShape, outputShapes, splitAxis, numSplit);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     // Creates structures for input & output.
     std::vector<T> inputData{
@@ -562,7 +562,7 @@
     // Builds up the structure of the network
     INetworkPtr net = CreateSplitterNetwork<ArmnnType>(inputShape, outputShapes, splitAxis, numSplit);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     // Creates structures for input & output.
     std::vector<T> inputData{
diff --git a/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp b/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
index 0ad6bc4..a552a6a 100644
--- a/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
+++ b/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
@@ -14,7 +14,7 @@
 #include <AsyncExecutionCallback.hpp>
 #include <backendsCommon/test/CommonTestUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <vector>
 
@@ -109,7 +109,7 @@
             std::vector<TOutput> out = outputStorageVec[i].at(it.first);
             for (unsigned int j = 0; j < out.size(); ++j)
             {
-                BOOST_CHECK(Compare<ArmnnOType>(it.second[j], out[j], tolerance) == true);
+                CHECK(Compare<ArmnnOType>(it.second[j], out[j], tolerance) == true);
             }
         }
     }
@@ -197,7 +197,7 @@
             cb->Wait();
             
             // Checks the results.
-            BOOST_CHECK(cb->GetStatus() == Status::Success);
+            CHECK(cb->GetStatus() == Status::Success);
         }
     }
 
@@ -207,7 +207,7 @@
 
         for (unsigned int i = 0; i < out.size(); ++i)
         {
-            BOOST_CHECK(Compare<ArmnnOType>(it.second[i], out[i], tolerance) == true);
+            CHECK(Compare<ArmnnOType>(it.second[i], out[i], tolerance) == true);
         }
     }
 }
@@ -282,8 +282,7 @@
                                                            ellipsisMask,
                                                            newAxisMask);
 
-    BOOST_TEST_CHECKPOINT("create a network");
-
+    CHECK(net);
     // Creates structures for input & output.
     std::vector<T> inputData{
             1.0f, 1.0f, 1.0f, 2.0f, 2.0f, 2.0f,
@@ -337,7 +336,7 @@
                                                            ellipsisMask,
                                                            newAxisMask);
 
-    BOOST_TEST_CHECKPOINT("create a network");
+    CHECK(net);
 
     // Creates structures for input & output.
     std::vector<T> inputData1{
diff --git a/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp b/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp
index 57fc200..133829c 100644
--- a/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp
+++ b/src/backends/backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp
@@ -14,8 +14,6 @@
 
 #include <backendsCommon/test/CommonTestUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
-
 #include <map>
 #include <vector>
 
diff --git a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
index 182c913..2034a65 100644
--- a/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
+++ b/src/backends/backendsCommon/test/WorkloadDataValidation.cpp
@@ -13,13 +13,13 @@
 #include <reference/workloads/RefWorkloads.hpp>
 #include <reference/RefWorkloadFactory.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(WorkloadInfoValidation)
-
-BOOST_AUTO_TEST_CASE(BatchNormalizationQueueDescriptor_Validate_DifferentQuantizationData)
+TEST_SUITE("WorkloadInfoValidation")
+{
+TEST_CASE("BatchNormalizationQueueDescriptor_Validate_DifferentQuantizationData")
 {
     TensorShape inputShape { 1, 3, 2, 2 };
     TensorShape outputShape { 1, 3, 2, 2 };
@@ -42,18 +42,18 @@
     invalidData.m_Beta= &sameTensor;
     invalidData.m_Gamma = &sameTensor;
 
-    BOOST_CHECK_NO_THROW(RefBatchNormalizationWorkload(invalidData, invalidInfo));
+    CHECK_NOTHROW(RefBatchNormalizationWorkload(invalidData, invalidInfo));
 }
 
-BOOST_AUTO_TEST_CASE(QueueDescriptor_Validate_WrongNumOfInputsOutputs)
+TEST_CASE("QueueDescriptor_Validate_WrongNumOfInputsOutputs")
 {
     InputQueueDescriptor invalidData;
     WorkloadInfo invalidInfo;
     //Invalid argument exception is expected, because no inputs and no outputs were defined.
-    BOOST_CHECK_THROW(RefWorkloadFactory().CreateInput(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(RefWorkloadFactory().CreateInput(invalidData, invalidInfo), armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(RefPooling2dFloat32Workload_Validate_WrongDimTensor)
+TEST_CASE("RefPooling2dFloat32Workload_Validate_WrongDimTensor")
 {
     armnn::TensorInfo inputTensorInfo;
     armnn::TensorInfo outputTensorInfo;
@@ -71,10 +71,10 @@
     AddInputToWorkload(invalidData, invalidInfo, inputTensorInfo, nullptr);
 
     // Invalid argument exception is expected, input tensor has to be 4D.
-    BOOST_CHECK_THROW(RefPooling2dWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(RefPooling2dWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(SoftmaxQueueDescriptor_Validate_WrongInputHeight)
+TEST_CASE("SoftmaxQueueDescriptor_Validate_WrongInputHeight")
 {
     unsigned int inputHeight = 1;
     unsigned int inputWidth = 1;
@@ -102,10 +102,10 @@
     AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
 
     //Invalid argument exception is expected, because height != 1.
-    BOOST_CHECK_THROW(RefSoftmaxWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(RefSoftmaxWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(FullyConnectedQueueDescriptor_Validate_RequiredDataMissing)
+TEST_CASE("FullyConnectedQueueDescriptor_Validate_RequiredDataMissing")
 {
     unsigned int inputWidth = 1;
     unsigned int inputHeight = 1;
@@ -149,11 +149,11 @@
 
     //Invalid argument exception is expected, because not all required fields have been provided.
     //In particular inputsData[0], outputsData[0] and weightsData can not be null.
-    BOOST_CHECK_THROW(RefFullyConnectedWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(RefFullyConnectedWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
 }
 
 
-BOOST_AUTO_TEST_CASE(NormalizationQueueDescriptor_Validate_WrongInputHeight)
+TEST_CASE("NormalizationQueueDescriptor_Validate_WrongInputHeight")
 {
     constexpr unsigned int inputNum = 5;
     constexpr unsigned int inputHeight   = 32;
@@ -197,10 +197,10 @@
     invalidData.m_Parameters.m_K               = kappa;
 
     //Invalid argument exception is expected, because input height != output height.
-    BOOST_CHECK_THROW(RefNormalizationWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(RefNormalizationWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(SplitterQueueDescriptor_Validate_WrongWindow)
+TEST_CASE("SplitterQueueDescriptor_Validate_WrongWindow")
 {
     constexpr unsigned int inputNum = 1;
     constexpr unsigned int inputHeight   = 32;
@@ -233,16 +233,15 @@
     armnn::SplitterQueueDescriptor::ViewOrigin window(wOrigin);
     invalidData.m_ViewOrigins.push_back(window);
 
-    BOOST_TEST_INFO("Invalid argument exception is expected, because split window dimensionality does not "
-        "match input.");
-    BOOST_CHECK_THROW(RefSplitterWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    INFO("Invalid argument exception is expected, because split window dimensionality does not match input.");
+    CHECK_THROWS_AS(RefSplitterWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
 
     // Invalid, since window extends past the boundary of input tensor.
     std::vector<unsigned int> wOrigin3 = {0, 0, 15, 0};
     armnn::SplitterQueueDescriptor::ViewOrigin window3(wOrigin3);
     invalidData.m_ViewOrigins[0] = window3;
-    BOOST_TEST_INFO("Invalid argument exception is expected (wOrigin3[2]+ outputHeight > inputHeight");
-    BOOST_CHECK_THROW(RefSplitterWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    INFO("Invalid argument exception is expected (wOrigin3[2]+ outputHeight > inputHeight");
+    CHECK_THROWS_AS(RefSplitterWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
 
 
     std::vector<unsigned int> wOrigin4 = {0, 0, 0, 0};
@@ -253,12 +252,12 @@
     armnn::SplitterQueueDescriptor::ViewOrigin window5(wOrigin4);
     invalidData.m_ViewOrigins.push_back(window5);
 
-    BOOST_TEST_INFO("Invalid exception due to number of split windows not matching number of outputs.");
-    BOOST_CHECK_THROW(RefSplitterWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    INFO("Invalid exception due to number of split windows not matching number of outputs.");
+    CHECK_THROWS_AS(RefSplitterWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
 }
 
 
-BOOST_AUTO_TEST_CASE(ConcatQueueDescriptor_Validate_WrongWindow)
+TEST_CASE("ConcatQueueDescriptor_Validate_WrongWindow")
 {
     constexpr unsigned int inputNum = 1;
     constexpr unsigned int inputChannels = 3;
@@ -291,16 +290,15 @@
     armnn::ConcatQueueDescriptor::ViewOrigin window(wOrigin);
     invalidData.m_ViewOrigins.push_back(window);
 
-    BOOST_TEST_INFO("Invalid argument exception is expected, because merge window dimensionality does not "
-        "match input.");
-    BOOST_CHECK_THROW(RefConcatWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    INFO("Invalid argument exception is expected, because merge window dimensionality does not match input.");
+    CHECK_THROWS_AS(RefConcatWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
 
     // Invalid, since window extends past the boundary of output tensor.
     std::vector<unsigned int> wOrigin3 = {0, 0, 15, 0};
     armnn::ConcatQueueDescriptor::ViewOrigin window3(wOrigin3);
     invalidData.m_ViewOrigins[0] = window3;
-    BOOST_TEST_INFO("Invalid argument exception is expected (wOrigin3[2]+ inputHeight > outputHeight");
-    BOOST_CHECK_THROW(RefConcatWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    INFO("Invalid argument exception is expected (wOrigin3[2]+ inputHeight > outputHeight");
+    CHECK_THROWS_AS(RefConcatWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
 
 
     std::vector<unsigned int> wOrigin4 = {0, 0, 0, 0};
@@ -311,11 +309,11 @@
     armnn::ConcatQueueDescriptor::ViewOrigin window5(wOrigin4);
     invalidData.m_ViewOrigins.push_back(window5);
 
-    BOOST_TEST_INFO("Invalid exception due to number of merge windows not matching number of inputs.");
-    BOOST_CHECK_THROW(RefConcatWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    INFO("Invalid exception due to number of merge windows not matching number of inputs.");
+    CHECK_THROWS_AS(RefConcatWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(AdditionQueueDescriptor_Validate_InputNumbers)
+TEST_CASE("AdditionQueueDescriptor_Validate_InputNumbers")
 {
     armnn::TensorInfo input1TensorInfo;
     armnn::TensorInfo input2TensorInfo;
@@ -336,20 +334,20 @@
     AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
 
     // Too few inputs.
-    BOOST_CHECK_THROW(RefAdditionWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(RefAdditionWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
 
     AddInputToWorkload(invalidData, invalidInfo, input2TensorInfo, nullptr);
 
     // Correct.
-    BOOST_CHECK_NO_THROW(RefAdditionWorkload<>(invalidData, invalidInfo));
+    CHECK_NOTHROW(RefAdditionWorkload<>(invalidData, invalidInfo));
 
     AddInputToWorkload(invalidData, invalidInfo, input3TensorInfo, nullptr);
 
     // Too many inputs.
-    BOOST_CHECK_THROW(RefAdditionWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(RefAdditionWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(AdditionQueueDescriptor_Validate_InputShapes)
+TEST_CASE("AdditionQueueDescriptor_Validate_InputShapes")
 {
     armnn::TensorInfo input1TensorInfo;
     armnn::TensorInfo input2TensorInfo;
@@ -371,7 +369,7 @@
         AddInputToWorkload(invalidData, invalidInfo, input2TensorInfo, nullptr);
         AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
 
-        BOOST_CHECK_THROW(RefAdditionWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
+        CHECK_THROWS_AS(RefAdditionWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
     }
 
     // Output size not compatible with input sizes.
@@ -388,11 +386,11 @@
         AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
 
         // Output differs.
-        BOOST_CHECK_THROW(RefAdditionWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
+        CHECK_THROWS_AS(RefAdditionWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
     }
 }
 
-BOOST_AUTO_TEST_CASE(MultiplicationQueueDescriptor_Validate_InputTensorDimensionMismatch)
+TEST_CASE("MultiplicationQueueDescriptor_Validate_InputTensorDimensionMismatch")
 {
     armnn::TensorInfo input0TensorInfo;
     armnn::TensorInfo input1TensorInfo;
@@ -423,7 +421,7 @@
         AddInputToWorkload(invalidData, invalidInfo, input0TensorInfo, nullptr);
         AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);
 
-        BOOST_CHECK_THROW(RefMultiplicationWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
+        CHECK_THROWS_AS(RefMultiplicationWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
     }
 
     // Checks dimension consistency for input and output tensors.
@@ -448,11 +446,11 @@
         AddInputToWorkload(invalidData, invalidInfo, input0TensorInfo, nullptr);
         AddInputToWorkload(invalidData, invalidInfo, input1TensorInfo, nullptr);
 
-        BOOST_CHECK_THROW(RefMultiplicationWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
+        CHECK_THROWS_AS(RefMultiplicationWorkload<>(invalidData, invalidInfo), armnn::InvalidArgumentException);
     }
 }
 
-BOOST_AUTO_TEST_CASE(ReshapeQueueDescriptor_Validate_MismatchingNumElements)
+TEST_CASE("ReshapeQueueDescriptor_Validate_MismatchingNumElements")
 {
     armnn::TensorInfo inputTensorInfo;
     armnn::TensorInfo outputTensorInfo;
@@ -471,11 +469,11 @@
     AddOutputToWorkload(invalidData, invalidInfo, outputTensorInfo, nullptr);
 
     // InvalidArgumentException is expected, because the number of elements doesn't match.
-    BOOST_CHECK_THROW(RefReshapeWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(RefReshapeWorkload(invalidData, invalidInfo), armnn::InvalidArgumentException);
 }
 
 
-BOOST_AUTO_TEST_CASE(LstmQueueDescriptor_Validate)
+TEST_CASE("LstmQueueDescriptor_Validate")
 {
     armnn::DataType dataType = armnn::DataType::Float32;
 
@@ -568,61 +566,61 @@
     data.m_Parameters.m_LayerNormEnabled = true;
 
     // check wrong number of outputs
-    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
     AddOutputToWorkload(data, info, outputTensorInfo, nullptr);
 
     // check wrong cifg parameter configuration
     data.m_Parameters.m_CifgEnabled = true;
     armnn::TensorInfo scratchBufferTensorInfo2({batchSize, numUnits * 3}, dataType, qScale, qOffset);
     SetWorkloadOutput(data, info, 0, scratchBufferTensorInfo2, nullptr);
-    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
     data.m_Parameters.m_CifgEnabled = false;
     SetWorkloadOutput(data, info, 0, scratchBufferTensorInfo, nullptr);
 
     // check wrong inputGateBias configuration
     data.m_InputGateBias = nullptr;
-    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
     data.m_InputGateBias = &inputGateBiasTensor;
 
     // check inconsistent projection parameters
     data.m_Parameters.m_ProjectionEnabled = false;
-    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
     data.m_Parameters.m_ProjectionEnabled = true;
     data.m_ProjectionWeights = nullptr;
-    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
     data.m_ProjectionWeights = &projectionWeightsTensor;
 
     // check missing input layer normalisation weights
     data.m_InputLayerNormWeights = nullptr;
-    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
     data.m_InputLayerNormWeights = &inputLayerNormWeightsTensor;
 
     // layer norm disabled but normalisation weights are present
     data.m_Parameters.m_LayerNormEnabled = false;
-    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
     data.m_Parameters.m_LayerNormEnabled = true;
 
     // check invalid outputTensor shape
     armnn::TensorInfo incorrectOutputTensorInfo({batchSize, outputSize + 1}, dataType, qScale, qOffset);
     SetWorkloadOutput(data, info, 3, incorrectOutputTensorInfo, nullptr);
-    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
     SetWorkloadOutput(data, info, 3, outputTensorInfo, nullptr);
 
     // check invalid cell clipping parameters
     data.m_Parameters.m_ClippingThresCell = -1.0f;
-    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
     data.m_Parameters.m_ClippingThresCell = 0.0f;
 
     // check invalid projection clipping parameters
     data.m_Parameters.m_ClippingThresProj = -1.0f;
-    BOOST_CHECK_THROW(data.Validate(info), armnn::InvalidArgumentException);
+    CHECK_THROWS_AS(data.Validate(info), armnn::InvalidArgumentException);
     data.m_Parameters.m_ClippingThresProj = 0.0f;
 
     // check correct configuration
-    BOOST_CHECK_NO_THROW(data.Validate(info));
+    CHECK_NOTHROW(data.Validate(info));
 }
 
-BOOST_AUTO_TEST_CASE(BiasPerAxisQuantization_Validate)
+TEST_CASE("BiasPerAxisQuantization_Validate")
 {
     constexpr unsigned int nInput  = 1u;
     constexpr unsigned int cInput  = 3u;
@@ -667,7 +665,7 @@
     ScopedTensorHandle biasHandle1(biasInfo1);
     queueDescriptor.m_Bias = &biasHandle1;
 
-    BOOST_CHECK_NO_THROW(queueDescriptor.Validate(workloadInfo));
+    CHECK_NOTHROW(queueDescriptor.Validate(workloadInfo));
 
     // Test 2: wrong per-axis quantization values
     const std::vector<float> biasPerAxisScales2 = { 4.00f, 5.00f };
@@ -676,7 +674,7 @@
     ScopedTensorHandle biasHandle2(biasInfo2);
     queueDescriptor.m_Bias = &biasHandle2;
 
-    BOOST_CHECK_NO_THROW(queueDescriptor.Validate(workloadInfo));
+    CHECK_NOTHROW(queueDescriptor.Validate(workloadInfo));
 
     // Test 3: mismatched number of quantization scales
     const std::vector<float> biasPerAxisScales3 = { 3.75f, 5.25f, 5.25f };
@@ -685,7 +683,7 @@
     ScopedTensorHandle biasHandle3(biasInfo3);
     queueDescriptor.m_Bias = &biasHandle3;
 
-    BOOST_CHECK_THROW(queueDescriptor.Validate(workloadInfo), InvalidArgumentException);
+    CHECK_THROWS_AS(queueDescriptor.Validate(workloadInfo), InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
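
doctest's INFO, used above in place of BOOST_TEST_INFO, captures its message lazily and reports it only if an assertion later in the same scope fails. A small sketch with standard-library calls only:

    #include <doctest/doctest.h>

    #include <stdexcept>
    #include <vector>

    TEST_CASE("InfoIsReportedOnlyOnFailure")
    {
        const std::vector<unsigned int> windowOrigin{ 0, 0, 15, 0 };

        // The message below is printed only if the following assertion fails,
        // which mirrors how BOOST_TEST_INFO annotated the next BOOST_CHECK_THROW.
        INFO("Out-of-range exception is expected (index 15 is past the end of an empty vector)");
        CHECK_THROWS_AS(std::vector<int>{}.at(windowOrigin.at(2)), std::out_of_range);
    }
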
diff --git a/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
index 9720475..0539cd1 100644
--- a/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/DebugTestImpl.cpp
@@ -14,6 +14,8 @@
 
 #include <test/TensorHelpers.hpp>
 
+#include <doctest/doctest.h>
+
 namespace
 {
 
@@ -69,7 +71,7 @@
 
     std::cout.rdbuf(coutStreambuf);
 
-    BOOST_TEST(oss.str() == expectedStringOutput);
+    CHECK(oss.str() == expectedStringOutput);
 
     CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
 
diff --git a/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
index 143f9e0..2472c34 100644
--- a/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
@@ -18,6 +18,8 @@
 
 #include <test/TensorHelpers.hpp>
 
+#include <doctest/doctest.h>
+
 namespace
 {
 
@@ -221,25 +223,25 @@
                                  expectedDetectionBoxes,
                                  outputBoxesHandle->GetShape(),
                                  detectionBoxesInfo.GetShape());
-    BOOST_TEST(result.m_Result, result.m_Message.str());
+    CHECK_MESSAGE(result.m_Result, result.m_Message.str());
 
     result = CompareTensors(actualDetectionClassesOutput,
                             expectedDetectionClasses,
                             classesHandle->GetShape(),
                             detectionClassesInfo.GetShape());
-    BOOST_TEST(result.m_Result, result.m_Message.str());
+    CHECK_MESSAGE(result.m_Result, result.m_Message.str());
 
     result = CompareTensors(actualDetectionScoresOutput,
                             expectedDetectionScores,
                             outputScoresHandle->GetShape(),
                             detectionScoresInfo.GetShape());
-    BOOST_TEST(result.m_Result, result.m_Message.str());
+    CHECK_MESSAGE(result.m_Result, result.m_Message.str());
 
     result = CompareTensors(actualNumDetectionOutput,
                             expectedNumDetections,
                             numDetectionHandle->GetShape(),
                             numDetectionInfo.GetShape());
-    BOOST_TEST(result.m_Result, result.m_Message.str());
+    CHECK_MESSAGE(result.m_Result, result.m_Message.str());
 }
 
 template<armnn::DataType QuantizedType, typename RawType = armnn::ResolveType<QuantizedType>>
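
The CompareTensors checks above rely on CHECK_MESSAGE, which evaluates the condition and attaches the message to the report only when the condition is false. A sketch with a hypothetical result struct in place of armnn::PredicateResult:

    #include <doctest/doctest.h>

    #include <sstream>

    // Hypothetical stand-in for the PredicateResult returned by CompareTensors.
    struct SketchPredicateResult
    {
        bool m_Result = true;
        std::ostringstream m_Message;
    };

    TEST_CASE("MessageAttachedOnlyOnFailure")
    {
        SketchPredicateResult result;
        result.m_Message << "shapes differ at dimension 2";

        // Equivalent intent to the former two-argument BOOST_TEST(cond, msg).
        CHECK_MESSAGE(result.m_Result, result.m_Message.str());
    }
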
diff --git a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
index 11003a2..035c592 100644
--- a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
@@ -20,6 +20,7 @@
 
 #include <test/TensorHelpers.hpp>
 
+#include <doctest/doctest.h>
 namespace
 {
 
@@ -45,11 +46,11 @@
 
     // check shape and compare values
     auto result = CompareTensors(batchVec, expectedOutput, expectedShape, expectedShape);
-    BOOST_TEST(result.m_Result, result.m_Message.str());
+    CHECK_MESSAGE(result.m_Result, result.m_Message.str());
 
     // check if iterator is back at start position
     batchVecEncoder->Set(1.0f);
-    BOOST_TEST(batchVec[0] == 1.0f);
+    CHECK(batchVec[0] == 1.0f);
 }
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -72,11 +73,11 @@
 
     // check shape and compare values
     auto result = CompareTensors(input, expectedOutput, expectedShape, expectedShape);
-    BOOST_TEST(result.m_Result, result.m_Message.str());
+    CHECK_MESSAGE(result.m_Result, result.m_Message.str());
 
     // check if iterator is back at start position
     outputEncoder->Set(1.0f);
-    BOOST_TEST(input[0] == 1.0f);
+    CHECK(input[0] == 1.0f);
 
 }
 
@@ -100,11 +101,11 @@
 
     // check shape and compare values
     auto result = CompareTensors(input, expectedOutput, expectedShape, expectedShape);
-    BOOST_TEST(result.m_Result, result.m_Message.str());
+    CHECK_MESSAGE(result.m_Result, result.m_Message.str());
 
     // check if iterator is back at start position
     outputEncoder->Set(1.0f);
-    BOOST_TEST(input[0] == 1.0f);
+    CHECK(input[0] == 1.0f);
 }
 
 template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
@@ -129,11 +130,11 @@
 
     // check shape and compare values
     auto result = CompareTensors(batchVec, expectedOutput, expectedShape, expectedShape);
-    BOOST_TEST(result.m_Result, result.m_Message.str());
+    CHECK_MESSAGE(result.m_Result, result.m_Message.str());
 
     // check if iterator is back at start position
     batchVecEncoder->Set(1.0f);
-    BOOST_TEST(batchVec[0] == 1.0f);
+    CHECK(batchVec[0] == 1.0f);
 }
 
 // Lstm Layer tests:
diff --git a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
index cd77572..dae7483 100644
--- a/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/TransposeConvolution2dTestImpl.cpp
@@ -20,7 +20,7 @@
 
 #include <test/TensorHelpers.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <string>
 #include <utility>
diff --git a/src/backends/cl/test/ClContextSerializerTests.cpp b/src/backends/cl/test/ClContextSerializerTests.cpp
index 1fc0fb9..1135e11 100644
--- a/src/backends/cl/test/ClContextSerializerTests.cpp
+++ b/src/backends/cl/test/ClContextSerializerTests.cpp
@@ -7,7 +7,7 @@
 
 #include <cl/test/ClContextControlFixture.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <fstream>
 
@@ -66,9 +66,7 @@
 
 } // anonymous namespace
 
-BOOST_FIXTURE_TEST_SUITE(ClContextSerializer, ClContextControlFixture)
-
-BOOST_AUTO_TEST_CASE(ClContextSerializerTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "ClContextSerializerTest")
 {
     // Get tmp directory and create blank file.
     fs::path filePath = armnnUtils::Filesystem::NamedTempFile("Armnn-CachedNetworkFileTest-TempFile.bin");
@@ -101,24 +99,24 @@
             *net1, backends, runtime->GetDeviceSpec(), optimizerOptions1);
     armnn::IOptimizedNetworkPtr optNet2 = armnn::Optimize(
             *net2, backends, runtime->GetDeviceSpec(), optimizerOptions2);
-    BOOST_CHECK(optNet1);
-    BOOST_CHECK(optNet2);
+    CHECK(optNet1);
+    CHECK(optNet2);
 
     // Cached file should be empty until net1 is loaded into runtime.
-    BOOST_TEST(fs::is_empty(filePathString));
+    CHECK(fs::is_empty(filePathString));
 
     // Load net1 into the runtime.
     armnn::NetworkId netId1;
-    BOOST_TEST(runtime->LoadNetwork(netId1, std::move(optNet1)) == armnn::Status::Success);
+    CHECK(runtime->LoadNetwork(netId1, std::move(optNet1)) == armnn::Status::Success);
 
     // File should now exist and not be empty. It has been serialized.
-    BOOST_TEST(fs::exists(filePathString));
+    CHECK(fs::exists(filePathString));
     std::vector<char> dataSerialized = ReadBinaryFile(filePathString);
-    BOOST_TEST(dataSerialized.size() != 0);
+    CHECK(dataSerialized.size() != 0);
 
     // Load net2 into the runtime using file and deserialize.
     armnn::NetworkId netId2;
-    BOOST_TEST(runtime->LoadNetwork(netId2, std::move(optNet2)) == armnn::Status::Success);
+    CHECK(runtime->LoadNetwork(netId2, std::move(optNet2)) == armnn::Status::Success);
 
     // Run inference and get output data.
     std::vector<uint8_t> outputData1(5);
@@ -128,11 +126,8 @@
     RunInference(netId2, runtime, outputData2);
 
     // Compare outputs from both networks.
-    BOOST_CHECK_EQUAL_COLLECTIONS(outputData1.begin(), outputData1.end(),
-                                  outputData2.begin(), outputData2.end());
+    CHECK(std::equal(outputData1.begin(), outputData1.end(), outputData2.begin(), outputData2.end()));
 
     // Remove temp file created.
     fs::remove(filePath);
 }
-
-BOOST_AUTO_TEST_SUITE_END()
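
doctest has no direct counterpart to BOOST_CHECK_EQUAL_COLLECTIONS, so the serializer test above compares the two output buffers with std::equal inside a single CHECK. A small sketch of that pattern, assuming doctest is available; the REQUIRE on the sizes is optional, but it gives a clearer failure when only the lengths differ and preserves a little of the diagnostic detail the Boost macro used to provide:

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN // standalone sketch only; the real suite links its own runner
#include <doctest/doctest.h>

#include <algorithm>
#include <cstdint>
#include <vector>

TEST_CASE("CompareOutputBuffersSketch")
{
    std::vector<uint8_t> outputData1 = { 1, 2, 3, 4, 5 };
    std::vector<uint8_t> outputData2 = { 1, 2, 3, 4, 5 };

    // Optional: fail early with an explicit size mismatch before comparing element-wise.
    REQUIRE(outputData1.size() == outputData2.size());

    // The four-iterator overload mirrors the pattern used in the patch above.
    CHECK(std::equal(outputData1.begin(), outputData1.end(), outputData2.begin(), outputData2.end()));
}
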
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index 7602cbb..4e40328 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -21,14 +21,16 @@
 #include <cl/workloads/ClWorkloads.hpp>
 #include <cl/workloads/ClWorkloadUtils.hpp>
 
+#include <doctest/doctest.h>
+
 armnn::PredicateResult CompareIClTensorHandleShape(IClTensorHandle* tensorHandle,
                                                    std::initializer_list<unsigned int> expectedDimensions)
 {
     return CompareTensorHandleShape<IClTensorHandle>(tensorHandle, expectedDimensions);
 }
 
-BOOST_FIXTURE_TEST_SUITE(CreateWorkloadCl, ClContextControlFixture)
-
+TEST_SUITE("CreateWorkloadCl")
+{
 template <armnn::DataType DataType>
 static void ClCreateActivationWorkloadTest()
 {
@@ -44,18 +46,18 @@
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
     auto predResult = CompareIClTensorHandleShape(inputHandle, {1, 1});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 
     predResult = CompareIClTensorHandleShape(outputHandle, {1, 1});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CreateActivationFloatWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateActivationFloatWorkload")
 {
     ClCreateActivationWorkloadTest<armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateActivationFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateActivationFloat16Workload")
 {
     ClCreateActivationWorkloadTest<armnn::DataType::Float16>();
 }
@@ -78,14 +80,14 @@
     auto inputHandle2 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[1]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
     auto predResult = CompareIClTensorHandleShape(inputHandle1, {2, 3});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     predResult = CompareIClTensorHandleShape(inputHandle2, {2, 3});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     predResult = CompareIClTensorHandleShape(outputHandle, {2, 3});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateAdditionFloatWorkload")
 {
     ClCreateElementwiseWorkloadTest<ClAdditionWorkload,
                                     AdditionQueueDescriptor,
@@ -93,7 +95,7 @@
                                     armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateAdditionFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateAdditionFloat16Workload")
 {
     ClCreateElementwiseWorkloadTest<ClAdditionWorkload,
                                     AdditionQueueDescriptor,
@@ -101,7 +103,7 @@
                                     armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSubtractionFloatWorkload")
 {
     ClCreateElementwiseWorkloadTest<ClSubtractionWorkload,
                                     SubtractionQueueDescriptor,
@@ -109,7 +111,7 @@
                                     armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSubtractionFloat16Workload")
 {
     ClCreateElementwiseWorkloadTest<ClSubtractionWorkload,
                                     SubtractionQueueDescriptor,
@@ -117,7 +119,7 @@
                                     armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateMultiplicationFloatWorkloadTest")
 {
     ClCreateElementwiseWorkloadTest<ClMultiplicationWorkload,
                                     MultiplicationQueueDescriptor,
@@ -125,7 +127,7 @@
                                     armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateMultiplicationFloat16WorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateMultiplicationFloat16WorkloadTest")
 {
     ClCreateElementwiseWorkloadTest<ClMultiplicationWorkload,
                                     MultiplicationQueueDescriptor,
@@ -133,7 +135,7 @@
                                     armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8WorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateMultiplicationUint8WorkloadTest")
 {
     ClCreateElementwiseWorkloadTest<ClMultiplicationWorkload,
                                     MultiplicationQueueDescriptor,
@@ -141,7 +143,7 @@
                                     armnn::DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateDivisionFloatWorkloadTest")
 {
     ClCreateElementwiseWorkloadTest<ClDivisionWorkload,
                                     DivisionQueueDescriptor,
@@ -149,7 +151,7 @@
                                     armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateDivisionFloat16WorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateDivisionFloat16WorkloadTest")
 {
     ClCreateElementwiseWorkloadTest<ClDivisionWorkload,
                                     DivisionQueueDescriptor,
@@ -174,13 +176,13 @@
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
     auto predResult = CompareIClTensorHandleShape(inputHandle, {2, 3});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 
     predResult = CompareIClTensorHandleShape(outputHandle, {2, 3});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CreateRsqrtFloat32WorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateRsqrtFloat32WorkloadTest")
 {
     ClCreateElementwiseUnaryWorkloadTest<ClRsqrtWorkload, RsqrtQueueDescriptor, armnn::DataType::Float32>(
         UnaryOperation::Rsqrt);
@@ -206,43 +208,43 @@
     {
         case DataLayout::NHWC:
             predResult = CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 });
-            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
             predResult = CompareIClTensorHandleShape(outputHandle, { 2, 4, 4, 3 });
-            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
             break;
         default: // NCHW
             predResult = CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 });
-            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
             predResult = CompareIClTensorHandleShape(outputHandle, { 2, 3, 4, 4 });
-            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     }
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatNchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateBatchNormalizationFloatNchwWorkload")
 {
     ClCreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloatWorkload,
                                            armnn::DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16NchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateBatchNormalizationFloat16NchwWorkload")
 {
     ClCreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloatWorkload,
                                            armnn::DataType::Float16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatNhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateBatchNormalizationFloatNhwcWorkload")
 {
     ClCreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloatWorkload,
                                            armnn::DataType::Float32>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationNhwcFloat16NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateBatchNormalizationNhwcFloat16NhwcWorkload")
 {
     ClCreateBatchNormalizationWorkloadTest<ClBatchNormalizationFloatWorkload,
                                            armnn::DataType::Float16>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvertFp16ToFp32Workload")
 {
     Graph graph;
     ClWorkloadFactory factory =
@@ -254,14 +256,14 @@
     auto inputHandle  = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
     auto predResult = CompareIClTensorHandleShape(inputHandle, {1, 3, 2, 3});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     predResult = CompareIClTensorHandleShape(outputHandle, {1, 3, 2, 3});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
-    BOOST_TEST((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
-    BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32));
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
+    CHECK((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
+    CHECK((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32));
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvertFp32ToFp16Workload")
 {
     Graph graph;
     ClWorkloadFactory factory =
@@ -274,11 +276,11 @@
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
     auto predResult = CompareIClTensorHandleShape(inputHandle, {1, 3, 2, 3});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     predResult = CompareIClTensorHandleShape(outputHandle, {1, 3, 2, 3});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
-    BOOST_TEST((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32));
-    BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
+    CHECK((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32));
+    CHECK((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
 }
 
 template <typename Convolution2dWorkloadType, typename armnn::DataType DataType>
@@ -301,31 +303,31 @@
     Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
     auto inputHandle  = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST((inputHandle->GetShape() == inputShape));
-    BOOST_TEST((outputHandle->GetShape() == outputShape));
+    CHECK((inputHandle->GetShape() == inputShape));
+    CHECK((outputHandle->GetShape() == outputShape));
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvolution2dFloatNchwWorkload")
 {
     ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvolution2dFloatNhwcWorkload")
 {
     ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvolution2dFloat16NchwWorkload")
 {
     ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvolution2dFloat16NhwcWorkload")
 {
     ClConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFastMathEnabledWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvolution2dFastMathEnabledWorkload")
 {
     Graph graph;
 
@@ -353,7 +355,7 @@
     ARMNN_ASSERT(conv2dWorkload->GetConvolutionMethod() == arm_compute::ConvolutionMethod::WINOGRAD);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvolution2dClCompiledContextWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConvolution2dClCompiledContextWorkload")
 {
     using namespace armnn;
 
@@ -434,7 +436,7 @@
 
 
     // Check built programs are empty in context
-    BOOST_TEST(clCompileContext.get_built_programs().empty());
+    CHECK(clCompileContext.get_built_programs().empty());
 
     auto workload = std::make_unique<ClConvolution2dWorkload>(queueDescriptor,
                                                               workloadInfo,
@@ -442,7 +444,7 @@
                                                               clCompileContext);
     ARMNN_ASSERT(workload != nullptr);
     // Check built programs are not empty in context
-    BOOST_TEST(!clCompileContext.get_built_programs().empty());
+    CHECK(!clCompileContext.get_built_programs().empty());
 }
 
 template <typename DepthwiseConvolutionWorkloadType, typename armnn::DataType DataType>
@@ -465,11 +467,11 @@
     TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
                                                                : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
 
-    BOOST_TEST((inputHandle->GetShape() == inputShape));
-    BOOST_TEST((outputHandle->GetShape() == outputShape));
+    CHECK((inputHandle->GetShape() == inputShape));
+    CHECK((outputHandle->GetShape() == outputShape));
 }
 
-BOOST_AUTO_TEST_CASE(CreateDepthwiseConvolutionFloat32NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateDepthwiseConvolutionFloat32NhwcWorkload")
 {
     ClDepthwiseConvolutionWorkloadTest<ClDepthwiseConvolutionWorkload, DataType::Float32>(DataLayout::NHWC);
 }
@@ -488,22 +490,22 @@
     auto inputHandle  = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
     auto predResult = CompareIClTensorHandleShape(inputHandle, {2, 3, 6, 6});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     predResult = CompareIClTensorHandleShape(outputHandle, {2, 2, 6, 6});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloatWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateDirectConvolution2dFloatWorkload")
 {
     ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateDirectConvolution2dFloat16Workload")
 {
     ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dUint8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateDirectConvolution2dUint8Workload")
 {
     ClDirectConvolution2dWorkloadTest<ClConvolution2dWorkload, armnn::DataType::QAsymmU8>();
 }
@@ -523,18 +525,18 @@
     auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
     auto predResult = CompareIClTensorHandleShape(inputHandle, {3, 1, 4, 5});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     predResult = CompareIClTensorHandleShape(outputHandle, {3, 7});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
 
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloatWorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateFullyConnectedFloatWorkloadTest")
 {
     ClCreateFullyConnectedWorkloadTest<ClFullyConnectedWorkload, armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat16WorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateFullyConnectedFloat16WorkloadTest")
 {
     ClCreateFullyConnectedWorkloadTest<ClFullyConnectedWorkload, armnn::DataType::Float16>();
 }
@@ -558,26 +560,26 @@
     TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({3, 5, 5, 1})
                                                                : std::initializer_list<unsigned int>({3, 1, 5, 5});
 
-    BOOST_TEST((inputHandle->GetShape() == inputShape));
-    BOOST_TEST((outputHandle->GetShape() == outputShape));
+    CHECK((inputHandle->GetShape() == inputShape));
+    CHECK((outputHandle->GetShape() == outputShape));
 }
 
-BOOST_AUTO_TEST_CASE(CreateNormalizationFloat32NchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateNormalizationFloat32NchwWorkload")
 {
     ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateNormalizationFloat16NchwWorkload")
 {
     ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateNormalizationFloat32NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateNormalizationFloat32NhwcWorkload")
 {
     ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateNormalizationFloat16NhwcWorkload")
 {
     ClNormalizationWorkloadTest<ClNormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
 }
@@ -601,26 +603,26 @@
     auto inputHandle  = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
-    BOOST_TEST((inputHandle->GetShape() == inputShape));
-    BOOST_TEST((outputHandle->GetShape() == outputShape));
+    CHECK((inputHandle->GetShape() == inputShape));
+    CHECK((outputHandle->GetShape() == outputShape));
 }
 
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreatePooling2dFloatNchwWorkload")
 {
     ClPooling2dWorkloadTest<armnn::DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreatePooling2dFloatNhwcWorkload")
 {
     ClPooling2dWorkloadTest<armnn::DataType::Float32>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16NchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreatePooling2dFloat16NchwWorkload")
 {
     ClPooling2dWorkloadTest<armnn::DataType::Float16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreatePooling2dFloat16NhwcWorkload")
 {
     ClPooling2dWorkloadTest<armnn::DataType::Float16>(DataLayout::NHWC);
 }
@@ -647,22 +649,22 @@
     auto alphaHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[1]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
-    BOOST_TEST((inputHandle->GetShape() == inputShape));
-    BOOST_TEST((alphaHandle->GetShape() == alphaShape));
-    BOOST_TEST((outputHandle->GetShape() == outputShape));
+    CHECK((inputHandle->GetShape() == inputShape));
+    CHECK((alphaHandle->GetShape() == alphaShape));
+    CHECK((outputHandle->GetShape() == outputShape));
 }
 
-BOOST_AUTO_TEST_CASE(CreatePreluFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreatePreluFloat16Workload")
 {
     ClCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::Float16);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePreluFloatWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreatePreluFloatWorkload")
 {
     ClCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::Float32);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreatePreluUint8Workload")
 {
     ClCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::QAsymmU8);
 }
@@ -682,22 +684,22 @@
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
     auto predResult = CompareIClTensorHandleShape(inputHandle, {4, 1});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     predResult = CompareIClTensorHandleShape(outputHandle, {1, 4});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CreateReshapeFloatWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateReshapeFloatWorkload")
 {
     ClCreateReshapeWorkloadTest<armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateReshapeFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateReshapeFloat16Workload")
 {
     ClCreateReshapeWorkloadTest<armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateReshapeUint8Workload")
 {
     ClCreateReshapeWorkloadTest<armnn::DataType::QAsymmU8>();
 }
@@ -729,28 +731,28 @@
     }
 
     auto predResult = CompareIClTensorHandleShape(inputHandle, {4, 1});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     predResult = CompareIClTensorHandleShape(outputHandle, {4, 1});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
 
-BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32WorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSoftmaxFloat32WorkloadTest")
 {
     ClSoftmaxWorkloadTest<ClSoftmaxWorkload, armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16WorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSoftmaxFloat16WorkloadTest")
 {
     ClSoftmaxWorkloadTest<ClSoftmaxWorkload, armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSoftmaxQAsymmU8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSoftmaxQAsymmU8Workload")
 {
     ClSoftmaxWorkloadTest<ClSoftmaxWorkload, armnn::DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSoftmaxQAsymmS8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSoftmaxQAsymmS8Workload")
 {
     ClSoftmaxWorkloadTest<ClSoftmaxWorkload, armnn::DataType::QAsymmS8>();
 }
@@ -768,27 +770,27 @@
     SplitterQueueDescriptor queueDescriptor = workload->GetData();
     auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto predResult = CompareIClTensorHandleShape(inputHandle, {5, 7, 7});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 
     auto outputHandle1 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[1]);
     predResult = CompareIClTensorHandleShape(outputHandle1, {2, 7, 7});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 
     auto outputHandle2 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[2]);
     predResult = CompareIClTensorHandleShape(outputHandle2, {2, 7, 7});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 
     auto outputHandle0 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
     predResult = CompareIClTensorHandleShape(outputHandle0, {1, 7, 7});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterFloatWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSplitterFloatWorkload")
 {
     ClSplitterWorkloadTest<armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSplitterFloat16Workload")
 {
     ClSplitterWorkloadTest<armnn::DataType::Float16>();
 }
@@ -819,35 +821,35 @@
     armnn::ClSubTensorHandle* mIn0 = dynamic_cast<armnn::ClSubTensorHandle*>(wlConcat->GetData().m_Inputs[0]);
     armnn::ClSubTensorHandle* mIn1 = dynamic_cast<armnn::ClSubTensorHandle*>(wlConcat->GetData().m_Inputs[1]);
 
-    BOOST_TEST(sOut0);
-    BOOST_TEST(sOut1);
-    BOOST_TEST(mIn0);
-    BOOST_TEST(mIn1);
+    CHECK(sOut0);
+    CHECK(sOut1);
+    CHECK(mIn0);
+    CHECK(mIn1);
 
     // Flipped order of inputs/outputs.
     bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);
-    BOOST_TEST(validDataPointers);
+    CHECK(validDataPointers);
 
 
     // Also make sure that the inputs are subtensors of one tensor and the outputs are subtensors of another tensor.
     bool validSubTensorParents = (mIn0->GetTensor().parent() == mIn1->GetTensor().parent())
                                     && (sOut0->GetTensor().parent() == sOut1->GetTensor().parent());
 
-    BOOST_TEST(validSubTensorParents);
+    CHECK(validSubTensorParents);
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloatWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSplitterConcatFloatWorkload")
 {
     ClSplitterConcatTest<armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSplitterConcatFloat16Workload")
 {
     ClSplitterConcatTest<armnn::DataType::Float16>();
 }
 
 
-BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputs)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSingleOutputMultipleInputs")
 {
     // Test that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer.
     // We create a splitter with two outputs; each of those outputs is used by two different activation layers.
@@ -875,24 +877,24 @@
     armnn::ClSubTensorHandle* activ1_1Im = dynamic_cast<armnn::ClSubTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);
 
 
-    BOOST_TEST(sOut0);
-    BOOST_TEST(sOut1);
-    BOOST_TEST(activ0_0Im);
-    BOOST_TEST(activ0_1Im);
-    BOOST_TEST(activ1_0Im);
-    BOOST_TEST(activ1_1Im);
+    CHECK(sOut0);
+    CHECK(sOut1);
+    CHECK(activ0_0Im);
+    CHECK(activ0_1Im);
+    CHECK(activ1_0Im);
+    CHECK(activ1_1Im);
 
     bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
                              (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);
 
-    BOOST_TEST(validDataPointers);
+    CHECK(validDataPointers);
 }
 
 #if defined(ARMNNREF_ENABLED)
 
 // This test unit needs the reference backend; it's not available if the reference backend is not built
 
-BOOST_AUTO_TEST_CASE(CreateMemCopyWorkloadsCl)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateMemCopyWorkloadsCl")
 {
     ClWorkloadFactory factory =
         ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
@@ -922,26 +924,26 @@
     TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({ 5, 20, 50, 67 })
                                                                : std::initializer_list<unsigned int>({ 5, 50, 67, 20 });
 
-    BOOST_TEST((inputHandle->GetShape() == inputShape));
-    BOOST_TEST((outputHandle->GetShape() == outputShape));
+    CHECK((inputHandle->GetShape() == inputShape));
+    CHECK((outputHandle->GetShape() == outputShape));
 }
 
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloatNchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateL2NormalizationFloatNchwWorkload")
 {
     ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloatNhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateL2NormalizationFloatNhwcWorkload")
 {
     ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateL2NormalizationFloat16NchwWorkload")
 {
     ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateL2NormalizationFloat16NhwcWorkload")
 {
     ClL2NormalizationWorkloadTest<ClL2NormalizationFloatWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
 }
@@ -961,12 +963,12 @@
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
     auto predResult = CompareIClTensorHandleShape(inputHandle, {4, 1});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     predResult = CompareIClTensorHandleShape(outputHandle, {4, 1});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CreateLogSoftmaxFloat32WorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateLogSoftmaxFloat32WorkloadTest")
 {
     ClCreateLogSoftmaxWorkloadTest<ClLogSoftmaxWorkload, armnn::DataType::Float32>();
 }
@@ -984,12 +986,12 @@
     auto inputHandle  = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[1]);
     auto predResult = CompareIClTensorHandleShape(inputHandle, {2, 2});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     predResult = CompareIClTensorHandleShape(outputHandle, {2, 4});
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CreateLSTMWorkloadFloatWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateLSTMWorkloadFloatWorkload")
 {
     ClCreateLstmWorkloadTest<ClLstmFloatWorkload>();
 }
@@ -1013,44 +1015,44 @@
     {
         case DataLayout::NHWC:
             predResult = CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 });
-            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
             predResult = CompareIClTensorHandleShape(outputHandle, { 2, 2, 2, 3 });
-            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
             break;
         default: // DataLayout::NCHW
             predResult = CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 });
-            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
             predResult = CompareIClTensorHandleShape(outputHandle, { 2, 3, 2, 2 });
-            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     }
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeFloat32NchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateResizeFloat32NchwWorkload")
 {
     ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeFloat16NchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateResizeFloat16NchwWorkload")
 {
     ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeUint8NchwWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateResizeUint8NchwWorkload")
 {
     ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeFloat32NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateResizeFloat32NhwcWorkload")
 {
     ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeFloat16NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateResizeFloat16NhwcWorkload")
 {
     ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::Float16>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeUint8NhwcWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateResizeUint8NhwcWorkload")
 {
     ClResizeWorkloadTest<ClResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
 }
@@ -1071,22 +1073,22 @@
 
     // The first dimension (batch size) in both input and output is singular, so it has been reduced by ACL.
     auto predResult = CompareIClTensorHandleShape(inputHandle, {  1, 3, 7, 4 });
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     predResult = CompareIClTensorHandleShape(outputHandle, { 1, 4 });
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CreateMeanFloat32Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateMeanFloat32Workload")
 {
     ClMeanWorkloadTest<ClMeanWorkload, armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateMeanFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateMeanFloat16Workload")
 {
     ClMeanWorkloadTest<ClMeanWorkload, armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateMeanUint8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateMeanUint8Workload")
 {
     ClMeanWorkloadTest<ClMeanWorkload, armnn::DataType::QAsymmU8>();
 }
@@ -1107,39 +1109,39 @@
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
     auto predResult = CompareIClTensorHandleShape(inputHandle0, { 2, 3, 2, 5 });
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     predResult = CompareIClTensorHandleShape(inputHandle1, { 2, 3, 2, 5 });
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     predResult = CompareIClTensorHandleShape(outputHandle, outputShape);
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConcatDim0Float32Workload")
 {
     ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConcatDim1Float32Workload")
 {
     ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConcatDim3Float32Workload")
 {
     ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConcatDim0Uint8Workload")
 {
     ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QAsymmU8>({ 4, 3, 2, 5 }, 0);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConcatDim1Uint8Workload")
 {
     ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 6, 2, 5 }, 1);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateConcatDim3Uint8Workload")
 {
     ClCreateConcatWorkloadTest<ClConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 }, 3);
 }
@@ -1158,27 +1160,27 @@
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
     auto predResult = CompareIClTensorHandleShape(inputHandle, { 1, 2, 2, 1 });
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     predResult = CompareIClTensorHandleShape(outputHandle, { 1, 1, 1, 4 });
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthFloat32Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSpaceToDepthFloat32Workload")
 {
     ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSpaceToDepthFloat16Workload")
 {
     ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthQAsymm8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSpaceToDepthQAsymm8Workload")
 {
     ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthQSymm16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateSpaceToDepthQSymm16Workload")
 {
     ClSpaceToDepthWorkloadTest<ClSpaceToDepthWorkload, armnn::DataType::QSymmS16>();
 }
@@ -1206,24 +1208,24 @@
     {
         auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[i]);
         auto predResult1 = CompareIClTensorHandleShape(inputHandle, inputShape);
-        BOOST_TEST(predResult1.m_Result, predResult1.m_Message.str());
+        CHECK_MESSAGE(predResult1.m_Result, predResult1.m_Message.str());
     }
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
     auto predResult2 = CompareIClTensorHandleShape(outputHandle, outputShape);
-    BOOST_TEST(predResult2.m_Result, predResult2.m_Message.str());
+    CHECK_MESSAGE(predResult2.m_Result, predResult2.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CreateStackFloat32Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateStackFloat32Workload")
 {
     ClCreateStackWorkloadTest<armnn::DataType::Float32>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
 }
 
-BOOST_AUTO_TEST_CASE(CreateStackFloat16Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateStackFloat16Workload")
 {
     ClCreateStackWorkloadTest<armnn::DataType::Float16>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
 }
 
-BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateStackUint8Workload")
 {
     ClCreateStackWorkloadTest<armnn::DataType::QAsymmU8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
 }
@@ -1239,19 +1241,19 @@
     QLstmQueueDescriptor queueDescriptor = workload->GetData();
 
     IAclTensorHandle* inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    BOOST_TEST((inputHandle->GetShape() == TensorShape({2, 4})));
-    BOOST_TEST((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8_SIGNED));
+    CHECK((inputHandle->GetShape() == TensorShape({2, 4})));
+    CHECK((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8_SIGNED));
 
     IAclTensorHandle* cellStateOutHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
-    BOOST_TEST((cellStateOutHandle->GetShape() == TensorShape({2, 4})));
-    BOOST_TEST((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16));
+    CHECK((cellStateOutHandle->GetShape() == TensorShape({2, 4})));
+    CHECK((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16));
 
     IAclTensorHandle* outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[2]);
-    BOOST_TEST((outputHandle->GetShape() == TensorShape({2, 4})));
-    BOOST_TEST((outputHandle->GetDataType() == arm_compute::DataType::QASYMM8_SIGNED));
+    CHECK((outputHandle->GetShape() == TensorShape({2, 4})));
+    CHECK((outputHandle->GetDataType() == arm_compute::DataType::QASYMM8_SIGNED));
 }
 
-BOOST_AUTO_TEST_CASE(CreateQLstmWorkloadTest)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateQLstmWorkloadTest")
 {
     ClCreateQLstmWorkloadTest<ClQLstmWorkload>();
 }
@@ -1270,29 +1272,29 @@
     QuantizedLstmQueueDescriptor queueDescriptor = workload->GetData();
 
     IAclTensorHandle* inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    BOOST_TEST((inputHandle->GetShape() == TensorShape({2, 2})));
-    BOOST_TEST((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8));
+    CHECK((inputHandle->GetShape() == TensorShape({2, 2})));
+    CHECK((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8));
 
     IAclTensorHandle* cellStateInHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
-    BOOST_TEST((cellStateInHandle->GetShape() == TensorShape({2, 4})));
-    BOOST_TEST((cellStateInHandle->GetDataType() == arm_compute::DataType::QSYMM16));
+    CHECK((cellStateInHandle->GetShape() == TensorShape({2, 4})));
+    CHECK((cellStateInHandle->GetDataType() == arm_compute::DataType::QSYMM16));
 
     IAclTensorHandle* outputStateInHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[2]);
-    BOOST_TEST((outputStateInHandle->GetShape() == TensorShape({2, 4})));
-    BOOST_TEST((outputStateInHandle->GetDataType() == arm_compute::DataType::QASYMM8));
+    CHECK((outputStateInHandle->GetShape() == TensorShape({2, 4})));
+    CHECK((outputStateInHandle->GetDataType() == arm_compute::DataType::QASYMM8));
 
     IAclTensorHandle* cellStateOutHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST((cellStateOutHandle->GetShape() == TensorShape({2, 4})));
-    BOOST_TEST((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16));
+    CHECK((cellStateOutHandle->GetShape() == TensorShape({2, 4})));
+    CHECK((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16));
 
     IAclTensorHandle* outputStateOutHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
-    BOOST_TEST((outputStateOutHandle->GetShape() == TensorShape({2, 4})));
-    BOOST_TEST((outputStateOutHandle->GetDataType() == arm_compute::DataType::QASYMM8));
+    CHECK((outputStateOutHandle->GetShape() == TensorShape({2, 4})));
+    CHECK((outputStateOutHandle->GetDataType() == arm_compute::DataType::QASYMM8));
 }
 
-BOOST_AUTO_TEST_CASE(CreateQuantizedLstmWorkload)
+TEST_CASE_FIXTURE(ClContextControlFixture, "CreateQuantizedLstmWorkload")
 {
     ClCreateQuantizedLstmWorkloadTest<ClQuantizedLstmWorkload>();
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
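
A note on the fixture handling in this file: BOOST_FIXTURE_TEST_SUITE applied ClContextControlFixture to every case in the suite, whereas doctest's TEST_SUITE only groups and names cases, so the fixture has to be attached per case with TEST_CASE_FIXTURE — which is why each converted case above repeats the fixture name. A minimal sketch of the pattern, using a hypothetical fixture in place of ClContextControlFixture:

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN // standalone sketch only; not how the ArmNN test runner is built
#include <doctest/doctest.h>

// Hypothetical fixture standing in for ClContextControlFixture.
struct DummyContextFixture
{
    DummyContextFixture() : m_ContextReady(true) {} // acquire the (fake) context
    ~DummyContextFixture() = default;               // a real fixture would tear the context down here
    bool m_ContextReady;
};

TEST_SUITE("CreateWorkloadSketch")
{

TEST_CASE_FIXTURE(DummyContextFixture, "FixtureIsConstructedForEachCase")
{
    // The body runs as a member of a class derived from the fixture,
    // so fixture members are accessed directly, just as in the Boost version.
    CHECK(m_ContextReady);
}

} // TEST_SUITE("CreateWorkloadSketch")
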
diff --git a/src/backends/cl/test/ClEndToEndTests.cpp b/src/backends/cl/test/ClEndToEndTests.cpp
index edee368..9e0137e 100644
--- a/src/backends/cl/test/ClEndToEndTests.cpp
+++ b/src/backends/cl/test/ClEndToEndTests.cpp
@@ -21,14 +21,14 @@
 #include <backendsCommon/test/SplitterEndToEndTestImpl.hpp>
 #include <backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(ClEndToEnd)
-
-std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::GpuAcc};
+TEST_SUITE("ClEndToEnd")
+{
+std::vector<armnn::BackendId> clDefaultBackends = {armnn::Compute::GpuAcc};
 
 // Abs
-BOOST_AUTO_TEST_CASE(ClAbsEndToEndTestFloat32)
+TEST_CASE("ClAbsEndToEndTestFloat32")
 {
     std::vector<float> expectedOutput =
     {
@@ -36,482 +36,482 @@
         3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
     };
 
-    ElementwiseUnarySimpleEndToEnd<armnn::DataType::Float32>(defaultBackends,
+    ElementwiseUnarySimpleEndToEnd<armnn::DataType::Float32>(clDefaultBackends,
                                                              UnaryOperation::Abs,
                                                              expectedOutput);
 }
 
 // Constant
-BOOST_AUTO_TEST_CASE(ConstantUsage_Cl_Float32)
+TEST_CASE("ConstantUsage_Cl_Float32")
 {
-    ConstantUsageFloat32Test(defaultBackends);
+    ConstantUsageFloat32Test(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim0Test)
+TEST_CASE("ClConcatEndToEndDim0Test")
 {
-    ConcatDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ConcatDim0EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim0Uint8Test)
+TEST_CASE("ClConcatEndToEndDim0Uint8Test")
 {
-    ConcatDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ConcatDim0EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim1Test)
+TEST_CASE("ClConcatEndToEndDim1Test")
 {
-    ConcatDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ConcatDim1EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim1Uint8Test)
+TEST_CASE("ClConcatEndToEndDim1Uint8Test")
 {
-    ConcatDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ConcatDim1EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim3Test)
+TEST_CASE("ClConcatEndToEndDim3Test")
 {
-    ConcatDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ConcatDim3EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClConcatEndToEndDim3Uint8Test)
+TEST_CASE("ClConcatEndToEndDim3Uint8Test")
 {
-    ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
 // DepthToSpace
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwFloat32)
+TEST_CASE("DephtToSpaceEndToEndNchwFloat32")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NCHW);
+    DepthToSpaceEndToEnd<armnn::DataType::Float32>(clDefaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwFloat16)
+TEST_CASE("DephtToSpaceEndToEndNchwFloat16")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::Float16>(defaultBackends, armnn::DataLayout::NCHW);
+    DepthToSpaceEndToEnd<armnn::DataType::Float16>(clDefaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwUint8)
+TEST_CASE("DephtToSpaceEndToEndNchwUint8")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
+    DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwInt16)
+TEST_CASE("DephtToSpaceEndToEndNchwInt16")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
+    DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(clDefaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat32)
+TEST_CASE("DephtToSpaceEndToEndNhwcFloat32")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NHWC);
+    DepthToSpaceEndToEnd<armnn::DataType::Float32>(clDefaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat16)
+TEST_CASE("DephtToSpaceEndToEndNhwcFloat16")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::Float16>(defaultBackends, armnn::DataLayout::NHWC);
+    DepthToSpaceEndToEnd<armnn::DataType::Float16>(clDefaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcUint8)
+TEST_CASE("DephtToSpaceEndToEndNhwcUint8")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
+    DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcInt16)
+TEST_CASE("DephtToSpaceEndToEndNhwcInt16")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
+    DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(clDefaultBackends, armnn::DataLayout::NHWC);
 }
 
 // Dequantize
-BOOST_AUTO_TEST_CASE(DequantizeEndToEndSimpleTest)
+TEST_CASE("DequantizeEndToEndSimpleTest")
 {
-    DequantizeEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
+    DequantizeEndToEndSimple<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(DequantizeEndToEndOffsetTest)
+TEST_CASE("DequantizeEndToEndOffsetTest")
 {
-    DequantizeEndToEndOffset<armnn::DataType::QAsymmU8>(defaultBackends);
+    DequantizeEndToEndOffset<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClStridedSliceInvalidSliceEndToEndTest)
+TEST_CASE("ClStridedSliceInvalidSliceEndToEndTest")
 {
-    StridedSliceInvalidSliceEndToEndTest(defaultBackends);
+    StridedSliceInvalidSliceEndToEndTest(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClEluEndToEndTestFloat32)
+TEST_CASE("ClEluEndToEndTestFloat32")
 {
-    EluEndToEndTest<armnn::DataType::Float32>(defaultBackends);
+    EluEndToEndTest<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClEluEndToEndTestFloat16)
+TEST_CASE("ClEluEndToEndTestFloat16")
 {
-    EluEndToEndTest<armnn::DataType::Float16>(defaultBackends);
+    EluEndToEndTest<armnn::DataType::Float16>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClGreaterSimpleEndToEndTest)
+TEST_CASE("ClGreaterSimpleEndToEndTest")
 {
     const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0,  1, 1, 1, 1,
                                                 0, 0, 0, 0,  0, 0, 0, 0 });
 
-    ComparisonSimpleEndToEnd<armnn::DataType::Float32>(defaultBackends,
+    ComparisonSimpleEndToEnd<armnn::DataType::Float32>(clDefaultBackends,
                                                        ComparisonOperation::Greater,
                                                        expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(ClGreaterSimpleEndToEndUint8Test)
+TEST_CASE("ClGreaterSimpleEndToEndUint8Test")
 {
     const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0,  1, 1, 1, 1,
                                                 0, 0, 0, 0,  0, 0, 0, 0 });
 
-    ComparisonSimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
+    ComparisonSimpleEndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends,
                                                                ComparisonOperation::Greater,
                                                                expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(ClGreaterBroadcastEndToEndTest)
+TEST_CASE("ClGreaterBroadcastEndToEndTest")
 {
     const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
                                                 1, 1, 1, 1, 1, 1 });
 
-    ComparisonBroadcastEndToEnd<armnn::DataType::Float32>(defaultBackends,
+    ComparisonBroadcastEndToEnd<armnn::DataType::Float32>(clDefaultBackends,
                                                           ComparisonOperation::Greater,
                                                           expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(ClGreaterBroadcastEndToEndUint8Test)
+TEST_CASE("ClGreaterBroadcastEndToEndUint8Test")
 {
     const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
                                                 1, 1, 1, 1, 1, 1 });
 
-    ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
+    ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends,
                                                                   ComparisonOperation::Greater,
                                                                   expectedOutput);
 }
 
 // HardSwish
-BOOST_AUTO_TEST_CASE(ClHardSwishEndToEndTestFloat32)
+TEST_CASE("ClHardSwishEndToEndTestFloat32")
 {
-    HardSwishEndToEndTest<armnn::DataType::Float32>(defaultBackends);
+    HardSwishEndToEndTest<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClHardSwishEndToEndTestFloat16)
+TEST_CASE("ClHardSwishEndToEndTestFloat16")
 {
-    HardSwishEndToEndTest<armnn::DataType::Float16>(defaultBackends);
+    HardSwishEndToEndTest<armnn::DataType::Float16>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClHardSwishEndToEndTestQAsymmS8)
+TEST_CASE("ClHardSwishEndToEndTestQAsymmS8")
 {
-    HardSwishEndToEndTest<armnn::DataType::QAsymmS8>(defaultBackends);
+    HardSwishEndToEndTest<armnn::DataType::QAsymmS8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClHardSwishEndToEndTestQAsymmU8)
+TEST_CASE("ClHardSwishEndToEndTestQAsymmU8")
 {
-    HardSwishEndToEndTest<armnn::DataType::QAsymmU8>(defaultBackends);
+    HardSwishEndToEndTest<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClHardSwishEndToEndTestQSymmS16)
+TEST_CASE("ClHardSwishEndToEndTestQSymmS16")
 {
-    HardSwishEndToEndTest<armnn::DataType::QSymmS16>(defaultBackends);
+    HardSwishEndToEndTest<armnn::DataType::QSymmS16>(clDefaultBackends);
 }
 
 // InstanceNormalization
-BOOST_AUTO_TEST_CASE(ClInstanceNormalizationNhwcEndToEndTest1)
+TEST_CASE("ClInstanceNormalizationNhwcEndToEndTest1")
 {
-    InstanceNormalizationNhwcEndToEndTest1(defaultBackends);
+    InstanceNormalizationNhwcEndToEndTest1(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClInstanceNormalizationNchwEndToEndTest1)
+TEST_CASE("ClInstanceNormalizationNchwEndToEndTest1")
 {
-    InstanceNormalizationNchwEndToEndTest1(defaultBackends);
+    InstanceNormalizationNchwEndToEndTest1(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClInstanceNormalizationNhwcEndToEndTest2)
+TEST_CASE("ClInstanceNormalizationNhwcEndToEndTest2")
 {
-    InstanceNormalizationNhwcEndToEndTest2(defaultBackends);
+    InstanceNormalizationNhwcEndToEndTest2(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClInstanceNormalizationNchwEndToEndTest2)
+TEST_CASE("ClInstanceNormalizationNchwEndToEndTest2")
 {
-    InstanceNormalizationNchwEndToEndTest2(defaultBackends);
+    InstanceNormalizationNchwEndToEndTest2(clDefaultBackends);
 }
 
 // Fill
-BOOST_AUTO_TEST_CASE(ClFillEndToEndTest)
+TEST_CASE("ClFillEndToEndTest")
 {
-    FillEndToEnd<armnn::DataType::Float32>(defaultBackends);
+    FillEndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefFillEndToEndTestFloat16)
+TEST_CASE("RefFillEndToEndTestFloat16")
 {
-    FillEndToEnd<armnn::DataType::Float16>(defaultBackends);
+    FillEndToEnd<armnn::DataType::Float16>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClFillEndToEndTestInt32)
+TEST_CASE("ClFillEndToEndTestInt32")
 {
-    FillEndToEnd<armnn::DataType::Signed32>(defaultBackends);
+    FillEndToEnd<armnn::DataType::Signed32>(clDefaultBackends);
 }
 
 // Prelu
-BOOST_AUTO_TEST_CASE(ClPreluEndToEndFloat32Test)
+TEST_CASE("ClPreluEndToEndFloat32Test")
 {
-    PreluEndToEndNegativeTest<armnn::DataType::Float32>(defaultBackends);
+    PreluEndToEndNegativeTest<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClPreluEndToEndTestUint8)
+TEST_CASE("ClPreluEndToEndTestUint8")
 {
-    PreluEndToEndPositiveTest<armnn::DataType::QAsymmU8>(defaultBackends);
+    PreluEndToEndPositiveTest<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSpaceToDepthNhwcEndToEndTest1)
+TEST_CASE("ClSpaceToDepthNhwcEndToEndTest1")
 {
-    SpaceToDepthNhwcEndToEndTest1(defaultBackends);
+    SpaceToDepthNhwcEndToEndTest1(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSpaceToDepthNchwEndToEndTest1)
+TEST_CASE("ClSpaceToDepthNchwEndToEndTest1")
 {
-    SpaceToDepthNchwEndToEndTest1(defaultBackends);
+    SpaceToDepthNchwEndToEndTest1(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSpaceToDepthNhwcEndToEndTest2)
+TEST_CASE("ClSpaceToDepthNhwcEndToEndTest2")
 {
-    SpaceToDepthNhwcEndToEndTest2(defaultBackends);
+    SpaceToDepthNhwcEndToEndTest2(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSpaceToDepthNchwEndToEndTest2)
+TEST_CASE("ClSpaceToDepthNchwEndToEndTest2")
 {
-    SpaceToDepthNchwEndToEndTest2(defaultBackends);
+    SpaceToDepthNchwEndToEndTest2(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter1dEndToEndTest)
+TEST_CASE("ClSplitter1dEndToEndTest")
 {
-    Splitter1dEndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter1dEndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter1dEndToEndUint8Test)
+TEST_CASE("ClSplitter1dEndToEndUint8Test")
 {
-    Splitter1dEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter1dEndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter2dDim0EndToEndTest)
+TEST_CASE("ClSplitter2dDim0EndToEndTest")
 {
-    Splitter2dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter2dDim0EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter2dDim1EndToEndTest)
+TEST_CASE("ClSplitter2dDim1EndToEndTest")
 {
-    Splitter2dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter2dDim1EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter2dDim0EndToEndUint8Test)
+TEST_CASE("ClSplitter2dDim0EndToEndUint8Test")
 {
-    Splitter2dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter2dDim0EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter2dDim1EndToEndUint8Test)
+TEST_CASE("ClSplitter2dDim1EndToEndUint8Test")
 {
-    Splitter2dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter2dDim1EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter3dDim0EndToEndTest)
+TEST_CASE("ClSplitter3dDim0EndToEndTest")
 {
-    Splitter3dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter3dDim0EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter3dDim1EndToEndTest)
+TEST_CASE("ClSplitter3dDim1EndToEndTest")
 {
-    Splitter3dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter3dDim1EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter3dDim2EndToEndTest)
+TEST_CASE("ClSplitter3dDim2EndToEndTest")
 {
-    Splitter3dDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter3dDim2EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter3dDim0EndToEndUint8Test)
+TEST_CASE("ClSplitter3dDim0EndToEndUint8Test")
 {
-    Splitter3dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter3dDim0EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter3dDim1EndToEndUint8Test)
+TEST_CASE("ClSplitter3dDim1EndToEndUint8Test")
 {
-    Splitter3dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter3dDim1EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter3dDim2EndToEndUint8Test)
+TEST_CASE("ClSplitter3dDim2EndToEndUint8Test")
 {
-    Splitter3dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter3dDim2EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter4dDim0EndToEndTest)
+TEST_CASE("ClSplitter4dDim0EndToEndTest")
 {
-    Splitter4dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter4dDim0EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter4dDim1EndToEndTest)
+TEST_CASE("ClSplitter4dDim1EndToEndTest")
 {
-    Splitter4dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter4dDim1EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter4dDim2EndToEndTest)
+TEST_CASE("ClSplitter4dDim2EndToEndTest")
 {
-    Splitter4dDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter4dDim2EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter4dDim3EndToEndTest)
+TEST_CASE("ClSplitter4dDim3EndToEndTest")
 {
-    Splitter4dDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter4dDim3EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter4dDim0EndToEndUint8Test)
+TEST_CASE("ClSplitter4dDim0EndToEndUint8Test")
 {
-    Splitter4dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter4dDim0EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter4dDim1EndToEndUint8Test)
+TEST_CASE("ClSplitter4dDim1EndToEndUint8Test")
 {
-    Splitter4dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter4dDim1EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter4dDim2EndToEndUint8Test)
+TEST_CASE("ClSplitter4dDim2EndToEndUint8Test")
 {
-    Splitter4dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter4dDim2EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClSplitter4dDim3EndToEndUint8Test)
+TEST_CASE("ClSplitter4dDim3EndToEndUint8Test")
 {
-    Splitter4dDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter4dDim3EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
 // TransposeConvolution2d
-BOOST_AUTO_TEST_CASE(ClTransposeConvolution2dEndToEndFloatNchwTest)
+TEST_CASE("ClTransposeConvolution2dEndToEndFloatNchwTest")
 {
     TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
-        defaultBackends, armnn::DataLayout::NCHW);
+        clDefaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(ClTransposeConvolution2dEndToEndUint8NchwTest)
+TEST_CASE("ClTransposeConvolution2dEndToEndUint8NchwTest")
 {
     TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
-        defaultBackends, armnn::DataLayout::NCHW);
+        clDefaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(ClTransposeConvolution2dEndToEndFloatNhwcTest)
+TEST_CASE("ClTransposeConvolution2dEndToEndFloatNhwcTest")
 {
     TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
-        defaultBackends, armnn::DataLayout::NHWC);
+        clDefaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(ClTransposeConvolution2dEndToEndUint8NhwcTest)
+TEST_CASE("ClTransposeConvolution2dEndToEndUint8NhwcTest")
 {
     TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
-        defaultBackends, armnn::DataLayout::NHWC);
+        clDefaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(ClQuantizedLstmEndToEndTest)
+TEST_CASE("ClQuantizedLstmEndToEndTest")
 {
-    QuantizedLstmEndToEnd(defaultBackends);
+    QuantizedLstmEndToEnd(clDefaultBackends);
 }
 
 // ArgMinMax
-BOOST_AUTO_TEST_CASE(ClArgMaxSimpleTest)
+TEST_CASE("ClArgMaxSimpleTest")
 {
-    ArgMaxEndToEndSimple<armnn::DataType::Float32>(defaultBackends);
+    ArgMaxEndToEndSimple<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMinSimpleTest)
+TEST_CASE("ClArgMinSimpleTest")
 {
-    ArgMinEndToEndSimple<armnn::DataType::Float32>(defaultBackends);
+    ArgMinEndToEndSimple<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMaxAxis0Test)
+TEST_CASE("ClArgMaxAxis0Test")
 {
-    ArgMaxAxis0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMaxAxis0EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMinAxis0Test)
+TEST_CASE("ClArgMinAxis0Test")
 {
-    ArgMinAxis0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMinAxis0EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMaxAxis1Test)
+TEST_CASE("ClArgMaxAxis1Test")
 {
-    ArgMaxAxis1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMaxAxis1EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMinAxis1Test)
+TEST_CASE("ClArgMinAxis1Test")
 {
-    ArgMinAxis1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMinAxis1EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMaxAxis2Test)
+TEST_CASE("ClArgMaxAxis2Test")
 {
-    ArgMaxAxis2EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMaxAxis2EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMinAxis2Test)
+TEST_CASE("ClArgMinAxis2Test")
 {
-    ArgMinAxis2EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMinAxis2EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMaxAxis3Test)
+TEST_CASE("ClArgMaxAxis3Test")
 {
-    ArgMaxAxis3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMaxAxis3EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMinAxis3Test)
+TEST_CASE("ClArgMinAxis3Test")
 {
-    ArgMinAxis3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMinAxis3EndToEnd<armnn::DataType::Float32>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMaxSimpleTestQAsymmU8)
+TEST_CASE("ClArgMaxSimpleTestQAsymmU8")
 {
-    ArgMaxEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMaxEndToEndSimple<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMinSimpleTestQAsymmU8)
+TEST_CASE("ClArgMinSimpleTestQAsymmU8")
 {
-    ArgMinEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMinEndToEndSimple<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMaxAxis0TestQAsymmU8)
+TEST_CASE("ClArgMaxAxis0TestQAsymmU8")
 {
-    ArgMaxAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMaxAxis0EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMinAxis0TestQAsymmU8)
+TEST_CASE("ClArgMinAxis0TestQAsymmU8")
 {
-    ArgMinAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMinAxis0EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMaxAxis1TestQAsymmU8)
+TEST_CASE("ClArgMaxAxis1TestQAsymmU8")
 {
-    ArgMaxAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMaxAxis1EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMinAxis1TestQAsymmU8)
+TEST_CASE("ClArgMinAxis1TestQAsymmU8")
 {
-    ArgMinAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMinAxis1EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMaxAxis2TestQAsymmU8)
+TEST_CASE("ClArgMaxAxis2TestQAsymmU8")
 {
-    ArgMaxAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMaxAxis2EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMinAxis2TestQAsymmU8)
+TEST_CASE("ClArgMinAxis2TestQAsymmU8")
 {
-    ArgMinAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMinAxis2EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMaxAxis3TestQAsymmU8)
+TEST_CASE("ClArgMaxAxis3TestQAsymmU8")
 {
-    ArgMaxAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMaxAxis3EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClArgMinAxis3TestQAsymmU8)
+TEST_CASE("ClArgMinAxis3TestQAsymmU8")
 {
-    ArgMinAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMinAxis3EndToEnd<armnn::DataType::QAsymmU8>(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(ClQLstmEndToEndTest)
+TEST_CASE("ClQLstmEndToEndTest")
 {
-    QLstmEndToEnd(defaultBackends);
+    QLstmEndToEnd(clDefaultBackends);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
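
Note on the rename above: every Cl end-to-end case now takes an explicitly named clDefaultBackends list rather than the generic defaultBackends, keeping the GpuAcc selection unambiguous where these shared helpers are reused across backends. A minimal sketch of what such a definition could look like, assuming the GpuAcc backend id that the CL tests elsewhere in this change pass to Optimize (hypothetical; the real declaration lives outside this hunk):

    // Hypothetical sketch only - the actual clDefaultBackends declaration
    // sits elsewhere in the CL test sources.
    #include <armnn/BackendId.hpp>

    #include <vector>

    static const std::vector<armnn::BackendId> clDefaultBackends = { armnn::Compute::GpuAcc };
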
diff --git a/src/backends/cl/test/ClFallbackTests.cpp b/src/backends/cl/test/ClFallbackTests.cpp
index 183b8ca..7721206 100644
--- a/src/backends/cl/test/ClFallbackTests.cpp
+++ b/src/backends/cl/test/ClFallbackTests.cpp
@@ -7,11 +7,11 @@
 
 #include <test/GraphUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(ClFallback)
-
-BOOST_AUTO_TEST_CASE(ClImportEnabledFallbackToNeon)
+TEST_SUITE("ClFallback")
+{
+TEST_CASE("ClImportEnabledFallbackToNeon")
 {
     using namespace armnn;
 
@@ -62,18 +62,18 @@
     armnn::Layer* const layer6 = GetFirstLayerWithName(graph, "output");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layer0, layer1));
-    BOOST_TEST(CheckOrder(graph, layer1, layer2));
-    BOOST_TEST(CheckOrder(graph, layer2, layer3));
-    BOOST_TEST(CheckOrder(graph, layer3, layer4));
-    BOOST_TEST(CheckOrder(graph, layer4, layer5));
-    BOOST_TEST(CheckOrder(graph, layer5, layer6));
+    CHECK(CheckOrder(graph, layer0, layer1));
+    CHECK(CheckOrder(graph, layer1, layer2));
+    CHECK(CheckOrder(graph, layer2, layer3));
+    CHECK(CheckOrder(graph, layer3, layer4));
+    CHECK(CheckOrder(graph, layer4, layer5));
+    CHECK(CheckOrder(graph, layer5, layer6));
 
     // Use memory import between backends
-    BOOST_TEST((layer4->GetType() == LayerType::MemCopy));
+    CHECK((layer4->GetType() == LayerType::MemCopy));
 
     // Correctly use backend hint
-    BOOST_TEST((layer5->GetBackendId() == Compute::CpuAcc ));
+    CHECK((layer5->GetBackendId() == Compute::CpuAcc ));
 
     // Load it into the runtime. It should pass.
     NetworkId netId;
@@ -109,14 +109,14 @@
     size_t space = totalBytes + alignment + alignment;
     auto inputData0 = std::make_unique<uint8_t[]>(space);
     void* alignedInputPtr0 = inputData0.get();
-    BOOST_CHECK(std::align(alignment, totalBytes, alignedInputPtr0, space));
+    CHECK(std::align(alignment, totalBytes, alignedInputPtr0, space));
 
     auto* intputPtr0 = reinterpret_cast<float*>(alignedInputPtr0);
     std::copy(inputValue0.begin(), inputValue0.end(), intputPtr0);
 
     auto inputData1 = std::make_unique<uint8_t[]>(space);
     void* alignedInputPtr1 = inputData1.get();
-    BOOST_CHECK(std::align(alignment, totalBytes, alignedInputPtr1, space));
+    CHECK(std::align(alignment, totalBytes, alignedInputPtr1, space));
 
     auto* intputPtr1 = reinterpret_cast<float*>(alignedInputPtr1);
     std::copy(inputValue1.begin(), inputValue1.end(), intputPtr1);
@@ -145,19 +145,19 @@
 
     // Executed Subtraction using CpuAcc
     std::size_t found = dump.find("NeonSubtractionWorkload_Execute");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contain CopyMemGeneric
     found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Check output is as expected
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
 
     runtime->UnloadNetwork(netId);
 }
 
-BOOST_AUTO_TEST_CASE(ClImportDisabledFallbackToNeon)
+TEST_CASE("ClImportDisabledFallbackToNeon")
 {
     using namespace armnn;
 
@@ -207,18 +207,18 @@
     armnn::Layer* const layer6 = GetFirstLayerWithName(graph, "output");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layer0, layer1));
-    BOOST_TEST(CheckOrder(graph, layer1, layer2));
-    BOOST_TEST(CheckOrder(graph, layer2, layer3));
-    BOOST_TEST(CheckOrder(graph, layer3, layer4));
-    BOOST_TEST(CheckOrder(graph, layer4, layer5));
-    BOOST_TEST(CheckOrder(graph, layer5, layer6));
+    CHECK(CheckOrder(graph, layer0, layer1));
+    CHECK(CheckOrder(graph, layer1, layer2));
+    CHECK(CheckOrder(graph, layer2, layer3));
+    CHECK(CheckOrder(graph, layer3, layer4));
+    CHECK(CheckOrder(graph, layer4, layer5));
+    CHECK(CheckOrder(graph, layer5, layer6));
 
     // Use memory import between backends
-    BOOST_TEST((layer4->GetType() == LayerType::MemCopy));
+    CHECK((layer4->GetType() == LayerType::MemCopy));
 
     // Correctly use backend hint
-    BOOST_TEST((layer5->GetBackendId() == Compute::CpuAcc ));
+    CHECK((layer5->GetBackendId() == Compute::CpuAcc ));
 
     // Load it into the runtime. It should pass.
     NetworkId netId;
@@ -269,17 +269,17 @@
 
     // Executed Subtraction using CpuAcc
     std::size_t found = dump.find("NeonSubtractionWorkload_Execute");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contain CopyMemGeneric
     found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Check output is as expected
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(ClImportEnabledFallbackSubgraphToNeon)
+TEST_CASE("ClImportEnabledFallbackSubgraphToNeon")
 {
     using namespace armnn;
 
@@ -342,21 +342,21 @@
     armnn::Layer* const layer8 = GetFirstLayerWithName(graph, "output");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layer0, layer1));
-    BOOST_TEST(CheckOrder(graph, layer1, layer2));
-    BOOST_TEST(CheckOrder(graph, layer2, layer3));
-    BOOST_TEST(CheckOrder(graph, layer3, layer4));
-    BOOST_TEST(CheckOrder(graph, layer4, layer5));
-    BOOST_TEST(CheckOrder(graph, layer5, layer6));
-    BOOST_TEST(CheckOrder(graph, layer6, layer7));
-    BOOST_TEST(CheckOrder(graph, layer7, layer8));
+    CHECK(CheckOrder(graph, layer0, layer1));
+    CHECK(CheckOrder(graph, layer1, layer2));
+    CHECK(CheckOrder(graph, layer2, layer3));
+    CHECK(CheckOrder(graph, layer3, layer4));
+    CHECK(CheckOrder(graph, layer4, layer5));
+    CHECK(CheckOrder(graph, layer5, layer6));
+    CHECK(CheckOrder(graph, layer6, layer7));
+    CHECK(CheckOrder(graph, layer7, layer8));
 
     // Use memory import between backends
-    BOOST_TEST((layer4->GetType() == LayerType::MemCopy));
-    BOOST_TEST((layer6->GetType() == LayerType::MemCopy));
+    CHECK((layer4->GetType() == LayerType::MemCopy));
+    CHECK((layer6->GetType() == LayerType::MemCopy));
 
     // Correctly use backend hint
-    BOOST_TEST((layer5->GetBackendId() == Compute::CpuAcc ));
+    CHECK((layer5->GetBackendId() == Compute::CpuAcc ));
 
     // Load it into the runtime. It should pass.
     NetworkId netId;
@@ -388,14 +388,14 @@
     size_t space = totalBytes + alignment + alignment;
     auto inputData0 = std::make_unique<uint8_t[]>(space);
     void* alignedInputPtr0 = inputData0.get();
-    BOOST_CHECK(std::align(alignment, totalBytes, alignedInputPtr0, space));
+    CHECK(std::align(alignment, totalBytes, alignedInputPtr0, space));
 
     auto* intputPtr0 = reinterpret_cast<float*>(alignedInputPtr0);
     std::copy(inputValue0.begin(), inputValue0.end(), intputPtr0);
 
     auto inputData1 = std::make_unique<uint8_t[]>(space);
     void* alignedInputPtr1 = inputData1.get();
-    BOOST_CHECK(std::align(alignment, totalBytes, alignedInputPtr1, space));
+    CHECK(std::align(alignment, totalBytes, alignedInputPtr1, space));
 
     auto* intputPtr1 = reinterpret_cast<float*>(alignedInputPtr1);
     std::copy(inputValue1.begin(), inputValue1.end(), intputPtr1);
@@ -424,23 +424,23 @@
 
     // Executed Subtraction using CpuAcc
     std::size_t found = dump.find("NeonSubtractionWorkload_Execute");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Correctly switch back to GpuAcc
     found = dump.find("ClPooling2dWorkload_Execute");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contain CopyMemGeneric
     found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Check output is as expected
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
 
     runtime->UnloadNetwork(netId);
 }
 
-BOOST_AUTO_TEST_CASE(ClImportDisableFallbackSubgraphToNeon)
+TEST_CASE("ClImportDisableFallbackSubgraphToNeon")
 {
     using namespace armnn;
 
@@ -498,21 +498,21 @@
     armnn::Layer* const layer8 = GetFirstLayerWithName(graph, "output");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layer0, layer1));
-    BOOST_TEST(CheckOrder(graph, layer1, layer2));
-    BOOST_TEST(CheckOrder(graph, layer2, layer3));
-    BOOST_TEST(CheckOrder(graph, layer3, layer4));
-    BOOST_TEST(CheckOrder(graph, layer4, layer5));
-    BOOST_TEST(CheckOrder(graph, layer5, layer6));
-    BOOST_TEST(CheckOrder(graph, layer6, layer7));
-    BOOST_TEST(CheckOrder(graph, layer7, layer8));
+    CHECK(CheckOrder(graph, layer0, layer1));
+    CHECK(CheckOrder(graph, layer1, layer2));
+    CHECK(CheckOrder(graph, layer2, layer3));
+    CHECK(CheckOrder(graph, layer3, layer4));
+    CHECK(CheckOrder(graph, layer4, layer5));
+    CHECK(CheckOrder(graph, layer5, layer6));
+    CHECK(CheckOrder(graph, layer6, layer7));
+    CHECK(CheckOrder(graph, layer7, layer8));
 
     // Use memory import between backends
-    BOOST_TEST((layer4->GetType() == LayerType::MemCopy));
-    BOOST_TEST((layer6->GetType() == LayerType::MemCopy));
+    CHECK((layer4->GetType() == LayerType::MemCopy));
+    CHECK((layer6->GetType() == LayerType::MemCopy));
 
     // Correctly use backend hint
-    BOOST_TEST((layer5->GetBackendId() == Compute::CpuAcc ));
+    CHECK((layer5->GetBackendId() == Compute::CpuAcc ));
 
     // Load it into the runtime. It should pass.
     NetworkId netId;
@@ -560,18 +560,18 @@
 
     // Executed Subtraction using CpuAcc
     std::size_t found = dump.find("NeonSubtractionWorkload_Execute");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Correctly switch back to GpuAcc
     found = dump.find("ClPooling2dWorkload_Execute");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contain CopyMemGeneric
     found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Check output is as expected
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
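
The fallback tests above repeatedly over-allocate a raw byte buffer and call std::align before importing the pointer, with the converted CHECK asserting that alignment succeeded. A self-contained sketch of that pattern, using illustrative sizes (the real tests derive totalBytes from the tensor info):

    // Standalone illustration of the aligned-buffer setup used by the
    // import/fallback tests; the sizes here are placeholders.
    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <memory>

    int main()
    {
        const size_t alignment  = 64;    // illustrative alignment requirement
        const size_t totalBytes = 128;   // illustrative payload size
        size_t space = totalBytes + alignment + alignment;

        auto data = std::make_unique<uint8_t[]>(space);
        void* alignedPtr = data.get();

        // std::align bumps alignedPtr to the next aligned address and shrinks
        // space accordingly; it returns nullptr if the payload no longer fits.
        assert(std::align(alignment, totalBytes, alignedPtr, space) != nullptr);

        auto* payload = static_cast<float*>(alignedPtr);
        payload[0] = 1.0f;
        return 0;
    }
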
diff --git a/src/backends/cl/test/ClImportTensorHandleFactoryTests.cpp b/src/backends/cl/test/ClImportTensorHandleFactoryTests.cpp
index 0c6a9c6..fee40fd 100644
--- a/src/backends/cl/test/ClImportTensorHandleFactoryTests.cpp
+++ b/src/backends/cl/test/ClImportTensorHandleFactoryTests.cpp
@@ -3,14 +3,17 @@
 // SPDX-License-Identifier: MIT
 //
 
+#include <armnn/utility/Assert.hpp>
+
 #include <cl/ClImportTensorHandleFactory.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(ClImportTensorHandleFactoryTests)
+TEST_SUITE("ClImportTensorHandleFactoryTests")
+{
 using namespace armnn;
 
-BOOST_AUTO_TEST_CASE(ImportTensorFactoryAskedToCreateManagedTensorThrowsException)
+TEST_CASE("ImportTensorFactoryAskedToCreateManagedTensorThrowsException")
 {
     // Create the factory to import tensors.
     ClImportTensorHandleFactory factory(static_cast<MemorySourceFlags>(MemorySource::Malloc),
@@ -18,11 +21,11 @@
     TensorInfo tensorInfo;
     // This factory is designed to import the memory of tensors. Asking for a handle that requires
     // a memory manager should result in an exception.
-    BOOST_REQUIRE_THROW(factory.CreateTensorHandle(tensorInfo, true), InvalidArgumentException);
-    BOOST_REQUIRE_THROW(factory.CreateTensorHandle(tensorInfo, DataLayout::NCHW, true), InvalidArgumentException);
+    REQUIRE_THROWS_AS(factory.CreateTensorHandle(tensorInfo, true), InvalidArgumentException);
+    REQUIRE_THROWS_AS(factory.CreateTensorHandle(tensorInfo, DataLayout::NCHW, true), InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(ImportTensorFactoryCreateMallocTensorHandle)
+TEST_CASE("ImportTensorFactoryCreateMallocTensorHandle")
 {
     // Create the factory to import tensors.
     ClImportTensorHandleFactory factory(static_cast<MemorySourceFlags>(MemorySource::Malloc),
@@ -32,24 +35,24 @@
     // Start with the TensorInfo factory method. Create an import tensor handle and verify the data is
     // passed through correctly.
     auto tensorHandle = factory.CreateTensorHandle(tensorInfo);
-    BOOST_ASSERT(tensorHandle);
-    BOOST_ASSERT(tensorHandle->GetImportFlags() == static_cast<MemorySourceFlags>(MemorySource::Malloc));
-    BOOST_ASSERT(tensorHandle->GetShape() == tensorShape);
+    ARMNN_ASSERT(tensorHandle);
+    ARMNN_ASSERT(tensorHandle->GetImportFlags() == static_cast<MemorySourceFlags>(MemorySource::Malloc));
+    ARMNN_ASSERT(tensorHandle->GetShape() == tensorShape);
 
     // Same method but explicitly specifying isManaged = false.
     tensorHandle = factory.CreateTensorHandle(tensorInfo, false);
-    BOOST_CHECK(tensorHandle);
-    BOOST_ASSERT(tensorHandle->GetImportFlags() == static_cast<MemorySourceFlags>(MemorySource::Malloc));
-    BOOST_ASSERT(tensorHandle->GetShape() == tensorShape);
+    CHECK(tensorHandle);
+    ARMNN_ASSERT(tensorHandle->GetImportFlags() == static_cast<MemorySourceFlags>(MemorySource::Malloc));
+    ARMNN_ASSERT(tensorHandle->GetShape() == tensorShape);
 
     // Now try TensorInfo and DataLayout factory method.
     tensorHandle = factory.CreateTensorHandle(tensorInfo, DataLayout::NHWC);
-    BOOST_CHECK(tensorHandle);
-    BOOST_ASSERT(tensorHandle->GetImportFlags() == static_cast<MemorySourceFlags>(MemorySource::Malloc));
-    BOOST_ASSERT(tensorHandle->GetShape() == tensorShape);
+    CHECK(tensorHandle);
+    ARMNN_ASSERT(tensorHandle->GetImportFlags() == static_cast<MemorySourceFlags>(MemorySource::Malloc));
+    ARMNN_ASSERT(tensorHandle->GetShape() == tensorShape);
 }
 
-BOOST_AUTO_TEST_CASE(CreateSubtensorOfImportTensor)
+TEST_CASE("CreateSubtensorOfImportTensor")
 {
     // Create the factory to import tensors.
     ClImportTensorHandleFactory factory(static_cast<MemorySourceFlags>(MemorySource::Malloc),
@@ -63,12 +66,12 @@
     // Starting at an offset of 1x1.
     uint32_t origin[4] = { 1, 1, 0, 0 };
     auto subTensor     = factory.CreateSubTensorHandle(*tensorHandle, subTensorShape, origin);
-    BOOST_CHECK(subTensor);
-    BOOST_ASSERT(subTensor->GetShape() == subTensorShape);
-    BOOST_ASSERT(subTensor->GetParent() == tensorHandle.get());
+    CHECK(subTensor);
+    ARMNN_ASSERT(subTensor->GetShape() == subTensorShape);
+    ARMNN_ASSERT(subTensor->GetParent() == tensorHandle.get());
 }
 
-BOOST_AUTO_TEST_CASE(CreateSubtensorNonZeroXYIsInvalid)
+TEST_CASE("CreateSubtensorNonZeroXYIsInvalid")
 {
     // Create the factory to import tensors.
     ClImportTensorHandleFactory factory(static_cast<MemorySourceFlags>(MemorySource::Malloc),
@@ -84,10 +87,10 @@
     uint32_t origin[4] = { 0, 0, 1, 1 };
     auto subTensor     = factory.CreateSubTensorHandle(*tensorHandle, subTensorShape, origin);
     // We expect a nullptr.
-    BOOST_ASSERT(subTensor == nullptr);
+    ARMNN_ASSERT(subTensor == nullptr);
 }
 
-BOOST_AUTO_TEST_CASE(CreateSubtensorXYMustMatchParent)
+TEST_CASE("CreateSubtensorXYMustMatchParent")
 {
     // Create the factory to import tensors.
     ClImportTensorHandleFactory factory(static_cast<MemorySourceFlags>(MemorySource::Malloc),
@@ -102,10 +105,10 @@
     uint32_t origin[4] = { 1, 1, 0, 0 };
     auto subTensor     = factory.CreateSubTensorHandle(*tensorHandle, subTensorShape, origin);
     // We expect a nullptr.
-    BOOST_ASSERT(subTensor == nullptr);
+    ARMNN_ASSERT(subTensor == nullptr);
 }
 
-BOOST_AUTO_TEST_CASE(CreateSubtensorMustBeSmallerThanParent)
+TEST_CASE("CreateSubtensorMustBeSmallerThanParent")
 {
     // Create the factory to import tensors.
     ClImportTensorHandleFactory factory(static_cast<MemorySourceFlags>(MemorySource::Malloc),
@@ -119,7 +122,7 @@
     uint32_t origin[4] = { 1, 1, 0, 0 };
     // This should result in a nullptr.
     auto subTensor = factory.CreateSubTensorHandle(*tensorHandle, subTensorShape, origin);
-    BOOST_ASSERT(subTensor == nullptr);
+    ARMNN_ASSERT(subTensor == nullptr);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
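
Two substitutions in the factory tests above are worth spelling out: BOOST_REQUIRE_THROW maps onto doctest's REQUIRE_THROWS_AS, which aborts the test case on failure (unlike the CHECK_ variants, which record the failure and continue), and BOOST_ASSERT is replaced by ARMNN_ASSERT from the newly included armnn/utility/Assert.hpp. A minimal, self-contained doctest sketch of the throw-checking macros, with purely illustrative names:

    // Illustrative doctest snippet (not ArmNN code) showing the exception
    // macros used in the converted tests.
    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
    #include <doctest/doctest.h>

    #include <stdexcept>

    static int RequireNonNegative(int value)
    {
        if (value < 0) { throw std::invalid_argument("negative"); }
        return value;
    }

    TEST_CASE("ThrowMacroSketch")
    {
        CHECK_NOTHROW(RequireNonNegative(1));
        // CHECK_THROWS_AS records a failure but keeps running the test case.
        CHECK_THROWS_AS(RequireNonNegative(-1), std::invalid_argument);
        // REQUIRE_THROWS_AS stops the test case immediately if it fails.
        REQUIRE_THROWS_AS(RequireNonNegative(-2), std::invalid_argument);
    }
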
diff --git a/src/backends/cl/test/ClImportTensorHandleTests.cpp b/src/backends/cl/test/ClImportTensorHandleTests.cpp
index 3c8bd67..931729a 100644
--- a/src/backends/cl/test/ClImportTensorHandleTests.cpp
+++ b/src/backends/cl/test/ClImportTensorHandleTests.cpp
@@ -9,16 +9,17 @@
 #include <cl/ClImportTensorHandleFactory.hpp>
 #include <cl/test/ClContextControlFixture.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <armnn/IRuntime.hpp>
 #include <armnn/INetwork.hpp>
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(ClImportTensorHandleTests)
-
-BOOST_FIXTURE_TEST_CASE(ClMallocImport, ClContextControlFixture)
+TEST_SUITE("ClImportTensorHandleTests")
+{
+TEST_CASE_FIXTURE(ClContextControlFixture, "ClMallocImport")
 {
     ClImportTensorHandleFactory handleFactory(static_cast<MemorySourceFlags>(MemorySource::Malloc),
                                               static_cast<MemorySourceFlags>(MemorySource::Malloc));
@@ -44,10 +45,10 @@
     size_t space = totalBytes + alignment + alignment;
     auto testData = std::make_unique<uint8_t[]>(space);
     void* alignedPtr = testData.get();
-    BOOST_CHECK(std::align(alignment, totalBytes, alignedPtr, space));
+    CHECK(std::align(alignment, totalBytes, alignedPtr, space));
 
     // Import memory
-    BOOST_CHECK(handle->Import(alignedPtr, armnn::MemorySource::Malloc));
+    CHECK(handle->Import(alignedPtr, armnn::MemorySource::Malloc));
 
     // Input with negative values
     auto* typedPtr = reinterpret_cast<float*>(alignedPtr);
@@ -60,11 +61,11 @@
     // Validate result by checking that the output has no negative values
     for(unsigned int i = 0; i < numElements; ++i)
     {
-        BOOST_TEST(typedPtr[i] >= 0);
+        CHECK(typedPtr[i] >= 0);
     }
 }
 
-BOOST_FIXTURE_TEST_CASE(ClIncorrectMemorySourceImport, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "ClIncorrectMemorySourceImport")
 {
     ClImportTensorHandleFactory handleFactory(static_cast<MemorySourceFlags>(MemorySource::Malloc),
                                               static_cast<MemorySourceFlags>(MemorySource::Malloc));
@@ -84,13 +85,13 @@
     size_t space = totalBytes + alignment + alignment;
     auto testData = std::make_unique<uint8_t[]>(space);
     void* alignedPtr = testData.get();
-    BOOST_CHECK(std::align(alignment, totalBytes, alignedPtr, space));
+    CHECK(std::align(alignment, totalBytes, alignedPtr, space));
 
     // Import memory
-    BOOST_CHECK_THROW(handle->Import(alignedPtr, armnn::MemorySource::Undefined), MemoryImportException);
+    CHECK_THROWS_AS(handle->Import(alignedPtr, armnn::MemorySource::Undefined), MemoryImportException);
 }
 
-BOOST_FIXTURE_TEST_CASE(ClInvalidMemorySourceImport, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "ClInvalidMemorySourceImport")
 {
     MemorySource invalidMemSource = static_cast<MemorySource>(256);
     ClImportTensorHandleFactory handleFactory(static_cast<MemorySourceFlags>(invalidMemSource),
@@ -108,10 +109,10 @@
     };
 
     // Import non-support memory
-    BOOST_CHECK_THROW(handle->Import(inputData.data(), invalidMemSource), MemoryImportException);
+    CHECK_THROWS_AS(handle->Import(inputData.data(), invalidMemSource), MemoryImportException);
 }
 
-BOOST_FIXTURE_TEST_CASE(ClImportEndToEnd, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "ClImportEndToEnd")
 {
     // Create runtime in which test will run
     IRuntime::CreationOptions options;
@@ -143,7 +144,7 @@
     optOptions.m_ImportEnabled = true;
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     IOptimizedNetworkPtr optNet = Optimize(*net, backends, runtime->GetDeviceSpec(), optOptions);
-    BOOST_CHECK(optNet);
+    CHECK(optNet);
 
     // Loads it into the runtime.
     NetworkId netId;
@@ -158,7 +159,7 @@
     size_t space = totalBytes + alignment + alignment;
     auto inputData = std::make_unique<uint8_t[]>(space);
     void* alignedInputPtr = inputData.get();
-    BOOST_CHECK(std::align(alignment, totalBytes, alignedInputPtr, space));
+    CHECK(std::align(alignment, totalBytes, alignedInputPtr, space));
 
     // Input with negative values
     auto* intputPtr = reinterpret_cast<float*>(alignedInputPtr);
@@ -166,7 +167,7 @@
 
     auto outputData = std::make_unique<uint8_t[]>(space);
     void* alignedOutputPtr = outputData.get();
-    BOOST_CHECK(std::align(alignment, totalBytes, alignedOutputPtr, space));
+    CHECK(std::align(alignment, totalBytes, alignedOutputPtr, space));
     auto* outputPtr = reinterpret_cast<float*>(alignedOutputPtr);
     std::fill_n(outputPtr, numElements, -10.0f);
 
@@ -192,26 +193,26 @@
 
     // Contains ActivationWorkload
     std::size_t found = dump.find("ActivationWorkload");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contains SyncMemGeneric
     found = dump.find("SyncMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Does not contain CopyMemGeneric
     found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found == std::string::npos);
+    CHECK(found == std::string::npos);
 
     runtime->UnloadNetwork(netId);
 
     // Check output is as expected
     // Validate result by checking that the output has no negative values
     auto* outputResult = reinterpret_cast<float*>(alignedOutputPtr);
-    BOOST_TEST(outputResult);
+    CHECK(outputResult);
     for(unsigned int i = 0; i < numElements; ++i)
     {
-        BOOST_TEST(outputResult[i] >= 0);
+        CHECK(outputResult[i] >= 0);
     }
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
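
The handle tests above move from BOOST_FIXTURE_TEST_CASE(Name, Fixture) to doctest's TEST_CASE_FIXTURE(Fixture, "Name"): the test body runs as a member of a class derived from the fixture, so the fixture is constructed and destroyed per test and its members are in scope directly. A minimal sketch with a stand-in fixture (ClContextControlFixture itself is defined elsewhere in the CL test sources):

    // Illustrative doctest fixture usage; DummyContextFixture is a stand-in,
    // not the real ClContextControlFixture.
    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
    #include <doctest/doctest.h>

    struct DummyContextFixture
    {
        DummyContextFixture() : contextReady(true) {}   // set-up runs before each test
        ~DummyContextFixture() = default;               // tear-down runs after each test
        bool contextReady;
    };

    TEST_SUITE("FixtureSketch")
    {
    TEST_CASE_FIXTURE(DummyContextFixture, "FixtureMembersAreVisible")
    {
        CHECK(contextReady);
    }
    }
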
diff --git a/src/backends/cl/test/ClJsonPrinterTests.cpp b/src/backends/cl/test/ClJsonPrinterTests.cpp
index d188a8e..2c24a53 100644
--- a/src/backends/cl/test/ClJsonPrinterTests.cpp
+++ b/src/backends/cl/test/ClJsonPrinterTests.cpp
@@ -8,16 +8,13 @@
 #include <cl/test/ClContextControlFixture.hpp>
 #include <backendsCommon/test/JsonPrinterTestImpl.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <vector>
 
-BOOST_FIXTURE_TEST_SUITE(ClJsonPrinter, ClProfilingContextControlFixture)
-
-BOOST_AUTO_TEST_CASE(SoftmaxProfilerJsonPrinterGpuAccTest)
+TEST_CASE_FIXTURE(ClProfilingContextControlFixture, "SoftmaxProfilerJsonPrinterGpuAccTest")
 {
     std::vector<armnn::BackendId> backends = {armnn::Compute::GpuAcc};
     RunSoftmaxProfilerJsonPrinterTest(backends);
-}
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/backends/cl/test/ClLayerSupportTests.cpp b/src/backends/cl/test/ClLayerSupportTests.cpp
index 794a45f..b18da11 100644
--- a/src/backends/cl/test/ClLayerSupportTests.cpp
+++ b/src/backends/cl/test/ClLayerSupportTests.cpp
@@ -16,176 +16,176 @@
 #include <backendsCommon/test/IsLayerSupportedTestImpl.hpp>
 #include <backendsCommon/test/LayerTests.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(ClLayerSupport)
-
-BOOST_FIXTURE_TEST_CASE(IsLayerSupportedFloat16Cl, ClContextControlFixture)
+TEST_SUITE("ClLayerSupport")
+{
+TEST_CASE_FIXTURE(ClContextControlFixture, "IsLayerSupportedFloat16Cl")
 {
     armnn::ClWorkloadFactory factory =
         ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
     IsLayerSupportedTests<armnn::ClWorkloadFactory, armnn::DataType::Float16>(&factory);
 }
 
-BOOST_FIXTURE_TEST_CASE(IsLayerSupportedFloat32Cl, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "IsLayerSupportedFloat32Cl")
 {
     armnn::ClWorkloadFactory factory =
         ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
     IsLayerSupportedTests<armnn::ClWorkloadFactory, armnn::DataType::Float32>(&factory);
 }
 
-BOOST_FIXTURE_TEST_CASE(IsLayerSupportedQAsymmU8Cl, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "IsLayerSupportedQAsymmU8Cl")
 {
     armnn::ClWorkloadFactory factory =
         ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
     IsLayerSupportedTests<armnn::ClWorkloadFactory, armnn::DataType::QAsymmU8>(&factory);
 }
 
-BOOST_FIXTURE_TEST_CASE(IsLayerSupportedQAsymmS8Cl, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "IsLayerSupportedQAsymmS8Cl")
 {
     armnn::ClWorkloadFactory factory =
         ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
     IsLayerSupportedTests<armnn::ClWorkloadFactory, armnn::DataType::QAsymmS8>(&factory);
 }
 
-BOOST_FIXTURE_TEST_CASE(IsLayerSupportedQSymmS8Cl, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "IsLayerSupportedQSymmS8Cl")
 {
     armnn::ClWorkloadFactory factory =
         ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
     IsLayerSupportedTests<armnn::ClWorkloadFactory, armnn::DataType::QSymmS8>(&factory);
 }
 
-BOOST_FIXTURE_TEST_CASE(IsConvertFp16ToFp32SupportedCl, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "IsConvertFp16ToFp32SupportedCl")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::ClWorkloadFactory, armnn::ConvertFp16ToFp32Layer,
       armnn::DataType::Float16, armnn::DataType::Float32>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_FIXTURE_TEST_CASE(IsConvertFp16ToFp32SupportedFp32InputCl, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "IsConvertFp16ToFp32SupportedFp32InputCl")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::ClWorkloadFactory, armnn::ConvertFp16ToFp32Layer,
       armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Input should be Float16");
+    CHECK(!result);
+    CHECK_EQ(reasonIfUnsupported, "Input should be Float16");
 }
 
-BOOST_FIXTURE_TEST_CASE(IsConvertFp16ToFp32SupportedFp16OutputCl, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "IsConvertFp16ToFp32SupportedFp16OutputCl")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::ClWorkloadFactory, armnn::ConvertFp16ToFp32Layer,
       armnn::DataType::Float16, armnn::DataType::Float16>(reasonIfUnsupported);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Output should be Float32");
+    CHECK(!result);
+    CHECK_EQ(reasonIfUnsupported, "Output should be Float32");
 }
 
-BOOST_FIXTURE_TEST_CASE(IsConvertFp32ToFp16SupportedCl, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "IsConvertFp32ToFp16SupportedCl")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::ClWorkloadFactory, armnn::ConvertFp32ToFp16Layer,
       armnn::DataType::Float32, armnn::DataType::Float16>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_FIXTURE_TEST_CASE(IsConvertFp32ToFp16SupportedFp16InputCl, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "IsConvertFp32ToFp16SupportedFp16InputCl")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::ClWorkloadFactory, armnn::ConvertFp32ToFp16Layer,
       armnn::DataType::Float16, armnn::DataType::Float16>(reasonIfUnsupported);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Input should be Float32");
+    CHECK(!result);
+    CHECK_EQ(reasonIfUnsupported, "Input should be Float32");
 }
 
-BOOST_FIXTURE_TEST_CASE(IsConvertFp32ToFp16SupportedFp32OutputCl, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "IsConvertFp32ToFp16SupportedFp32OutputCl")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::ClWorkloadFactory, armnn::ConvertFp32ToFp16Layer,
       armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Output should be Float16");
+    CHECK(!result);
+    CHECK_EQ(reasonIfUnsupported, "Output should be Float16");
 }
 
-BOOST_FIXTURE_TEST_CASE(IsLogicalBinarySupportedCl, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "IsLogicalBinarySupportedCl")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsLogicalBinaryLayerSupportedTests<armnn::ClWorkloadFactory,
       armnn::DataType::Boolean, armnn::DataType::Boolean>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_FIXTURE_TEST_CASE(IsLogicalBinaryBroadcastSupportedCl, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "IsLogicalBinaryBroadcastSupportedCl")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsLogicalBinaryLayerBroadcastSupportedTests<armnn::ClWorkloadFactory,
       armnn::DataType::Boolean, armnn::DataType::Boolean>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_FIXTURE_TEST_CASE(IsMeanSupportedCl, ClContextControlFixture)
+TEST_CASE_FIXTURE(ClContextControlFixture, "IsMeanSupportedCl")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsMeanLayerSupportedTests<armnn::ClWorkloadFactory,
       armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_AUTO_TEST_CASE(IsConstantSupportedCl)
+TEST_CASE("IsConstantSupportedCl")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
             armnn::DataType::Float16>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
             armnn::DataType::Float32>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
             armnn::DataType::QAsymmU8>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
             armnn::DataType::Boolean>(reasonIfUnsupported);
-    BOOST_CHECK(!result);
+    CHECK(!result);
 
     result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
             armnn::DataType::QSymmS16>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
             armnn::DataType::QSymmS8>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
             armnn::DataType::QAsymmS8>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::ClWorkloadFactory,
             armnn::DataType::BFloat16>(reasonIfUnsupported);
-    BOOST_CHECK(!result);
+    CHECK(!result);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
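
In the layer-support tests above, BOOST_CHECK_EQUAL becomes doctest's CHECK_EQ, which, like CHECK(a == b), prints both operands when the comparison fails. A trivial, self-contained sketch with illustrative values:

    // Illustrative only: equality checks after the migration.
    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
    #include <doctest/doctest.h>

    #include <string>

    TEST_CASE("EqualityMacroSketch")
    {
        std::string reason = "Input should be Float16";
        CHECK_EQ(reason, "Input should be Float16");   // equivalent to the line below
        CHECK(reason == "Input should be Float16");
    }
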
diff --git a/src/backends/cl/test/ClLayerTests.cpp b/src/backends/cl/test/ClLayerTests.cpp
index 918ef03..1c3c831 100644
--- a/src/backends/cl/test/ClLayerTests.cpp
+++ b/src/backends/cl/test/ClLayerTests.cpp
@@ -19,12 +19,13 @@
 #include <arm_compute/core/CL/CLKernelLibrary.h>
 #include <arm_compute/runtime/CL/CLScheduler.h>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <iostream>
 #include <string>
 
-BOOST_FIXTURE_TEST_SUITE(Compute_ArmComputeCl, ClContextControlFixture)
+TEST_SUITE("Compute_ArmComputeCl")
+{
 
 using namespace armnn;
 
@@ -34,1267 +35,1870 @@
 // UNIT tests
 
 // Activation
-ARMNN_AUTO_TEST_CASE_WITH_THF(ConstantLinearActivation, ConstantLinearActivationTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ConstantLinearActivation, ClContextControlFixture, ConstantLinearActivationTest)
 
 // Sigmoid Activation / Logistic
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleSigmoid, SimpleSigmoidTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleSigmoidUint8, SimpleSigmoidUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleSigmoid, ClContextControlFixture, SimpleSigmoidTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleSigmoidUint8, ClContextControlFixture, SimpleSigmoidUint8Test)
 
 // BoundedReLU Activation
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReLu1, BoundedReLuUpperAndLowerBoundTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReLu6, BoundedReLuUpperBoundOnlyTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReLu1Uint8, BoundedReLuUint8UpperAndLowerBoundTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReLu6Uint8, BoundedReLuUint8UpperBoundOnlyTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ReLu1, ClContextControlFixture, BoundedReLuUpperAndLowerBoundTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ReLu6, ClContextControlFixture, BoundedReLuUpperBoundOnlyTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ReLu1Uint8, ClContextControlFixture, BoundedReLuUint8UpperAndLowerBoundTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ReLu6Uint8, ClContextControlFixture, BoundedReLuUint8UpperBoundOnlyTest)
 
 // ReLU Activation
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReLu, ReLuTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReLuUint8, ReLuUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ReLu, ClContextControlFixture, ReLuTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ReLuUint8, ClContextControlFixture, ReLuUint8Test)
 
 // SoftReLU Activation
-ARMNN_AUTO_TEST_CASE_WITH_THF(SoftReLu, SoftReLuTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SoftReLu, ClContextControlFixture, SoftReLuTest)
 
 // LeakyReLU Activation
-ARMNN_AUTO_TEST_CASE_WITH_THF(LeakyReLu, LeakyReLuTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LeakyReLu, ClContextControlFixture, LeakyReLuTest)
 
 // Abs Activation
-ARMNN_AUTO_TEST_CASE_WITH_THF(Abs, AbsTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Abs, ClContextControlFixture, AbsTest)
 
 // Sqrt Activation
-ARMNN_AUTO_TEST_CASE_WITH_THF(Sqrt, SqrtTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SqrtNN, SqrtNNTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Sqrt, ClContextControlFixture, SqrtTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SqrtNN, ClContextControlFixture, SqrtNNTest)
 
 // Square Activation
-ARMNN_AUTO_TEST_CASE_WITH_THF(Square, SquareTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Square, ClContextControlFixture, SquareTest)
 
 // Tanh Activation
-ARMNN_AUTO_TEST_CASE_WITH_THF(Tanh, TanhTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Tanh, ClContextControlFixture, TanhTest)
 
 // Elu Activation
-ARMNN_AUTO_TEST_CASE_WITH_THF(Elu, EluTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Elu, ClContextControlFixture, EluTest)
 
 // Batch To Space
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcFloat321, BatchToSpaceNdNhwcTest1<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcFloat322, BatchToSpaceNdNhwcTest2<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcFloat323, BatchToSpaceNdNhwcTest3<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNhwcFloat321,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNhwcTest1<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNhwcFloat322,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNhwcTest2<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNhwcFloat323,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNhwcTest3<DataType::Float32>)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNchwFloat321, BatchToSpaceNdNchwTest1<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNchwFloat322, BatchToSpaceNdNchwTest2<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNchwFloat323, BatchToSpaceNdNchwTest3<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNchwFloat321,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNchwTest1<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNchwFloat322,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNchwTest2<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNchwFloat323,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNchwTest3<DataType::Float32>)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcInt1, BatchToSpaceNdNhwcTest1<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcInt2, BatchToSpaceNdNhwcTest2<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcInt3, BatchToSpaceNdNhwcTest3<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNhwcInt1,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNhwcTest1<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNhwcInt2,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNhwcTest2<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNhwcInt3,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNhwcTest3<DataType::QAsymmS8>)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNchwInt1, BatchToSpaceNdNchwTest1<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNchwInt2, BatchToSpaceNdNchwTest2<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNchwInt3, BatchToSpaceNdNchwTest3<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNchwInt1,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNchwTest1<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNchwInt2,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNchwTest2<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNchwInt3,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNchwTest3<DataType::QAsymmS8>)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcUint1, BatchToSpaceNdNhwcTest1<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcUint2, BatchToSpaceNdNhwcTest2<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNhwcUint3, BatchToSpaceNdNhwcTest3<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNhwcUint1,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNhwcTest1<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNhwcUint2,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNhwcTest2<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNhwcUint3,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNhwcTest3<DataType::QAsymmU8>)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNchwUint1, BatchToSpaceNdNchwTest1<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNchwUint2, BatchToSpaceNdNchwTest2<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchToSpaceNdNchwUint3, BatchToSpaceNdNchwTest3<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNchwUint1,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNchwTest1<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNchwUint2,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNchwTest2<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchToSpaceNdNchwUint3,
+                                 ClContextControlFixture,
+                                 BatchToSpaceNdNchwTest3<DataType::QAsymmU8>)
 
 // Fully Connected
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFullyConnected, FullyConnectedFloat32Test, false, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFullyConnectedWithBias, FullyConnectedFloat32Test, true, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFullyConnectedWithTranspose, FullyConnectedFloat32Test, false, true)
-ARMNN_AUTO_TEST_CASE_WITH_THF(FullyConnectedUint8, FullyConnectedTest<DataType::QAsymmU8>, false, true)
-ARMNN_AUTO_TEST_CASE_WITH_THF(FullyConnectedBiasedUint8, FullyConnectedTest<DataType::QAsymmU8>, true, true)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleFullyConnected,
+                                 ClContextControlFixture,
+                                 FullyConnectedFloat32Test,
+                                 false,
+                                 false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleFullyConnectedWithBias,
+                                 ClContextControlFixture,
+                                 FullyConnectedFloat32Test,
+                                 true,
+                                 false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleFullyConnectedWithTranspose,
+                                 ClContextControlFixture,
+                                 FullyConnectedFloat32Test,
+                                 false,
+                                 true)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(FullyConnectedUint8,
+                                 ClContextControlFixture,
+                                 FullyConnectedTest<DataType::QAsymmU8>,
+                                 false,
+                                 true)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(FullyConnectedBiasedUint8,
+                                 ClContextControlFixture,
+                                 FullyConnectedTest<DataType::QAsymmU8>,
+                                 true,
+                                 true)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(FullyConnectedLarge, FullyConnectedLargeTest, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(FullyConnectedLargeTransposed, FullyConnectedLargeTest, true)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(FullyConnectedLarge,
+                                 ClContextControlFixture,
+                                 FullyConnectedLargeTest,
+                                 false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(FullyConnectedLargeTransposed,
+                                 ClContextControlFixture,
+                                 FullyConnectedLargeTest,
+                                 true)
 
 // Convolution
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution1d, Convolution1dTest, true)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleConvolution1d,
+                                 ClContextControlFixture,
+                                 Convolution1dTest,
+                                 true)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2d, SimpleConvolution2d3x5Test, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2dNhwc, SimpleConvolution2d3x5Test, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2d3x3Uint8, SimpleConvolution2d3x3Uint8Test, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2d3x3Uint8Nhwc, SimpleConvolution2d3x3Uint8Test, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedConvolution2d, SimpleConvolution2d3x5Test, false, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedConvolution2dNhwc, SimpleConvolution2d3x5Test, false, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedConvolution2dStride2x2Nhwc,
-                     SimpleConvolution2d3x3Stride2x2Test, false, DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleConvolution2d,
+                                 ClContextControlFixture,
+                                 SimpleConvolution2d3x5Test,
+                                 true,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleConvolution2dNhwc,
+                                 ClContextControlFixture,
+                                 SimpleConvolution2d3x5Test,
+                                 true,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleConvolution2d3x3Uint8,
+                                 ClContextControlFixture,
+                                 SimpleConvolution2d3x3Uint8Test,
+                                 true,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleConvolution2d3x3Uint8Nhwc,
+                                 ClContextControlFixture,
+                                 SimpleConvolution2d3x3Uint8Test,
+                                 true,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedConvolution2d,
+                                 ClContextControlFixture,
+                                 SimpleConvolution2d3x5Test,
+                                 false,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedConvolution2dNhwc,
+                                 ClContextControlFixture,
+                                 SimpleConvolution2d3x5Test,
+                                 false,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedConvolution2dStride2x2Nhwc,
+                                 ClContextControlFixture,
+                                 SimpleConvolution2d3x3Stride2x2Test,
+                                 false,
+                                 DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedConvolution2dSquare, SimpleConvolution2d3x3Test, false, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2dAsymmetricPadding,
-                              Convolution2dAsymmetricPaddingTest,
-                              DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedConvolution2dSquare,
+                                 ClContextControlFixture,
+                                 SimpleConvolution2d3x3Test,
+                                 false,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleConvolution2dAsymmetricPadding,
+                                 ClContextControlFixture,
+                                 Convolution2dAsymmetricPaddingTest,
+                                 DataLayout::NCHW)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedConvolution2dSquareNhwc, SimpleConvolution2d3x3Test, false, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2dAsymmetricPaddingNhwc,
-                     Convolution2dAsymmetricPaddingTest,
-                     DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedConvolution2dSquareNhwc,
+                                 ClContextControlFixture,
+                                 SimpleConvolution2d3x3Test,
+                                 false,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleConvolution2dAsymmetricPaddingNhwc,
+                                 ClContextControlFixture,
+                                 Convolution2dAsymmetricPaddingTest,
+                                 DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvolution2dSquareNhwc, SimpleConvolution2d3x3NhwcTest, false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleConvolution2dSquareNhwc,
+                                 ClContextControlFixture,
+                                 SimpleConvolution2d3x3NhwcTest,
+                                 false)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3,
-                     Convolution2d3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
-                     false,
-                     DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3Nhwc,
-                     Convolution2d3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
-                     false,
-                     DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3Uint8,
-                     Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
-                     false,
-                     DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d3x3Dilation3x3NhwcUint8,
-                     Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
-                     false,
-                     DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3,
-                     Convolution2d2x3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
-                     false,
-                     DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3Nhwc,
-                     Convolution2d2x3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
-                     false,
-                     DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3Uint8,
-                     Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
-                     false,
-                     DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x3x3Dilation3x3NhwcUint8,
-                     Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
-                     false,
-                     DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3,
-                     Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::Float32, DataType::Float32>,
-                     false,
-                     DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Nhwc,
-                     Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::Float32, DataType::Float32>,
-                     false,
-                     DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Uint8,
-                     Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmU8, DataType::Signed32>,
-                     false,
-                     DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcUint8,
-                     Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmU8, DataType::Signed32>,
-                     false,
-                     DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Convolution2d3x3Dilation3x3,
+                                 ClContextControlFixture,
+                                 Convolution2d3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
+                                 false,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Convolution2d3x3Dilation3x3Nhwc,
+                                 ClContextControlFixture,
+                                 Convolution2d3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
+                                 false,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Convolution2d3x3Dilation3x3Uint8,
+                                 ClContextControlFixture,
+                                 Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
+                                 false,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Convolution2d3x3Dilation3x3NhwcUint8,
+                                 ClContextControlFixture,
+                                 Convolution2d3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
+                                 false,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Convolution2d2x3x3Dilation3x3,
+                                 ClContextControlFixture,
+                                 Convolution2d2x3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
+                                 false,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Convolution2d2x3x3Dilation3x3Nhwc,
+                                 ClContextControlFixture,
+                                 Convolution2d2x3x3Dilation3x3Test<DataType::Float32, DataType::Float32>,
+                                 false,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Convolution2d2x3x3Dilation3x3Uint8,
+                                 ClContextControlFixture,
+                                 Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
+                                 false,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Convolution2d2x3x3Dilation3x3NhwcUint8,
+                                 ClContextControlFixture,
+                                 Convolution2d2x3x3Dilation3x3Test<DataType::QAsymmU8, DataType::Signed32>,
+                                 false,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3,
+        ClContextControlFixture,
+        Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::Float32, DataType::Float32>,
+        false,
+        DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Nhwc,
+        ClContextControlFixture,
+        Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::Float32, DataType::Float32>,
+        false,
+        DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3Uint8,
+        ClContextControlFixture,
+        Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmU8, DataType::Signed32>,
+        false,
+        DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Convolution2d2x2Dilation2x2Padding2x2Stride3x3NhwcUint8,
+        ClContextControlFixture,
+        Convolution2d2x2Dilation2x2Padding2x2Stride3x3Test<DataType::QAsymmU8, DataType::Signed32>,
+        false,
+        DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2dPerAxisQuantTestNchw, Convolution2dPerAxisQuantTest, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE_WITH_THF(Convolution2dPerAxisQuantTestNhwc, Convolution2dPerAxisQuantTest, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Convolution2dPerAxisQuantTestNchw,
+                                 ClContextControlFixture,
+                                 Convolution2dPerAxisQuantTest,
+                                 DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Convolution2dPerAxisQuantTestNhwc,
+                                 ClContextControlFixture,
+                                 Convolution2dPerAxisQuantTest,
+                                 DataLayout::NHWC);
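Editor's note: the conversions in this file all follow the same shape — the old one-argument-per-line ARMNN_AUTO_TEST_CASE_WITH_THF(Name, TestFunction, args...) becomes ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Name, ClContextControlFixture, TestFunction, args...), so each case now runs inside the CL context fixture. The minimal sketch below assumes only that such a fixture macro ultimately forwards to doctest's TEST_CASE_FIXTURE; every name ending in "Sketch" is hypothetical and illustrative, not the real ArmNN expansion (which also wires in the workload and tensor-handle factories).

// Standalone illustration only; in ArmNN the doctest main() lives elsewhere.
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>
#include <utility>

// Hypothetical stand-in for ClContextControlFixture: set up the CL context
// before each test body runs and tear it down again afterwards.
struct ClContextControlFixtureSketch
{
    ClContextControlFixtureSketch()  { /* acquire the OpenCL context */ }
    ~ClContextControlFixtureSketch() { /* release the OpenCL context */ }
};

// Hypothetical runner: invoke a layer-test function with its trailing
// arguments and assert on its boolean result.
template <typename TestFunction, typename... Args>
void RunLayerTestSketch(TestFunction&& testFn, Args&&... args)
{
    CHECK(std::forward<TestFunction>(testFn)(std::forward<Args>(args)...));
}

// Fixture-based registration: doctest constructs the fixture before, and
// destroys it after, every test case registered through this macro, which is
// the behaviour the ClContextControlFixture argument above provides.
#define SKETCH_AUTO_TEST_FIXTURE(TestName, Fixture, ...) \
    TEST_CASE_FIXTURE(Fixture, #TestName)                \
    {                                                    \
        RunLayerTestSketch(__VA_ARGS__);                 \
    }

// Illustrative usage mirroring the registrations in this file.
inline bool DummyAdditionTestSketch(bool withBias) { return true || withBias; }

SKETCH_AUTO_TEST_FIXTURE(SketchSimpleAdd, ClContextControlFixtureSketch, DummyAdditionTestSketch, true)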
 
 // Depthwise Convolution
-ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul1,
-                     DepthwiseConvolution2dDepthMul1Test, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1,
-                     DepthwiseConvolution2dDepthMul1Test, false, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul1Uint8,
-                     DepthwiseConvolution2dDepthMul1Uint8Test, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1Uint8,
-                     DepthwiseConvolution2dDepthMul1Uint8Test, false, DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(DepthwiseConvolution2dDepthMul1,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dDepthMul1Test,
+                                 true,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dDepthMul1Test,
+                                 false,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(DepthwiseConvolution2dDepthMul1Uint8,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dDepthMul1Uint8Test,
+                                 true,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1Uint8,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dDepthMul1Uint8Test,
+                                 false,
+                                 DataLayout::NCHW)
 
 // NHWC Depthwise Convolution
-ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul1Nhwc,
-                     DepthwiseConvolution2dDepthMul1Test, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1Nhwc,
-                     DepthwiseConvolution2dDepthMul1Test, false, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul1Uint8Nhwc,
-                     DepthwiseConvolution2dDepthMul1Uint8Test, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1Uint8Nhwc,
-                     DepthwiseConvolution2dDepthMul1Uint8Test, false, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleDepthwiseConvolution2d3x3Dilation3x3Nhwc,
-                     SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(DepthwiseConvolution2dDepthMul1Nhwc,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dDepthMul1Test,
+                                 true,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1Nhwc,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dDepthMul1Test,
+                                 false,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(DepthwiseConvolution2dDepthMul1Uint8Nhwc,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dDepthMul1Uint8Test,
+                                 true,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedDepthwiseConvolution2dDepthMul1Uint8Nhwc,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dDepthMul1Uint8Test,
+                                 false,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleDepthwiseConvolution2d3x3Dilation3x3Nhwc,
+                                 ClContextControlFixture,
+                                 SimpleDepthwiseConvolution2d3x3Dilation3x3NhwcTest)
 
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthNhwc, DepthwiseConvolution2dDepthNhwcTest, false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(DepthwiseConvolution2dDepthNhwc,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dDepthNhwcTest,
+                                 false)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dAsymmetric,
-                     DepthwiseConvolution2dAsymmetricTest, true, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dAsymmetric,
-                     DepthwiseConvolution2dAsymmetricTest, false, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dAsymmetricNhwc,
-                     DepthwiseConvolution2dAsymmetricTest, true, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedDepthwiseConvolution2dAsymmetricNhwc,
-                     DepthwiseConvolution2dAsymmetricTest, false, DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(DepthwiseConvolution2dAsymmetric,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dAsymmetricTest,
+                                 true,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedDepthwiseConvolution2dAsymmetric,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dAsymmetricTest,
+                                 false,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(DepthwiseConvolution2dAsymmetricNhwc,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dAsymmetricTest,
+                                 true,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedDepthwiseConvolution2dAsymmetricNhwc,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dAsymmetricTest,
+                                 false,
+                                 DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dDepthMul64, DepthwiseConvolution2dDepthMul64Test);
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(DepthwiseConvolution2dDepthMul64,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dDepthMul64Test);
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dPerAxisQuantTestNchw, DepthwiseConvolution2dPerAxisQuantTest,
-                     DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE_WITH_THF(DepthwiseConvolution2dPerAxisQuantTestNhwc, DepthwiseConvolution2dPerAxisQuantTest,
-                     DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(DepthwiseConvolution2dPerAxisQuantTestNchw,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dPerAxisQuantTest,
+                                 DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(DepthwiseConvolution2dPerAxisQuantTestNhwc,
+                                 ClContextControlFixture,
+                                 DepthwiseConvolution2dPerAxisQuantTest,
+                                 DataLayout::NHWC);
 
 // Splitter
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleSplitterFloat32, SplitterFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleSplitterUint8, SplitterUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleSplitterFloat32, ClContextControlFixture, SplitterFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleSplitterUint8, ClContextControlFixture, SplitterUint8Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(CopyViaSplitterFloat32, CopyViaSplitterFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(CopyViaSplitterUint8, CopyViaSplitterUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(CopyViaSplitterFloat32, ClContextControlFixture, CopyViaSplitterFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(CopyViaSplitterUint8, ClContextControlFixture, CopyViaSplitterUint8Test)
 
 // Concat
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConcat, ConcatTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatUint8, ConcatUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ConcatUint8DifferentInputOutputQParam,
-                     ConcatDifferentInputOutputQParamTest<DataType::QAsymmU8>, false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleConcat, ClContextControlFixture, ConcatTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ConcatUint8, ClContextControlFixture, ConcatUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ConcatUint8DifferentInputOutputQParam,
+                                 ClContextControlFixture,
+                                 ConcatDifferentInputOutputQParamTest<DataType::QAsymmU8>,
+                                 false)
 
 // Normalization
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleNormalizationAcross, SimpleNormalizationAcrossTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleNormalizationWithin, SimpleNormalizationWithinTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleNormalizationAcrossNhwc, SimpleNormalizationAcrossNhwcTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AcrossChannelNormalization, AcrossChannelNormalizationTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleNormalizationAcross, ClContextControlFixture, SimpleNormalizationAcrossTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleNormalizationWithin, ClContextControlFixture, SimpleNormalizationWithinTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleNormalizationAcrossNhwc,
+                                 ClContextControlFixture,
+                                 SimpleNormalizationAcrossNhwcTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AcrossChannelNormalization,
+                                 ClContextControlFixture,
+                                 AcrossChannelNormalizationTest)
 
 // Pooling
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleMaxPooling2dSize3x3Stride2x4, SimpleMaxPooling2dSize3x3Stride2x4Test, true)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleMaxPooling2dSize3x3Stride2x4Uint8,
-                              SimpleMaxPooling2dSize3x3Stride2x4Uint8Test, true)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleMaxPooling2dSize3x3Stride2x4,
+                                 ClContextControlFixture,
+                                 SimpleMaxPooling2dSize3x3Stride2x4Test,
+                                 true)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleMaxPooling2dSize3x3Stride2x4Uint8,
+                                 ClContextControlFixture,
+                                 SimpleMaxPooling2dSize3x3Stride2x4Uint8Test,
+                                 true)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(IgnorePaddingSimpleMaxPooling2d, IgnorePaddingSimpleMaxPooling2dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(IgnorePaddingSimpleMaxPooling2dUint8, IgnorePaddingSimpleMaxPooling2dUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(IgnorePaddingMaxPooling2dSize3, IgnorePaddingMaxPooling2dSize3Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(IgnorePaddingMaxPooling2dSize3Uint8, IgnorePaddingMaxPooling2dSize3Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(IgnorePaddingSimpleMaxPooling2d,
+                                 ClContextControlFixture,
+                                 IgnorePaddingSimpleMaxPooling2dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(IgnorePaddingSimpleMaxPooling2dUint8,
+                                 ClContextControlFixture,
+                                 IgnorePaddingSimpleMaxPooling2dUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(IgnorePaddingMaxPooling2dSize3,
+                                 ClContextControlFixture,
+                                 IgnorePaddingMaxPooling2dSize3Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(IgnorePaddingMaxPooling2dSize3Uint8,
+                                 ClContextControlFixture,
+                                 IgnorePaddingMaxPooling2dSize3Uint8Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(IgnorePaddingSimpleAveragePooling2d, IgnorePaddingSimpleAveragePooling2dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(IgnorePaddingSimpleAveragePooling2dUint8, IgnorePaddingSimpleAveragePooling2dUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(IgnorePaddingSimpleAveragePooling2dNoPadding,
-                              IgnorePaddingSimpleAveragePooling2dNoPaddingTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(IgnorePaddingSimpleAveragePooling2dNoPaddingUint8,
-                              IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(IgnorePaddingAveragePooling2dSize3, IgnorePaddingAveragePooling2dSize3Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(IgnorePaddingAveragePooling2dSize3Uint8, IgnorePaddingAveragePooling2dSize3Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(IgnorePaddingSimpleAveragePooling2d,
+                                 ClContextControlFixture,
+                                 IgnorePaddingSimpleAveragePooling2dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(IgnorePaddingSimpleAveragePooling2dUint8,
+                                 ClContextControlFixture,
+                                 IgnorePaddingSimpleAveragePooling2dUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(IgnorePaddingSimpleAveragePooling2dNoPadding,
+                                 ClContextControlFixture,
+                                 IgnorePaddingSimpleAveragePooling2dNoPaddingTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(IgnorePaddingSimpleAveragePooling2dNoPaddingUint8,
+                                 ClContextControlFixture,
+                                 IgnorePaddingSimpleAveragePooling2dNoPaddingUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(IgnorePaddingAveragePooling2dSize3,
+                                 ClContextControlFixture,
+                                 IgnorePaddingAveragePooling2dSize3Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(IgnorePaddingAveragePooling2dSize3Uint8,
+                                 ClContextControlFixture,
+                                 IgnorePaddingAveragePooling2dSize3Uint8Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(IgnorePaddingSimpleL2Pooling2d, IgnorePaddingSimpleL2Pooling2dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UNSUPPORTED_IgnorePaddingSimpleL2Pooling2dUint8, IgnorePaddingSimpleL2Pooling2dUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(IgnorePaddingL2Pooling2dSize3, IgnorePaddingL2Pooling2dSize3Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UNSUPPORTED_IgnorePaddingL2Pooling2dSize3Uint8, IgnorePaddingL2Pooling2dSize3Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(IgnorePaddingSimpleL2Pooling2d,
+                                 ClContextControlFixture,
+                                 IgnorePaddingSimpleL2Pooling2dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UNSUPPORTED_IgnorePaddingSimpleL2Pooling2dUint8,
+                                 ClContextControlFixture,
+                                 IgnorePaddingSimpleL2Pooling2dUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(IgnorePaddingL2Pooling2dSize3,
+                                 ClContextControlFixture,
+                                 IgnorePaddingL2Pooling2dSize3Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UNSUPPORTED_IgnorePaddingL2Pooling2dSize3Uint8,
+                                 ClContextControlFixture,
+                                 IgnorePaddingL2Pooling2dSize3Uint8Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleMaxPooling2d, SimpleMaxPooling2dTest, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleMaxPooling2dNhwc, SimpleMaxPooling2dTest, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleMaxPooling2dUint8, SimpleMaxPooling2dUint8Test, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleMaxPooling2dUint8Nhwc, SimpleMaxPooling2dUint8Test, DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleMaxPooling2d,
+                                 ClContextControlFixture,
+                                 SimpleMaxPooling2dTest,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleMaxPooling2dNhwc,
+                                 ClContextControlFixture,
+                                 SimpleMaxPooling2dTest,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleMaxPooling2dUint8,
+                                 ClContextControlFixture,
+                                 SimpleMaxPooling2dUint8Test,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleMaxPooling2dUint8Nhwc,
+                                 ClContextControlFixture,
+                                 SimpleMaxPooling2dUint8Test,
+                                 DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleAveragePooling2d, SimpleAveragePooling2dTest, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleAveragePooling2dNhwc, SimpleAveragePooling2dTest, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleAveragePooling2dUint8, SimpleAveragePooling2dUint8Test, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleAveragePooling2dUint8Nhwc, SimpleAveragePooling2dUint8Test, DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleAveragePooling2d,
+                                 ClContextControlFixture,
+                                 SimpleAveragePooling2dTest,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleAveragePooling2dNhwc,
+                                 ClContextControlFixture,
+                                 SimpleAveragePooling2dTest,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleAveragePooling2dUint8,
+                                 ClContextControlFixture,
+                                 SimpleAveragePooling2dUint8Test,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleAveragePooling2dUint8Nhwc,
+                                 ClContextControlFixture,
+                                 SimpleAveragePooling2dUint8Test,
+                                 DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(IgnorePaddingAveragePooling2dSize3x2Stride2x2,
-                              IgnorePaddingAveragePooling2dSize3x2Stride2x2Test,
-                              false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(IgnorePaddingAveragePooling2dSize3x2Stride2x2NoPadding,
-                              IgnorePaddingAveragePooling2dSize3x2Stride2x2Test,
-                              true)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LargeTensorsAveragePooling2d, LargeTensorsAveragePooling2dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LargeTensorsAveragePooling2dUint8, LargeTensorsAveragePooling2dUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(IgnorePaddingAveragePooling2dSize3x2Stride2x2,
+                                 ClContextControlFixture,
+                                 IgnorePaddingAveragePooling2dSize3x2Stride2x2Test,
+                                 false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(IgnorePaddingAveragePooling2dSize3x2Stride2x2NoPadding,
+                                 ClContextControlFixture,
+                                 IgnorePaddingAveragePooling2dSize3x2Stride2x2Test,
+                                 true)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LargeTensorsAveragePooling2d,
+                                 ClContextControlFixture,
+                                 LargeTensorsAveragePooling2dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LargeTensorsAveragePooling2dUint8,
+                                 ClContextControlFixture,
+                                 LargeTensorsAveragePooling2dUint8Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleL2Pooling2d, SimpleL2Pooling2dTest, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleL2Pooling2dNhwc, SimpleL2Pooling2dTest, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UNSUPPORTED_SimpleL2Pooling2dUint8, SimpleL2Pooling2dUint8Test, DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleL2Pooling2d,
+                                 ClContextControlFixture,
+                                 SimpleL2Pooling2dTest,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleL2Pooling2dNhwc,
+                                 ClContextControlFixture,
+                                 SimpleL2Pooling2dTest,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UNSUPPORTED_SimpleL2Pooling2dUint8,
+                                 ClContextControlFixture,
+                                 SimpleL2Pooling2dUint8Test,
+                                 DataLayout::NCHW)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2Pooling2dSize3Stride1, L2Pooling2dSize3Stride1Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UNSUPPORTED_L2Pooling2dSize3Stride1Uint8, L2Pooling2dSize3Stride1Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2Pooling2dSize3Stride3, L2Pooling2dSize3Stride3Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UNSUPPORTED_L2Pooling2dSize3Stride3Uint8, L2Pooling2dSize3Stride3Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2Pooling2dSize3Stride4, L2Pooling2dSize3Stride4Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UNSUPPORTED_L2Pooling2dSize3Stride4Uint8, L2Pooling2dSize3Stride4Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2Pooling2dSize7, L2Pooling2dSize7Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UNSUPPORTED_L2Pooling2dSize7Uint8, L2Pooling2dSize7Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2Pooling2dSize9, L2Pooling2dSize9Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UNSUPPORTED_L2Pooling2dSize9Uint8, L2Pooling2dSize9Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2Pooling2dSize3Stride1, ClContextControlFixture, L2Pooling2dSize3Stride1Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UNSUPPORTED_L2Pooling2dSize3Stride1Uint8,
+                                 ClContextControlFixture,
+                                 L2Pooling2dSize3Stride1Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2Pooling2dSize3Stride3,
+                                 ClContextControlFixture,
+                                 L2Pooling2dSize3Stride3Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UNSUPPORTED_L2Pooling2dSize3Stride3Uint8,
+                                 ClContextControlFixture,
+                                 L2Pooling2dSize3Stride3Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2Pooling2dSize3Stride4,
+                                 ClContextControlFixture,
+                                 L2Pooling2dSize3Stride4Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UNSUPPORTED_L2Pooling2dSize3Stride4Uint8,
+                                 ClContextControlFixture,
+                                 L2Pooling2dSize3Stride4Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2Pooling2dSize7,
+                                 ClContextControlFixture,
+                                 L2Pooling2dSize7Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UNSUPPORTED_L2Pooling2dSize7Uint8,
+                                 ClContextControlFixture,
+                                 L2Pooling2dSize7Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2Pooling2dSize9, ClContextControlFixture, L2Pooling2dSize9Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UNSUPPORTED_L2Pooling2dSize9Uint8, ClContextControlFixture, L2Pooling2dSize9Uint8Test)
 
 // Add
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleAdd, AdditionTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Add5d, Addition5dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AddBroadcast1Element, AdditionBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AddBroadcast, AdditionBroadcastTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleAdd, ClContextControlFixture, AdditionTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Add5d, ClContextControlFixture, Addition5dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AddBroadcast1Element, ClContextControlFixture, AdditionBroadcast1ElementTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AddBroadcast, ClContextControlFixture, AdditionBroadcastTest)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(AdditionUint8, AdditionUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AddBroadcastUint8, AdditionBroadcastUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AddBroadcast1ElementUint8, AdditionBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AdditionUint8, ClContextControlFixture, AdditionUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AddBroadcastUint8, ClContextControlFixture, AdditionBroadcastUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AddBroadcast1ElementUint8,
+                                 ClContextControlFixture,
+                                 AdditionBroadcast1ElementUint8Test)
 
 // Sub
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleSub, SubtractionTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SubBroadcast1Element, SubtractionBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SubBroadcast, SubtractionBroadcastTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleSub, ClContextControlFixture, SubtractionTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SubBroadcast1Element, ClContextControlFixture, SubtractionBroadcast1ElementTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SubBroadcast, ClContextControlFixture, SubtractionBroadcastTest)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(SubtractionUint8, SubtractionUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SubBroadcastUint8, SubtractionBroadcastUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SubBroadcast1ElementUint8, SubtractionBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SubtractionUint8, ClContextControlFixture, SubtractionUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SubBroadcastUint8, ClContextControlFixture, SubtractionBroadcastUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SubBroadcast1ElementUint8,
+                                 ClContextControlFixture,
+                                 SubtractionBroadcast1ElementUint8Test)
 
 // Div
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleDivision, DivisionTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(DivisionByZero, DivisionByZeroTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(DivisionBroadcast1Element, DivisionBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(DivisionBroadcast1DVector, DivisionBroadcast1DVectorTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleDivision, ClContextControlFixture, DivisionTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(DivisionByZero, ClContextControlFixture, DivisionByZeroTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(DivisionBroadcast1Element, ClContextControlFixture, DivisionBroadcast1ElementTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(DivisionBroadcast1DVector, ClContextControlFixture, DivisionBroadcast1DVectorTest)
 // NOTE: quantized division is not supported by CL and not required by the
 //       android NN api
 
 // Mul
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleMultiplication, MultiplicationTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MultiplicationBroadcast1Element, MultiplicationBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MultiplicationBroadcast1DVector, MultiplicationBroadcast1DVectorTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MultiplicationUint8, MultiplicationUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MultiplicationBroadcast1ElementUint8, MultiplicationBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MultiplicationBroadcast1DVectorUint8, MultiplicationBroadcast1DVectorUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Multiplication5d, Multiplication5dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleMultiplication, ClContextControlFixture, MultiplicationTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MultiplicationBroadcast1Element,
+                                 ClContextControlFixture,
+                                 MultiplicationBroadcast1ElementTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MultiplicationBroadcast1DVector,
+                                 ClContextControlFixture,
+                                 MultiplicationBroadcast1DVectorTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MultiplicationUint8, ClContextControlFixture, MultiplicationUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MultiplicationBroadcast1ElementUint8,
+                                 ClContextControlFixture,
+                                 MultiplicationBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MultiplicationBroadcast1DVectorUint8,
+                                 ClContextControlFixture,
+                                 MultiplicationBroadcast1DVectorUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Multiplication5d, ClContextControlFixture, Multiplication5dTest)
 
 // Batch Norm
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchNormFloat32, BatchNormFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(BatchNormFloat32Nhwc, BatchNormFloat32NhwcTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchNormFloat32, ClContextControlFixture, BatchNormFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(BatchNormFloat32Nhwc, ClContextControlFixture, BatchNormFloat32NhwcTest)
 
 // Rank
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1Float16,  RankDimSize1Test<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1Float32,  RankDimSize1Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1QAsymmU8, RankDimSize1Test<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1Signed32, RankDimSize1Test<DataType::Signed32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1QSymmS16, RankDimSize1Test<DataType::QSymmS16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize1QAsymmS8, RankDimSize1Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize1Float16, ClContextControlFixture, RankDimSize1Test<DataType::Float16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize1Float32, ClContextControlFixture, RankDimSize1Test<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize1QAsymmU8, ClContextControlFixture, RankDimSize1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize1Signed32, ClContextControlFixture, RankDimSize1Test<DataType::Signed32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize1QSymmS16, ClContextControlFixture, RankDimSize1Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize1QAsymmS8, ClContextControlFixture, RankDimSize1Test<DataType::QAsymmS8>)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2Float16,  RankDimSize2Test<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2Float32,  RankDimSize2Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2QAsymmU8, RankDimSize2Test<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2Signed32, RankDimSize2Test<DataType::Signed32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2QSymmS16, RankDimSize2Test<DataType::QSymmS16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize2QAsymmS8, RankDimSize2Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize2Float16, ClContextControlFixture, RankDimSize2Test<DataType::Float16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize2Float32, ClContextControlFixture, RankDimSize2Test<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize2QAsymmU8, ClContextControlFixture, RankDimSize2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize2Signed32, ClContextControlFixture, RankDimSize2Test<DataType::Signed32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize2QSymmS16, ClContextControlFixture, RankDimSize2Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize2QAsymmS8, ClContextControlFixture, RankDimSize2Test<DataType::QAsymmS8>)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3Float16,  RankDimSize3Test<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3Float32,  RankDimSize3Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3QAsymmU8, RankDimSize3Test<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3Signed32, RankDimSize3Test<DataType::Signed32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3QSymmS16, RankDimSize3Test<DataType::QSymmS16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize3QAsymmS8, RankDimSize3Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize3Float16, ClContextControlFixture, RankDimSize3Test<DataType::Float16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize3Float32, ClContextControlFixture, RankDimSize3Test<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize3QAsymmU8, ClContextControlFixture, RankDimSize3Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize3Signed32, ClContextControlFixture, RankDimSize3Test<DataType::Signed32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize3QSymmS16, ClContextControlFixture, RankDimSize3Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize3QAsymmS8, ClContextControlFixture, RankDimSize3Test<DataType::QAsymmS8>)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4Float16,  RankDimSize4Test<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4Float32,  RankDimSize4Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4QAsymmU8, RankDimSize4Test<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4Signed32, RankDimSize4Test<DataType::Signed32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4QSymmS16, RankDimSize4Test<DataType::QSymmS16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RankDimSize4QAsymmS8, RankDimSize4Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize4Float16, ClContextControlFixture, RankDimSize4Test<DataType::Float16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize4Float32, ClContextControlFixture, RankDimSize4Test<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize4QAsymmU8, ClContextControlFixture, RankDimSize4Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize4Signed32, ClContextControlFixture, RankDimSize4Test<DataType::Signed32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize4QSymmS16, ClContextControlFixture, RankDimSize4Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RankDimSize4QAsymmS8, ClContextControlFixture, RankDimSize4Test<DataType::QAsymmS8>)
 
 // InstanceNormalization
-ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat32Nchw, InstanceNormFloat32Test, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat16Nchw, InstanceNormFloat16Test, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(InstanceNormFloat32Nchw,
+                                 ClContextControlFixture,
+                                 InstanceNormFloat32Test,
+                                 DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(InstanceNormFloat16Nchw,
+                                 ClContextControlFixture,
+                                 InstanceNormFloat16Test,
+                                 DataLayout::NCHW);
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat32Nhwc, InstanceNormFloat32Test, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat16Nhwc, InstanceNormFloat16Test, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(InstanceNormFloat32Nhwc,
+                                 ClContextControlFixture,
+                                 InstanceNormFloat32Test,
+                                 DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(InstanceNormFloat16Nhwc,
+                                 ClContextControlFixture,
+                                 InstanceNormFloat16Test,
+                                 DataLayout::NHWC);
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat32Nchw2, InstanceNormFloat32Test2, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat16Nchw2, InstanceNormFloat16Test2, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(InstanceNormFloat32Nchw2,
+                                 ClContextControlFixture,
+                                 InstanceNormFloat32Test2,
+                                 DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(InstanceNormFloat16Nchw2,
+                                 ClContextControlFixture,
+                                 InstanceNormFloat16Test2,
+                                 DataLayout::NCHW);
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat32Nhwc2, InstanceNormFloat32Test2, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE_WITH_THF(InstanceNormFloat16Nhwc2, InstanceNormFloat16Test2, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(InstanceNormFloat32Nhwc2,
+                                 ClContextControlFixture,
+                                 InstanceNormFloat32Test2,
+                                 DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(InstanceNormFloat16Nhwc2,
+                                 ClContextControlFixture,
+                                 InstanceNormFloat16Test2,
+                                 DataLayout::NHWC);
 
 // L2 Normalization
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization1d, L2Normalization1dTest, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization2d, L2Normalization2dTest, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization3d, L2Normalization3dTest, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization4d, L2Normalization4dTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2Normalization1d, ClContextControlFixture, L2Normalization1dTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2Normalization2d, ClContextControlFixture, L2Normalization2dTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2Normalization3d, ClContextControlFixture, L2Normalization3dTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2Normalization4d, ClContextControlFixture, L2Normalization4dTest, DataLayout::NCHW)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization1dNhwc, L2Normalization1dTest, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization2dNhwc, L2Normalization2dTest, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization3dNhwc, L2Normalization3dTest, DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization4dNhwc, L2Normalization4dTest, DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2Normalization1dNhwc,
+                                 ClContextControlFixture,
+                                 L2Normalization1dTest,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2Normalization2dNhwc,
+                                 ClContextControlFixture,
+                                 L2Normalization2dTest,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2Normalization3dNhwc,
+                                 ClContextControlFixture,
+                                 L2Normalization3dTest,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2Normalization4dNhwc,
+                                 ClContextControlFixture,
+                                 L2Normalization4dTest,
+                                 DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2Normalization2dShape, L2Normalization2dShapeTest);
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2Normalization2dShape, ClContextControlFixture, L2Normalization2dShapeTest);
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2NormalizationDefaultEpsilon, L2NormalizationDefaultEpsilonTest, DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(L2NormalizationNonDefaultEpsilon, L2NormalizationNonDefaultEpsilonTest, DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2NormalizationDefaultEpsilon,
+                                 ClContextControlFixture,
+                                 L2NormalizationDefaultEpsilonTest,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(L2NormalizationNonDefaultEpsilon,
+                                 ClContextControlFixture,
+                                 L2NormalizationNonDefaultEpsilonTest,
+                                 DataLayout::NCHW)
 
 // Constant
-ARMNN_AUTO_TEST_CASE_WITH_THF(Constant, ConstantTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ConstantUint8, ConstantUint8SimpleQuantizationScaleNoOffsetTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Constant, ClContextControlFixture, ConstantTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ConstantUint8,
+                                 ClContextControlFixture,
+                                 ConstantUint8SimpleQuantizationScaleNoOffsetTest)
 
 // Concat
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat1d, Concat1dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat1dUint8, Concat1dUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat1d, ClContextControlFixture, Concat1dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat1dUint8, ClContextControlFixture, Concat1dUint8Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim0, Concat2dDim0Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim0Uint8, Concat2dDim0Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim1, Concat2dDim1Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim1Uint8, Concat2dDim1Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat2dDim0, ClContextControlFixture, Concat2dDim0Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat2dDim0Uint8, ClContextControlFixture, Concat2dDim0Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat2dDim1, ClContextControlFixture, Concat2dDim1Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat2dDim1Uint8, ClContextControlFixture, Concat2dDim1Uint8Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim0DiffInputDims, Concat2dDim0DiffInputDimsTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim0DiffInputDimsUint8, Concat2dDim0DiffInputDimsUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim1DiffInputDims, Concat2dDim1DiffInputDimsTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat2dDim1DiffInputDimsUint8, Concat2dDim1DiffInputDimsUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat2dDim0DiffInputDims,
+                                 ClContextControlFixture,
+                                 Concat2dDim0DiffInputDimsTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat2dDim0DiffInputDimsUint8,
+                                 ClContextControlFixture,
+                                 Concat2dDim0DiffInputDimsUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat2dDim1DiffInputDims,
+                                 ClContextControlFixture,
+                                 Concat2dDim1DiffInputDimsTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat2dDim1DiffInputDimsUint8,
+                                 ClContextControlFixture,
+                                 Concat2dDim1DiffInputDimsUint8Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim0, Concat3dDim0Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim0Uint8, Concat3dDim0Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim1, Concat3dDim1Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim1Uint8, Concat3dDim1Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim2, Concat3dDim2Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim2Uint8, Concat3dDim2Uint8Test, false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat3dDim0, ClContextControlFixture, Concat3dDim0Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat3dDim0Uint8, ClContextControlFixture, Concat3dDim0Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat3dDim1, ClContextControlFixture, Concat3dDim1Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat3dDim1Uint8, ClContextControlFixture, Concat3dDim1Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat3dDim2, ClContextControlFixture, Concat3dDim2Test, false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat3dDim2Uint8, ClContextControlFixture, Concat3dDim2Uint8Test, false)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim0DiffInputDims, Concat3dDim0DiffInputDimsTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim0DiffInputDimsUint8, Concat3dDim0DiffInputDimsUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim1DiffInputDims, Concat3dDim1DiffInputDimsTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim1DiffInputDimsUint8, Concat3dDim1DiffInputDimsUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim2DiffInputDims, Concat3dDim2DiffInputDimsTest, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat3dDim2DiffInputDimsUint8, Concat3dDim2DiffInputDimsUint8Test, false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat3dDim0DiffInputDims, ClContextControlFixture, Concat3dDim0DiffInputDimsTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat3dDim0DiffInputDimsUint8,
+                                 ClContextControlFixture,
+                                 Concat3dDim0DiffInputDimsUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat3dDim1DiffInputDims,
+                                 ClContextControlFixture,
+                                 Concat3dDim1DiffInputDimsTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat3dDim1DiffInputDimsUint8,
+                                 ClContextControlFixture,
+                                 Concat3dDim1DiffInputDimsUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat3dDim2DiffInputDims,
+                                 ClContextControlFixture,
+                                 Concat3dDim2DiffInputDimsTest,
+                                 false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat3dDim2DiffInputDimsUint8,
+                                 ClContextControlFixture,
+                                 Concat3dDim2DiffInputDimsUint8Test,
+                                 false)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim0, Concat4dDim0Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim1, Concat4dDim1Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim3, Concat4dDim3Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim0Uint8, Concat4dDim0Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim1Uint8, Concat4dDim1Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDim3Uint8, Concat4dDim3Uint8Test, false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat4dDim0, ClContextControlFixture, Concat4dDim0Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat4dDim1, ClContextControlFixture, Concat4dDim1Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat4dDim3, ClContextControlFixture, Concat4dDim3Test, false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat4dDim0Uint8, ClContextControlFixture, Concat4dDim0Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat4dDim1Uint8, ClContextControlFixture, Concat4dDim1Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat4dDim3Uint8, ClContextControlFixture, Concat4dDim3Uint8Test, false)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim0, Concat4dDiffShapeDim0Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim1, Concat4dDiffShapeDim1Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim3, Concat4dDiffShapeDim3Test, false)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim0Uint8, Concat4dDiffShapeDim0Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim1Uint8, Concat4dDiffShapeDim1Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Concat4dDiffShapeDim3Uint8, Concat4dDiffShapeDim3Uint8Test, false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat4dDiffShapeDim0, ClContextControlFixture, Concat4dDiffShapeDim0Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat4dDiffShapeDim1, ClContextControlFixture, Concat4dDiffShapeDim1Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat4dDiffShapeDim3, ClContextControlFixture, Concat4dDiffShapeDim3Test, false)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat4dDiffShapeDim0Uint8, ClContextControlFixture, Concat4dDiffShapeDim0Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat4dDiffShapeDim1Uint8, ClContextControlFixture, Concat4dDiffShapeDim1Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Concat4dDiffShapeDim3Uint8,
+                                 ClContextControlFixture,
+                                 Concat4dDiffShapeDim3Uint8Test,
+                                 false)
 
 // DepthToSpace
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat32_1, DepthToSpaceTest1<DataType::Float32>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat32_2, DepthToSpaceTest2<DataType::Float32>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat32_3, DepthToSpaceTest3<DataType::Float32>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat32_4, DepthToSpaceTest4<DataType::Float32>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwFloat32_1,
+    ClContextControlFixture, DepthToSpaceTest1<DataType::Float32>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwFloat32_2,
+    ClContextControlFixture, DepthToSpaceTest2<DataType::Float32>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwFloat32_3,
+    ClContextControlFixture, DepthToSpaceTest3<DataType::Float32>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwFloat32_4,
+    ClContextControlFixture, DepthToSpaceTest4<DataType::Float32>, DataLayout::NCHW);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_1, DepthToSpaceTest1<DataType::Float16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_2, DepthToSpaceTest2<DataType::Float16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwFloat16_1,
+    ClContextControlFixture, DepthToSpaceTest1<DataType::Float16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwFloat16_2,
+    ClContextControlFixture, DepthToSpaceTest2<DataType::Float16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwFloat16_3,
+    ClContextControlFixture, DepthToSpaceTest3<DataType::Float16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwFloat16_4,
+    ClContextControlFixture, DepthToSpaceTest4<DataType::Float16>, DataLayout::NCHW);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_1, DepthToSpaceTest1<DataType::QAsymmS8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_2, DepthToSpaceTest2<DataType::QAsymmS8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_3, DepthToSpaceTest3<DataType::QAsymmS8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt8_4, DepthToSpaceTest4<DataType::QAsymmS8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwInt8_1,
+    ClContextControlFixture, DepthToSpaceTest1<DataType::QAsymmS8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwInt8_2,
+    ClContextControlFixture, DepthToSpaceTest2<DataType::QAsymmS8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwInt8_3,
+    ClContextControlFixture, DepthToSpaceTest3<DataType::QAsymmS8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwInt8_4,
+    ClContextControlFixture, DepthToSpaceTest4<DataType::QAsymmS8>, DataLayout::NCHW);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwUint8_4, DepthToSpaceTest4<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwUint8_1,
+    ClContextControlFixture, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwUint8_2,
+    ClContextControlFixture, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwUint8_3,
+    ClContextControlFixture, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwUint8_4,
+    ClContextControlFixture, DepthToSpaceTest4<DataType::QAsymmU8>, DataLayout::NCHW);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_1, DepthToSpaceTest1<DataType::QSymmS16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_2, DepthToSpaceTest2<DataType::QSymmS16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_3, DepthToSpaceTest3<DataType::QSymmS16>, DataLayout::NCHW);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNchwInt16_4, DepthToSpaceTest4<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwInt16_1,
+    ClContextControlFixture, DepthToSpaceTest1<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwInt16_2,
+    ClContextControlFixture, DepthToSpaceTest2<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwInt16_3,
+    ClContextControlFixture, DepthToSpaceTest3<DataType::QSymmS16>, DataLayout::NCHW);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNchwInt16_4,
+    ClContextControlFixture, DepthToSpaceTest4<DataType::QSymmS16>, DataLayout::NCHW);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat32_1, DepthToSpaceTest1<DataType::Float32>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat32_2, DepthToSpaceTest2<DataType::Float32>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat32_3, DepthToSpaceTest3<DataType::Float32>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat32_4, DepthToSpaceTest4<DataType::Float32>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcFloat32_1,
+    ClContextControlFixture, DepthToSpaceTest1<DataType::Float32>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcFloat32_2,
+    ClContextControlFixture, DepthToSpaceTest2<DataType::Float32>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcFloat32_3,
+    ClContextControlFixture, DepthToSpaceTest3<DataType::Float32>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcFloat32_4,
+    ClContextControlFixture, DepthToSpaceTest4<DataType::Float32>, DataLayout::NHWC);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_1, DepthToSpaceTest1<DataType::Float16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_2, DepthToSpaceTest2<DataType::Float16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_3, DepthToSpaceTest3<DataType::Float16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcFloat16_4, DepthToSpaceTest4<DataType::Float16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcFloat16_1,
+    ClContextControlFixture, DepthToSpaceTest1<DataType::Float16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcFloat16_2,
+    ClContextControlFixture, DepthToSpaceTest2<DataType::Float16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcFloat16_3,
+    ClContextControlFixture, DepthToSpaceTest3<DataType::Float16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcFloat16_4,
+    ClContextControlFixture, DepthToSpaceTest4<DataType::Float16>, DataLayout::NHWC);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_1, DepthToSpaceTest1<DataType::QAsymmS8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_2, DepthToSpaceTest2<DataType::QAsymmS8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_3, DepthToSpaceTest3<DataType::QAsymmS8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt8_4, DepthToSpaceTest4<DataType::QAsymmS8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcInt8_1,
+    ClContextControlFixture, DepthToSpaceTest1<DataType::QAsymmS8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcInt8_2,
+    ClContextControlFixture, DepthToSpaceTest2<DataType::QAsymmS8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcInt8_3,
+    ClContextControlFixture, DepthToSpaceTest3<DataType::QAsymmS8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcInt8_4,
+    ClContextControlFixture, DepthToSpaceTest4<DataType::QAsymmS8>, DataLayout::NHWC);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_1, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_2, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_3, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcUint8_4, DepthToSpaceTest4<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcUint8_1,
+    ClContextControlFixture, DepthToSpaceTest1<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcUint8_2,
+    ClContextControlFixture, DepthToSpaceTest2<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcUint8_3,
+    ClContextControlFixture, DepthToSpaceTest3<DataType::QAsymmU8>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcUint8_4,
+    ClContextControlFixture, DepthToSpaceTest4<DataType::QAsymmU8>, DataLayout::NHWC);
 
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_1, DepthToSpaceTest1<DataType::QSymmS16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_2, DepthToSpaceTest2<DataType::QSymmS16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_3, DepthToSpaceTest3<DataType::QSymmS16>, DataLayout::NHWC);
-ARMNN_AUTO_TEST_CASE(DepthToSpaceNhwcInt16_4, DepthToSpaceTest4<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcInt16_1,
+    ClContextControlFixture, DepthToSpaceTest1<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcInt16_2,
+    ClContextControlFixture, DepthToSpaceTest2<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcInt16_3,
+    ClContextControlFixture, DepthToSpaceTest3<DataType::QSymmS16>, DataLayout::NHWC);
+ARMNN_AUTO_TEST_FIXTURE(DepthToSpaceNhwcInt16_4,
+    ClContextControlFixture, DepthToSpaceTest4<DataType::QSymmS16>, DataLayout::NHWC);
 
 // Fill
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFill, SimpleFillTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFillF16, SimpleFillTest<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFillS32, SimpleFillTest<DataType::Signed32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleFill, ClContextControlFixture, SimpleFillTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleFillF16, ClContextControlFixture, SimpleFillTest<DataType::Float16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleFillS32, ClContextControlFixture, SimpleFillTest<DataType::Signed32>)
 
 // FloorPreluUint8
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleFloor, SimpleFloorTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleFloor, ClContextControlFixture, SimpleFloorTest<DataType::Float32>)
 
 // Gather
-ARMNN_AUTO_TEST_CASE_WITH_THF(Gather1dParamsFloat32, Gather1dParamsFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Gather1dParamsUint8, Gather1dParamsUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(GatherMultiDimParamsFloat32, GatherMultiDimParamsFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(GatherMultiDimParamsUint8, GatherMultiDimParamsUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Gather1dParamsFloat32, ClContextControlFixture, Gather1dParamsFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Gather1dParamsUint8, ClContextControlFixture, Gather1dParamsUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(GatherMultiDimParamsFloat32, ClContextControlFixture, GatherMultiDimParamsFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(GatherMultiDimParamsUint8, ClContextControlFixture, GatherMultiDimParamsUint8Test)
 
 // Reshape
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleReshapeFloat32, SimpleReshapeTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleReshapeInt8, SimpleReshapeTest<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleReshapeUint8, SimpleReshapeTest<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Reshape5d, Reshape5dTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReshapeBoolean, ReshapeBooleanTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleReshapeFloat32, ClContextControlFixture, SimpleReshapeTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleReshapeInt8, ClContextControlFixture, SimpleReshapeTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleReshapeUint8, ClContextControlFixture, SimpleReshapeTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Reshape5d, ClContextControlFixture, Reshape5dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ReshapeBoolean, ClContextControlFixture, ReshapeBooleanTest)
 
 // Pad
-ARMNN_AUTO_TEST_CASE_WITH_THF(PadFloat322d, PadFloat322dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PadFloat322dCustomPadding, PadFloat322dCustomPaddingTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PadFloat323d, PadFloat323dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PadFloat324d, PadFloat324dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PadFloat322d, ClContextControlFixture, PadFloat322dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PadFloat322dCustomPadding, ClContextControlFixture, PadFloat322dCustomPaddingTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PadFloat323d, ClContextControlFixture, PadFloat323dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PadFloat324d, ClContextControlFixture, PadFloat324dTest)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(PadUint82d, PadUint82dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PadUint82dCustomPadding, PadUint82dCustomPaddingTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PadUint83d, PadUint83dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PadUint84d, PadUint84dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PadUint82d, ClContextControlFixture, PadUint82dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PadUint82dCustomPadding, ClContextControlFixture, PadUint82dCustomPaddingTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PadUint83d, ClContextControlFixture, PadUint83dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PadUint84d, ClContextControlFixture, PadUint84dTest)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(Pad2dQSymm16, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 0.0f)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Pad2dQSymm16CustomPadding, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 1.0f)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Pad3dQSymm16, Pad3dTestCommon<DataType::QSymmS16>, 2.0f, 0)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Pad4dQSymm16, Pad4dTestCommon<DataType::QSymmS16>, 2.0f, 0)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Pad2dQSymm16,
+    ClContextControlFixture, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 0.0f)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Pad2dQSymm16CustomPadding,
+    ClContextControlFixture, Pad2dTestCommon<DataType::QSymmS16>, 2.0f, 0, 1.0f)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Pad3dQSymm16, ClContextControlFixture, Pad3dTestCommon<DataType::QSymmS16>, 2.0f, 0)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Pad4dQSymm16, ClContextControlFixture, Pad4dTestCommon<DataType::QSymmS16>, 2.0f, 0)
 
 // PReLU
-ARMNN_AUTO_TEST_CASE_WITH_THF(PreluFloat32, PreluTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PreluUint8,   PreluTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PreluFloat32, ClContextControlFixture, PreluTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PreluUint8, ClContextControlFixture, PreluTest<DataType::QAsymmU8>)
 
 // Permute
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimplePermuteFloat32, SimplePermuteTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PermuteFloat32ValueSet1Test, PermuteValueSet1Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PermuteFloat32ValueSet2Test, PermuteValueSet2Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PermuteFloat32ValueSet3Test, PermuteValueSet3Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimplePermuteQASymmS8, SimplePermuteTest<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PermuteQASymmS8ValueSet1Test, PermuteValueSet1Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PermuteQASymmS8ValueSet2Test, PermuteValueSet2Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PermuteQASymmS8ValueSet3Test, PermuteValueSet3Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimplePermuteQASymm8, SimplePermuteTest<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PermuteQASymm8ValueSet1Test, PermuteValueSet1Test<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PermuteQASymm8ValueSet2Test, PermuteValueSet2Test<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PermuteQASymm8ValueSet3Test, PermuteValueSet3Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimplePermuteFloat32, ClContextControlFixture, SimplePermuteTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    PermuteFloat32ValueSet1Test, ClContextControlFixture, PermuteValueSet1Test<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    PermuteFloat32ValueSet2Test, ClContextControlFixture, PermuteValueSet2Test<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    PermuteFloat32ValueSet3Test, ClContextControlFixture, PermuteValueSet3Test<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SimplePermuteQASymmS8, ClContextControlFixture, SimplePermuteTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    PermuteQASymmS8ValueSet1Test, ClContextControlFixture, PermuteValueSet1Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    PermuteQASymmS8ValueSet2Test, ClContextControlFixture, PermuteValueSet2Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    PermuteQASymmS8ValueSet3Test, ClContextControlFixture, PermuteValueSet3Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SimplePermuteQASymm8, ClContextControlFixture, SimplePermuteTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    PermuteQASymm8ValueSet1Test, ClContextControlFixture, PermuteValueSet1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    PermuteQASymm8ValueSet2Test, ClContextControlFixture, PermuteValueSet2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    PermuteQASymm8ValueSet3Test, ClContextControlFixture, PermuteValueSet3Test<DataType::QAsymmU8>)
 
 // Lstm
-ARMNN_AUTO_TEST_CASE_WITH_THF(LstmLayerFloat32WithCifgWithPeepholeNoProjection,
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LstmLayerFloat32WithCifgWithPeepholeNoProjection, ClContextControlFixture,
                               LstmLayerFloat32WithCifgWithPeepholeNoProjectionTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LstmLayerFloat32NoCifgNoPeepholeNoProjection,
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LstmLayerFloat32NoCifgNoPeepholeNoProjection, ClContextControlFixture,
                               LstmLayerFloat32NoCifgNoPeepholeNoProjectionTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LstmLayerFloat32NoCifgWithPeepholeWithProjection,
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LstmLayerFloat32NoCifgWithPeepholeWithProjection, ClContextControlFixture,
                               LstmLayerFloat32NoCifgWithPeepholeWithProjectionTest)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNorm,
-                              LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNorm,
+                                 ClContextControlFixture,
+                                 LstmLayerFloat32NoCifgWithPeepholeWithProjectionWithLayerNormTest)
 
 // QLstm
-ARMNN_AUTO_TEST_CASE_WITH_THF(QLstm, QLstmTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(QLstm1, QLstmTest1)
-ARMNN_AUTO_TEST_CASE_WITH_THF(QLstm2, QLstmTest2)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(QLstm, ClContextControlFixture, QLstmTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(QLstm1, ClContextControlFixture, QLstmTest1)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(QLstm2, ClContextControlFixture, QLstmTest2)
 
 // QuantizedLstm
-ARMNN_AUTO_TEST_CASE_WITH_THF(QuantizedLstm, QuantizedLstmTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(QuantizedLstm, ClContextControlFixture, QuantizedLstmTest)
 
 // Convert from Float16 to Float32
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvertFp16ToFp32, SimpleConvertFp16ToFp32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleConvertFp16ToFp32, ClContextControlFixture, SimpleConvertFp16ToFp32Test)
 // Convert from Float32 to Float16
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleConvertFp32ToFp16, SimpleConvertFp32ToFp16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleConvertFp32ToFp16, ClContextControlFixture, SimpleConvertFp32ToFp16Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(AdditionAfterMaxPool, AdditionAfterMaxPoolTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AdditionAfterMaxPool, ClContextControlFixture, AdditionAfterMaxPoolTest)
 
 //Max
-ARMNN_AUTO_TEST_CASE_WITH_THF(MaximumSimple, MaximumSimpleTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MaximumBroadcast1Element, MaximumBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MaximumBroadcast1DVector, MaximumBroadcast1DVectorTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MaximumUint8, MaximumUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MaximumBroadcast1ElementUint8, MaximumBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MaximumBroadcast1DVectorUint8, MaximumBroadcast1DVectorUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MaximumSimple, ClContextControlFixture, MaximumSimpleTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MaximumBroadcast1Element, ClContextControlFixture, MaximumBroadcast1ElementTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MaximumBroadcast1DVector, ClContextControlFixture, MaximumBroadcast1DVectorTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MaximumUint8, ClContextControlFixture, MaximumUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    MaximumBroadcast1ElementUint8, ClContextControlFixture, MaximumBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    MaximumBroadcast1DVectorUint8, ClContextControlFixture, MaximumBroadcast1DVectorUint8Test)
 
 // Mean
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanSimpleFloat32, MeanSimpleTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanSimpleAxisFloat32, MeanSimpleAxisTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanKeepDimsFloat32, MeanKeepDimsTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanMultipleDimsFloat32, MeanMultipleDimsTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanVts1Float32, MeanVts1Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanVts2Float32, MeanVts2Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanVts3Float32, MeanVts3Test<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MeanSimpleFloat32, ClContextControlFixture, MeanSimpleTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MeanSimpleAxisFloat32, ClContextControlFixture, MeanSimpleAxisTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MeanKeepDimsFloat32, ClContextControlFixture, MeanKeepDimsTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    MeanMultipleDimsFloat32, ClContextControlFixture, MeanMultipleDimsTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MeanVts1Float32, ClContextControlFixture, MeanVts1Test<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MeanVts2Float32, ClContextControlFixture, MeanVts2Test<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MeanVts3Float32, ClContextControlFixture, MeanVts3Test<DataType::Float32>)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanSimpleQuantisedAsymmS8, MeanSimpleTest<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanSimpleAxisQuantisedAsymmS8, MeanSimpleAxisTest<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanKeepDimsQuantisedAsymmS8, MeanKeepDimsTest<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanMultipleDimsQuantisedAsymmS8, MeanMultipleDimsTest<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanVts1QuantisedAsymmS8, MeanVts1Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanVts2QuantisedAsymmS8, MeanVts2Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanVts3QuantisedAsymmS8, MeanVts3Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    MeanSimpleQuantisedAsymmS8, ClContextControlFixture, MeanSimpleTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    MeanSimpleAxisQuantisedAsymmS8, ClContextControlFixture, MeanSimpleAxisTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    MeanKeepDimsQuantisedAsymmS8, ClContextControlFixture, MeanKeepDimsTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    MeanMultipleDimsQuantisedAsymmS8, ClContextControlFixture, MeanMultipleDimsTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MeanVts1QuantisedAsymmS8, ClContextControlFixture, MeanVts1Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MeanVts2QuantisedAsymmS8, ClContextControlFixture, MeanVts2Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MeanVts3QuantisedAsymmS8, ClContextControlFixture, MeanVts3Test<DataType::QAsymmS8>)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanSimpleQuantisedAsymm8, MeanSimpleTest<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanSimpleAxisQuantisedAsymm8, MeanSimpleAxisTest<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanKeepDimsQuantisedAsymm8, MeanKeepDimsTest<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanMultipleDimsQuantisedAsymm8, MeanMultipleDimsTest<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanVts1QuantisedAsymm8, MeanVts1Test<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanVts2QuantisedAsymm8, MeanVts2Test<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MeanVts3QuantisedAsymm8, MeanVts3Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    MeanSimpleQuantisedAsymm8, ClContextControlFixture, MeanSimpleTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    MeanSimpleAxisQuantisedAsymm8, ClContextControlFixture, MeanSimpleAxisTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    MeanKeepDimsQuantisedAsymm8, ClContextControlFixture, MeanKeepDimsTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    MeanMultipleDimsQuantisedAsymm8, ClContextControlFixture, MeanMultipleDimsTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MeanVts1QuantisedAsymm8, ClContextControlFixture, MeanVts1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MeanVts2QuantisedAsymm8, ClContextControlFixture, MeanVts2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MeanVts3QuantisedAsymm8, ClContextControlFixture, MeanVts3Test<DataType::QAsymmU8>)
 
 // Minimum
-ARMNN_AUTO_TEST_CASE_WITH_THF(MinimumBroadcast1Element1, MinimumBroadcast1ElementTest1)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MinimumBroadcast1Element2, MinimumBroadcast1ElementTest2)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MinimumBroadcast1DVectorUint8, MinimumBroadcast1DVectorUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MinimumBroadcast1Element1, ClContextControlFixture, MinimumBroadcast1ElementTest1)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MinimumBroadcast1Element2, ClContextControlFixture, MinimumBroadcast1ElementTest2)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    MinimumBroadcast1DVectorUint8, ClContextControlFixture, MinimumBroadcast1DVectorUint8Test)
 
 // Equal
-ARMNN_AUTO_TEST_CASE_WITH_THF(EqualSimple,            EqualSimpleTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(EqualBroadcast1Element, EqualBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(EqualBroadcast1dVector, EqualBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(EqualSimple, ClContextControlFixture, EqualSimpleTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(EqualBroadcast1Element, ClContextControlFixture, EqualBroadcast1ElementTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(EqualBroadcast1dVector, ClContextControlFixture, EqualBroadcast1dVectorTest)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(EqualSimpleFloat16,            EqualSimpleFloat16Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(EqualBroadcast1ElementFloat16, EqualBroadcast1ElementFloat16Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(EqualBroadcast1dVectorFloat16, EqualBroadcast1dVectorFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(EqualSimpleFloat16, ClContextControlFixture, EqualSimpleFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    EqualBroadcast1ElementFloat16, ClContextControlFixture, EqualBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    EqualBroadcast1dVectorFloat16, ClContextControlFixture, EqualBroadcast1dVectorFloat16Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(EqualSimpleUint8,            EqualSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(EqualBroadcast1ElementUint8, EqualBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(EqualBroadcast1dVectorUint8, EqualBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(EqualSimpleUint8, ClContextControlFixture, EqualSimpleUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(EqualBroadcast1ElementUint8, ClContextControlFixture, EqualBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(EqualBroadcast1dVectorUint8, ClContextControlFixture, EqualBroadcast1dVectorUint8Test)
 
 // Greater
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterSimple,            GreaterSimpleTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterBroadcast1Element, GreaterBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterBroadcast1dVector, GreaterBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(GreaterSimple, ClContextControlFixture, GreaterSimpleTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(GreaterBroadcast1Element, ClContextControlFixture, GreaterBroadcast1ElementTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(GreaterBroadcast1dVector, ClContextControlFixture, GreaterBroadcast1dVectorTest)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterSimpleFloat16,            GreaterSimpleFloat16Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterBroadcast1ElementFloat16, GreaterBroadcast1ElementFloat16Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterBroadcast1dVectorFloat16, GreaterBroadcast1dVectorFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(GreaterSimpleFloat16, ClContextControlFixture, GreaterSimpleFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    GreaterBroadcast1ElementFloat16, ClContextControlFixture, GreaterBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    GreaterBroadcast1dVectorFloat16, ClContextControlFixture, GreaterBroadcast1dVectorFloat16Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterSimpleUint8,            GreaterSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterBroadcast1ElementUint8, GreaterBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterBroadcast1dVectorUint8, GreaterBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(GreaterSimpleUint8, ClContextControlFixture, GreaterSimpleUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    GreaterBroadcast1ElementUint8, ClContextControlFixture, GreaterBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    GreaterBroadcast1dVectorUint8, ClContextControlFixture, GreaterBroadcast1dVectorUint8Test)
 
 // GreaterOrEqual
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualSimple,            GreaterOrEqualSimpleTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualBroadcast1Element, GreaterOrEqualBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualBroadcast1dVector, GreaterOrEqualBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(GreaterOrEqualSimple, ClContextControlFixture, GreaterOrEqualSimpleTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    GreaterOrEqualBroadcast1Element, ClContextControlFixture, GreaterOrEqualBroadcast1ElementTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    GreaterOrEqualBroadcast1dVector, ClContextControlFixture, GreaterOrEqualBroadcast1dVectorTest)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualSimpleFloat16,            GreaterOrEqualSimpleFloat16Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualBroadcast1ElementFloat16, GreaterOrEqualBroadcast1ElementFloat16Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualBroadcast1dVectorFloat16, GreaterOrEqualBroadcast1dVectorFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    GreaterOrEqualSimpleFloat16, ClContextControlFixture, GreaterOrEqualSimpleFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    GreaterOrEqualBroadcast1ElementFloat16, ClContextControlFixture, GreaterOrEqualBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    GreaterOrEqualBroadcast1dVectorFloat16, ClContextControlFixture, GreaterOrEqualBroadcast1dVectorFloat16Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualSimpleUint8,            GreaterOrEqualSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualBroadcast1ElementUint8, GreaterOrEqualBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(GreaterOrEqualBroadcast1dVectorUint8, GreaterOrEqualBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(GreaterOrEqualSimpleUint8, ClContextControlFixture, GreaterOrEqualSimpleUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    GreaterOrEqualBroadcast1ElementUint8, ClContextControlFixture, GreaterOrEqualBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    GreaterOrEqualBroadcast1dVectorUint8, ClContextControlFixture, GreaterOrEqualBroadcast1dVectorUint8Test)
 
 // Less
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessSimple,            LessSimpleTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessBroadcast1Element, LessBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessBroadcast1dVector, LessBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LessSimple, ClContextControlFixture, LessSimpleTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LessBroadcast1Element, ClContextControlFixture, LessBroadcast1ElementTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LessBroadcast1dVector, ClContextControlFixture, LessBroadcast1dVectorTest)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessSimpleFloat16,            LessSimpleFloat16Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessBroadcast1ElementFloat16, LessBroadcast1ElementFloat16Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessBroadcast1dVectorFloat16, LessBroadcast1dVectorFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LessSimpleFloat16, ClContextControlFixture, LessSimpleFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    LessBroadcast1ElementFloat16, ClContextControlFixture, LessBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    LessBroadcast1dVectorFloat16, ClContextControlFixture, LessBroadcast1dVectorFloat16Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessSimpleUint8,            LessSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessBroadcast1ElementUint8, LessBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessBroadcast1dVectorUint8, LessBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LessSimpleUint8, ClContextControlFixture, LessSimpleUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LessBroadcast1ElementUint8, ClContextControlFixture, LessBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LessBroadcast1dVectorUint8, ClContextControlFixture, LessBroadcast1dVectorUint8Test)
 
 // LessOrEqual
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualSimple,            LessOrEqualSimpleTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualBroadcast1Element, LessOrEqualBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualBroadcast1dVector, LessOrEqualBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LessOrEqualSimple, ClContextControlFixture, LessOrEqualSimpleTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    LessOrEqualBroadcast1Element, ClContextControlFixture, LessOrEqualBroadcast1ElementTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    LessOrEqualBroadcast1dVector, ClContextControlFixture, LessOrEqualBroadcast1dVectorTest)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualSimpleFloat16,            LessOrEqualSimpleFloat16Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualBroadcast1ElementFloat16, LessOrEqualBroadcast1ElementFloat16Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualBroadcast1dVectorFloat16, LessOrEqualBroadcast1dVectorFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LessOrEqualSimpleFloat16, ClContextControlFixture, LessOrEqualSimpleFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    LessOrEqualBroadcast1ElementFloat16, ClContextControlFixture, LessOrEqualBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    LessOrEqualBroadcast1dVectorFloat16, ClContextControlFixture, LessOrEqualBroadcast1dVectorFloat16Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualSimpleUint8,            LessOrEqualSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualBroadcast1ElementUint8, LessOrEqualBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LessOrEqualBroadcast1dVectorUint8, LessOrEqualBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LessOrEqualSimpleUint8, ClContextControlFixture, LessOrEqualSimpleUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    LessOrEqualBroadcast1ElementUint8, ClContextControlFixture, LessOrEqualBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    LessOrEqualBroadcast1dVectorUint8, ClContextControlFixture, LessOrEqualBroadcast1dVectorUint8Test)
 
 // NotEqual
-ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualSimple,            NotEqualSimpleTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualBroadcast1Element, NotEqualBroadcast1ElementTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualBroadcast1dVector, NotEqualBroadcast1dVectorTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(NotEqualSimple, ClContextControlFixture, NotEqualSimpleTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(NotEqualBroadcast1Element, ClContextControlFixture, NotEqualBroadcast1ElementTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(NotEqualBroadcast1dVector, ClContextControlFixture, NotEqualBroadcast1dVectorTest)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualSimpleFloat16,            NotEqualSimpleFloat16Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualBroadcast1ElementFloat16, NotEqualBroadcast1ElementFloat16Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualBroadcast1dVectorFloat16, NotEqualBroadcast1dVectorFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(NotEqualSimpleFloat16, ClContextControlFixture, NotEqualSimpleFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    NotEqualBroadcast1ElementFloat16, ClContextControlFixture, NotEqualBroadcast1ElementFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    NotEqualBroadcast1dVectorFloat16, ClContextControlFixture, NotEqualBroadcast1dVectorFloat16Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualSimpleUint8,            NotEqualSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualBroadcast1ElementUint8, NotEqualBroadcast1ElementUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(NotEqualBroadcast1dVectorUint8, NotEqualBroadcast1dVectorUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(NotEqualSimpleUint8, ClContextControlFixture, NotEqualSimpleUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    NotEqualBroadcast1ElementUint8, ClContextControlFixture, NotEqualBroadcast1ElementUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    NotEqualBroadcast1dVectorUint8, ClContextControlFixture, NotEqualBroadcast1dVectorUint8Test)
 
 // Softmax
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleSoftmaxBeta1, SimpleSoftmaxTest, 1.0f)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleSoftmaxBeta2, SimpleSoftmaxTest, 2.0f)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleSoftmaxBeta1Uint8, SimpleSoftmaxUint8Test, 1.0f)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleSoftmaxBeta2Uint8, SimpleSoftmaxUint8Test, 2.0f)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleSoftmaxBeta1, ClContextControlFixture, SimpleSoftmaxTest, 1.0f)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleSoftmaxBeta2, ClContextControlFixture, SimpleSoftmaxTest, 2.0f)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleSoftmaxBeta1Uint8, ClContextControlFixture, SimpleSoftmaxUint8Test, 1.0f)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleSoftmaxBeta2Uint8, ClContextControlFixture, SimpleSoftmaxUint8Test, 2.0f)
 
 // LogSoftmax
-ARMNN_AUTO_TEST_CASE_WITH_THF(LogSoftmaxFloat32_1, LogSoftmaxTest1<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogSoftmaxFloat32_1, ClContextControlFixture, LogSoftmaxTest1<DataType::Float32>)
 
 // Space To Batch Nd
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdSimpleFloat32, SpaceToBatchNdSimpleFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdMultiChannelsFloat32, SpaceToBatchNdMultiChannelsFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdMultiBlockFloat32, SpaceToBatchNdMultiBlockFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdPaddingFloat32, SpaceToBatchNdPaddingFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SpaceToBatchNdSimpleFloat32, ClContextControlFixture, SpaceToBatchNdSimpleFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SpaceToBatchNdMultiChannelsFloat32, ClContextControlFixture, SpaceToBatchNdMultiChannelsFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SpaceToBatchNdMultiBlockFloat32, ClContextControlFixture, SpaceToBatchNdMultiBlockFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SpaceToBatchNdPaddingFloat32, ClContextControlFixture, SpaceToBatchNdPaddingFloat32Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdSimpleUint8, SpaceToBatchNdSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdMultiChannelsUint8, SpaceToBatchNdMultiChannelsUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdMultiBlockUint8, SpaceToBatchNdMultiBlockUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdPaddingUint8, SpaceToBatchNdPaddingUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SpaceToBatchNdSimpleUint8, ClContextControlFixture, SpaceToBatchNdSimpleUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SpaceToBatchNdMultiChannelsUint8, ClContextControlFixture, SpaceToBatchNdMultiChannelsUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SpaceToBatchNdMultiBlockUint8, ClContextControlFixture, SpaceToBatchNdMultiBlockUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SpaceToBatchNdPaddingUint8, ClContextControlFixture, SpaceToBatchNdPaddingUint8Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdSimpleNhwcFloat32, SpaceToBatchNdSimpleNhwcFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdMultiChannelsNhwcFloat32, SpaceToBatchNdMultiChannelsNhwcFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdMultiBlockNhwcFloat32, SpaceToBatchNdMultiBlockNhwcFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdPaddingNhwcFloat32, SpaceToBatchNdPaddingNhwcFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SpaceToBatchNdSimpleNhwcFloat32, ClContextControlFixture, SpaceToBatchNdSimpleNhwcFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SpaceToBatchNdMultiChannelsNhwcFloat32, ClContextControlFixture, SpaceToBatchNdMultiChannelsNhwcFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SpaceToBatchNdMultiBlockNhwcFloat32, ClContextControlFixture, SpaceToBatchNdMultiBlockNhwcFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SpaceToBatchNdPaddingNhwcFloat32, ClContextControlFixture, SpaceToBatchNdPaddingNhwcFloat32Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdSimpleNhwcUint8, SpaceToBatchNdSimpleNhwcUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdMultiChannelsNhwcUint8, SpaceToBatchNdMultiChannelsNhwcUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdMultiBlockNhwcUint8, SpaceToBatchNdMultiBlockNhwcUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToBatchNdPaddingNhwcUint8, SpaceToBatchNdPaddingNhwcUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SpaceToBatchNdSimpleNhwcUint8, ClContextControlFixture, SpaceToBatchNdSimpleNhwcUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SpaceToBatchNdMultiChannelsNhwcUint8, ClContextControlFixture, SpaceToBatchNdMultiChannelsNhwcUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SpaceToBatchNdMultiBlockNhwcUint8, ClContextControlFixture, SpaceToBatchNdMultiBlockNhwcUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SpaceToBatchNdPaddingNhwcUint8, ClContextControlFixture, SpaceToBatchNdPaddingNhwcUint8Test)
 
 // Space To Depth
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToDepthNhwcAsymmQ8, SpaceToDepthNhwcAsymmQ8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToDepthNchwAsymmQ8, SpaceToDepthNchwAsymmQ8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SpaceToDepthNhwcAsymmQ8, ClContextControlFixture, SpaceToDepthNhwcAsymmQ8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SpaceToDepthNchwAsymmQ8, ClContextControlFixture, SpaceToDepthNchwAsymmQ8Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToDepthNhwx1Float32, SpaceToDepthNhwcFloat32Test1)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToDepthNchw1Float32, SpaceToDepthNchwFloat32Test1)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SpaceToDepthNhwx1Float32, ClContextControlFixture, SpaceToDepthNhwcFloat32Test1)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SpaceToDepthNchw1Float32, ClContextControlFixture, SpaceToDepthNchwFloat32Test1)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToDepthNhwc2Float32, SpaceToDepthNhwcFloat32Test2)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToDepthNchw2Float32, SpaceToDepthNchwFloat32Test2)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SpaceToDepthNhwc2Float32, ClContextControlFixture, SpaceToDepthNhwcFloat32Test2)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SpaceToDepthNchw2Float32, ClContextControlFixture, SpaceToDepthNchwFloat32Test2)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToDepthNhwcQSymm16, SpaceToDepthNhwcQSymm16Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SpaceToDepthNchwQSymm16, SpaceToDepthNchwQSymm16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SpaceToDepthNhwcQSymm16, ClContextControlFixture, SpaceToDepthNhwcQSymm16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SpaceToDepthNchwQSymm16, ClContextControlFixture, SpaceToDepthNchwQSymm16Test)
 
 // Stack
-ARMNN_AUTO_TEST_CASE_WITH_THF(Stack0Axis,           StackAxis0Float32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StackOutput4DAxis1,   StackOutput4DAxis1Float32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StackOutput4DAxis2,   StackOutput4DAxis2Float32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StackOutput4DAxis3,   StackOutput4DAxis3Float32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StackOutput3DInputs3, StackOutput3DInputs3Float32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StackOutput5D,        StackOutput5DFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StackFloat16,         StackFloat16Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Stack0Axis, ClContextControlFixture, StackAxis0Float32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StackOutput4DAxis1, ClContextControlFixture, StackOutput4DAxis1Float32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StackOutput4DAxis2, ClContextControlFixture, StackOutput4DAxis2Float32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StackOutput4DAxis3, ClContextControlFixture, StackOutput4DAxis3Float32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StackOutput3DInputs3, ClContextControlFixture, StackOutput3DInputs3Float32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StackOutput5D, ClContextControlFixture, StackOutput5DFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StackFloat16, ClContextControlFixture, StackFloat16Test)
 
 // Slice
-ARMNN_AUTO_TEST_CASE(Slice4dFloat32, Slice4dFloat32Test)
-ARMNN_AUTO_TEST_CASE(Slice3dFloat32, Slice3dFloat32Test)
-ARMNN_AUTO_TEST_CASE(Slice2dFloat32, Slice2dFloat32Test)
-ARMNN_AUTO_TEST_CASE(Slice1dFloat32, Slice1dFloat32Test)
-ARMNN_AUTO_TEST_CASE(Slice4dUint8, Slice4dUint8Test)
-ARMNN_AUTO_TEST_CASE(Slice3dUint8, Slice3dUint8Test)
-ARMNN_AUTO_TEST_CASE(Slice2dUint8, Slice2dUint8Test)
-ARMNN_AUTO_TEST_CASE(Slice1dUint8, Slice1dUint8Test)
-ARMNN_AUTO_TEST_CASE(Slice4dInt16, Slice4dInt16Test)
-ARMNN_AUTO_TEST_CASE(Slice3dInt16, Slice3dInt16Test)
-ARMNN_AUTO_TEST_CASE(Slice2dInt16, Slice2dInt16Test)
-ARMNN_AUTO_TEST_CASE(Slice1dInt16, Slice1dInt16Test)
+ARMNN_AUTO_TEST_FIXTURE(Slice4dFloat32, ClContextControlFixture, Slice4dFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE(Slice3dFloat32, ClContextControlFixture, Slice3dFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE(Slice2dFloat32, ClContextControlFixture, Slice2dFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE(Slice1dFloat32, ClContextControlFixture, Slice1dFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE(Slice4dUint8, ClContextControlFixture, Slice4dUint8Test)
+ARMNN_AUTO_TEST_FIXTURE(Slice3dUint8, ClContextControlFixture, Slice3dUint8Test)
+ARMNN_AUTO_TEST_FIXTURE(Slice2dUint8, ClContextControlFixture, Slice2dUint8Test)
+ARMNN_AUTO_TEST_FIXTURE(Slice1dUint8, ClContextControlFixture, Slice1dUint8Test)
+ARMNN_AUTO_TEST_FIXTURE(Slice4dInt16, ClContextControlFixture, Slice4dInt16Test)
+ARMNN_AUTO_TEST_FIXTURE(Slice3dInt16, ClContextControlFixture, Slice3dInt16Test)
+ARMNN_AUTO_TEST_FIXTURE(Slice2dInt16, ClContextControlFixture, Slice2dInt16Test)
+ARMNN_AUTO_TEST_FIXTURE(Slice1dInt16, ClContextControlFixture, Slice1dInt16Test)
 
 // Strided Slice
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice4dFloat32, StridedSlice4dFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice4dReverseFloat32, StridedSlice4dReverseFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSliceSimpleStrideFloat32, StridedSliceSimpleStrideFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSliceSimpleRangeMaskFloat32, StridedSliceSimpleRangeMaskFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSliceShrinkAxisMaskFloat32, StridedSliceShrinkAxisMaskFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSliceShrinkAxisMaskCTSFloat32, StridedSliceShrinkAxisMaskCTSFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition0Dim3Float32,
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSlice4dFloat32, ClContextControlFixture, StridedSlice4dFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    StridedSlice4dReverseFloat32, ClContextControlFixture, StridedSlice4dReverseFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    StridedSliceSimpleStrideFloat32, ClContextControlFixture, StridedSliceSimpleStrideFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    StridedSliceSimpleRangeMaskFloat32, ClContextControlFixture, StridedSliceSimpleRangeMaskFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    StridedSliceShrinkAxisMaskFloat32, ClContextControlFixture, StridedSliceShrinkAxisMaskFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    StridedSliceShrinkAxisMaskCTSFloat32, ClContextControlFixture, StridedSliceShrinkAxisMaskCTSFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition0Dim3Float32, ClContextControlFixture,
                      StridedSliceShrinkAxisMaskBitPosition0Dim3Float32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition0Float32, StridedSliceShrinkAxisMaskBitPosition0Float32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition1Float32, StridedSliceShrinkAxisMaskBitPosition1Float32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition2Float32, StridedSliceShrinkAxisMaskBitPosition2Float32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition3Float32, StridedSliceShrinkAxisMaskBitPosition3Float32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition0And1Float32, StridedSliceShrinkAxisMaskBitPosition0And1Float32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition0And2Float32, StridedSliceShrinkAxisMaskBitPosition0And2Float32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition0And3Float32, StridedSliceShrinkAxisMaskBitPosition0And3Float32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition0And1And3Float32, StridedSliceShrinkAxisMaskBitPosition0And1And3Float32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dFloat32, StridedSlice3dFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dReverseFloat32, StridedSlice3dReverseFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice2dFloat32, StridedSlice2dFloat32Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice2dReverseFloat32, StridedSlice2dReverseFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition0Float32,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition0Float32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition1Float32,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition1Float32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition2Float32,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition2Float32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition3Float32,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition3Float32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition0And1Float32,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition0And1Float32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition0And2Float32,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition0And2Float32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition0And3Float32,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition0And3Float32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition0And1And3Float32,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition0And1And3Float32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSlice3dFloat32, ClContextControlFixture, StridedSlice3dFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    StridedSlice3dReverseFloat32, ClContextControlFixture, StridedSlice3dReverseFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    StridedSlice2dFloat32, ClContextControlFixture, StridedSlice2dFloat32Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    StridedSlice2dReverseFloat32, ClContextControlFixture, StridedSlice2dReverseFloat32Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice4dUint8, StridedSlice4dUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice4dReverseUint8, StridedSlice4dReverseUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSliceSimpleStrideUint8, StridedSliceSimpleStrideUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSliceSimpleRangeMaskUint8, StridedSliceSimpleRangeMaskUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSliceShrinkAxisMaskUint8, StridedSliceShrinkAxisMaskUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition0Dim3Uint8, StridedSliceShrinkAxisMaskBitPosition0Dim3Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition0Uint8, StridedSliceShrinkAxisMaskBitPosition0Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition1Uint8, StridedSliceShrinkAxisMaskBitPosition1Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition2Uint8, StridedSliceShrinkAxisMaskBitPosition2Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition3Uint8, StridedSliceShrinkAxisMaskBitPosition3Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition0And1Uint8, StridedSliceShrinkAxisMaskBitPosition0And1Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition0And2Uint8, StridedSliceShrinkAxisMaskBitPosition0And2Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition0And3Uint8, StridedSliceShrinkAxisMaskBitPosition0And3Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(
-    StridedSliceShrinkAxisMaskBitPosition0And1And3Uint8, StridedSliceShrinkAxisMaskBitPosition0And1And3Uint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dUint8, StridedSlice3dUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice3dReverseUint8, StridedSlice3dReverseUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice2dUint8, StridedSlice2dUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedSlice2dReverseUint8, StridedSlice2dReverseUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSlice4dUint8, ClContextControlFixture, StridedSlice4dUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    StridedSlice4dReverseUint8, ClContextControlFixture, StridedSlice4dReverseUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    StridedSliceSimpleStrideUint8, ClContextControlFixture, StridedSliceSimpleStrideUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    StridedSliceSimpleRangeMaskUint8, ClContextControlFixture, StridedSliceSimpleRangeMaskUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    StridedSliceShrinkAxisMaskUint8, ClContextControlFixture, StridedSliceShrinkAxisMaskUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition0Dim3Uint8,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition0Dim3Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition0Uint8,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition0Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition1Uint8,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition1Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition2Uint8,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition2Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition3Uint8,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition3Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition0And1Uint8,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition0And1Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition0And2Uint8,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition0And2Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition0And3Uint8,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition0And3Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSliceShrinkAxisMaskBitPosition0And1And3Uint8,
+                                 ClContextControlFixture,
+                                 StridedSliceShrinkAxisMaskBitPosition0And1And3Uint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSlice3dUint8, ClContextControlFixture, StridedSlice3dUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSlice3dReverseUint8, ClContextControlFixture, StridedSlice3dReverseUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSlice2dUint8, ClContextControlFixture, StridedSlice2dUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedSlice2dReverseUint8, ClContextControlFixture, StridedSlice2dReverseUint8Test)
 
 // Resize Bilinear - NCHW
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeBilinear,
-                              SimpleResizeBilinearTest<DataType::Float32>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeBilinearInt8,
-                              SimpleResizeBilinearTest<DataType::QAsymmS8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeBilinearUint8,
-                              SimpleResizeBilinearTest<DataType::QAsymmU8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearNop,
-                              ResizeBilinearNopTest<DataType::Float32>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearNopInt8,
-                              ResizeBilinearNopTest<DataType::QAsymmS8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearNopUint8,
-                              ResizeBilinearNopTest<DataType::QAsymmU8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearSqMin,
-                              ResizeBilinearSqMinTest<DataType::Float32>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearSqMinInt8,
-                              ResizeBilinearSqMinTest<DataType::QAsymmS8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearSqMinUint8,
-                              ResizeBilinearSqMinTest<DataType::QAsymmU8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearMin,
-                              ResizeBilinearMinTest<DataType::Float32>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearMinInt8,
-                              ResizeBilinearMinTest<DataType::QAsymmS8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearMinUint8,
-                              ResizeBilinearMinTest<DataType::QAsymmU8>,
-                              DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleResizeBilinear,
+                                 ClContextControlFixture,
+                                 SimpleResizeBilinearTest<DataType::Float32>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleResizeBilinearInt8,
+                                 ClContextControlFixture,
+                                 SimpleResizeBilinearTest<DataType::QAsymmS8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleResizeBilinearUint8,
+                                 ClContextControlFixture,
+                                 SimpleResizeBilinearTest<DataType::QAsymmU8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearNop,
+                                 ClContextControlFixture,
+                                 ResizeBilinearNopTest<DataType::Float32>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearNopInt8,
+                                 ClContextControlFixture,
+                                 ResizeBilinearNopTest<DataType::QAsymmS8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearNopUint8,
+                                 ClContextControlFixture,
+                                 ResizeBilinearNopTest<DataType::QAsymmU8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearSqMin,
+                                 ClContextControlFixture,
+                                 ResizeBilinearSqMinTest<DataType::Float32>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearSqMinInt8,
+                                 ClContextControlFixture,
+                                 ResizeBilinearSqMinTest<DataType::QAsymmS8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearSqMinUint8,
+                                 ClContextControlFixture,
+                                 ResizeBilinearSqMinTest<DataType::QAsymmU8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearMin,
+                                 ClContextControlFixture,
+                                 ResizeBilinearMinTest<DataType::Float32>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearMinInt8,
+                                 ClContextControlFixture,
+                                 ResizeBilinearMinTest<DataType::QAsymmS8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearMinUint8,
+                                 ClContextControlFixture,
+                                 ResizeBilinearMinTest<DataType::QAsymmU8>,
+                                 DataLayout::NCHW)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(HalfPixelCentersResizeBilinear,
-                              HalfPixelCentersResizeBilinearTest<DataType::Float32>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AlignCornersResizeBilinear,
-                              AlignCornersResizeBilinearTest<DataType::Float32>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(HalfPixelCentersResizeBilinearInt8,
-                              HalfPixelCentersResizeBilinearTest<DataType::QAsymmS8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AlignCornersResizeBilinearInt8,
-                              AlignCornersResizeBilinearTest<DataType::QAsymmS8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(HalfPixelCentersResizeBilinearUint8,
-                              HalfPixelCentersResizeBilinearTest<DataType::QAsymmU8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AlignCornersResizeBilinearUint8,
-                              AlignCornersResizeBilinearTest<DataType::QAsymmU8>,
-                              DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(HalfPixelCentersResizeBilinear,
+                                 ClContextControlFixture,
+                                 HalfPixelCentersResizeBilinearTest<DataType::Float32>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AlignCornersResizeBilinear,
+                                 ClContextControlFixture,
+                                 AlignCornersResizeBilinearTest<DataType::Float32>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(HalfPixelCentersResizeBilinearInt8,
+                                 ClContextControlFixture,
+                                 HalfPixelCentersResizeBilinearTest<DataType::QAsymmS8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AlignCornersResizeBilinearInt8,
+                                 ClContextControlFixture,
+                                 AlignCornersResizeBilinearTest<DataType::QAsymmS8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(HalfPixelCentersResizeBilinearUint8,
+                                 ClContextControlFixture,
+                                 HalfPixelCentersResizeBilinearTest<DataType::QAsymmU8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AlignCornersResizeBilinearUint8,
+                                 ClContextControlFixture,
+                                 AlignCornersResizeBilinearTest<DataType::QAsymmU8>,
+                                 DataLayout::NCHW)
 
 // Resize Bilinear - NHWC
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearNopNhwc,
-                              ResizeBilinearNopTest<DataType::Float32>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearNopInt8Nhwc,
-                              ResizeBilinearNopTest<DataType::QAsymmS8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearNopUint8Nhwc,
-                              ResizeBilinearNopTest<DataType::QAsymmU8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeBilinearNhwc,
-                              SimpleResizeBilinearTest<DataType::Float32>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeBilinearInt8Nhwc,
-                              SimpleResizeBilinearTest<DataType::QAsymmS8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeBilinearUint8Nhwc,
-                              SimpleResizeBilinearTest<DataType::QAsymmU8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearSqMinNhwc,
-                              ResizeBilinearSqMinTest<DataType::Float32>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearSqMinInt8Nhwc,
-                              ResizeBilinearSqMinTest<DataType::QAsymmS8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearSqMinUint8Nhwc,
-                              ResizeBilinearSqMinTest<DataType::QAsymmU8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearMinNhwc,
-                              ResizeBilinearMinTest<DataType::Float32>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearMinInt8Nhwc,
-                              ResizeBilinearMinTest<DataType::QAsymmS8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeBilinearMinUint8Nhwc,
-                              ResizeBilinearMinTest<DataType::QAsymmU8>,
-                              DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearNopNhwc,
+                                 ClContextControlFixture,
+                                 ResizeBilinearNopTest<DataType::Float32>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearNopInt8Nhwc,
+                                 ClContextControlFixture,
+                                 ResizeBilinearNopTest<DataType::QAsymmS8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearNopUint8Nhwc,
+                                 ClContextControlFixture,
+                                 ResizeBilinearNopTest<DataType::QAsymmU8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleResizeBilinearNhwc,
+                                 ClContextControlFixture,
+                                 SimpleResizeBilinearTest<DataType::Float32>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleResizeBilinearInt8Nhwc,
+                                 ClContextControlFixture,
+                                 SimpleResizeBilinearTest<DataType::QAsymmS8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleResizeBilinearUint8Nhwc,
+                                 ClContextControlFixture,
+                                 SimpleResizeBilinearTest<DataType::QAsymmU8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearSqMinNhwc,
+                                 ClContextControlFixture,
+                                 ResizeBilinearSqMinTest<DataType::Float32>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearSqMinInt8Nhwc,
+                                 ClContextControlFixture,
+                                 ResizeBilinearSqMinTest<DataType::QAsymmS8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearSqMinUint8Nhwc,
+                                 ClContextControlFixture,
+                                 ResizeBilinearSqMinTest<DataType::QAsymmU8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearMinNhwc,
+                                 ClContextControlFixture,
+                                 ResizeBilinearMinTest<DataType::Float32>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearMinInt8Nhwc,
+                                 ClContextControlFixture,
+                                 ResizeBilinearMinTest<DataType::QAsymmS8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeBilinearMinUint8Nhwc,
+                                 ClContextControlFixture,
+                                 ResizeBilinearMinTest<DataType::QAsymmU8>,
+                                 DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(HalfPixelCentersResizeBilinearNhwc,
-                              HalfPixelCentersResizeBilinearTest<DataType::Float32>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AlignCornersResizeBilinearNhwc,
-                              AlignCornersResizeBilinearTest<DataType::Float32>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(HalfPixelCentersResizeBilinearInt8Nhwc,
-                              HalfPixelCentersResizeBilinearTest<DataType::QAsymmS8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AlignCornersResizeBilinearInt8Nhwc,
-                              AlignCornersResizeBilinearTest<DataType::QAsymmS8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(HalfPixelCentersResizeBilinearUint8Nhwc,
-                              HalfPixelCentersResizeBilinearTest<DataType::QAsymmU8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AlignCornersResizeBilinearUint8Nhwc,
-                              AlignCornersResizeBilinearTest<DataType::QAsymmU8>,
-                              DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(HalfPixelCentersResizeBilinearNhwc,
+                                 ClContextControlFixture,
+                                 HalfPixelCentersResizeBilinearTest<DataType::Float32>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AlignCornersResizeBilinearNhwc,
+                                 ClContextControlFixture,
+                                 AlignCornersResizeBilinearTest<DataType::Float32>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(HalfPixelCentersResizeBilinearInt8Nhwc,
+                                 ClContextControlFixture,
+                                 HalfPixelCentersResizeBilinearTest<DataType::QAsymmS8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AlignCornersResizeBilinearInt8Nhwc,
+                                 ClContextControlFixture,
+                                 AlignCornersResizeBilinearTest<DataType::QAsymmS8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(HalfPixelCentersResizeBilinearUint8Nhwc,
+                                 ClContextControlFixture,
+                                 HalfPixelCentersResizeBilinearTest<DataType::QAsymmU8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AlignCornersResizeBilinearUint8Nhwc,
+                                 ClContextControlFixture,
+                                 AlignCornersResizeBilinearTest<DataType::QAsymmU8>,
+                                 DataLayout::NHWC)
 
 // Resize NearestNeighbor - NCHW
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeNearestNeighbor,
-                              SimpleResizeNearestNeighborTest<DataType::Float32>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeNearestNeighborInt8,
-                              SimpleResizeNearestNeighborTest<DataType::QAsymmS8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeNearestNeighborUint8,
-                              SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborNop,
-                              ResizeNearestNeighborNopTest<DataType::Float32>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborNopInt8,
-                              ResizeNearestNeighborNopTest<DataType::QAsymmS8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborNopUint8,
-                              ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborSqMin,
-                              ResizeNearestNeighborSqMinTest<DataType::Float32>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborSqMinInt8,
-                              ResizeNearestNeighborSqMinTest<DataType::QAsymmS8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborSqMinUint8,
-                              ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborMin,
-                              ResizeNearestNeighborMinTest<DataType::Float32>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborMinInt8,
-                              ResizeNearestNeighborMinTest<DataType::QAsymmS8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborMinUint8,
-                              ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborMag,
-                              ResizeNearestNeighborMagTest<DataType::Float32>,
-                              DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborMagInt8,
-                              ResizeNearestNeighborMagTest<DataType::QAsymmS8>,
-                              DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborMagUint8,
-                              ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
-                              DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleResizeNearestNeighbor,
+                                 ClContextControlFixture,
+                                 SimpleResizeNearestNeighborTest<DataType::Float32>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleResizeNearestNeighborInt8,
+                                 ClContextControlFixture,
+                                 SimpleResizeNearestNeighborTest<DataType::QAsymmS8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleResizeNearestNeighborUint8,
+                                 ClContextControlFixture,
+                                 SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborNop,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborNopTest<DataType::Float32>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborNopInt8,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborNopTest<DataType::QAsymmS8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborNopUint8,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborSqMin,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborSqMinTest<DataType::Float32>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborSqMinInt8,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborSqMinTest<DataType::QAsymmS8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborSqMinUint8,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborMin,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborMinTest<DataType::Float32>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborMinInt8,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborMinTest<DataType::QAsymmS8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborMinUint8,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborMag,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborMagTest<DataType::Float32>,
+                                 DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborMagInt8,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborMagTest<DataType::QAsymmS8>,
+                                 DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborMagUint8,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
+                                 DataLayout::NCHW, 0.1f, 50, 0.1f, 50)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(HalfPixelCentersResizeNearestNeighbour,
-                              HalfPixelCentersResizeNearestNeighbourTest<DataType::Float32>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AlignCornersResizeNearestNeighbour,
-                              AlignCornersResizeNearestNeighbourTest<DataType::Float32>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(HalfPixelCentersResizeNearestNeighbourInt8,
-                              HalfPixelCentersResizeNearestNeighbourTest<DataType::QAsymmS8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AlignCornersResizeNearestNeighbourInt8,
-                              AlignCornersResizeNearestNeighbourTest<DataType::QAsymmS8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(HalfPixelCentersResizeNearestNeighbourUint8,
-                              HalfPixelCentersResizeNearestNeighbourTest<DataType::QAsymmU8>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AlignCornersResizeNearestNeighbourUint8,
-                              AlignCornersResizeNearestNeighbourTest<DataType::QAsymmU8>,
-                              DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(HalfPixelCentersResizeNearestNeighbour,
+                                 ClContextControlFixture,
+                                 HalfPixelCentersResizeNearestNeighbourTest<DataType::Float32>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AlignCornersResizeNearestNeighbour,
+                                 ClContextControlFixture,
+                                 AlignCornersResizeNearestNeighbourTest<DataType::Float32>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(HalfPixelCentersResizeNearestNeighbourInt8,
+                                 ClContextControlFixture,
+                                 HalfPixelCentersResizeNearestNeighbourTest<DataType::QAsymmS8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AlignCornersResizeNearestNeighbourInt8,
+                                 ClContextControlFixture,
+                                 AlignCornersResizeNearestNeighbourTest<DataType::QAsymmS8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(HalfPixelCentersResizeNearestNeighbourUint8,
+                                 ClContextControlFixture,
+                                 HalfPixelCentersResizeNearestNeighbourTest<DataType::QAsymmU8>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AlignCornersResizeNearestNeighbourUint8,
+                                 ClContextControlFixture,
+                                 AlignCornersResizeNearestNeighbourTest<DataType::QAsymmU8>,
+                                 DataLayout::NCHW)
 
 // Resize NearestNeighbor - NHWC
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborNopNhwc,
-                              ResizeNearestNeighborNopTest<DataType::Float32>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborNopInt8Nhwc,
-                              ResizeNearestNeighborNopTest<DataType::QAsymmS8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborNopUint8Nhwc,
-                              ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeNearestNeighborNhwc,
-                              SimpleResizeNearestNeighborTest<DataType::Float32>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeNearestNeighborInt8Nhwc,
-                              SimpleResizeNearestNeighborTest<DataType::QAsymmS8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleResizeNearestNeighborUint8Nhwc,
-                              SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborSqMinNhwc,
-                              ResizeNearestNeighborSqMinTest<DataType::Float32>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborSqMinInt8Nhwc,
-                              ResizeNearestNeighborSqMinTest<DataType::QAsymmS8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborSqMinUint8Nhwc,
-                              ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborMinNhwc,
-                              ResizeNearestNeighborMinTest<DataType::Float32>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborMinInt8Nhwc,
-                              ResizeNearestNeighborMinTest<DataType::QAsymmS8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborMinUint8Nhwc,
-                              ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborMagNhwc,
-                              ResizeNearestNeighborMagTest<DataType::Float32>,
-                              DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborMagInt8Nhwc,
-                              ResizeNearestNeighborMagTest<DataType::QAsymmS8>,
-                              DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ResizeNearestNeighborMagUint8Nhwc,
-                              ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
-                              DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborNopNhwc,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborNopTest<DataType::Float32>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborNopInt8Nhwc,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborNopTest<DataType::QAsymmS8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborNopUint8Nhwc,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborNopTest<DataType::QAsymmU8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleResizeNearestNeighborNhwc,
+                                 ClContextControlFixture,
+                                 SimpleResizeNearestNeighborTest<DataType::Float32>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleResizeNearestNeighborInt8Nhwc,
+                                 ClContextControlFixture,
+                                 SimpleResizeNearestNeighborTest<DataType::QAsymmS8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleResizeNearestNeighborUint8Nhwc,
+                                 ClContextControlFixture,
+                                 SimpleResizeNearestNeighborTest<DataType::QAsymmU8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborSqMinNhwc,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborSqMinTest<DataType::Float32>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborSqMinInt8Nhwc,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborSqMinTest<DataType::QAsymmS8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborSqMinUint8Nhwc,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborSqMinTest<DataType::QAsymmU8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborMinNhwc,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborMinTest<DataType::Float32>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborMinInt8Nhwc,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborMinTest<DataType::QAsymmS8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborMinUint8Nhwc,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborMinTest<DataType::QAsymmU8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborMagNhwc,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborMagTest<DataType::Float32>,
+                                 DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborMagInt8Nhwc,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborMagTest<DataType::QAsymmS8>,
+                                 DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ResizeNearestNeighborMagUint8Nhwc,
+                                 ClContextControlFixture,
+                                 ResizeNearestNeighborMagTest<DataType::QAsymmU8>,
+                                 DataLayout::NHWC, 0.1f, 50, 0.1f, 50)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(HalfPixelCentersResizeNearestNeighbourNhwc,
-                              HalfPixelCentersResizeNearestNeighbourTest<DataType::Float32>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AlignCornersResizeNearestNeighbourNhwc,
-                              AlignCornersResizeNearestNeighbourTest<DataType::Float32>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(HalfPixelCentersResizeNearestNeighbourInt8Nhwc,
-                              HalfPixelCentersResizeNearestNeighbourTest<DataType::QAsymmS8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AlignCornersResizeNearestNeighbourInt8Nhwc,
-                              AlignCornersResizeNearestNeighbourTest<DataType::QAsymmS8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(HalfPixelCentersResizeNearestNeighbourUint8Nhwc,
-                              HalfPixelCentersResizeNearestNeighbourTest<DataType::QAsymmU8>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(AlignCornersResizeNearestNeighbourUint8Nhwc,
-                              AlignCornersResizeNearestNeighbourTest<DataType::QAsymmU8>,
-                              DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(HalfPixelCentersResizeNearestNeighbourNhwc,
+                                 ClContextControlFixture,
+                                 HalfPixelCentersResizeNearestNeighbourTest<DataType::Float32>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AlignCornersResizeNearestNeighbourNhwc,
+                                 ClContextControlFixture,
+                                 AlignCornersResizeNearestNeighbourTest<DataType::Float32>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(HalfPixelCentersResizeNearestNeighbourInt8Nhwc,
+                                 ClContextControlFixture,
+                                 HalfPixelCentersResizeNearestNeighbourTest<DataType::QAsymmS8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AlignCornersResizeNearestNeighbourInt8Nhwc,
+                                 ClContextControlFixture,
+                                 AlignCornersResizeNearestNeighbourTest<DataType::QAsymmS8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(HalfPixelCentersResizeNearestNeighbourUint8Nhwc,
+                                 ClContextControlFixture,
+                                 HalfPixelCentersResizeNearestNeighbourTest<DataType::QAsymmU8>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AlignCornersResizeNearestNeighbourUint8Nhwc,
+                                 ClContextControlFixture,
+                                 AlignCornersResizeNearestNeighbourTest<DataType::QAsymmU8>,
+                                 DataLayout::NHWC)
 
 // Rsqrt
-ARMNN_AUTO_TEST_CASE_WITH_THF(Rsqrt2d, Rsqrt2dTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Rsqrt3d, Rsqrt3dTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RsqrtZero, RsqrtZeroTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(RsqrtNegative, RsqrtNegativeTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Rsqrt2d, ClContextControlFixture, Rsqrt2dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Rsqrt3d, ClContextControlFixture, Rsqrt3dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RsqrtZero, ClContextControlFixture, RsqrtZeroTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(RsqrtNegative, ClContextControlFixture, RsqrtNegativeTest<DataType::Float32>)
 
 // Quantize
-ARMNN_AUTO_TEST_CASE_WITH_THF(QuantizeSimpleUint8, QuantizeSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(QuantizeClampUint8, QuantizeClampUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(QuantizeSimpleUint8, ClContextControlFixture, QuantizeSimpleUint8Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(QuantizeClampUint8, ClContextControlFixture, QuantizeClampUint8Test)
 
 // Dequantize
-ARMNN_AUTO_TEST_CASE(DequantizeSimpleUint8, DequantizeSimpleUint8Test)
-ARMNN_AUTO_TEST_CASE(DequantizeOffsetUint8, DequantizeOffsetUint8Test)
-ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt16, DequantizeSimpleInt16Test)
-ARMNN_AUTO_TEST_CASE(DequantizeSimpleUint8ToFp16, DequantizeSimpleUint8ToFp16Test)
-ARMNN_AUTO_TEST_CASE(DequantizeSimpleInt16ToFp16, DequantizeSimpleInt16ToFp16Test)
+ARMNN_AUTO_TEST_FIXTURE(DequantizeSimpleUint8, ClContextControlFixture, DequantizeSimpleUint8Test)
+ARMNN_AUTO_TEST_FIXTURE(DequantizeOffsetUint8, ClContextControlFixture, DequantizeOffsetUint8Test)
+ARMNN_AUTO_TEST_FIXTURE(DequantizeSimpleInt16, ClContextControlFixture, DequantizeSimpleInt16Test)
+ARMNN_AUTO_TEST_FIXTURE(DequantizeSimpleUint8ToFp16, ClContextControlFixture, DequantizeSimpleUint8ToFp16Test)
+ARMNN_AUTO_TEST_FIXTURE(DequantizeSimpleInt16ToFp16, ClContextControlFixture, DequantizeSimpleInt16ToFp16Test)
 
 // Transpose
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleTransposeFloat32, SimpleTransposeTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(TransposeFloat32ValueSet1Test, TransposeValueSet1Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(TransposeFloat32ValueSet2Test, TransposeValueSet2Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(TransposeFloat32ValueSet3Test, TransposeValueSet3Test<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleTransposeQASymmS8, SimpleTransposeTest<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(TransposeQASymmS8ValueSet1Test, TransposeValueSet1Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(TransposeQASymmS8ValueSet2Test, TransposeValueSet2Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(TransposeQASymmS8ValueSet3Test, TransposeValueSet3Test<DataType::QAsymmS8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleTransposeQASymm8, SimpleTransposeTest<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(TransposeQASymm8ValueSet1Test, TransposeValueSet1Test<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(TransposeQASymm8ValueSet2Test, TransposeValueSet2Test<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(TransposeQASymm8ValueSet3Test, TransposeValueSet3Test<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleTransposeQSymm16, SimpleTransposeTest<DataType::QSymmS16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(TransposeQSymm16ValueSet1Test, TransposeValueSet1Test<DataType::QSymmS16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(TransposeQSymm16ValueSet2Test, TransposeValueSet2Test<DataType::QSymmS16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(TransposeQSymm16ValueSet3Test, TransposeValueSet3Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SimpleTransposeFloat32, ClContextControlFixture, SimpleTransposeTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    TransposeFloat32ValueSet1Test, ClContextControlFixture, TransposeValueSet1Test<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    TransposeFloat32ValueSet2Test, ClContextControlFixture, TransposeValueSet2Test<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    TransposeFloat32ValueSet3Test, ClContextControlFixture, TransposeValueSet3Test<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SimpleTransposeQASymmS8, ClContextControlFixture, SimpleTransposeTest<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    TransposeQASymmS8ValueSet1Test, ClContextControlFixture, TransposeValueSet1Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    TransposeQASymmS8ValueSet2Test, ClContextControlFixture, TransposeValueSet2Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    TransposeQASymmS8ValueSet3Test, ClContextControlFixture, TransposeValueSet3Test<DataType::QAsymmS8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SimpleTransposeQASymm8, ClContextControlFixture, SimpleTransposeTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    TransposeQASymm8ValueSet1Test, ClContextControlFixture, TransposeValueSet1Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    TransposeQASymm8ValueSet2Test, ClContextControlFixture, TransposeValueSet2Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    TransposeQASymm8ValueSet3Test, ClContextControlFixture, TransposeValueSet3Test<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    SimpleTransposeQSymm16, ClContextControlFixture, SimpleTransposeTest<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    TransposeQSymm16ValueSet1Test, ClContextControlFixture, TransposeValueSet1Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    TransposeQSymm16ValueSet2Test, ClContextControlFixture, TransposeValueSet2Test<DataType::QSymmS16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    TransposeQSymm16ValueSet3Test, ClContextControlFixture, TransposeValueSet3Test<DataType::QSymmS16>)
 
 // TransposeConvolution2d
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleTransposeConvolution2dFloatNchw,
-                              SimpleTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
-                              true,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleTransposeConvolution2dFloatNhwc,
-                              SimpleTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
-                              true,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleTransposeConvolution2dUint8Nchw,
-                              SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
-                              true,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(SimpleTransposeConvolution2dUint8Nhwc,
-                              SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
-                              true,
-                              DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleTransposeConvolution2dFloatNchw,
+                                 ClContextControlFixture,
+                                 SimpleTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
+                                 true,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleTransposeConvolution2dFloatNhwc,
+                                 ClContextControlFixture,
+                                 SimpleTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
+                                 true,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleTransposeConvolution2dUint8Nchw,
+                                 ClContextControlFixture,
+                                 SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
+                                 true,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(SimpleTransposeConvolution2dUint8Nhwc,
+                                 ClContextControlFixture,
+                                 SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
+                                 true,
+                                 DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedSimpleTransposeConvolution2dFloatNchw,
-                              SimpleTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
-                              false,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedSimpleTransposeConvolution2dFloatNhwc,
-                              SimpleTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
-                              true,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedSimpleTransposeConvolution2dUint8Nchw,
-                              SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
-                              true,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedSimpleTransposeConvolution2dUint8Nhwc,
-                              SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
-                              true,
-                              DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedSimpleTransposeConvolution2dFloatNchw,
+                                 ClContextControlFixture,
+                                 SimpleTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
+                                 false,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedSimpleTransposeConvolution2dFloatNhwc,
+                                 ClContextControlFixture,
+                                 SimpleTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
+                                 true,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedSimpleTransposeConvolution2dUint8Nchw,
+                                 ClContextControlFixture,
+                                 SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
+                                 true,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedSimpleTransposeConvolution2dUint8Nhwc,
+                                 ClContextControlFixture,
+                                 SimpleTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
+                                 true,
+                                 DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(PaddedTransposeConvolution2dFloatNchw,
-                              PaddedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
-                              true,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PaddedTransposeConvolution2dFloatNhwc,
-                              PaddedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
-                              true,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PaddedTransposeConvolution2dUint8Nchw,
-                              PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
-                              true,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(PaddedTransposeConvolution2dUint8Nhwc,
-                              PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
-                              true,
-                              DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PaddedTransposeConvolution2dFloatNchw,
+                                 ClContextControlFixture,
+                                 PaddedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
+                                 true,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PaddedTransposeConvolution2dFloatNhwc,
+                                 ClContextControlFixture,
+                                 PaddedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
+                                 true,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PaddedTransposeConvolution2dUint8Nchw,
+                                 ClContextControlFixture,
+                                 PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
+                                 true,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(PaddedTransposeConvolution2dUint8Nhwc,
+                                 ClContextControlFixture,
+                                 PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
+                                 true,
+                                 DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedPaddedTransposeConvolution2dFloatNchw,
-                              PaddedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
-                              false,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedPaddedTransposeConvolution2dFloatNhwc,
-                              PaddedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
-                              true,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedPaddedTransposeConvolution2dUint8Nchw,
-                              PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
-                              true,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedPaddedTransposeConvolution2dUint8Nhwc,
-                              PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
-                              true,
-                              DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedPaddedTransposeConvolution2dFloatNchw,
+                                 ClContextControlFixture,
+                                 PaddedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
+                                 false,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedPaddedTransposeConvolution2dFloatNhwc,
+                                 ClContextControlFixture,
+                                 PaddedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
+                                 true,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedPaddedTransposeConvolution2dUint8Nchw,
+                                 ClContextControlFixture,
+                                 PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
+                                 true,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedPaddedTransposeConvolution2dUint8Nhwc,
+                                 ClContextControlFixture,
+                                 PaddedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
+                                 true,
+                                 DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedTransposeConvolution2dFloatNchw,
-                              StridedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
-                              true,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedTransposeConvolution2dFloatNhwc,
-                              StridedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
-                              true,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedTransposeConvolution2dUint8Nchw,
-                              StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
-                              true,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(StridedTransposeConvolution2dUint8Nhwc,
-                              StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
-                              true,
-                              DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedTransposeConvolution2dFloatNchw,
+                                 ClContextControlFixture,
+                                 StridedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
+                                 true,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedTransposeConvolution2dFloatNhwc,
+                                 ClContextControlFixture,
+                                 StridedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
+                                 true,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedTransposeConvolution2dUint8Nchw,
+                                 ClContextControlFixture,
+                                 StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
+                                 true,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(StridedTransposeConvolution2dUint8Nhwc,
+                                 ClContextControlFixture,
+                                 StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
+                                 true,
+                                 DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedStridedTransposeConvolution2dFloatNchw,
-                              StridedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
-                              false,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedStridedTransposeConvolution2dFloatNhwc,
-                              StridedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
-                              true,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedStridedTransposeConvolution2dUint8Nchw,
-                              StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
-                              true,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(UnbiasedStridedTransposeConvolution2dUint8Nhwc,
-                              StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
-                              true,
-                              DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedStridedTransposeConvolution2dFloatNchw,
+                                 ClContextControlFixture,
+                                 StridedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
+                                 false,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedStridedTransposeConvolution2dFloatNhwc,
+                                 ClContextControlFixture,
+                                 StridedTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
+                                 true,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedStridedTransposeConvolution2dUint8Nchw,
+                                 ClContextControlFixture,
+                                 StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
+                                 true,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(UnbiasedStridedTransposeConvolution2dUint8Nhwc,
+                                 ClContextControlFixture,
+                                 StridedTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
+                                 true,
+                                 DataLayout::NHWC)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(MultiChannelTransposeConvolution2dFloatNchw,
-                              MultiChannelTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MultiChannelTransposeConvolution2dFloatNhwc,
-                              MultiChannelTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
-                              DataLayout::NHWC)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MultiChannelTransposeConvolution2dUint8Nchw,
-                              MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
-                              DataLayout::NCHW)
-ARMNN_AUTO_TEST_CASE_WITH_THF(MultiChannelTransposeConvolution2dUint8Nhwc,
-                              MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
-                              DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MultiChannelTransposeConvolution2dFloatNchw,
+                                 ClContextControlFixture,
+                                 MultiChannelTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MultiChannelTransposeConvolution2dFloatNhwc,
+                                 ClContextControlFixture,
+                                 MultiChannelTransposeConvolution2dTest<DataType::Float32, DataType::Float32>,
+                                 DataLayout::NHWC)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MultiChannelTransposeConvolution2dUint8Nchw,
+                                 ClContextControlFixture,
+                                 MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
+                                 DataLayout::NCHW)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(MultiChannelTransposeConvolution2dUint8Nhwc,
+                                 ClContextControlFixture,
+                                 MultiChannelTransposeConvolution2dTest<DataType::QAsymmU8, DataType::Signed32>,
+                                 DataLayout::NHWC)
 
 // Abs
-ARMNN_AUTO_TEST_CASE_WITH_THF(Abs2d, Abs2dTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Abs3d, Abs3dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Abs2d, ClContextControlFixture, Abs2dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Abs3d, ClContextControlFixture, Abs3dTest<DataType::Float32>)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(AbsZero, AbsZeroTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AbsZero, ClContextControlFixture, AbsZeroTest<DataType::Float32>)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(Abs2dFloat16, Abs2dTest<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Abs3dFloat16, Abs3dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Abs2dFloat16, ClContextControlFixture, Abs2dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Abs3dFloat16, ClContextControlFixture, Abs3dTest<DataType::Float16>)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(AbsZeroFloat16, AbsZeroTest<DataType::Float16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(AbsZeroFloat16, ClContextControlFixture, AbsZeroTest<DataType::Float16>)
 
 // ArgMinMax
-ARMNN_AUTO_TEST_CASE_WITH_THF(ArgMinFloat32, ArgMinSimpleTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ArgMaxFloat32, ArgMaxSimpleTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ArgMinChannel, ArgMinChannelTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ArgMaxChannel, ArgMaxChannelTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ArgMaxHeight, ArgMaxHeightTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ArgMinWidth, ArgMinWidthTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ArgMinFloat32, ClContextControlFixture, ArgMinSimpleTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ArgMaxFloat32, ClContextControlFixture, ArgMaxSimpleTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ArgMinChannel, ClContextControlFixture, ArgMinChannelTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ArgMaxChannel, ClContextControlFixture, ArgMaxChannelTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ArgMaxHeight, ClContextControlFixture, ArgMaxHeightTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ArgMinWidth, ClContextControlFixture, ArgMinWidthTest<DataType::Float32>)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(ArgMinQAsymm8, ArgMinSimpleTest<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ArgMaxQAsymm8, ArgMaxSimpleTest<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ArgMinChannelQAsymm8, ArgMinChannelTest<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ArgMaxChannelQAsymm8, ArgMaxChannelTest<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ArgMaxHeightQAsymm8, ArgMaxHeightTest<DataType::QAsymmU8>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ArgMinWidthQAsymm8, ArgMinWidthTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ArgMinQAsymm8, ClContextControlFixture, ArgMinSimpleTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ArgMaxQAsymm8, ClContextControlFixture, ArgMaxSimpleTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ArgMinChannelQAsymm8, ClContextControlFixture, ArgMinChannelTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ArgMaxChannelQAsymm8, ClContextControlFixture, ArgMaxChannelTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ArgMaxHeightQAsymm8, ClContextControlFixture, ArgMaxHeightTest<DataType::QAsymmU8>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ArgMinWidthQAsymm8, ClContextControlFixture, ArgMinWidthTest<DataType::QAsymmU8>)
 
 // Neg
-ARMNN_AUTO_TEST_CASE_WITH_THF(Neg2d, Neg2dTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Neg3d, Neg3dTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(NegZero, NegZeroTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(NegNegative, NegNegativeTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Neg2dFloat16, Neg2dTest<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Neg3dFloat16, Neg3dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Neg2d, ClContextControlFixture, Neg2dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Neg3d, ClContextControlFixture, Neg3dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(NegZero, ClContextControlFixture, NegZeroTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(NegNegative, ClContextControlFixture, NegNegativeTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Neg2dFloat16, ClContextControlFixture, Neg2dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Neg3dFloat16, ClContextControlFixture, Neg3dTest<DataType::Float16>)
 
 // Exp
-ARMNN_AUTO_TEST_CASE_WITH_THF(Exp2d, Exp2dTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Exo3d, Exp3dTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ExpZero, ExpZeroTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ExpNegative, ExpNegativeTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Exp2dFloat16, Exp2dTest<DataType::Float16>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(Exp3dFloat16, Exp3dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Exp2d, ClContextControlFixture, Exp2dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Exp3d, ClContextControlFixture, Exp3dTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ExpZero, ClContextControlFixture, ExpZeroTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ExpNegative, ClContextControlFixture, ExpNegativeTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Exp2dFloat16, ClContextControlFixture, Exp2dTest<DataType::Float16>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(Exp3dFloat16, ClContextControlFixture, Exp3dTest<DataType::Float16>)
 
 // Logical
-ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalNot, LogicalNotTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalNotInt, LogicalNotIntTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogicalNot, ClContextControlFixture, LogicalNotTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogicalNotInt, ClContextControlFixture, LogicalNotIntTest)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAnd, LogicalAndTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAndInt, LogicalAndIntTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAndBroadcast1, LogicalAndBroadcast1Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAndBroadcast2, LogicalAndBroadcast2Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalAndBroadcast3, LogicalAndBroadcast3Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogicalAnd, ClContextControlFixture, LogicalAndTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogicalAndInt, ClContextControlFixture, LogicalAndIntTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogicalAndBroadcast1, ClContextControlFixture, LogicalAndBroadcast1Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogicalAndBroadcast2, ClContextControlFixture, LogicalAndBroadcast2Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogicalAndBroadcast3, ClContextControlFixture, LogicalAndBroadcast3Test)
 
-ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOr, LogicalOrTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOrInt, LogicalOrIntTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOrBroadcast1, LogicalOrBroadcast1Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOrBroadcast2, LogicalOrBroadcast2Test)
-ARMNN_AUTO_TEST_CASE_WITH_THF(LogicalOrBroadcast3, LogicalOrBroadcast3Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogicalOr, ClContextControlFixture, LogicalOrTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogicalOrInt, ClContextControlFixture, LogicalOrIntTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogicalOrBroadcast1, ClContextControlFixture, LogicalOrBroadcast1Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogicalOrBroadcast2, ClContextControlFixture, LogicalOrBroadcast2Test)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(LogicalOrBroadcast3, ClContextControlFixture, LogicalOrBroadcast3Test)
 
 // ReduceSum
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceSumFloat32, ReduceSumSimpleTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceSumSingleAxisFloat32_1, ReduceSumSingleAxisTest1<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceSumSingleAxisFloat32_2, ReduceSumSingleAxisTest2<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceSumSingleAxisFloat32_3, ReduceSumSingleAxisTest3<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ReduceSumFloat32, ClContextControlFixture, ReduceSumSimpleTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    ReduceSumSingleAxisFloat32_1, ClContextControlFixture, ReduceSumSingleAxisTest1<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    ReduceSumSingleAxisFloat32_2, ClContextControlFixture, ReduceSumSingleAxisTest2<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    ReduceSumSingleAxisFloat32_3, ClContextControlFixture, ReduceSumSingleAxisTest3<DataType::Float32>)
 
 // ReduceMax
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceMaxFloat32, ReduceMaxSimpleTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceMaxNegativeAxisFloat32, ReduceMaxNegativeAxisTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceMax2Float32, ReduceMaxSimpleTest2<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ReduceMaxFloat32, ClContextControlFixture, ReduceMaxSimpleTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    ReduceMaxNegativeAxisFloat32, ClContextControlFixture, ReduceMaxNegativeAxisTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ReduceMax2Float32, ClContextControlFixture, ReduceMaxSimpleTest2<DataType::Float32>)
 
 // ReduceMin
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceMinFloat32, ReduceMinSimpleTest<DataType::Float32>)
-ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceMinNegativeAxisFloat32, ReduceMinNegativeAxisTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(ReduceMinFloat32, ClContextControlFixture, ReduceMinSimpleTest<DataType::Float32>)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(
+    ReduceMinNegativeAxisFloat32, ClContextControlFixture, ReduceMinNegativeAxisTest<DataType::Float32>)
 
 // Cast
-ARMNN_AUTO_TEST_CASE_WITH_THF(CastInt32ToFloat, CastInt32ToFloat2dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(CastFloat16ToFloat32, CastFloat16ToFloat322dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(CastFloatToFloat16, CastFloat32ToFloat162dTest)
-ARMNN_AUTO_TEST_CASE_WITH_THF(CastFloatToUInt8, CastFloat32ToUInt82dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(CastInt32ToFloat, ClContextControlFixture, CastInt32ToFloat2dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(CastFloat16ToFloat32, ClContextControlFixture, CastFloat16ToFloat322dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(CastFloatToFloat16, ClContextControlFixture, CastFloat32ToFloat162dTest)
+ARMNN_AUTO_TEST_FIXTURE_WITH_THF(CastFloatToUInt8, ClContextControlFixture, CastFloat32ToUInt82dTest)
 
 #if defined(ARMNNREF_ENABLED)
 
+TEST_CASE_FIXTURE(ClContextControlFixture, "ClContextControlFixture") {}
+
 // The ARMNN_COMPARE_REF_AUTO_TEST_CASE and the ARMNN_COMPARE_REF_FIXTURE_TEST_CASE test units are not available
 // if the reference backend is not built
 
@@ -1386,4 +1990,4 @@
 
 #endif
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/cl/test/ClMemCopyTests.cpp b/src/backends/cl/test/ClMemCopyTests.cpp
index 1048e73..98b873f 100644
--- a/src/backends/cl/test/ClMemCopyTests.cpp
+++ b/src/backends/cl/test/ClMemCopyTests.cpp
@@ -11,44 +11,44 @@
 #include <reference/RefWorkloadFactory.hpp>
 #include <reference/test/RefWorkloadFactoryHelper.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(ClMemCopy)
-
-BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndGpu)
+TEST_SUITE("ClMemCopy")
+{
+TEST_CASE("CopyBetweenCpuAndGpu")
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::RefWorkloadFactory, armnn::ClWorkloadFactory, armnn::DataType::Float32>(false);
     auto predResult = CompareTensors(result.m_ActualData,  result.m_ExpectedData,
                                      result.m_ActualShape, result.m_ExpectedShape);
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndCpu)
+TEST_CASE("CopyBetweenGpuAndCpu")
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::ClWorkloadFactory, armnn::RefWorkloadFactory, armnn::DataType::Float32>(false);
     auto predResult = CompareTensors(result.m_ActualData,  result.m_ExpectedData,
                                      result.m_ActualShape, result.m_ExpectedShape);
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndGpuWithSubtensors)
+TEST_CASE("CopyBetweenCpuAndGpuWithSubtensors")
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::RefWorkloadFactory, armnn::ClWorkloadFactory, armnn::DataType::Float32>(true);
     auto predResult = CompareTensors(result.m_ActualData,  result.m_ExpectedData,
                                      result.m_ActualShape, result.m_ExpectedShape);
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndCpuWithSubtensors)
+TEST_CASE("CopyBetweenGpuAndCpuWithSubtensors")
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::ClWorkloadFactory, armnn::RefWorkloadFactory, armnn::DataType::Float32>(true);
     auto predResult = CompareTensors(result.m_ActualData,  result.m_ExpectedData,
                                      result.m_ActualShape, result.m_ExpectedShape);
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/cl/test/ClOptimizedNetworkTests.cpp b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
index a41c5f8..d91e9b4 100644
--- a/src/backends/cl/test/ClOptimizedNetworkTests.cpp
+++ b/src/backends/cl/test/ClOptimizedNetworkTests.cpp
@@ -14,12 +14,11 @@
 
 #include <Filesystem.hpp>
 
+#include <doctest/doctest.h>
 
-#include <boost/test/unit_test.hpp>
-
-BOOST_AUTO_TEST_SUITE(ClOptimizedNetwork)
-
-BOOST_AUTO_TEST_CASE(OptimizeValidateGpuDeviceSupportLayerNoFallback)
+TEST_SUITE("ClOptimizedNetwork")
+{
+TEST_CASE("OptimizeValidateGpuDeviceSupportLayerNoFallback")
 {
     // build up the structure of the network
     armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -35,7 +34,7 @@
 
     std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
-    BOOST_CHECK(optNet);
+    CHECK(optNet);
     // validate workloads
     armnn::ClWorkloadFactory fact =
         ClWorkloadFactoryHelper::GetFactory(ClWorkloadFactoryHelper::GetMemoryManager());
@@ -43,13 +42,13 @@
     const armnn::Graph& theGraph = GetGraphForTesting(optNet.get());
     for (auto&& layer : theGraph)
     {
-        BOOST_CHECK(layer->GetBackendId() == armnn::Compute::GpuAcc);
-        BOOST_CHECK_NO_THROW(
+        CHECK(layer->GetBackendId() == armnn::Compute::GpuAcc);
+        CHECK_NOTHROW(
             layer->CreateWorkload(fact));
     }
 }
 
-BOOST_AUTO_TEST_CASE(FP16TurboModeTestOnGpuAcc)
+TEST_CASE("FP16TurboModeTestOnGpuAcc")
 {
    // Test to check that, when Fp16 Turbo mode is set,
    // it converts the Fp32 network to an Fp16 network
@@ -96,17 +95,17 @@
     const armnn::Graph& graph = GetGraphForTesting(optimizedNet.get());
 
     // Tests that all layers are present in the graph.
-    BOOST_TEST(graph.GetNumLayers() == 5);
+    CHECK(graph.GetNumLayers() == 5);
 
     // Tests that the vertices exist and have correct names.
-    BOOST_TEST(GraphHasNamedLayer(graph, "input layer"));
-    BOOST_TEST(GraphHasNamedLayer(graph, "convert_fp32_to_fp16-0-input layer"));
-    BOOST_TEST(GraphHasNamedLayer(graph, "activation layer"));
-    BOOST_TEST(GraphHasNamedLayer(graph, "convert_fp16_to_fp32-0-output layer"));
-    BOOST_TEST(GraphHasNamedLayer(graph, "output layer"));
+    CHECK(GraphHasNamedLayer(graph, "input layer"));
+    CHECK(GraphHasNamedLayer(graph, "convert_fp32_to_fp16-0-input layer"));
+    CHECK(GraphHasNamedLayer(graph, "activation layer"));
+    CHECK(GraphHasNamedLayer(graph, "convert_fp16_to_fp32-0-output layer"));
+    CHECK(GraphHasNamedLayer(graph, "output layer"));
 }
 
-BOOST_AUTO_TEST_CASE(FastMathEnabledTestOnGpuAcc)
+TEST_CASE("FastMathEnabledTestOnGpuAcc")
 {
     armnn::INetworkPtr net(armnn::INetwork::Create());
 
@@ -127,16 +126,16 @@
     armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
     *net, backends, runtime->GetDeviceSpec(), optimizerOptions);
 
-    BOOST_CHECK(optimizedNet);
+    CHECK(optimizedNet);
 
     auto modelOptionsOut = GetModelOptionsForTesting(optimizedNet.get());
 
-    BOOST_TEST(modelOptionsOut.size() == 1);
-    BOOST_TEST(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
-    BOOST_TEST(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
+    CHECK(modelOptionsOut.size() == 1);
+    CHECK(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
+    CHECK(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
 }
 
-BOOST_AUTO_TEST_CASE(CheckMLGOTuningFile)
+TEST_CASE("CheckMLGOTuningFile")
 {
     class ClBackendContextTestClass : public armnn::ClBackendContext
     {
@@ -202,7 +201,7 @@
     catch (std::exception &e)
     {
         std::cerr << "Unable to write to file at location [" << validFile.c_str() << "] : " << e.what() << std::endl;
-        BOOST_TEST(false);
+        CHECK(false);
     }
 
     armnn::IRuntime::CreationOptions creationOptions1;
@@ -216,7 +215,7 @@
 
     creationOptions1.m_BackendOptions.emplace_back(validOptions);
     ClBackendContextTestClass clBackendContext1(creationOptions1);
-    BOOST_TEST(clBackendContext1.call_reload_from_file());
+    CHECK(clBackendContext1.call_reload_from_file());
 
     armnn::BackendOptions invalidOptions
             {
@@ -229,7 +228,7 @@
     armnn::IRuntime::CreationOptions creationOptions2;
     creationOptions2.m_BackendOptions.emplace_back(invalidOptions);
     ClBackendContextTestClass clBackendContext2(creationOptions2);
-    BOOST_TEST(clBackendContext2.call_reload_from_file() == false);
+    CHECK(clBackendContext2.call_reload_from_file() == false);
 
     armnn::BackendOptions invalidPathOptions
             {
@@ -242,7 +241,7 @@
     armnn::IRuntime::CreationOptions creationOptions3;
     creationOptions3.m_BackendOptions.emplace_back(invalidPathOptions);
     ClBackendContextTestClass clBackendContext3(creationOptions3);
-    BOOST_TEST(clBackendContext3.call_reload_from_file() == false);
+    CHECK(clBackendContext3.call_reload_from_file() == false);
 }
 
-BOOST_AUTO_TEST_SUITE_END();
+}
diff --git a/src/backends/cl/test/ClRuntimeTests.cpp b/src/backends/cl/test/ClRuntimeTests.cpp
index 33e86b6..db01fa7 100644
--- a/src/backends/cl/test/ClRuntimeTests.cpp
+++ b/src/backends/cl/test/ClRuntimeTests.cpp
@@ -11,15 +11,15 @@
 #include <test/ProfilingTestUtils.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #ifdef WITH_VALGRIND
 #include <valgrind/memcheck.h>
 #endif
 
-BOOST_AUTO_TEST_SUITE(ClRuntime)
-
-BOOST_AUTO_TEST_CASE(RuntimeValidateGpuDeviceSupportLayerNoFallback)
+TEST_SUITE("ClRuntime")
+{
+TEST_CASE("RuntimeValidateGpuDeviceSupportLayerNoFallback")
 {
     // build up the structure of the network
     armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -35,17 +35,17 @@
 
     std::vector<armnn::BackendId> backends = { armnn::Compute::GpuAcc };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
-    BOOST_CHECK(optNet);
+    CHECK(optNet);
 
     // Load it into the runtime. It should succeed.
     armnn::NetworkId netId;
-    BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == armnn::Status::Success);
+    CHECK(runtime->LoadNetwork(netId, std::move(optNet)) == armnn::Status::Success);
 }
 
 #ifdef ARMNN_LEAK_CHECKING_ENABLED
-BOOST_AUTO_TEST_CASE(RuntimeMemoryLeaksGpuAcc)
+TEST_CASE("RuntimeMemoryLeaksGpuAcc")
 {
-    BOOST_TEST(ARMNN_LEAK_CHECKER_IS_ACTIVE());
+    CHECK(ARMNN_LEAK_CHECKER_IS_ACTIVE());
     armnn::IRuntime::CreationOptions options;
     armnn::RuntimeImpl runtime(options);
     armnn::RuntimeLoadedNetworksReserve(&runtime);
@@ -59,21 +59,21 @@
 
     {
         ARMNN_SCOPED_LEAK_CHECKER("LoadAndUnloadNetworkGpuAcc");
-        BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
+        CHECK(ARMNN_NO_LEAKS_IN_SCOPE());
         // In the second run we check for all remaining memory
         // in use after the network was unloaded. If there is any
         // then it will be treated as a memory leak.
         CreateAndDropDummyNetwork(backends, runtime);
-        BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
-        BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
-        BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
+        CHECK(ARMNN_NO_LEAKS_IN_SCOPE());
+        CHECK(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
+        CHECK(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
     }
 }
 #endif
 
 // Note: this part of the code is due to be removed when we fully trust the gperftools based results.
 #if defined(WITH_VALGRIND)
-BOOST_AUTO_TEST_CASE(RuntimeMemoryUsage)
+TEST_CASE("RuntimeMemoryUsage")
 {
     // From documentation:
 
@@ -135,12 +135,12 @@
     VALGRIND_COUNT_LEAKS(leakedAfter, dubious, reachableAfter, suppressed);
 
     // If we're not running under Valgrind, these vars will have been initialised to 0, so this will always pass.
-    BOOST_TEST(leakedBefore == leakedAfter);
+    CHECK(leakedBefore == leakedAfter);
 
     // Add a reasonable threshold after and before running valgrind with the ACL clear cache function.
     // TODO Threshold set to 80k until the root cause of the memory leakage is found and fixed. Revert threshold
     // value to 1024 when fixed.
-    BOOST_TEST(static_cast<long>(reachableAfter) - static_cast<long>(reachableBefore) < 81920);
+    CHECK(static_cast<long>(reachableAfter) - static_cast<long>(reachableBefore) < 81920);
 
     // These are needed because VALGRIND_COUNT_LEAKS is a macro that assigns to the parameters
     // so they are assigned to, but still considered unused, causing a warning.
@@ -149,9 +149,9 @@
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(ProfilingPostOptimisationStructureGpuAcc)
+TEST_CASE("ProfilingPostOptimisationStructureGpuAcc")
 {
     VerifyPostOptimisationStructureTestImpl(armnn::Compute::GpuAcc);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/cl/test/Fp16SupportTest.cpp b/src/backends/cl/test/Fp16SupportTest.cpp
index 5afafcb..1974d4d 100644
--- a/src/backends/cl/test/Fp16SupportTest.cpp
+++ b/src/backends/cl/test/Fp16SupportTest.cpp
@@ -13,15 +13,15 @@
 #include <backendsCommon/TensorHandle.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <set>
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(Fp16Support)
-
-BOOST_AUTO_TEST_CASE(Fp16DataTypeSupport)
+TEST_SUITE("Fp16Support")
+{
+TEST_CASE("Fp16DataTypeSupport")
 {
     Graph graph;
 
@@ -40,12 +40,12 @@
     inputLayer2->GetOutputSlot().SetTensorInfo(fp16TensorInfo);
     additionLayer->GetOutputSlot().SetTensorInfo(fp16TensorInfo);
 
-    BOOST_CHECK(inputLayer1->GetOutputSlot(0).GetTensorInfo().GetDataType() == armnn::DataType::Float16);
-    BOOST_CHECK(inputLayer2->GetOutputSlot(0).GetTensorInfo().GetDataType() == armnn::DataType::Float16);
-    BOOST_CHECK(additionLayer->GetOutputSlot(0).GetTensorInfo().GetDataType() == armnn::DataType::Float16);
+    CHECK(inputLayer1->GetOutputSlot(0).GetTensorInfo().GetDataType() == armnn::DataType::Float16);
+    CHECK(inputLayer2->GetOutputSlot(0).GetTensorInfo().GetDataType() == armnn::DataType::Float16);
+    CHECK(additionLayer->GetOutputSlot(0).GetTensorInfo().GetDataType() == armnn::DataType::Float16);
 }
 
-BOOST_AUTO_TEST_CASE(Fp16AdditionTest)
+TEST_CASE("Fp16AdditionTest")
 {
    using namespace half_float::literal;
    // Create runtime in which test will run
@@ -104,7 +104,7 @@
    runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
 
    // Checks the results.
-   BOOST_TEST(outputData == std::vector<Half>({ 101.0_h, 202.0_h, 303.0_h, 404.0_h})); // Add
+   CHECK(outputData == std::vector<Half>({ 101.0_h, 202.0_h, 303.0_h, 404.0_h})); // Add
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/cl/test/OpenClTimerTest.cpp b/src/backends/cl/test/OpenClTimerTest.cpp
index 7c8e277..0da1db7 100644
--- a/src/backends/cl/test/OpenClTimerTest.cpp
+++ b/src/backends/cl/test/OpenClTimerTest.cpp
@@ -21,7 +21,7 @@
 
 #include <arm_compute/runtime/CL/CLScheduler.h>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <iostream>
 
@@ -38,11 +38,10 @@
     ClContextControl m_ClContextControl;
 };
 
-BOOST_FIXTURE_TEST_SUITE(OpenClTimerBatchNorm, OpenClFixture)
-using FactoryType = ClWorkloadFactory;
-
-BOOST_AUTO_TEST_CASE(OpenClTimerBatchNorm)
+TEST_CASE_FIXTURE(OpenClFixture, "OpenClTimerBatchNorm")
 {
+//using FactoryType = ClWorkloadFactory;
+
     auto memoryManager = ClWorkloadFactoryHelper::GetMemoryManager();
     ClWorkloadFactory workloadFactory = ClWorkloadFactoryHelper::GetFactory(memoryManager);
 
@@ -109,7 +108,7 @@
 
     OpenClTimer openClTimer;
 
-    BOOST_CHECK_EQUAL(openClTimer.GetName(), "OpenClKernelTimer");
+    CHECK_EQ(openClTimer.GetName(), "OpenClKernelTimer");
 
     //Start the timer
     openClTimer.Start();
@@ -120,15 +119,13 @@
     //Stop the timer
     openClTimer.Stop();
 
-    BOOST_CHECK_EQUAL(openClTimer.GetMeasurements().size(), 1);
+    CHECK_EQ(openClTimer.GetMeasurements().size(), 1);
 
-    BOOST_CHECK_EQUAL(openClTimer.GetMeasurements().front().m_Name,
+    CHECK_EQ(openClTimer.GetMeasurements().front().m_Name,
                       "OpenClKernelTimer/0: batchnormalization_layer_nchw GWS[1,3,2]");
 
-    BOOST_CHECK(openClTimer.GetMeasurements().front().m_Value > 0);
+    CHECK(openClTimer.GetMeasurements().front().m_Value > 0);
 
 }
 
-BOOST_AUTO_TEST_SUITE_END()
-
 #endif //aarch64 or x86_64
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index a8c0c8a..e3d73be 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -18,8 +18,10 @@
 #include <neon/workloads/NeonWorkloadUtils.hpp>
 #include <neon/workloads/NeonWorkloads.hpp>
 
-BOOST_AUTO_TEST_SUITE(CreateWorkloadNeon)
+#include <doctest/doctest.h>
 
+TEST_SUITE("CreateWorkloadNeon")
+{
 namespace
 {
 
@@ -77,18 +79,18 @@
     ActivationQueueDescriptor queueDescriptor = workload->GetData();
     auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({1, 1}, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 1}, DataType)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo({1, 1}, DataType)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 1}, DataType)));
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateActivationFloat16Workload)
+TEST_CASE("CreateActivationFloat16Workload")
 {
     NeonCreateActivationWorkloadTest<DataType::Float16>();
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(CreateActivationFloatWorkload)
+TEST_CASE("CreateActivationFloatWorkload")
 {
     NeonCreateActivationWorkloadTest<DataType::Float32>();
 }
@@ -109,13 +111,13 @@
     auto inputHandle1 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto inputHandle2 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
     auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({2, 3}, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle2, TensorInfo({2, 3}, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({2, 3}, DataType)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({2, 3}, DataType)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle2, TensorInfo({2, 3}, DataType)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo({2, 3}, DataType)));
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateAdditionFloat16Workload)
+TEST_CASE("CreateAdditionFloat16Workload")
 {
     NeonCreateElementwiseWorkloadTest<NeonAdditionWorkload,
                                       AdditionQueueDescriptor,
@@ -124,7 +126,7 @@
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
+TEST_CASE("CreateAdditionFloatWorkload")
 {
     NeonCreateElementwiseWorkloadTest<NeonAdditionWorkload,
                                       AdditionQueueDescriptor,
@@ -133,7 +135,7 @@
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload)
+TEST_CASE("CreateSubtractionFloat16Workload")
 {
     NeonCreateElementwiseWorkloadTest<NeonSubtractionWorkload,
                                       SubtractionQueueDescriptor,
@@ -142,7 +144,7 @@
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(CreateSubtractionFloatWorkload)
+TEST_CASE("CreateSubtractionFloatWorkload")
 {
     NeonCreateElementwiseWorkloadTest<NeonSubtractionWorkload,
                                       SubtractionQueueDescriptor,
@@ -150,7 +152,7 @@
                                       DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
+TEST_CASE("CreateSubtractionUint8Workload")
 {
     NeonCreateElementwiseWorkloadTest<NeonSubtractionWorkload,
                                       SubtractionQueueDescriptor,
@@ -159,7 +161,7 @@
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateMultiplicationFloat16Workload)
+TEST_CASE("CreateMultiplicationFloat16Workload")
 {
     NeonCreateElementwiseWorkloadTest<NeonMultiplicationWorkload,
                                       MultiplicationQueueDescriptor,
@@ -168,7 +170,7 @@
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
+TEST_CASE("CreateMultiplicationFloatWorkload")
 {
     NeonCreateElementwiseWorkloadTest<NeonMultiplicationWorkload,
                                       MultiplicationQueueDescriptor,
@@ -176,7 +178,7 @@
                                       DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
+TEST_CASE("CreateMultiplicationUint8Workload")
 {
     NeonCreateElementwiseWorkloadTest<NeonMultiplicationWorkload,
                                       MultiplicationQueueDescriptor,
@@ -184,7 +186,7 @@
                                       DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateDivisionFloatWorkloadTest)
+TEST_CASE("CreateDivisionFloatWorkloadTest")
 {
     NeonCreateElementwiseWorkloadTest<NeonDivisionWorkload,
                                       DivisionQueueDescriptor,
@@ -210,28 +212,28 @@
     TensorShape inputShape  = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 4, 4} : TensorShape{2, 4, 4, 3};
     TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{2, 3, 4, 4} : TensorShape{2, 4, 4, 3};
 
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16NchwWorkload)
+TEST_CASE("CreateBatchNormalizationFloat16NchwWorkload")
 {
     NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationWorkload, DataType::Float16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16NhwcWorkload)
+TEST_CASE("CreateBatchNormalizationFloat16NhwcWorkload")
 {
     NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationWorkload, DataType::Float16>(DataLayout::NHWC);
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatNchwWorkload)
+TEST_CASE("CreateBatchNormalizationFloatNchwWorkload")
 {
     NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationWorkload, DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloatNhwcWorkload)
+TEST_CASE("CreateBatchNormalizationFloatNhwcWorkload")
 {
     NeonCreateBatchNormalizationWorkloadTest<NeonBatchNormalizationWorkload, DataType::Float32>(DataLayout::NHWC);
 }
@@ -252,33 +254,33 @@
     Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
     auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle,  TensorInfo(outputShape, DataType)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle,  TensorInfo(outputShape, DataType)));
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NchwWorkload)
+TEST_CASE("CreateConvolution2dFloat16NchwWorkload")
 {
     NeonCreateConvolution2dWorkloadTest<DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloat16NhwcWorkload)
+TEST_CASE("CreateConvolution2dFloat16NhwcWorkload")
 {
     NeonCreateConvolution2dWorkloadTest<DataType::Float16>(DataLayout::NHWC);
 }
 
 #endif
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload)
+TEST_CASE("CreateConvolution2dFloatNchwWorkload")
 {
     NeonCreateConvolution2dWorkloadTest<DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
+TEST_CASE("CreateConvolution2dFloatNhwcWorkload")
 {
     NeonCreateConvolution2dWorkloadTest<DataType::Float32>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFastMathEnabledWorkload)
+TEST_CASE("CreateConvolution2dFastMathEnabledWorkload")
 {
     Graph graph;
     using ModelOptions = std::vector<BackendOptions>;
@@ -324,17 +326,17 @@
     TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? std::initializer_list<unsigned int>({ 2, 2, 5, 5 })
                                                                : std::initializer_list<unsigned int>({ 2, 5, 5, 2 });
 
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
 }
 
-BOOST_AUTO_TEST_CASE(CreateDepthWiseConvolution2dFloat32NhwcWorkload)
+TEST_CASE("CreateDepthWiseConvolution2dFloat32NhwcWorkload")
 {
     NeonCreateDepthWiseConvolutionWorkloadTest<DataType::Float32>(DataLayout::NHWC);
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateDepthWiseConvolution2dFloat16NhwcWorkload)
+TEST_CASE("CreateDepthWiseConvolution2dFloat16NhwcWorkload")
 {
     NeonCreateDepthWiseConvolutionWorkloadTest<DataType::Float16>(DataLayout::NHWC);
 }
@@ -357,28 +359,28 @@
     // Checks that outputs and inputs are as we expect them (see definition of CreateFullyConnectedWorkloadTest).
     float inputsQScale = DataType == armnn::DataType::QAsymmU8 ? 1.0f : 0.0;
     float outputQScale = DataType == armnn::DataType::QAsymmU8 ? 2.0f : 0.0;
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({3, 7}, DataType, outputQScale)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo({3, 1, 4, 5}, DataType, inputsQScale)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo({3, 7}, DataType, outputQScale)));
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloat16Workload)
+TEST_CASE("CreateFullyConnectedFloat16Workload")
 {
     NeonCreateFullyConnectedWorkloadTest<NeonFullyConnectedWorkload, DataType::Float16>();
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedFloatWorkload)
+TEST_CASE("CreateFullyConnectedFloatWorkload")
 {
     NeonCreateFullyConnectedWorkloadTest<NeonFullyConnectedWorkload, DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedQAsymmU8Workload)
+TEST_CASE("CreateFullyConnectedQAsymmU8Workload")
 {
     NeonCreateFullyConnectedWorkloadTest<NeonFullyConnectedWorkload, DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedQAsymmS8Workload)
+TEST_CASE("CreateFullyConnectedQAsymmS8Workload")
 {
     NeonCreateFullyConnectedWorkloadTest<NeonFullyConnectedWorkload, DataType::QAsymmS8>();
 }
@@ -400,28 +402,28 @@
     TensorShape inputShape  = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 5, 5, 1} : TensorShape{3, 1, 5, 5};
     TensorShape outputShape = (dataLayout == DataLayout::NCHW) ? TensorShape{3, 5, 5, 1} : TensorShape{3, 1, 5, 5};
 
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NchwWorkload)
+TEST_CASE("CreateNormalizationFloat16NchwWorkload")
 {
     NeonCreateNormalizationWorkloadTest<NeonNormalizationFloatWorkload, DataType::Float16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateNormalizationFloat16NhwcWorkload)
+TEST_CASE("CreateNormalizationFloat16NhwcWorkload")
 {
     NeonCreateNormalizationWorkloadTest<NeonNormalizationFloatWorkload, DataType::Float16>(DataLayout::NHWC);
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(CreateNormalizationFloatNchwWorkload)
+TEST_CASE("CreateNormalizationFloatNchwWorkload")
 {
     NeonCreateNormalizationWorkloadTest<NeonNormalizationFloatWorkload, DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateNormalizationFloatNhwcWorkload)
+TEST_CASE("CreateNormalizationFloatNhwcWorkload")
 {
     NeonCreateNormalizationWorkloadTest<NeonNormalizationFloatWorkload, DataType::Float32>(DataLayout::NHWC);
 }
@@ -443,33 +445,33 @@
     Pooling2dQueueDescriptor queueDescriptor = workload->GetData();
     auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloat16Workload)
+TEST_CASE("CreatePooling2dFloat16Workload")
 {
     NeonCreatePooling2dWorkloadTest<DataType::Float16>();
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNchwWorkload)
+TEST_CASE("CreatePooling2dFloatNchwWorkload")
 {
     NeonCreatePooling2dWorkloadTest<DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloatNhwcWorkload)
+TEST_CASE("CreatePooling2dFloatNhwcWorkload")
 {
     NeonCreatePooling2dWorkloadTest<DataType::Float32>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NchwWorkload)
+TEST_CASE("CreatePooling2dUint8NchwWorkload")
 {
     NeonCreatePooling2dWorkloadTest<DataType::QAsymmU8>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NhwcWorkload)
+TEST_CASE("CreatePooling2dUint8NhwcWorkload")
 {
     NeonCreatePooling2dWorkloadTest<DataType::QAsymmU8>(DataLayout::NHWC);
 }
@@ -495,24 +497,24 @@
     auto inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto alphaHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
     auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, dataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(alphaHandle, TensorInfo(alphaShape, dataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, dataType)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, dataType)));
+    CHECK(TestNeonTensorHandleInfo(alphaHandle, TensorInfo(alphaShape, dataType)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, dataType)));
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-    BOOST_AUTO_TEST_CASE(CreatePreluFloat16Workload)
+TEST_CASE("CreatePreluFloat16Workload")
 {
     NeonCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::Float16);
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(CreatePreluFloatWorkload)
+TEST_CASE("CreatePreluFloatWorkload")
 {
     NeonCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::Float32);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload)
+TEST_CASE("CreatePreluUint8Workload")
 {
     NeonCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, DataType::QAsymmU8);
 }
@@ -530,23 +532,23 @@
     ReshapeQueueDescriptor queueDescriptor = workload->GetData();
     auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({4, 1}, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 4}, DataType)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo({4, 1}, DataType)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo({1, 4}, DataType)));
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateReshapeFloat16Workload)
+TEST_CASE("CreateReshapeFloat16Workload")
 {
     NeonCreateReshapeWorkloadTest<DataType::Float16>();
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(CreateReshapeFloatWorkload)
+TEST_CASE("CreateReshapeFloatWorkload")
 {
     NeonCreateReshapeWorkloadTest<DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateReshapeUint8Workload)
+TEST_CASE("CreateReshapeUint8Workload")
 {
     NeonCreateReshapeWorkloadTest<DataType::QAsymmU8>();
 }
@@ -569,34 +571,34 @@
     {
         case DataLayout::NHWC:
             predResult = CompareIAclTensorHandleShape(inputHandle, { 2, 4, 4, 3 });
-            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
             predResult = CompareIAclTensorHandleShape(outputHandle, { 2, 2, 2, 3 });
-            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
             break;
         default: // DataLayout::NCHW
             predResult = CompareIAclTensorHandleShape(inputHandle, { 2, 3, 4, 4 });
-            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
             predResult = CompareIAclTensorHandleShape(outputHandle, { 2, 3, 2, 2 });
-            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
     }
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeFloat32NchwWorkload)
+TEST_CASE("CreateResizeFloat32NchwWorkload")
 {
     NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeUint8NchwWorkload)
+TEST_CASE("CreateResizeUint8NchwWorkload")
 {
     NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeFloat32NhwcWorkload)
+TEST_CASE("CreateResizeFloat32NhwcWorkload")
 {
     NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeUint8NhwcWorkload)
+TEST_CASE("CreateResizeUint8NhwcWorkload")
 {
     NeonCreateResizeWorkloadTest<NeonResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
 }
@@ -625,28 +627,28 @@
         tensorInfo.SetQuantizationOffset(-128);
         tensorInfo.SetQuantizationScale(1.f / 256);
     }
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, tensorInfo));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, tensorInfo));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, tensorInfo));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, tensorInfo));
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16Workload)
+TEST_CASE("CreateSoftmaxFloat16Workload")
 {
     NeonCreateSoftmaxWorkloadTest<NeonSoftmaxWorkload, DataType::Float16>();
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(CreateSoftmaxFloatWorkload)
+TEST_CASE("CreateSoftmaxFloatWorkload")
 {
     NeonCreateSoftmaxWorkloadTest<NeonSoftmaxWorkload, DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSoftmaxQAsymmU8Workload)
+TEST_CASE("CreateSoftmaxQAsymmU8Workload")
 {
     NeonCreateSoftmaxWorkloadTest<NeonSoftmaxWorkload, DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSoftmaxQAsymmS8Workload)
+TEST_CASE("CreateSoftmaxQAsymmS8Workload")
 {
     NeonCreateSoftmaxWorkloadTest<NeonSoftmaxWorkload, DataType::QAsymmS8>();
 }
@@ -664,31 +666,31 @@
     auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({ 1, 2, 2, 1 }, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({ 1, 1, 1, 4 }, DataType)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo({ 1, 2, 2, 1 }, DataType)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo({ 1, 1, 1, 4 }, DataType)));
 }
 
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthFloat32Workload)
+TEST_CASE("CreateSpaceToDepthFloat32Workload")
 {
     NeonSpaceToDepthWorkloadTest<NeonSpaceToDepthWorkload, armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthFloat16Workload)
+TEST_CASE("CreateSpaceToDepthFloat16Workload")
 {
     NeonSpaceToDepthWorkloadTest<NeonSpaceToDepthWorkload, armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthQAsymm8Workload)
+TEST_CASE("CreateSpaceToDepthQAsymm8Workload")
 {
     NeonSpaceToDepthWorkloadTest<NeonSpaceToDepthWorkload, armnn::DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthQSymm16Workload)
+TEST_CASE("CreateSpaceToDepthQSymm16Workload")
 {
     NeonSpaceToDepthWorkloadTest<NeonSpaceToDepthWorkload, armnn::DataType::QSymmS16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterWorkload)
+TEST_CASE("CreateSplitterWorkload")
 {
     Graph graph;
     NeonWorkloadFactory factory =
@@ -699,19 +701,19 @@
     // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
     SplitterQueueDescriptor queueDescriptor = workload->GetData();
     auto inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({5, 7, 7}, DataType::Float32)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo({5, 7, 7}, DataType::Float32)));
 
     auto outputHandle0 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle0, TensorInfo({1, 7, 7}, DataType::Float32)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle0, TensorInfo({1, 7, 7}, DataType::Float32)));
 
     auto outputHandle1 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle1, TensorInfo({2, 7, 7}, DataType::Float32)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle1, TensorInfo({2, 7, 7}, DataType::Float32)));
 
     auto outputHandle2 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[2]);
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle2, TensorInfo({2, 7, 7}, DataType::Float32)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle2, TensorInfo({2, 7, 7}, DataType::Float32)));
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterConcat)
+TEST_CASE("CreateSplitterConcat")
 {
     // Tests that it is possible to decide which output of the splitter layer
     // should be linked to which input of the concat layer.
@@ -736,17 +738,17 @@
     armnn::IAclTensorHandle* mIn0 = dynamic_cast<armnn::IAclTensorHandle*>(wlConcat->GetData().m_Inputs[0]);
     armnn::IAclTensorHandle* mIn1 = dynamic_cast<armnn::IAclTensorHandle*>(wlConcat->GetData().m_Inputs[1]);
 
-    BOOST_TEST(sOut0);
-    BOOST_TEST(sOut1);
-    BOOST_TEST(mIn0);
-    BOOST_TEST(mIn1);
+    CHECK(sOut0);
+    CHECK(sOut1);
+    CHECK(mIn0);
+    CHECK(mIn1);
 
     bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);
 
-    BOOST_TEST(validDataPointers);
+    CHECK(validDataPointers);
 }
 
-BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputs)
+TEST_CASE("CreateSingleOutputMultipleInputs")
 {
     // Tests that it is possible to assign multiple (two) different layers to each of the outputs of a splitter layer.
     // We created a splitter with two outputs. That each of those outputs is used by two different activation layers
@@ -773,24 +775,24 @@
     armnn::IAclTensorHandle* activ1_1Im = dynamic_cast<armnn::IAclTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);
 
 
-    BOOST_TEST(sOut0);
-    BOOST_TEST(sOut1);
-    BOOST_TEST(activ0_0Im);
-    BOOST_TEST(activ0_1Im);
-    BOOST_TEST(activ1_0Im);
-    BOOST_TEST(activ1_1Im);
+    CHECK(sOut0);
+    CHECK(sOut1);
+    CHECK(activ0_0Im);
+    CHECK(activ0_1Im);
+    CHECK(activ1_0Im);
+    CHECK(activ1_1Im);
 
     bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
                              (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);
 
-    BOOST_TEST(validDataPointers);
+    CHECK(validDataPointers);
 }
 
 #if defined(ARMNNREF_ENABLED)
 
 // This test unit needs the reference backend, it's not available if the reference backend is not built
 
-BOOST_AUTO_TEST_CASE(CreateMemCopyWorkloadsNeon)
+TEST_CASE("CreateMemCopyWorkloadsNeon")
 {
     NeonWorkloadFactory factory =
         NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
@@ -819,28 +821,28 @@
     TensorShape outputShape = (dataLayout == DataLayout::NCHW) ?
                 TensorShape{ 5, 20, 50, 67 } : TensorShape{ 5, 50, 67, 20 };
 
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NchwWorkload)
+TEST_CASE("CreateL2NormalizationFloat16NchwWorkload")
 {
     NeonCreateL2NormalizationWorkloadTest<NeonL2NormalizationFloatWorkload, DataType::Float16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat16NhwcWorkload)
+TEST_CASE("CreateL2NormalizationFloat16NhwcWorkload")
 {
     NeonCreateL2NormalizationWorkloadTest<NeonL2NormalizationFloatWorkload, DataType::Float16>(DataLayout::NHWC);
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationNchwWorkload)
+TEST_CASE("CreateL2NormalizationNchwWorkload")
 {
     NeonCreateL2NormalizationWorkloadTest<NeonL2NormalizationFloatWorkload, DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationNhwcWorkload)
+TEST_CASE("CreateL2NormalizationNhwcWorkload")
 {
     NeonCreateL2NormalizationWorkloadTest<NeonL2NormalizationFloatWorkload, DataType::Float32>(DataLayout::NHWC);
 }
@@ -860,18 +862,18 @@
     auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
     armnn::TensorInfo tensorInfo({4, 1}, DataType);
 
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, tensorInfo));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, tensorInfo));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, tensorInfo));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, tensorInfo));
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateLogSoftmaxFloat16Workload)
+TEST_CASE("CreateLogSoftmaxFloat16Workload")
 {
     NeonCreateLogSoftmaxWorkloadTest<NeonLogSoftmaxWorkload, DataType::Float16>();
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(CreateLogSoftmaxFloatWorkload)
+TEST_CASE("CreateLogSoftmaxFloatWorkload")
 {
     NeonCreateLogSoftmaxWorkloadTest<NeonLogSoftmaxWorkload, DataType::Float32>();
 }
@@ -890,11 +892,11 @@
     auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
 
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo({ 2, 2 }, DataType::Float32)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo({ 2, 4 }, DataType::Float32)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo({ 2, 2 }, DataType::Float32)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo({ 2, 4 }, DataType::Float32)));
 }
 
-BOOST_AUTO_TEST_CASE(CreateLSTMWorkloadFloatWorkload)
+TEST_CASE("CreateLSTMWorkloadFloatWorkload")
 {
     NeonCreateLstmWorkloadTest<NeonLstmFloatWorkload>();
 }
@@ -914,37 +916,37 @@
     auto inputHandle1 = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
     auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle0, TensorInfo({ 2, 3, 2, 5 }, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({ 2, 3, 2, 5 }, DataType)));
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle0, TensorInfo({ 2, 3, 2, 5 }, DataType)));
+    CHECK(TestNeonTensorHandleInfo(inputHandle1, TensorInfo({ 2, 3, 2, 5 }, DataType)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
+TEST_CASE("CreateConcatDim0Float32Workload")
 {
     NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
+TEST_CASE("CreateConcatDim1Float32Workload")
 {
     NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
+TEST_CASE("CreateConcatDim3Float32Workload")
 {
     NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
+TEST_CASE("CreateConcatDim0Uint8Workload")
 {
     NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QAsymmU8>({ 4, 3, 2, 5 }, 0);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
+TEST_CASE("CreateConcatDim1Uint8Workload")
 {
     NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 6, 2, 5 }, 1);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
+TEST_CASE("CreateConcatDim3Uint8Workload")
 {
     NeonCreateConcatWorkloadTest<NeonConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 }, 3);
 }
@@ -971,25 +973,25 @@
     for (unsigned int i = 0; i < numInputs; ++i)
     {
         auto inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[i]);
-        BOOST_TEST(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
+        CHECK(TestNeonTensorHandleInfo(inputHandle, TensorInfo(inputShape, DataType)));
     }
     auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
+    CHECK(TestNeonTensorHandleInfo(outputHandle, TensorInfo(outputShape, DataType)));
 }
 
-BOOST_AUTO_TEST_CASE(CreateStackFloat32Workload)
+TEST_CASE("CreateStackFloat32Workload")
 {
     NeonCreateStackWorkloadTest<armnn::DataType::Float32>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
 }
 
 #ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
-BOOST_AUTO_TEST_CASE(CreateStackFloat16Workload)
+TEST_CASE("CreateStackFloat16Workload")
 {
     NeonCreateStackWorkloadTest<armnn::DataType::Float16>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
+TEST_CASE("CreateStackUint8Workload")
 {
     NeonCreateStackWorkloadTest<armnn::DataType::QAsymmU8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
 }
@@ -1005,27 +1007,27 @@
     QuantizedLstmQueueDescriptor queueDescriptor = workload->GetData();
 
     IAclTensorHandle* inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    BOOST_TEST((inputHandle->GetShape() == TensorShape({2, 2})));
-    BOOST_TEST((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8));
+    CHECK((inputHandle->GetShape() == TensorShape({2, 2})));
+    CHECK((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8));
 
     IAclTensorHandle* cellStateInHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[1]);
-    BOOST_TEST((cellStateInHandle->GetShape() == TensorShape({2, 4})));
-    BOOST_TEST((cellStateInHandle->GetDataType() == arm_compute::DataType::QSYMM16));
+    CHECK((cellStateInHandle->GetShape() == TensorShape({2, 4})));
+    CHECK((cellStateInHandle->GetDataType() == arm_compute::DataType::QSYMM16));
 
     IAclTensorHandle* outputStateInHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[2]);
-    BOOST_TEST((outputStateInHandle->GetShape() == TensorShape({2, 4})));
-    BOOST_TEST((outputStateInHandle->GetDataType() == arm_compute::DataType::QASYMM8));
+    CHECK((outputStateInHandle->GetShape() == TensorShape({2, 4})));
+    CHECK((outputStateInHandle->GetDataType() == arm_compute::DataType::QASYMM8));
 
     IAclTensorHandle* cellStateOutHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST((cellStateOutHandle->GetShape() == TensorShape({2, 4})));
-    BOOST_TEST((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16));
+    CHECK((cellStateOutHandle->GetShape() == TensorShape({2, 4})));
+    CHECK((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16));
 
     IAclTensorHandle* outputStateOutHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
-    BOOST_TEST((outputStateOutHandle->GetShape() == TensorShape({2, 4})));
-    BOOST_TEST((outputStateOutHandle->GetDataType() == arm_compute::DataType::QASYMM8));
+    CHECK((outputStateOutHandle->GetShape() == TensorShape({2, 4})));
+    CHECK((outputStateOutHandle->GetDataType() == arm_compute::DataType::QASYMM8));
 }
 
-BOOST_AUTO_TEST_CASE(CreateQuantizedLstmWorkload)
+TEST_CASE("CreateQuantizedLstmWorkload")
 {
     NeonCreateQuantizedLstmWorkloadTest<NeonQuantizedLstmWorkload>();
 }
@@ -1040,21 +1042,21 @@
     QLstmQueueDescriptor queueDescriptor = workload->GetData();
 
     IAclTensorHandle* inputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    BOOST_TEST((inputHandle->GetShape() == TensorShape({2, 4})));
-    BOOST_TEST((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8_SIGNED));
+    CHECK((inputHandle->GetShape() == TensorShape({2, 4})));
+    CHECK((inputHandle->GetDataType() == arm_compute::DataType::QASYMM8_SIGNED));
 
     IAclTensorHandle* cellStateOutHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[1]);
-    BOOST_TEST((cellStateOutHandle->GetShape() == TensorShape({2, 4})));
-    BOOST_TEST((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16));
+    CHECK((cellStateOutHandle->GetShape() == TensorShape({2, 4})));
+    CHECK((cellStateOutHandle->GetDataType() == arm_compute::DataType::QSYMM16));
 
     IAclTensorHandle* outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[2]);
-    BOOST_TEST((outputHandle->GetShape() == TensorShape({2, 4})));
-    BOOST_TEST((outputHandle->GetDataType() == arm_compute::DataType::QASYMM8_SIGNED));
+    CHECK((outputHandle->GetShape() == TensorShape({2, 4})));
+    CHECK((outputHandle->GetDataType() == arm_compute::DataType::QASYMM8_SIGNED));
 }
 
-BOOST_AUTO_TEST_CASE(CreateQLstmWorkloadTest)
+TEST_CASE("CreateQLstmWorkloadTest")
 {
     NeonCreateQLstmWorkloadTest<NeonQLstmWorkload>();
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
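
A minimal, self-contained sketch of the doctest pattern the converted suites above follow (the suite, case, and data here are illustrative placeholders, not part of this change): BOOST_AUTO_TEST_SUITE/BOOST_AUTO_TEST_SUITE_END become a braced TEST_SUITE block, BOOST_AUTO_TEST_CASE becomes TEST_CASE with a string name, and BOOST_TEST/BOOST_FAIL map onto CHECK/FAIL.

// Illustrative only: DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN lets this snippet build
// standalone; the ArmNN test executables provide their own entry point.
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>

#include <vector>

TEST_SUITE("ExampleSuite")            // was BOOST_AUTO_TEST_SUITE(ExampleSuite)
{
TEST_CASE("ExampleCase")              // was BOOST_AUTO_TEST_CASE(ExampleCase)
{
    const std::vector<int> data = { 1, 2, 3 };
    if (data.empty())
    {
        FAIL("test data is empty");   // was BOOST_FAIL(message)
    }
    CHECK(data.size() == 3u);         // non-fatal assertion, was BOOST_TEST(...)
}
}                                     // closing brace stands in for BOOST_AUTO_TEST_SUITE_END()
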
diff --git a/src/backends/neon/test/NeonEndToEndTests.cpp b/src/backends/neon/test/NeonEndToEndTests.cpp
index dc0a609..5190e2f 100644
--- a/src/backends/neon/test/NeonEndToEndTests.cpp
+++ b/src/backends/neon/test/NeonEndToEndTests.cpp
@@ -22,14 +22,14 @@
 #include <backendsCommon/test/SplitterEndToEndTestImpl.hpp>
 #include <backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(NeonEndToEnd)
-
-std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuAcc};
+TEST_SUITE("NeonEndToEnd")
+{
+std::vector<armnn::BackendId> neonDefaultBackends = {armnn::Compute::CpuAcc};
 
 // Abs
-BOOST_AUTO_TEST_CASE(NeonAbsEndToEndTestFloat32)
+TEST_CASE("NeonAbsEndToEndTestFloat32")
 {
     std::vector<float> expectedOutput =
     {
@@ -37,22 +37,22 @@
         3.f, 3.f, 3.f, 3.f, 4.f, 4.f, 4.f, 4.f
     };
 
-    ElementwiseUnarySimpleEndToEnd<armnn::DataType::Float32>(defaultBackends,
+    ElementwiseUnarySimpleEndToEnd<armnn::DataType::Float32>(neonDefaultBackends,
                                                              UnaryOperation::Abs,
                                                              expectedOutput);
 }
 
 // Constant
-BOOST_AUTO_TEST_CASE(ConstantUsage_Neon_Float32)
+TEST_CASE("ConstantUsage_Neon_Float32")
 {
-    BOOST_TEST(ConstantUsageFloat32Test(defaultBackends));
+    CHECK(ConstantUsageFloat32Test(neonDefaultBackends));
 }
 
 #if defined(ARMNNREF_ENABLED)
 
 // This test unit needs the reference backend; it's not available if the reference backend is not built
 
-BOOST_AUTO_TEST_CASE(FallbackToCpuRef)
+TEST_CASE("FallbackToCpuRef")
 {
     using namespace armnn;
 
@@ -83,519 +83,523 @@
 
     // Load it into the runtime. It should pass.
     NetworkId netId;
-    BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
+    CHECK(runtime->LoadNetwork(netId, std::move(optNet)) == Status::Success);
 }
 
 #endif
 
-BOOST_AUTO_TEST_CASE(NeonGreaterSimpleEndToEndTest)
+TEST_CASE("NeonGreaterSimpleEndToEndTest")
 {
     const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0,  1, 1, 1, 1,
                                                 0, 0, 0, 0,  0, 0, 0, 0 });
 
-    ComparisonSimpleEndToEnd<armnn::DataType::Float32>(defaultBackends,
+    ComparisonSimpleEndToEnd<armnn::DataType::Float32>(neonDefaultBackends,
                                                        ComparisonOperation::Greater,
                                                        expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(NeonGreaterSimpleEndToEndUint8Test)
+TEST_CASE("NeonGreaterSimpleEndToEndUint8Test")
 {
     const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0,  1, 1, 1, 1,
                                                 0, 0, 0, 0,  0, 0, 0, 0 });
 
-    ComparisonSimpleEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
+    ComparisonSimpleEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends,
                                                                ComparisonOperation::Greater,
                                                                expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(NeonGreaterBroadcastEndToEndTest)
+TEST_CASE("NeonGreaterBroadcastEndToEndTest")
 {
     const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
                                                 1, 1, 1, 1, 1, 1 });
 
-    ComparisonBroadcastEndToEnd<armnn::DataType::Float32>(defaultBackends,
+    ComparisonBroadcastEndToEnd<armnn::DataType::Float32>(neonDefaultBackends,
                                                           ComparisonOperation::Greater,
                                                           expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(NeonGreaterBroadcastEndToEndUint8Test)
+TEST_CASE("NeonGreaterBroadcastEndToEndUint8Test")
 {
     const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
                                                 1, 1, 1, 1, 1, 1 });
 
-    ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends,
+    ComparisonBroadcastEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends,
                                                                   ComparisonOperation::Greater,
                                                                   expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim0Test)
+TEST_CASE("NeonConcatEndToEndDim0Test")
 {
-    ConcatDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ConcatDim0EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim0Uint8Test)
+TEST_CASE("NeonConcatEndToEndDim0Uint8Test")
 {
-    ConcatDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ConcatDim0EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim1Test)
+TEST_CASE("NeonConcatEndToEndDim1Test")
 {
-    ConcatDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ConcatDim1EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim1Uint8Test)
+TEST_CASE("NeonConcatEndToEndDim1Uint8Test")
 {
-    ConcatDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ConcatDim1EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim3Test)
+TEST_CASE("NeonConcatEndToEndDim3Test")
 {
-    ConcatDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ConcatDim3EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonConcatEndToEndDim3Uint8Test)
+TEST_CASE("NeonConcatEndToEndDim3Uint8Test")
 {
-    ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
 // DepthToSpace
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwFloat32)
+TEST_CASE("DephtToSpaceEndToEndNchwFloat32")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NCHW);
+    DepthToSpaceEndToEnd<armnn::DataType::Float32>(neonDefaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwFloat16)
+TEST_CASE("DephtToSpaceEndToEndNchwFloat16")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::Float16>(defaultBackends, armnn::DataLayout::NCHW);
+    DepthToSpaceEndToEnd<armnn::DataType::Float16>(neonDefaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwUint8)
+TEST_CASE("DephtToSpaceEndToEndNchwUint8")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
+    DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwInt16)
+TEST_CASE("DephtToSpaceEndToEndNchwInt16")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
+    DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(neonDefaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat32)
+TEST_CASE("DephtToSpaceEndToEndNhwcFloat32")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NHWC);
+    DepthToSpaceEndToEnd<armnn::DataType::Float32>(neonDefaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat16)
+TEST_CASE("DephtToSpaceEndToEndNhwcFloat16")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::Float16>(defaultBackends, armnn::DataLayout::NHWC);
+    DepthToSpaceEndToEnd<armnn::DataType::Float16>(neonDefaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcUint8)
+TEST_CASE("DephtToSpaceEndToEndNhwcUint8")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
+    DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcInt16)
+TEST_CASE("DephtToSpaceEndToEndNhwcInt16")
 {
-    DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
+    DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(neonDefaultBackends, armnn::DataLayout::NHWC);
 }
 
 // Dequantize
-BOOST_AUTO_TEST_CASE(DequantizeEndToEndSimpleTest)
+TEST_CASE("DequantizeEndToEndSimpleTest")
 {
-    DequantizeEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
+    DequantizeEndToEndSimple<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(DequantizeEndToEndOffsetTest)
+TEST_CASE("DequantizeEndToEndOffsetTest")
 {
-    DequantizeEndToEndOffset<armnn::DataType::QAsymmU8>(defaultBackends);
+    DequantizeEndToEndOffset<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonEluEndToEndTestFloat32)
+TEST_CASE("NeonEluEndToEndTestFloat32")
 {
-    EluEndToEndTest<armnn::DataType::Float32>(defaultBackends);
+    EluEndToEndTest<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonEluEndToEndTestFloat16)
+TEST_CASE("NeonEluEndToEndTestFloat16")
 {
-    EluEndToEndTest<armnn::DataType::Float16>(defaultBackends);
+    EluEndToEndTest<armnn::DataType::Float16>(neonDefaultBackends);
 }
 
 // HardSwish
-BOOST_AUTO_TEST_CASE(NeonHardSwishEndToEndTestFloat32)
+TEST_CASE("NeonHardSwishEndToEndTestFloat32")
 {
-    HardSwishEndToEndTest<armnn::DataType::Float32>(defaultBackends);
+    HardSwishEndToEndTest<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonHardSwishEndToEndTestFloat16)
+TEST_CASE("NeonHardSwishEndToEndTestFloat16")
 {
-    HardSwishEndToEndTest<armnn::DataType::Float16>(defaultBackends);
+    HardSwishEndToEndTest<armnn::DataType::Float16>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonHardSwishEndToEndTestQAsymmS8)
+TEST_CASE("NeonHardSwishEndToEndTestQAsymmS8")
 {
-    HardSwishEndToEndTest<armnn::DataType::QAsymmS8>(defaultBackends);
+    HardSwishEndToEndTest<armnn::DataType::QAsymmS8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonHardSwishEndToEndTestQAsymmU8)
+TEST_CASE("NeonHardSwishEndToEndTestQAsymmU8")
 {
-    HardSwishEndToEndTest<armnn::DataType::QAsymmU8>(defaultBackends);
+    HardSwishEndToEndTest<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonPreluEndToEndFloat32Test)
+TEST_CASE("NeonPreluEndToEndFloat32Test")
 {
-    PreluEndToEndNegativeTest<armnn::DataType::Float32>(defaultBackends);
+    PreluEndToEndNegativeTest<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonPreluEndToEndTestUint8Test)
+TEST_CASE("NeonPreluEndToEndTestUint8Test")
 {
-    PreluEndToEndPositiveTest<armnn::DataType::QAsymmU8>(defaultBackends);
+    PreluEndToEndPositiveTest<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSpaceToDepthNhwcEndToEndTest1)
+TEST_CASE("NeonSpaceToDepthNhwcEndToEndTest1")
 {
-    SpaceToDepthNhwcEndToEndTest1(defaultBackends);
+    SpaceToDepthNhwcEndToEndTest1(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSpaceToDepthNchwEndToEndTest1)
+TEST_CASE("NeonSpaceToDepthNchwEndToEndTest1")
 {
-    SpaceToDepthNchwEndToEndTest1(defaultBackends);
+    SpaceToDepthNchwEndToEndTest1(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSpaceToDepthNhwcEndToEndTest2)
+TEST_CASE("NeonSpaceToDepthNhwcEndToEndTest2")
 {
-    SpaceToDepthNhwcEndToEndTest2(defaultBackends);
+    SpaceToDepthNhwcEndToEndTest2(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSpaceToDepthNchwEndToEndTest2)
+TEST_CASE("NeonSpaceToDepthNchwEndToEndTest2")
 {
-    SpaceToDepthNchwEndToEndTest2(defaultBackends);
+    SpaceToDepthNchwEndToEndTest2(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter1dEndToEndTest)
+TEST_CASE("NeonSplitter1dEndToEndTest")
 {
-    Splitter1dEndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter1dEndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter1dEndToEndUint8Test)
+TEST_CASE("NeonSplitter1dEndToEndUint8Test")
 {
-    Splitter1dEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter1dEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter2dDim0EndToEndTest)
+TEST_CASE("NeonSplitter2dDim0EndToEndTest")
 {
-    Splitter2dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter2dDim0EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter2dDim1EndToEndTest)
+TEST_CASE("NeonSplitter2dDim1EndToEndTest")
 {
-    Splitter2dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter2dDim1EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter2dDim0EndToEndUint8Test)
+TEST_CASE("NeonSplitter2dDim0EndToEndUint8Test")
 {
-    Splitter2dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter2dDim0EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter2dDim1EndToEndUint8Test)
+TEST_CASE("NeonSplitter2dDim1EndToEndUint8Test")
 {
-    Splitter2dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter2dDim1EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter3dDim0EndToEndTest)
+TEST_CASE("NeonSplitter3dDim0EndToEndTest")
 {
-    Splitter3dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter3dDim0EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter3dDim1EndToEndTest)
+TEST_CASE("NeonSplitter3dDim1EndToEndTest")
 {
-    Splitter3dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter3dDim1EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter3dDim2EndToEndTest)
+TEST_CASE("NeonSplitter3dDim2EndToEndTest")
 {
-    Splitter3dDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter3dDim2EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter3dDim0EndToEndUint8Test)
+TEST_CASE("NeonSplitter3dDim0EndToEndUint8Test")
 {
-    Splitter3dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter3dDim0EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter3dDim1EndToEndUint8Test)
+TEST_CASE("NeonSplitter3dDim1EndToEndUint8Test")
 {
-    Splitter3dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter3dDim1EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter3dDim2EndToEndUint8Test)
+TEST_CASE("NeonSplitter3dDim2EndToEndUint8Test")
 {
-    Splitter3dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter3dDim2EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter4dDim0EndToEndTest)
+TEST_CASE("NeonSplitter4dDim0EndToEndTest")
 {
-    Splitter4dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter4dDim0EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter4dDim1EndToEndTest)
+TEST_CASE("NeonSplitter4dDim1EndToEndTest")
 {
-    Splitter4dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter4dDim1EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter4dDim2EndToEndTest)
+TEST_CASE("NeonSplitter4dDim2EndToEndTest")
 {
-    Splitter4dDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter4dDim2EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter4dDim3EndToEndTest)
+TEST_CASE("NeonSplitter4dDim3EndToEndTest")
 {
-    Splitter4dDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    Splitter4dDim3EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter4dDim0EndToEndUint8Test)
+TEST_CASE("NeonSplitter4dDim0EndToEndUint8Test")
 {
-    Splitter4dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter4dDim0EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter4dDim1EndToEndUint8Test)
+TEST_CASE("NeonSplitter4dDim1EndToEndUint8Test")
 {
-    Splitter4dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter4dDim1EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter4dDim2EndToEndUint8Test)
+TEST_CASE("NeonSplitter4dDim2EndToEndUint8Test")
 {
-    Splitter4dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter4dDim2EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonSplitter4dDim3EndToEndUint8Test)
+TEST_CASE("NeonSplitter4dDim3EndToEndUint8Test")
 {
-    Splitter4dDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    Splitter4dDim3EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonQuantizedLstmEndToEndTest)
+TEST_CASE("NeonQuantizedLstmEndToEndTest")
 {
-    QuantizedLstmEndToEnd(defaultBackends);
+    QuantizedLstmEndToEnd(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonTransposeConvolution2dEndToEndFloatNchwTest)
+TEST_CASE("NeonTransposeConvolution2dEndToEndFloatNchwTest")
 {
     TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
-        defaultBackends, armnn::DataLayout::NCHW);
+        neonDefaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(NeonTransposeConvolution2dEndToEndUint8NchwTest)
+TEST_CASE("NeonTransposeConvolution2dEndToEndUint8NchwTest")
 {
     TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
-        defaultBackends, armnn::DataLayout::NCHW);
+        neonDefaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(NeonTransposeConvolution2dEndToEndFloatNhwcTest)
+TEST_CASE("NeonTransposeConvolution2dEndToEndFloatNhwcTest")
 {
     TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
-        defaultBackends, armnn::DataLayout::NHWC);
+        neonDefaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(NeonTransposeConvolution2dEndToEndUint8NhwcTest)
+TEST_CASE("NeonTransposeConvolution2dEndToEndUint8NhwcTest")
 {
     TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
-        defaultBackends, armnn::DataLayout::NHWC);
+        neonDefaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(NeonImportNonAlignedInputPointerTest)
+TEST_CASE("NeonImportNonAlignedInputPointerTest")
 {
-    ImportNonAlignedInputPointerTest(defaultBackends);
+    ImportNonAlignedInputPointerTest(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonExportNonAlignedOutputPointerTest)
+TEST_CASE("NeonExportNonAlignedOutputPointerTest")
 {
-    ExportNonAlignedOutputPointerTest(defaultBackends);
+    ExportNonAlignedOutputPointerTest(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonImportAlignedPointerTest)
+TEST_CASE("NeonImportAlignedPointerTest")
 {
-    ImportAlignedPointerTest(defaultBackends);
+    ImportAlignedPointerTest(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonImportOnlyWorkload)
+TEST_CASE("NeonImportOnlyWorkload")
 {
-    ImportOnlyWorkload(defaultBackends);
+    ImportOnlyWorkload(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonExportOnlyWorkload)
+TEST_CASE("NeonExportOnlyWorkload")
 {
-    ExportOnlyWorkload(defaultBackends);
+    ExportOnlyWorkload(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonImportAndExportWorkload)
+TEST_CASE("NeonImportAndExportWorkload")
 {
-    ImportAndExportWorkload(defaultBackends);
+    ImportAndExportWorkload(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonExportOutputWithSeveralOutputSlotConnectionsTest)
+TEST_CASE("NeonExportOutputWithSeveralOutputSlotConnectionsTest")
 {
-    ExportOutputWithSeveralOutputSlotConnectionsTest(defaultBackends);
+    ExportOutputWithSeveralOutputSlotConnectionsTest(neonDefaultBackends);
 }
 
 // InstanceNormalization
-BOOST_AUTO_TEST_CASE(NeonInstanceNormalizationNchwEndToEndTest1)
+TEST_CASE("NeonInstanceNormalizationNchwEndToEndTest1")
 {
-    InstanceNormalizationNchwEndToEndTest1(defaultBackends);
+    InstanceNormalizationNchwEndToEndTest1(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonInstanceNormalizationNchwEndToEndTest2)
+TEST_CASE("NeonInstanceNormalizationNchwEndToEndTest2")
 {
-    InstanceNormalizationNchwEndToEndTest2(defaultBackends);
+    InstanceNormalizationNchwEndToEndTest2(neonDefaultBackends);
 }
 
 // Fill
-BOOST_AUTO_TEST_CASE(NeonFillEndToEndTest)
+TEST_CASE("NeonFillEndToEndTest")
 {
-    FillEndToEnd<armnn::DataType::Float32>(defaultBackends);
+    FillEndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefFillEndToEndTestFloat16)
+TEST_CASE("RefFillEndToEndTestFloat16")
 {
-    FillEndToEnd<armnn::DataType::Float16>(defaultBackends);
+    FillEndToEnd<armnn::DataType::Float16>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonFillEndToEndTestInt32)
+TEST_CASE("NeonFillEndToEndTestInt32")
 {
-    FillEndToEnd<armnn::DataType::Signed32>(defaultBackends);
+    FillEndToEnd<armnn::DataType::Signed32>(neonDefaultBackends);
 }
 
 // ArgMinMax
-BOOST_AUTO_TEST_CASE(NeonArgMaxSimpleTest)
+TEST_CASE("NeonArgMaxSimpleTest")
 {
-    ArgMaxEndToEndSimple<armnn::DataType::Float32>(defaultBackends);
+    ArgMaxEndToEndSimple<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMinSimpleTest)
+TEST_CASE("NeonArgMinSimpleTest")
 {
-    ArgMinEndToEndSimple<armnn::DataType::Float32>(defaultBackends);
+    ArgMinEndToEndSimple<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMaxAxis0Test)
+TEST_CASE("NeonArgMaxAxis0Test")
 {
-    ArgMaxAxis0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMaxAxis0EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMinAxis0Test)
+TEST_CASE("NeonArgMinAxis0Test")
 {
-    ArgMinAxis0EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMinAxis0EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMaxAxis1Test)
+TEST_CASE("NeonArgMaxAxis1Test")
 {
-    ArgMaxAxis1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMaxAxis1EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMinAxis1Test)
+TEST_CASE("NeonArgMinAxis1Test")
 {
-    ArgMinAxis1EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMinAxis1EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMaxAxis2Test)
+TEST_CASE("NeonArgMaxAxis2Test")
 {
-    ArgMaxAxis2EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMaxAxis2EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMinAxis2Test)
+TEST_CASE("NeonArgMinAxis2Test")
 {
-    ArgMinAxis2EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMinAxis2EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMaxAxis3Test)
+TEST_CASE("NeonArgMaxAxis3Test")
 {
-    ArgMaxAxis3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMaxAxis3EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMinAxis3Test)
+TEST_CASE("NeonArgMinAxis3Test")
 {
-    ArgMinAxis3EndToEnd<armnn::DataType::Float32>(defaultBackends);
+    ArgMinAxis3EndToEnd<armnn::DataType::Float32>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMaxSimpleTestQuantisedAsymm8)
+TEST_CASE("NeonArgMaxSimpleTestQuantisedAsymm8")
 {
-    ArgMaxEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMaxEndToEndSimple<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMinSimpleTestQuantisedAsymm8)
+TEST_CASE("NeonArgMinSimpleTestQuantisedAsymm8")
 {
-    ArgMinEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMinEndToEndSimple<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMaxAxis0TestQuantisedAsymm8)
+TEST_CASE("NeonArgMaxAxis0TestQuantisedAsymm8")
 {
-    ArgMaxAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMaxAxis0EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMinAxis0TestQuantisedAsymm8)
+TEST_CASE("NeonArgMinAxis0TestQuantisedAsymm8")
 {
-    ArgMinAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMinAxis0EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMaxAxis1TestQuantisedAsymm8)
+TEST_CASE("NeonArgMaxAxis1TestQuantisedAsymm8")
 {
-    ArgMaxAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMaxAxis1EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMinAxis1TestQuantisedAsymm8)
+TEST_CASE("NeonArgMinAxis1TestQuantisedAsymm8")
 {
-    ArgMinAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMinAxis1EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMaxAxis2TestQuantisedAsymm8)
+TEST_CASE("NeonArgMaxAxis2TestQuantisedAsymm8")
 {
-    ArgMaxAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMaxAxis2EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMinAxis2TestQuantisedAsymm8)
+TEST_CASE("NeonArgMinAxis2TestQuantisedAsymm8")
 {
-    ArgMinAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMinAxis2EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMaxAxis3TestQuantisedAsymm8)
+TEST_CASE("NeonArgMaxAxis3TestQuantisedAsymm8")
 {
-    ArgMaxAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMaxAxis3EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonArgMinAxis3TestQuantisedAsymm8)
+TEST_CASE("NeonArgMinAxis3TestQuantisedAsymm8")
 {
-    ArgMinAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
+    ArgMinAxis3EndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonStridedSliceInvalidSliceEndToEndTest)
+TEST_CASE("NeonStridedSliceInvalidSliceEndToEndTest")
 {
-    StridedSliceInvalidSliceEndToEndTest(defaultBackends);
+    StridedSliceInvalidSliceEndToEndTest(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessRegularNmsTest, * boost::unit_test::disabled())
-{
-    std::vector<float> boxEncodings({
-                                        0.0f, 0.0f, 0.0f, 0.0f,
-                                        0.0f, 1.0f, 0.0f, 0.0f,
-                                        0.0f, -1.0f, 0.0f, 0.0f,
-                                        0.0f, 0.0f, 0.0f, 0.0f,
-                                        0.0f, 1.0f, 0.0f, 0.0f,
-                                        0.0f, 0.0f, 0.0f, 0.0f
-                                    });
-    std::vector<float> scores({
-                                  0.0f, 0.9f, 0.8f,
-                                  0.0f, 0.75f, 0.72f,
-                                  0.0f, 0.6f, 0.5f,
-                                  0.0f, 0.93f, 0.95f,
-                                  0.0f, 0.5f, 0.4f,
-                                  0.0f, 0.3f, 0.2f
-                              });
-    std::vector<float> anchors({
-                                   0.5f, 0.5f, 1.0f, 1.0f,
-                                   0.5f, 0.5f, 1.0f, 1.0f,
-                                   0.5f, 0.5f, 1.0f, 1.0f,
-                                   0.5f, 10.5f, 1.0f, 1.0f,
-                                   0.5f, 10.5f, 1.0f, 1.0f,
-                                   0.5f, 100.5f, 1.0f, 1.0f
-                               });
-    DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::Float32>(defaultBackends, boxEncodings, scores, anchors);
-}
+// DISABLED
+//TEST_CASE("NeonDetectionPostProcessRegularNmsTest")
+//{
+//    std::vector<float> boxEncodings({
+//                                        0.0f, 0.0f, 0.0f, 0.0f,
+//                                        0.0f, 1.0f, 0.0f, 0.0f,
+//                                        0.0f, -1.0f, 0.0f, 0.0f,
+//                                        0.0f, 0.0f, 0.0f, 0.0f,
+//                                        0.0f, 1.0f, 0.0f, 0.0f,
+//                                        0.0f, 0.0f, 0.0f, 0.0f
+//                                    });
+//    std::vector<float> scores({
+//                                  0.0f, 0.9f, 0.8f,
+//                                  0.0f, 0.75f, 0.72f,
+//                                  0.0f, 0.6f, 0.5f,
+//                                  0.0f, 0.93f, 0.95f,
+//                                  0.0f, 0.5f, 0.4f,
+//                                  0.0f, 0.3f, 0.2f
+//                              });
+//    std::vector<float> anchors({
+//                                   0.5f, 0.5f, 1.0f, 1.0f,
+//                                   0.5f, 0.5f, 1.0f, 1.0f,
+//                                   0.5f, 0.5f, 1.0f, 1.0f,
+//                                   0.5f, 10.5f, 1.0f, 1.0f,
+//                                   0.5f, 10.5f, 1.0f, 1.0f,
+//                                   0.5f, 100.5f, 1.0f, 1.0f
+//                               });
+//    DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::Float32>(neonDefaultBackends,
+//                                                                     boxEncodings,
+//                                                                     scores,
+//                                                                     anchors);
+//}
 
 inline void QuantizeData(uint8_t* quant, const float* dequant, const TensorInfo& info)
 {
@@ -605,136 +609,141 @@
     }
 }
 
-BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessRegularNmsUint8Test, * boost::unit_test::disabled())
+// DISABLED
+//TEST_CASE("NeonDetectionPostProcessRegularNmsUint8Test")
+//{
+//    armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
+//    armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
+//    armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32);
+//
+//    boxEncodingsInfo.SetQuantizationScale(1.0f);
+//    boxEncodingsInfo.SetQuantizationOffset(1);
+//    scoresInfo.SetQuantizationScale(0.01f);
+//    scoresInfo.SetQuantizationOffset(0);
+//    anchorsInfo.SetQuantizationScale(0.5f);
+//    anchorsInfo.SetQuantizationOffset(0);
+//
+//    std::vector<float> boxEncodings({
+//                                        0.0f, 0.0f, 0.0f, 0.0f,
+//                                        0.0f, 1.0f, 0.0f, 0.0f,
+//                                        0.0f, -1.0f, 0.0f, 0.0f,
+//                                        0.0f, 0.0f, 0.0f, 0.0f,
+//                                        0.0f, 1.0f, 0.0f, 0.0f,
+//                                        0.0f, 0.0f, 0.0f, 0.0f
+//                                    });
+//    std::vector<float> scores({
+//                                  0.0f, 0.9f, 0.8f,
+//                                  0.0f, 0.75f, 0.72f,
+//                                  0.0f, 0.6f, 0.5f,
+//                                  0.0f, 0.93f, 0.95f,
+//                                  0.0f, 0.5f, 0.4f,
+//                                  0.0f, 0.3f, 0.2f
+//                              });
+//    std::vector<float> anchors({
+//                                   0.5f, 0.5f, 1.0f, 1.0f,
+//                                   0.5f, 0.5f, 1.0f, 1.0f,
+//                                   0.5f, 0.5f, 1.0f, 1.0f,
+//                                   0.5f, 10.5f, 1.0f, 1.0f,
+//                                   0.5f, 10.5f, 1.0f, 1.0f,
+//                                   0.5f, 100.5f, 1.0f, 1.0f
+//                               });
+//
+//    std::vector<uint8_t> qBoxEncodings(boxEncodings.size(), 0);
+//    std::vector<uint8_t> qScores(scores.size(), 0);
+//    std::vector<uint8_t> qAnchors(anchors.size(), 0);
+//    QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
+//    QuantizeData(qScores.data(), scores.data(), scoresInfo);
+//    QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
+//    DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends, qBoxEncodings,
+//                                                                             qScores, qAnchors,
+//                                                                             1.0f, 1, 0.01f, 0, 0.5f, 0);
+//}
+//
+// DISABLED
+//TEST_CASE("NeonDetectionPostProcessFastNmsTest")
+//{
+//    std::vector<float> boxEncodings({
+//                                        0.0f, 0.0f, 0.0f, 0.0f,
+//                                        0.0f, 1.0f, 0.0f, 0.0f,
+//                                        0.0f, -1.0f, 0.0f, 0.0f,
+//                                        0.0f, 0.0f, 0.0f, 0.0f,
+//                                        0.0f, 1.0f, 0.0f, 0.0f,
+//                                        0.0f, 0.0f, 0.0f, 0.0f
+//                                    });
+//    std::vector<float> scores({
+//                                  0.0f, 0.9f, 0.8f,
+//                                  0.0f, 0.75f, 0.72f,
+//                                  0.0f, 0.6f, 0.5f,
+//                                  0.0f, 0.93f, 0.95f,
+//                                  0.0f, 0.5f, 0.4f,
+//                                  0.0f, 0.3f, 0.2f
+//                              });
+//    std::vector<float> anchors({
+//                                   0.5f, 0.5f, 1.0f, 1.0f,
+//                                   0.5f, 0.5f, 1.0f, 1.0f,
+//                                   0.5f, 0.5f, 1.0f, 1.0f,
+//                                   0.5f, 10.5f, 1.0f, 1.0f,
+//                                   0.5f, 10.5f, 1.0f, 1.0f,
+//                                   0.5f, 100.5f, 1.0f, 1.0f
+//                               });
+//    DetectionPostProcessFastNmsEndToEnd<armnn::DataType::Float32>(neonDefaultBackends,
+//                                                                  boxEncodings,
+//                                                                  scores,
+//                                                                  anchors);
+//}
+//
+// DISABLED
+//TEST_CASE("NeonDetectionPostProcessFastNmsUint8Test")
+//{
+//    armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
+//    armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
+//    armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32);
+//
+//    boxEncodingsInfo.SetQuantizationScale(1.0f);
+//    boxEncodingsInfo.SetQuantizationOffset(1);
+//    scoresInfo.SetQuantizationScale(0.01f);
+//    scoresInfo.SetQuantizationOffset(0);
+//    anchorsInfo.SetQuantizationScale(0.5f);
+//    anchorsInfo.SetQuantizationOffset(0);
+//
+//    std::vector<float> boxEncodings({
+//                                        0.0f, 0.0f, 0.0f, 0.0f,
+//                                        0.0f, 1.0f, 0.0f, 0.0f,
+//                                        0.0f, -1.0f, 0.0f, 0.0f,
+//                                        0.0f, 0.0f, 0.0f, 0.0f,
+//                                        0.0f, 1.0f, 0.0f, 0.0f,
+//                                        0.0f, 0.0f, 0.0f, 0.0f
+//                                    });
+//    std::vector<float> scores({
+//                                  0.0f, 0.9f, 0.8f,
+//                                  0.0f, 0.75f, 0.72f,
+//                                  0.0f, 0.6f, 0.5f,
+//                                  0.0f, 0.93f, 0.95f,
+//                                  0.0f, 0.5f, 0.4f,
+//                                  0.0f, 0.3f, 0.2f
+//                              });
+//    std::vector<float> anchors({
+//                                   0.5f, 0.5f, 1.0f, 1.0f,
+//                                   0.5f, 0.5f, 1.0f, 1.0f,
+//                                   0.5f, 0.5f, 1.0f, 1.0f,
+//                                   0.5f, 10.5f, 1.0f, 1.0f,
+//                                   0.5f, 10.5f, 1.0f, 1.0f,
+//                                   0.5f, 100.5f, 1.0f, 1.0f
+//                               });
+//
+//    std::vector<uint8_t> qBoxEncodings(boxEncodings.size(), 0);
+//    std::vector<uint8_t> qScores(scores.size(), 0);
+//    std::vector<uint8_t> qAnchors(anchors.size(), 0);
+//    QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
+//    QuantizeData(qScores.data(), scores.data(), scoresInfo);
+//    QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
+//    DetectionPostProcessFastNmsEndToEnd<armnn::DataType::QAsymmU8>(neonDefaultBackends, qBoxEncodings,
+//                                                                          qScores, qAnchors,
+//                                                                          1.0f, 1, 0.01f, 0, 0.5f, 0);
+//}
+
+TEST_CASE("NeonQLstmEndToEndTest")
 {
-    armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
-    armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
-    armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32);
-
-    boxEncodingsInfo.SetQuantizationScale(1.0f);
-    boxEncodingsInfo.SetQuantizationOffset(1);
-    scoresInfo.SetQuantizationScale(0.01f);
-    scoresInfo.SetQuantizationOffset(0);
-    anchorsInfo.SetQuantizationScale(0.5f);
-    anchorsInfo.SetQuantizationOffset(0);
-
-    std::vector<float> boxEncodings({
-                                        0.0f, 0.0f, 0.0f, 0.0f,
-                                        0.0f, 1.0f, 0.0f, 0.0f,
-                                        0.0f, -1.0f, 0.0f, 0.0f,
-                                        0.0f, 0.0f, 0.0f, 0.0f,
-                                        0.0f, 1.0f, 0.0f, 0.0f,
-                                        0.0f, 0.0f, 0.0f, 0.0f
-                                    });
-    std::vector<float> scores({
-                                  0.0f, 0.9f, 0.8f,
-                                  0.0f, 0.75f, 0.72f,
-                                  0.0f, 0.6f, 0.5f,
-                                  0.0f, 0.93f, 0.95f,
-                                  0.0f, 0.5f, 0.4f,
-                                  0.0f, 0.3f, 0.2f
-                              });
-    std::vector<float> anchors({
-                                   0.5f, 0.5f, 1.0f, 1.0f,
-                                   0.5f, 0.5f, 1.0f, 1.0f,
-                                   0.5f, 0.5f, 1.0f, 1.0f,
-                                   0.5f, 10.5f, 1.0f, 1.0f,
-                                   0.5f, 10.5f, 1.0f, 1.0f,
-                                   0.5f, 100.5f, 1.0f, 1.0f
-                               });
-
-    std::vector<uint8_t> qBoxEncodings(boxEncodings.size(), 0);
-    std::vector<uint8_t> qScores(scores.size(), 0);
-    std::vector<uint8_t> qAnchors(anchors.size(), 0);
-    QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
-    QuantizeData(qScores.data(), scores.data(), scoresInfo);
-    QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
-    DetectionPostProcessRegularNmsEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, qBoxEncodings,
-                                                                             qScores, qAnchors,
-                                                                             1.0f, 1, 0.01f, 0, 0.5f, 0);
+    QLstmEndToEnd(neonDefaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessFastNmsTest, * boost::unit_test::disabled())
-{
-    std::vector<float> boxEncodings({
-                                        0.0f, 0.0f, 0.0f, 0.0f,
-                                        0.0f, 1.0f, 0.0f, 0.0f,
-                                        0.0f, -1.0f, 0.0f, 0.0f,
-                                        0.0f, 0.0f, 0.0f, 0.0f,
-                                        0.0f, 1.0f, 0.0f, 0.0f,
-                                        0.0f, 0.0f, 0.0f, 0.0f
-                                    });
-    std::vector<float> scores({
-                                  0.0f, 0.9f, 0.8f,
-                                  0.0f, 0.75f, 0.72f,
-                                  0.0f, 0.6f, 0.5f,
-                                  0.0f, 0.93f, 0.95f,
-                                  0.0f, 0.5f, 0.4f,
-                                  0.0f, 0.3f, 0.2f
-                              });
-    std::vector<float> anchors({
-                                   0.5f, 0.5f, 1.0f, 1.0f,
-                                   0.5f, 0.5f, 1.0f, 1.0f,
-                                   0.5f, 0.5f, 1.0f, 1.0f,
-                                   0.5f, 10.5f, 1.0f, 1.0f,
-                                   0.5f, 10.5f, 1.0f, 1.0f,
-                                   0.5f, 100.5f, 1.0f, 1.0f
-                               });
-    DetectionPostProcessFastNmsEndToEnd<armnn::DataType::Float32>(defaultBackends, boxEncodings, scores, anchors);
 }
-
-BOOST_AUTO_TEST_CASE(NeonDetectionPostProcessFastNmsUint8Test, * boost::unit_test::disabled())
-{
-    armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
-    armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
-    armnn::TensorInfo anchorsInfo({ 6, 4 }, armnn::DataType::Float32);
-
-    boxEncodingsInfo.SetQuantizationScale(1.0f);
-    boxEncodingsInfo.SetQuantizationOffset(1);
-    scoresInfo.SetQuantizationScale(0.01f);
-    scoresInfo.SetQuantizationOffset(0);
-    anchorsInfo.SetQuantizationScale(0.5f);
-    anchorsInfo.SetQuantizationOffset(0);
-
-    std::vector<float> boxEncodings({
-                                        0.0f, 0.0f, 0.0f, 0.0f,
-                                        0.0f, 1.0f, 0.0f, 0.0f,
-                                        0.0f, -1.0f, 0.0f, 0.0f,
-                                        0.0f, 0.0f, 0.0f, 0.0f,
-                                        0.0f, 1.0f, 0.0f, 0.0f,
-                                        0.0f, 0.0f, 0.0f, 0.0f
-                                    });
-    std::vector<float> scores({
-                                  0.0f, 0.9f, 0.8f,
-                                  0.0f, 0.75f, 0.72f,
-                                  0.0f, 0.6f, 0.5f,
-                                  0.0f, 0.93f, 0.95f,
-                                  0.0f, 0.5f, 0.4f,
-                                  0.0f, 0.3f, 0.2f
-                              });
-    std::vector<float> anchors({
-                                   0.5f, 0.5f, 1.0f, 1.0f,
-                                   0.5f, 0.5f, 1.0f, 1.0f,
-                                   0.5f, 0.5f, 1.0f, 1.0f,
-                                   0.5f, 10.5f, 1.0f, 1.0f,
-                                   0.5f, 10.5f, 1.0f, 1.0f,
-                                   0.5f, 100.5f, 1.0f, 1.0f
-                               });
-
-    std::vector<uint8_t> qBoxEncodings(boxEncodings.size(), 0);
-    std::vector<uint8_t> qScores(scores.size(), 0);
-    std::vector<uint8_t> qAnchors(anchors.size(), 0);
-    QuantizeData(qBoxEncodings.data(), boxEncodings.data(), boxEncodingsInfo);
-    QuantizeData(qScores.data(), scores.data(), scoresInfo);
-    QuantizeData(qAnchors.data(), anchors.data(), anchorsInfo);
-    DetectionPostProcessFastNmsEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, qBoxEncodings,
-                                                                          qScores, qAnchors,
-                                                                          1.0f, 1, 0.01f, 0, 0.5f, 0);
-}
-
-BOOST_AUTO_TEST_CASE(NeonQLstmEndToEndTest)
-{
-    QLstmEndToEnd(defaultBackends);
-}
-
-BOOST_AUTO_TEST_SUITE_END()
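
The disabled DetectionPostProcess cases above drop the Boost * boost::unit_test::disabled() decorator and are kept as commented-out bodies behind a "// DISABLED" marker. A hypothetical alternative, not used by this change, is doctest's skip decorator, which keeps the case registered but does not run it by default:

// Illustrative only; a real test binary supplies its own main.
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>

TEST_CASE("ExampleDisabledCase" * doctest::skip())
{
    // Registered but skipped at runtime; the commented-out DetectionPostProcess
    // bodies above could live here unchanged if this approach were adopted.
    CHECK(1 + 1 == 2);
}
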
diff --git a/src/backends/neon/test/NeonFallbackTests.cpp b/src/backends/neon/test/NeonFallbackTests.cpp
index 383a5f6..e7a56a4 100644
--- a/src/backends/neon/test/NeonFallbackTests.cpp
+++ b/src/backends/neon/test/NeonFallbackTests.cpp
@@ -8,24 +8,24 @@
 
 #include <test/GraphUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(NeonFallback)
-
-BOOST_AUTO_TEST_CASE(FallbackImportToCpuAcc)
+TEST_SUITE("NeonFallback")
+{
+TEST_CASE("FallbackImportToCpuAcc")
 {
     using namespace armnn;
 
     // Create a mock backend object
     MockImportBackendInitialiser initialiser; // Register the Mock Backend
     auto backendObjPtr = CreateBackendObject(MockImportBackendId());
-    BOOST_TEST((backendObjPtr != nullptr));
+    CHECK((backendObjPtr != nullptr));
 
     BackendIdSet backendIds = BackendRegistryInstance().GetBackendIds();
     if (backendIds.find("MockRef") == backendIds.end())
     {
         std::string message = "Cannot load MockRef";
-        BOOST_FAIL(message);
+        FAIL(message);
     }
 
     // Create runtime in which test will run and allow fallback to CpuRef.
@@ -73,12 +73,12 @@
     armnn::Layer* const layer6 = GetFirstLayerWithName(graph, "output");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layer0, layer1));
-    BOOST_TEST(CheckOrder(graph, layer1, layer2));
-    BOOST_TEST(CheckOrder(graph, layer2, layer3));
-    BOOST_TEST(CheckOrder(graph, layer3, layer4));
-    BOOST_TEST(CheckOrder(graph, layer4, layer5));
-    BOOST_TEST(CheckOrder(graph, layer5, layer6));
+    CHECK(CheckOrder(graph, layer0, layer1));
+    CHECK(CheckOrder(graph, layer1, layer2));
+    CHECK(CheckOrder(graph, layer2, layer3));
+    CHECK(CheckOrder(graph, layer3, layer4));
+    CHECK(CheckOrder(graph, layer4, layer5));
+    CHECK(CheckOrder(graph, layer5, layer6));
 
     // Load it into the runtime. It should pass.
     NetworkId netId;
@@ -131,37 +131,37 @@
 
     // Contains ImportMemGeneric
     std::size_t found = dump.find("ImportMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contains SyncMemGeneric
     found = dump.find("SyncMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Does not contain CopyMemGeneric
     found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found == std::string::npos);
+    CHECK(found == std::string::npos);
 
     // Use memory import between backends
-    BOOST_TEST((layer4->GetType() == LayerType::MemImport));
+    CHECK((layer4->GetType() == LayerType::MemImport));
 
     // Check output is as expected
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(FallbackPaddingCopyToCpuAcc)
+TEST_CASE("FallbackPaddingCopyToCpuAcc")
 {
     using namespace armnn;
 
     // Create a mock backend object
     MockImportBackendInitialiser initialiser; // Register the Mock Backend
     auto backendObjPtr = CreateBackendObject(MockImportBackendId());
-    BOOST_TEST((backendObjPtr != nullptr));
+    CHECK((backendObjPtr != nullptr));
 
     BackendIdSet backendIds = BackendRegistryInstance().GetBackendIds();
     if (backendIds.find("MockRef") == backendIds.end())
     {
         std::string message = "Cannot load MockRef";
-        BOOST_FAIL(message);
+        FAIL(message);
     }
 
     // Create runtime in which test will run and allow fallback to CpuRef.
@@ -208,11 +208,11 @@
     armnn::Layer* const layer5 = GetFirstLayerWithName(graph, "output");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layer0, layer1));
-    BOOST_TEST(CheckOrder(graph, layer1, layer2));
-    BOOST_TEST(CheckOrder(graph, layer2, layer3));
-    BOOST_TEST(CheckOrder(graph, layer3, layer4));
-    BOOST_TEST(CheckOrder(graph, layer4, layer5));
+    CHECK(CheckOrder(graph, layer0, layer1));
+    CHECK(CheckOrder(graph, layer1, layer2));
+    CHECK(CheckOrder(graph, layer2, layer3));
+    CHECK(CheckOrder(graph, layer3, layer4));
+    CHECK(CheckOrder(graph, layer4, layer5));
 
     // Load it into the runtime. It should pass.
     NetworkId netId;
@@ -261,37 +261,37 @@
 
     // Contains CopyMemGeneric between the backends
     std::size_t found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contains SyncMemGeneric for the output
     found = dump.find("SyncMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Does not contain ImportMemGeneric
     found = dump.find("ImportMemGeneric");
-    BOOST_TEST(found == std::string::npos);
+    CHECK(found == std::string::npos);
 
     // Use memory import between backends
-    BOOST_TEST((layer3->GetType() == LayerType::MemCopy));
+    CHECK((layer3->GetType() == LayerType::MemCopy));
 
     // Check output is as expected
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(FallbackImportFromCpuAcc)
+TEST_CASE("FallbackImportFromCpuAcc")
 {
     using namespace armnn;
 
     // Create a mock backend object
     MockImportBackendInitialiser initialiser; // Register the Mock Backend
     auto backendObjPtr = CreateBackendObject(MockImportBackendId());
-    BOOST_TEST((backendObjPtr != nullptr));
+    CHECK((backendObjPtr != nullptr));
 
     BackendIdSet backendIds = BackendRegistryInstance().GetBackendIds();
     if (backendIds.find("MockRef") == backendIds.end())
     {
         std::string message = "Cannot load MockRef";
-        BOOST_FAIL(message);
+        FAIL(message);
     }
 
     // Create runtime in which test will run and allow fallback to CpuRef.
@@ -339,12 +339,12 @@
     armnn::Layer* const layer6 = GetFirstLayerWithName(graph, "output");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layer0, layer1));
-    BOOST_TEST(CheckOrder(graph, layer1, layer2));
-    BOOST_TEST(CheckOrder(graph, layer2, layer3));
-    BOOST_TEST(CheckOrder(graph, layer3, layer4));
-    BOOST_TEST(CheckOrder(graph, layer4, layer5));
-    BOOST_TEST(CheckOrder(graph, layer5, layer6));
+    CHECK(CheckOrder(graph, layer0, layer1));
+    CHECK(CheckOrder(graph, layer1, layer2));
+    CHECK(CheckOrder(graph, layer2, layer3));
+    CHECK(CheckOrder(graph, layer3, layer4));
+    CHECK(CheckOrder(graph, layer4, layer5));
+    CHECK(CheckOrder(graph, layer5, layer6));
 
     // Load it into the runtime. It should pass.
     NetworkId netId;
@@ -398,37 +398,37 @@
 
     // Contains ImportMemGeneric
     std::size_t found = dump.find("ImportMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contains SyncMemGeneric
     found = dump.find("SyncMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Does not contain CopyMemGeneric
     found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found == std::string::npos);
+    CHECK(found == std::string::npos);
 
     // Use memory import between backends
-    BOOST_TEST((layer4->GetType() == LayerType::MemImport));
+    CHECK((layer4->GetType() == LayerType::MemImport));
 
     // Check output is as expected
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(FallbackPaddingCopyFromCpuAcc)
+TEST_CASE("FallbackPaddingCopyFromCpuAcc")
 {
     using namespace armnn;
 
     // Create a mock backend object
     MockImportBackendInitialiser initialiser; // Register the Mock Backend
     auto backendObjPtr = CreateBackendObject(MockImportBackendId());
-    BOOST_TEST((backendObjPtr != nullptr));
+    CHECK((backendObjPtr != nullptr));
 
     BackendIdSet backendIds = BackendRegistryInstance().GetBackendIds();
     if (backendIds.find("MockRef") == backendIds.end())
     {
         std::string message = "Cannot load MockRef";
-        BOOST_FAIL(message);
+        FAIL(message);
     }
 
     // Create runtime in which test will run and allow fallback to CpuRef.
@@ -475,11 +475,11 @@
     armnn::Layer* const layer5 = GetFirstLayerWithName(graph, "output");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layer0, layer1));
-    BOOST_TEST(CheckOrder(graph, layer1, layer2));
-    BOOST_TEST(CheckOrder(graph, layer2, layer3));
-    BOOST_TEST(CheckOrder(graph, layer3, layer4));
-    BOOST_TEST(CheckOrder(graph, layer4, layer5));
+    CHECK(CheckOrder(graph, layer0, layer1));
+    CHECK(CheckOrder(graph, layer1, layer2));
+    CHECK(CheckOrder(graph, layer2, layer3));
+    CHECK(CheckOrder(graph, layer3, layer4));
+    CHECK(CheckOrder(graph, layer4, layer5));
 
     // Load it into the runtime. It should pass.
     NetworkId netId;
@@ -528,37 +528,37 @@
 
     // Contains CopyMemGeneric between the backends
     std::size_t found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contains SyncMemGeneric for the output
     found = dump.find("SyncMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Does not contain ImportMemGeneric
     found = dump.find("ImportMemGeneric");
-    BOOST_TEST(found == std::string::npos);
+    CHECK(found == std::string::npos);
 
     // Use memory import between backends
-    BOOST_TEST((layer3->GetType() == LayerType::MemCopy));
+    CHECK((layer3->GetType() == LayerType::MemCopy));
 
     // Check output is as expected
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(FallbackDisableImportFromCpuAcc)
+TEST_CASE("FallbackDisableImportFromCpuAcc")
 {
     using namespace armnn;
 
     // Create a mock backend object
     MockImportBackendInitialiser initialiser; // Register the Mock Backend
     auto backendObjPtr = CreateBackendObject(MockImportBackendId());
-    BOOST_TEST((backendObjPtr != nullptr));
+    CHECK((backendObjPtr != nullptr));
 
     BackendIdSet backendIds = BackendRegistryInstance().GetBackendIds();
     if (backendIds.find("MockRef") == backendIds.end())
     {
         std::string message = "Cannot load MockRef";
-        BOOST_FAIL(message);
+        FAIL(message);
     }
 
     // Create runtime in which test will run and allow fallback to CpuRef.
@@ -604,12 +604,12 @@
     armnn::Layer* const layer6 = GetFirstLayerWithName(graph, "output");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layer0, layer1));
-    BOOST_TEST(CheckOrder(graph, layer1, layer2));
-    BOOST_TEST(CheckOrder(graph, layer2, layer3));
-    BOOST_TEST(CheckOrder(graph, layer3, layer4));
-    BOOST_TEST(CheckOrder(graph, layer4, layer5));
-    BOOST_TEST(CheckOrder(graph, layer5, layer6));
+    CHECK(CheckOrder(graph, layer0, layer1));
+    CHECK(CheckOrder(graph, layer1, layer2));
+    CHECK(CheckOrder(graph, layer2, layer3));
+    CHECK(CheckOrder(graph, layer3, layer4));
+    CHECK(CheckOrder(graph, layer4, layer5));
+    CHECK(CheckOrder(graph, layer5, layer6));
 
     // Load it into the runtime. It should pass.
     NetworkId netId;
@@ -663,21 +663,21 @@
 
     // Contains CopyMemGeneric between the backends
     std::size_t found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Does not contain ImportMemGeneric
     found = dump.find("ImportMemGeneric");
-    BOOST_TEST(found == std::string::npos);
+    CHECK(found == std::string::npos);
 
     // Use memory import between backends
-    BOOST_TEST((layer4->GetType() == LayerType::MemCopy));
+    CHECK((layer4->GetType() == LayerType::MemCopy));
 
     // Check output is as expected
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
 }
 
 #if defined(ARMCOMPUTECL_ENABLED)
-BOOST_AUTO_TEST_CASE(NeonImportEnabledFallbackToCl)
+TEST_CASE("NeonImportEnabledFallbackToCl")
 {
     using namespace armnn;
 
@@ -728,18 +728,18 @@
     armnn::Layer* const layer6 = GetFirstLayerWithName(graph, "output");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layer0, layer1));
-    BOOST_TEST(CheckOrder(graph, layer1, layer2));
-    BOOST_TEST(CheckOrder(graph, layer2, layer3));
-    BOOST_TEST(CheckOrder(graph, layer3, layer4));
-    BOOST_TEST(CheckOrder(graph, layer4, layer5));
-    BOOST_TEST(CheckOrder(graph, layer5, layer6));
+    CHECK(CheckOrder(graph, layer0, layer1));
+    CHECK(CheckOrder(graph, layer1, layer2));
+    CHECK(CheckOrder(graph, layer2, layer3));
+    CHECK(CheckOrder(graph, layer3, layer4));
+    CHECK(CheckOrder(graph, layer4, layer5));
+    CHECK(CheckOrder(graph, layer5, layer6));
 
     // Use memory import between backends
-    BOOST_TEST((layer4->GetType() == LayerType::MemCopy));
+    CHECK((layer4->GetType() == LayerType::MemCopy));
 
     // Correctly use backend hint
-    BOOST_TEST((layer5->GetBackendId() == Compute::GpuAcc ));
+    CHECK((layer5->GetBackendId() == Compute::GpuAcc ));
 
     // Load it into the runtime. It should pass.
     NetworkId netId;
@@ -779,7 +779,7 @@
     size_t space = totalBytes + alignment + alignment;
     auto inputData = std::make_unique<uint8_t[]>(space);
     void* alignedInputPtr = inputData.get();
-    BOOST_CHECK(std::align(alignment, totalBytes, alignedInputPtr, space));
+    CHECK(std::align(alignment, totalBytes, alignedInputPtr, space));
 
     auto* intputPtr = reinterpret_cast<float*>(alignedInputPtr);
     std::copy(inputData2.begin(), inputData2.end(), intputPtr);
@@ -808,21 +808,21 @@
 
     // Executed Subtraction using GpuAcc
     std::size_t found = dump.find("ClSubtractionWorkload_Execute");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contains CopyMemGeneric
     found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Check output is as expected
     for(unsigned int i = 0; i < numElements; ++i)
     {
-        BOOST_TEST(outputData[i] == expectedOutput[i]);
+        CHECK(outputData[i] == expectedOutput[i]);
     }
     runtime->UnloadNetwork(netId);
 }
 
-BOOST_AUTO_TEST_CASE(NeonImportDisabledFallbackToCl)
+TEST_CASE("NeonImportDisabledFallbackToCl")
 {
     using namespace armnn;
 
@@ -872,18 +872,18 @@
     armnn::Layer* const layer6 = GetFirstLayerWithName(graph, "output");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layer0, layer1));
-    BOOST_TEST(CheckOrder(graph, layer1, layer2));
-    BOOST_TEST(CheckOrder(graph, layer2, layer3));
-    BOOST_TEST(CheckOrder(graph, layer3, layer4));
-    BOOST_TEST(CheckOrder(graph, layer4, layer5));
-    BOOST_TEST(CheckOrder(graph, layer5, layer6));
+    CHECK(CheckOrder(graph, layer0, layer1));
+    CHECK(CheckOrder(graph, layer1, layer2));
+    CHECK(CheckOrder(graph, layer2, layer3));
+    CHECK(CheckOrder(graph, layer3, layer4));
+    CHECK(CheckOrder(graph, layer4, layer5));
+    CHECK(CheckOrder(graph, layer5, layer6));
 
     // Use memory import between backends
-    BOOST_TEST((layer4->GetType() == LayerType::MemCopy));
+    CHECK((layer4->GetType() == LayerType::MemCopy));
 
     // Correctly use backend hint
-    BOOST_TEST((layer5->GetBackendId() == Compute::GpuAcc ));
+    CHECK((layer5->GetBackendId() == Compute::GpuAcc ));
 
     // Load it into the runtime. It should pass.
     NetworkId netId;
@@ -934,17 +934,17 @@
 
     // Executed Subtraction using GpuAcc
     std::size_t found = dump.find("ClSubtractionWorkload_Execute");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contains CopyMemGeneric
     found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Check output is as expected
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(NeonImportEnabledFallbackSubgraphToCl)
+TEST_CASE("NeonImportEnabledFallbackSubgraphToCl")
 {
     using namespace armnn;
 
@@ -1007,21 +1007,21 @@
     armnn::Layer* const layer8 = GetFirstLayerWithName(graph, "output");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layer0, layer1));
-    BOOST_TEST(CheckOrder(graph, layer1, layer2));
-    BOOST_TEST(CheckOrder(graph, layer2, layer3));
-    BOOST_TEST(CheckOrder(graph, layer3, layer4));
-    BOOST_TEST(CheckOrder(graph, layer4, layer5));
-    BOOST_TEST(CheckOrder(graph, layer5, layer6));
-    BOOST_TEST(CheckOrder(graph, layer6, layer7));
-    BOOST_TEST(CheckOrder(graph, layer7, layer8));
+    CHECK(CheckOrder(graph, layer0, layer1));
+    CHECK(CheckOrder(graph, layer1, layer2));
+    CHECK(CheckOrder(graph, layer2, layer3));
+    CHECK(CheckOrder(graph, layer3, layer4));
+    CHECK(CheckOrder(graph, layer4, layer5));
+    CHECK(CheckOrder(graph, layer5, layer6));
+    CHECK(CheckOrder(graph, layer6, layer7));
+    CHECK(CheckOrder(graph, layer7, layer8));
 
     // Use memory import between backends
-    BOOST_TEST((layer4->GetType() == LayerType::MemCopy));
-    BOOST_TEST((layer6->GetType() == LayerType::MemCopy));
+    CHECK((layer4->GetType() == LayerType::MemCopy));
+    CHECK((layer6->GetType() == LayerType::MemCopy));
 
     // Correctly use backend hint
-    BOOST_TEST((layer5->GetBackendId() == Compute::GpuAcc ));
+    CHECK((layer5->GetBackendId() == Compute::GpuAcc ));
 
     // Load it into the runtime. It should pass.
     NetworkId netId;
@@ -1056,7 +1056,7 @@
     size_t space = totalBytes + alignment + alignment;
     auto inputData = std::make_unique<uint8_t[]>(space);
     void* alignedInputPtr = inputData.get();
-    BOOST_CHECK(std::align(alignment, totalBytes, alignedInputPtr, space));
+    CHECK(std::align(alignment, totalBytes, alignedInputPtr, space));
 
     auto* intputPtr = reinterpret_cast<float*>(alignedInputPtr);
     std::copy(inputData2.begin(), inputData2.end(), intputPtr);
@@ -1085,26 +1085,26 @@
 
     // Executed Subtraction using GpuAcc
     std::size_t found = dump.find("ClSubtractionWorkload_Execute");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Correctly switch back to CpuAcc
     found = dump.find("NeonPooling2dWorkload_Execute");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contains CopyMemGeneric
     found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contains SyncMemGeneric for output
     found = dump.find("SyncMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Check output is as expected
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
     runtime->UnloadNetwork(netId);
 }
 
-BOOST_AUTO_TEST_CASE(NeonImportDisableFallbackSubgraphToCl)
+TEST_CASE("NeonImportDisableFallbackSubgraphToCl")
 {
     using namespace armnn;
 
@@ -1162,21 +1162,21 @@
     armnn::Layer* const layer8 = GetFirstLayerWithName(graph, "output");
 
     // Checks order is valid.
-    BOOST_TEST(CheckOrder(graph, layer0, layer1));
-    BOOST_TEST(CheckOrder(graph, layer1, layer2));
-    BOOST_TEST(CheckOrder(graph, layer2, layer3));
-    BOOST_TEST(CheckOrder(graph, layer3, layer4));
-    BOOST_TEST(CheckOrder(graph, layer4, layer5));
-    BOOST_TEST(CheckOrder(graph, layer5, layer6));
-    BOOST_TEST(CheckOrder(graph, layer6, layer7));
-    BOOST_TEST(CheckOrder(graph, layer7, layer8));
+    CHECK(CheckOrder(graph, layer0, layer1));
+    CHECK(CheckOrder(graph, layer1, layer2));
+    CHECK(CheckOrder(graph, layer2, layer3));
+    CHECK(CheckOrder(graph, layer3, layer4));
+    CHECK(CheckOrder(graph, layer4, layer5));
+    CHECK(CheckOrder(graph, layer5, layer6));
+    CHECK(CheckOrder(graph, layer6, layer7));
+    CHECK(CheckOrder(graph, layer7, layer8));
 
     // Use memory import between backends
-    BOOST_TEST((layer4->GetType() == LayerType::MemCopy));
-    BOOST_TEST((layer6->GetType() == LayerType::MemCopy));
+    CHECK((layer4->GetType() == LayerType::MemCopy));
+    CHECK((layer6->GetType() == LayerType::MemCopy));
 
     // Correctly use backend hint
-    BOOST_TEST((layer5->GetBackendId() == Compute::GpuAcc ));
+    CHECK((layer5->GetBackendId() == Compute::GpuAcc ));
 
     // Load it into the runtime. It should pass.
     NetworkId netId;
@@ -1224,19 +1224,19 @@
 
     // Executed Subtraction using GpuAcc
     std::size_t found = dump.find("ClSubtractionWorkload_Execute");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Correctly switch back to CpuAcc
     found = dump.find("NeonPooling2dWorkload_Execute");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Contains CopyMemGeneric
     found = dump.find("CopyMemGeneric");
-    BOOST_TEST(found != std::string::npos);
+    CHECK(found != std::string::npos);
 
     // Check output is as expected
-    BOOST_TEST(outputData == expectedOutput);
+    CHECK(outputData == expectedOutput);
 }
 #endif
 
-BOOST_AUTO_TEST_SUITE_END()
+}
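
Throughout this patch the suite macros map onto doctest's TEST_SUITE, which opens an ordinary brace-delimited scope, so BOOST_AUTO_TEST_SUITE/BOOST_AUTO_TEST_SUITE_END() become an opening '{' after the suite header and a bare '}' at the end of the file, while BOOST_TEST/BOOST_CHECK become the non-fatal CHECK and BOOST_FAIL becomes FAIL, which aborts the current test case. A minimal standalone sketch of that shape (hypothetical backend names, not ArmNN code; here doctest is asked to supply main(), whereas the real test binaries are assumed to get theirs elsewhere):

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN   // standalone sketch only
#include <doctest/doctest.h>

#include <set>
#include <string>

TEST_SUITE("FallbackSketch")
{                                            // TEST_SUITE opens a real C++ scope
TEST_CASE("RegistryLookup")
{
    // Hypothetical registry contents, purely for illustration.
    std::set<std::string> backendIds = { "CpuAcc", "CpuRef", "MockRef" };
    if (backendIds.find("MockRef") == backendIds.end())
    {
        FAIL("Cannot load MockRef");         // aborts this test case, like BOOST_FAIL
    }
    CHECK(backendIds.count("MockRef") == 1u); // records failure and continues, like BOOST_TEST
}
}                                            // plain '}' replaces BOOST_AUTO_TEST_SUITE_END()
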
diff --git a/src/backends/neon/test/NeonJsonPrinterTests.cpp b/src/backends/neon/test/NeonJsonPrinterTests.cpp
index a8d90fd..6139c75 100644
--- a/src/backends/neon/test/NeonJsonPrinterTests.cpp
+++ b/src/backends/neon/test/NeonJsonPrinterTests.cpp
@@ -7,16 +7,16 @@
 
 #include <backendsCommon/test/JsonPrinterTestImpl.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <vector>
 
-BOOST_AUTO_TEST_SUITE(NeonJsonPrinter)
-
-BOOST_AUTO_TEST_CASE(SoftmaxProfilerJsonPrinterCpuAccTest)
+TEST_SUITE("NeonJsonPrinter")
+{
+TEST_CASE("SoftmaxProfilerJsonPrinterCpuAccTest")
 {
     std::vector<armnn::BackendId> backends = {armnn::Compute::CpuAcc};
     RunSoftmaxProfilerJsonPrinterTest(backends);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/backends/neon/test/NeonLayerSupportTests.cpp b/src/backends/neon/test/NeonLayerSupportTests.cpp
index 13a4c73..494c8f9 100644
--- a/src/backends/neon/test/NeonLayerSupportTests.cpp
+++ b/src/backends/neon/test/NeonLayerSupportTests.cpp
@@ -14,132 +14,132 @@
 #include <backendsCommon/test/IsLayerSupportedTestImpl.hpp>
 #include <backendsCommon/test/LayerTests.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <string>
 
-BOOST_AUTO_TEST_SUITE(NeonLayerSupport)
-
-BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat16Neon)
+TEST_SUITE("NeonLayerSupport")
+{
+TEST_CASE("IsLayerSupportedFloat16Neon")
 {
     armnn::NeonWorkloadFactory factory =
         NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
     IsLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::DataType::Float16>(&factory);
 }
 
-BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat32Neon)
+TEST_CASE("IsLayerSupportedFloat32Neon")
 {
     armnn::NeonWorkloadFactory factory =
         NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
     IsLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::DataType::Float32>(&factory);
 }
 
-BOOST_AUTO_TEST_CASE(IsLayerSupportedQAsymmU8Neon)
+TEST_CASE("IsLayerSupportedQAsymmU8Neon")
 {
     armnn::NeonWorkloadFactory factory =
         NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
     IsLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::DataType::QAsymmU8>(&factory);
 }
 
-BOOST_AUTO_TEST_CASE(IsLayerSupportedQAsymmS8Neon)
+TEST_CASE("IsLayerSupportedQAsymmS8Neon")
 {
     armnn::NeonWorkloadFactory factory =
         NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
     IsLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::DataType::QAsymmS8>(&factory);
 }
 
-BOOST_AUTO_TEST_CASE(IsLayerSupportedQSymmS8Neon)
+TEST_CASE("IsLayerSupportedQSymmS8Neon")
 {
     armnn::NeonWorkloadFactory factory =
         NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
     IsLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::DataType::QSymmS8>(&factory);
 }
 
-BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedNeon)
+TEST_CASE("IsConvertFp16ToFp32SupportedNeon")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::ConvertFp16ToFp32Layer,
       armnn::DataType::Float16, armnn::DataType::Float32>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedNeon)
+TEST_CASE("IsConvertFp32ToFp16SupportedNeon")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::NeonWorkloadFactory, armnn::ConvertFp32ToFp16Layer,
       armnn::DataType::Float32, armnn::DataType::Float16>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_AUTO_TEST_CASE(IsLogicalBinarySupportedNeon)
+TEST_CASE("IsLogicalBinarySupportedNeon")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsLogicalBinaryLayerSupportedTests<armnn::NeonWorkloadFactory,
       armnn::DataType::Boolean, armnn::DataType::Boolean>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_AUTO_TEST_CASE(IsLogicalBinaryBroadcastSupportedNeon)
+TEST_CASE("IsLogicalBinaryBroadcastSupportedNeon")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsLogicalBinaryLayerBroadcastSupportedTests<armnn::NeonWorkloadFactory,
       armnn::DataType::Boolean, armnn::DataType::Boolean>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_AUTO_TEST_CASE(IsMeanSupportedNeon)
+TEST_CASE("IsMeanSupportedNeon")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsMeanLayerSupportedTests<armnn::NeonWorkloadFactory,
       armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_AUTO_TEST_CASE(IsConstantSupportedNeon)
+TEST_CASE("IsConstantSupportedNeon")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
             armnn::DataType::Float16>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
             armnn::DataType::Float32>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
             armnn::DataType::QAsymmU8>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
             armnn::DataType::Boolean>(reasonIfUnsupported);
-    BOOST_CHECK(!result);
+    CHECK(!result);
 
     result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
             armnn::DataType::QSymmS16>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
             armnn::DataType::QSymmS8>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
             armnn::DataType::QAsymmS8>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::NeonWorkloadFactory,
             armnn::DataType::BFloat16>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/neon/test/NeonLayerTests.cpp b/src/backends/neon/test/NeonLayerTests.cpp
index d12817e..edc8cb9 100644
--- a/src/backends/neon/test/NeonLayerTests.cpp
+++ b/src/backends/neon/test/NeonLayerTests.cpp
@@ -14,10 +14,10 @@
 #include <backendsCommon/test/ActivationFixture.hpp>
 #include <backendsCommon/test/LayerTests.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(Compute_ArmComputeNeon)
-
+TEST_SUITE("Compute_ArmComputeNeon")
+{
 using namespace armnn;
 
 using FactoryType = NeonWorkloadFactory;
@@ -308,7 +308,7 @@
 }
 }
 
-BOOST_AUTO_TEST_CASE(DepthwiseConv2dUtils)
+TEST_CASE("DepthwiseConv2dUtils")
 {
     const DataType dataType = DataType::Float32;
 
@@ -323,73 +323,73 @@
     // Strides supported: 1,2,3
     descriptor = MakeDepthwiseConv2dDesc(1, 1);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+    CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
                                                             weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(1, 2);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+    CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
                                                             weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(1, 3);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+    CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
                                                             weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(2, 1);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+    CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
                                                             weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(2, 2);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+    CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
                                                             weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(2, 3);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+    CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
                                                             weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(3, 1);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+    CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
                                                             weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(3, 2);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+    CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
                                                             weightsInfo3x3, biasesInfo));
 
     descriptor = MakeDepthwiseConv2dDesc(3, 3);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+    CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
                                                             weightsInfo3x3, biasesInfo));
 
     // Supported stride 4
     descriptor = MakeDepthwiseConv2dDesc(4, 1);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+    CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
                                                             weightsInfo3x3, biasesInfo));
 
     // Supported weights shape 1x1
     TensorInfo weightsInfo1x1({ 1, 1, 1, 1 }, DataType::Float32);
     descriptor = MakeDepthwiseConv2dDesc(1, 1);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo1x1, descriptor, dataType);
-    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+    CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
                                                             weightsInfo1x1, biasesInfo));
 
     // Supported shape 2x2
     TensorInfo weightsInfo2x2({ 1, 1, 2, 2 }, DataType::Float32);
     descriptor = MakeDepthwiseConv2dDesc(1, 1);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo2x2, descriptor, dataType);
-    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+    CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
                                                             weightsInfo2x2, biasesInfo));
 
     // Asymmetric padding
     descriptor = MakeDepthwiseConv2dDesc(1, 1, 1, 1, 2, 1, 2);
     outputInfo = CreateOutputTensorInfo(inputInfo, weightsInfo3x3, descriptor, dataType);
-    BOOST_TEST(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
+    CHECK(layerSupport.IsDepthwiseConvolutionSupported(inputInfo, outputInfo, descriptor,
                                                             weightsInfo3x3, biasesInfo));
 }
 
@@ -1498,4 +1498,4 @@
 
 #endif
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/neon/test/NeonLayerTests_NDK_Bug.cpp b/src/backends/neon/test/NeonLayerTests_NDK_Bug.cpp
index 1fef439..5a65b15 100644
--- a/src/backends/neon/test/NeonLayerTests_NDK_Bug.cpp
+++ b/src/backends/neon/test/NeonLayerTests_NDK_Bug.cpp
@@ -8,8 +8,10 @@
 #include <neon/NeonWorkloadFactory.hpp>
 #include <test/UnitTests.hpp>
 
-BOOST_AUTO_TEST_SUITE(Compute_ArmComputeNeon)
+#include <doctest/doctest.h>
 
+TEST_SUITE("Compute_ArmComputeNeon")
+{
 using namespace armnn;
 
 using FactoryType = NeonWorkloadFactory;
@@ -44,4 +46,4 @@
 
 #endif
 
-BOOST_AUTO_TEST_SUITE_END()
+}
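
Two smaller points show up in this file: the doctest header now has to be included explicitly (the Boost macros were presumably reaching it transitively through test/UnitTests.hpp), and the suite name "Compute_ArmComputeNeon" is shared with NeonLayerTests.cpp. In doctest a suite is only a name tag, so the same suite can be reopened from another translation unit (or even the same one) and its test cases accumulate under one name. A small sketch of the reopening, using a hypothetical suite name:

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN   // standalone sketch only
#include <doctest/doctest.h>

// The suite name is just a label: it can be opened repeatedly, here twice in
// one file for brevity, in the patch from NeonLayerTests.cpp and this file.
TEST_SUITE("Compute_ArmComputeNeon_Sketch")
{
TEST_CASE("CaseFromFirstBlock")
{
    CHECK(1 + 1 == 2);
}
}

TEST_SUITE("Compute_ArmComputeNeon_Sketch")  // reopened; cases accumulate under one name
{
TEST_CASE("CaseFromSecondBlock")
{
    CHECK(2 * 2 == 4);
}
}
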
diff --git a/src/backends/neon/test/NeonMemCopyTests.cpp b/src/backends/neon/test/NeonMemCopyTests.cpp
index 2bb9e3d..0485092 100644
--- a/src/backends/neon/test/NeonMemCopyTests.cpp
+++ b/src/backends/neon/test/NeonMemCopyTests.cpp
@@ -12,44 +12,44 @@
 #include <reference/RefWorkloadFactory.hpp>
 #include <reference/test/RefWorkloadFactoryHelper.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(NeonMemCopy)
-
-BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndNeon)
+TEST_SUITE("NeonMemCopy")
+{
+TEST_CASE("CopyBetweenCpuAndNeon")
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::RefWorkloadFactory, armnn::NeonWorkloadFactory, armnn::DataType::Float32>(false);
     auto predResult = CompareTensors(result.m_ActualData,  result.m_ExpectedData,
                                      result.m_ActualShape, result.m_ExpectedShape);
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndCpu)
+TEST_CASE("CopyBetweenNeonAndCpu")
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::NeonWorkloadFactory, armnn::RefWorkloadFactory, armnn::DataType::Float32>(false);
     auto predResult = CompareTensors(result.m_ActualData,  result.m_ExpectedData,
                                      result.m_ActualShape, result.m_ExpectedShape);
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndNeonWithSubtensors)
+TEST_CASE("CopyBetweenCpuAndNeonWithSubtensors")
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::RefWorkloadFactory, armnn::NeonWorkloadFactory, armnn::DataType::Float32>(true);
     auto predResult = CompareTensors(result.m_ActualData,  result.m_ExpectedData,
                                      result.m_ActualShape, result.m_ExpectedShape);
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndCpuWithSubtensors)
+TEST_CASE("CopyBetweenNeonAndCpuWithSubtensors")
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::NeonWorkloadFactory, armnn::RefWorkloadFactory, armnn::DataType::Float32>(true);
     auto predResult = CompareTensors(result.m_ActualData,  result.m_ExpectedData,
                                      result.m_ActualShape, result.m_ExpectedShape);
-    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
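
The two-argument BOOST_TEST(condition, message) form has no direct CHECK counterpart, so these tests use CHECK_MESSAGE(condition, message); the message is only reported when the condition fails, and it may also be a streamed expression, as later files in this patch do. A minimal sketch with a hypothetical PredicateResult standing in for what CompareTensors() returns:

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN   // standalone sketch only
#include <doctest/doctest.h>

#include <sstream>
#include <vector>

// Hypothetical stand-in for the predicate-style result returned by
// CompareTensors(): a pass/fail flag plus a diagnostic message.
struct PredicateResult
{
    bool m_Result = false;
    std::ostringstream m_Message;
};

TEST_CASE("MemCopySketch")
{
    std::vector<float> expected = { 1.0f, 2.0f };
    std::vector<float> actual   = { 1.0f, 2.0f };

    PredicateResult predResult;
    predResult.m_Result = (actual == expected);
    predResult.m_Message << "tensor contents differ";

    // BOOST_TEST(cond, msg) becomes CHECK_MESSAGE(cond, msg); the message is
    // attached to the assertion and shown only if the condition fails.
    CHECK_MESSAGE(predResult.m_Result, predResult.m_Message.str());

    // The message argument may also be a streamed expression.
    CHECK_MESSAGE(actual.size() == expected.size(),
                  "sizes: " << actual.size() << " vs " << expected.size());
}
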
diff --git a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
index 4944c31..9b448b2 100644
--- a/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
+++ b/src/backends/neon/test/NeonOptimizedNetworkTests.cpp
@@ -10,11 +10,11 @@
 
 #include <neon/NeonWorkloadFactory.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(NeonOptimizedNetwork)
-
-BOOST_AUTO_TEST_CASE(OptimizeValidateCpuAccDeviceSupportLayerNoFallback)
+TEST_SUITE("NeonOptimizedNetwork")
+{
+TEST_CASE("OptimizeValidateCpuAccDeviceSupportLayerNoFallback")
 {
     // build up the structure of the network
     armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -30,7 +30,7 @@
 
     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
-    BOOST_CHECK(optNet);
+    CHECK(optNet);
     // validate workloads
     armnn::NeonWorkloadFactory fact =
         NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
@@ -38,13 +38,13 @@
     armnn::Graph& graph = GetGraphForTesting(optNet.get());
     for (auto&& layer : graph)
     {
-        BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
-        BOOST_CHECK_NO_THROW(
+        CHECK(layer->GetBackendId() == armnn::Compute::CpuAcc);
+        CHECK_NOTHROW(
             layer->CreateWorkload(fact));
     }
 }
 
-BOOST_AUTO_TEST_CASE(OptimizeValidateDeviceNonSupportLayerNoFallback)
+TEST_CASE("OptimizeValidateDeviceNonSupportLayerNoFallback")
 {
     // build up the structure of the network
     armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -72,16 +72,16 @@
     try
     {
         Optimize(*net, backends, runtime->GetDeviceSpec(), armnn::OptimizerOptions(), errMessages);
-        BOOST_FAIL("Should have thrown an exception.");
+        FAIL("Should have thrown an exception.");
     }
     catch (const armnn::InvalidArgumentException& e)
     {
         // Different exceptions are thrown on different backends
     }
-    BOOST_CHECK(errMessages.size() > 0);
+    CHECK(errMessages.size() > 0);
 }
 
-BOOST_AUTO_TEST_CASE(FastMathEnabledTestOnCpuAcc)
+TEST_CASE("FastMathEnabledTestOnCpuAcc")
 {
     armnn::INetworkPtr net(armnn::INetwork::Create());
 
@@ -102,16 +102,16 @@
     armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
     *net, backends, runtime->GetDeviceSpec(), optimizerOptions);
 
-    BOOST_CHECK(optimizedNet);
+    CHECK(optimizedNet);
 
     auto modelOptionsOut = GetModelOptionsForTesting(optimizedNet.get());
 
-    BOOST_TEST(modelOptionsOut.size() == 1);
-    BOOST_TEST(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
-    BOOST_TEST(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
+    CHECK(modelOptionsOut.size() == 1);
+    CHECK(modelOptionsOut[0].GetOption(0).GetName() == "FastMathEnabled");
+    CHECK(modelOptionsOut[0].GetOption(0).GetValue().AsBool() == true);
 }
 
-BOOST_AUTO_TEST_CASE(NumberOfThreadsTestOnCpuAcc)
+TEST_CASE("NumberOfThreadsTestOnCpuAcc")
 {
     armnn::INetworkPtr net(armnn::INetwork::Create());
 
@@ -134,15 +134,15 @@
     armnn::IOptimizedNetworkPtr optimizedNet = armnn::Optimize(
             *net, backends, runtime->GetDeviceSpec(), optimizerOptions);
 
-    BOOST_CHECK(optimizedNet);
+    CHECK(optimizedNet);
     std::unique_ptr<armnn::Graph> graphPtr;
     armnn::OptimizedNetworkImpl impl(std::move(graphPtr), optimizerOptions.m_ModelOptions);
 
     auto modelOptionsOut = impl.GetModelOptions();
 
-    BOOST_TEST(modelOptionsOut.size() == 1);
-    BOOST_TEST(modelOptionsOut[0].GetOption(0).GetName() == "NumberOfThreads");
-    BOOST_TEST(modelOptionsOut[0].GetOption(0).GetValue().AsUnsignedInt() == numberOfThreads);
+    CHECK(modelOptionsOut.size() == 1);
+    CHECK(modelOptionsOut[0].GetOption(0).GetName() == "NumberOfThreads");
+    CHECK(modelOptionsOut[0].GetOption(0).GetValue().AsUnsignedInt() == numberOfThreads);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
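
BOOST_CHECK_NO_THROW and BOOST_CHECK_THROW map onto CHECK_NOTHROW and CHECK_THROWS_AS; where the concrete exception type differs between backends, as in OptimizeValidateDeviceNonSupportLayerNoFallback above, the explicit try/catch plus FAIL() is kept instead. A sketch of both shapes, built around a hypothetical OptimizeOrThrow() helper:

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN   // standalone sketch only
#include <doctest/doctest.h>

#include <stdexcept>

// Hypothetical helper mirroring the "optimisation must throw" shape above.
static void OptimizeOrThrow(bool anyBackendSupportsIt)
{
    if (!anyBackendSupportsIt)
    {
        throw std::invalid_argument("no backend supports the requested layer");
    }
}

TEST_CASE("ExceptionExpectationSketch")
{
    // Exact exception type known:
    // BOOST_CHECK_THROW -> CHECK_THROWS_AS, BOOST_CHECK_NO_THROW -> CHECK_NOTHROW.
    CHECK_THROWS_AS(OptimizeOrThrow(false), std::invalid_argument);
    CHECK_NOTHROW(OptimizeOrThrow(true));

    // Exception type varies (e.g. per backend): keep the explicit try/catch
    // and let FAIL() flag the missing throw, as the ported test above does.
    try
    {
        OptimizeOrThrow(false);
        FAIL("Should have thrown an exception.");
    }
    catch (const std::invalid_argument&)
    {
        // expected on this code path
    }
}
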
diff --git a/src/backends/neon/test/NeonRuntimeTests.cpp b/src/backends/neon/test/NeonRuntimeTests.cpp
index 27361dd..ee5666d 100644
--- a/src/backends/neon/test/NeonRuntimeTests.cpp
+++ b/src/backends/neon/test/NeonRuntimeTests.cpp
@@ -10,11 +10,11 @@
 #include <backendsCommon/test/RuntimeTestImpl.hpp>
 #include <test/ProfilingTestUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(NeonRuntime)
-
-BOOST_AUTO_TEST_CASE(RuntimeValidateCpuAccDeviceSupportLayerNoFallback)
+TEST_SUITE("NeonRuntime")
+{
+TEST_CASE("RuntimeValidateCpuAccDeviceSupportLayerNoFallback")
 {
     // build up the structure of the network
     armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -30,17 +30,17 @@
 
     std::vector<armnn::BackendId> backends = { armnn::Compute::CpuAcc };
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
-    BOOST_CHECK(optNet);
+    CHECK(optNet);
 
     // Load it into the runtime. It should succeed.
     armnn::NetworkId netId;
-    BOOST_TEST(runtime->LoadNetwork(netId, std::move(optNet)) == armnn::Status::Success);
+    CHECK(runtime->LoadNetwork(netId, std::move(optNet)) == armnn::Status::Success);
 }
 
 #ifdef ARMNN_LEAK_CHECKING_ENABLED
-BOOST_AUTO_TEST_CASE(RuntimeMemoryLeaksCpuAcc)
+TEST_CASE("RuntimeMemoryLeaksCpuAcc")
 {
-    BOOST_TEST(ARMNN_LEAK_CHECKER_IS_ACTIVE());
+    CHECK(ARMNN_LEAK_CHECKER_IS_ACTIVE());
     armnn::IRuntime::CreationOptions options;
     armnn::RuntimeImpl runtime(options);
     armnn::RuntimeLoadedNetworksReserve(&runtime);
@@ -54,21 +54,21 @@
 
     {
         ARMNN_SCOPED_LEAK_CHECKER("LoadAndUnloadNetworkCpuAcc");
-        BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
+        CHECK(ARMNN_NO_LEAKS_IN_SCOPE());
         // In the second run we check for all remaining memory
         // in use after the network was unloaded. If there is any
         // then it will be treated as a memory leak.
         CreateAndDropDummyNetwork(backends, runtime);
-        BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
-        BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
-        BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
+        CHECK(ARMNN_NO_LEAKS_IN_SCOPE());
+        CHECK(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
+        CHECK(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
     }
 }
 #endif
 
-BOOST_AUTO_TEST_CASE(ProfilingPostOptimisationStructureCpuAcc)
+TEST_CASE("ProfilingPostOptimisationStructureCpuAcc")
 {
     VerifyPostOptimisationStructureTestImpl(armnn::Compute::CpuAcc);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/backends/neon/test/NeonTensorHandleTests.cpp b/src/backends/neon/test/NeonTensorHandleTests.cpp
index 0e24e95..eabf3c8 100644
--- a/src/backends/neon/test/NeonTensorHandleTests.cpp
+++ b/src/backends/neon/test/NeonTensorHandleTests.cpp
@@ -15,13 +15,14 @@
 #include <arm_compute/runtime/Allocator.h>
 #include <backendsCommon/test/CommonTestUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 #include <armnn/utility/Assert.hpp>
 
-BOOST_AUTO_TEST_SUITE(NeonTensorHandleTests)
+TEST_SUITE("NeonTensorHandleTests")
+{
 using namespace armnn;
 
-BOOST_AUTO_TEST_CASE(NeonTensorHandleGetCapabilitiesNoPadding)
+TEST_CASE("NeonTensorHandleGetCapabilitiesNoPadding")
 {
     std::shared_ptr<NeonMemoryManager> memoryManager = std::make_shared<NeonMemoryManager>();
     NeonTensorHandleFactory handleFactory(memoryManager);
@@ -43,18 +44,18 @@
     std::vector<Capability> capabilities = handleFactory.GetCapabilities(input,
                                                                          softmax,
                                                                          CapabilityClass::PaddingRequired);
-    BOOST_TEST(capabilities.empty());
+    CHECK(capabilities.empty());
 
     // No padding required for Softmax
     capabilities = handleFactory.GetCapabilities(softmax, output, CapabilityClass::PaddingRequired);
-    BOOST_TEST(capabilities.empty());
+    CHECK(capabilities.empty());
 
     // No padding required for output
     capabilities = handleFactory.GetCapabilities(output, nullptr, CapabilityClass::PaddingRequired);
-    BOOST_TEST(capabilities.empty());
+    CHECK(capabilities.empty());
 }
 
-BOOST_AUTO_TEST_CASE(NeonTensorHandleGetCapabilitiesPadding)
+TEST_CASE("NeonTensorHandleGetCapabilitiesPadding")
 {
     std::shared_ptr<NeonMemoryManager> memoryManager = std::make_shared<NeonMemoryManager>();
     NeonTensorHandleFactory handleFactory(memoryManager);
@@ -75,20 +76,20 @@
     std::vector<Capability> capabilities = handleFactory.GetCapabilities(input,
                                                                          pooling,
                                                                          CapabilityClass::PaddingRequired);
-    BOOST_TEST(capabilities.empty());
+    CHECK(capabilities.empty());
 
     // No padding required for output
     capabilities = handleFactory.GetCapabilities(output, nullptr, CapabilityClass::PaddingRequired);
-    BOOST_TEST(capabilities.empty());
+    CHECK(capabilities.empty());
 
     // Padding required for Pooling2d
     capabilities = handleFactory.GetCapabilities(pooling, output, CapabilityClass::PaddingRequired);
-    BOOST_TEST(capabilities.size() == 1);
-    BOOST_TEST((capabilities[0].m_CapabilityClass == CapabilityClass::PaddingRequired));
-    BOOST_TEST(capabilities[0].m_Value);
+    CHECK(capabilities.size() == 1);
+    CHECK((capabilities[0].m_CapabilityClass == CapabilityClass::PaddingRequired));
+    CHECK(capabilities[0].m_Value);
 }
 
-BOOST_AUTO_TEST_CASE(ConcatOnXorYSubTensorsNoPaddingRequiredTest)
+TEST_CASE("ConcatOnXorYSubTensorsNoPaddingRequiredTest")
 {
     armnn::INetworkPtr net(armnn::INetwork::Create());
 
@@ -163,7 +164,7 @@
     }
 }
 
-BOOST_AUTO_TEST_CASE(ConcatonXorYPaddingRequiredTest)
+TEST_CASE("ConcatonXorYPaddingRequiredTest")
 {
     armnn::INetworkPtr net(armnn::INetwork::Create());
 
@@ -246,7 +247,7 @@
     ARMNN_ASSERT(numberOfSubTensors == 0);
 }
 
-BOOST_AUTO_TEST_CASE(SplitteronXorYNoPaddingRequiredTest)
+TEST_CASE("SplitteronXorYNoPaddingRequiredTest")
 {
     using namespace armnn;
 
@@ -443,14 +444,14 @@
         std::vector<float> out = outputStorage.at(it.first);
         for (unsigned int i = 0; i < out.size(); ++i)
         {
-            BOOST_CHECK_MESSAGE(Compare<armnn::DataType::Float32>(it.second[i], out[i], tolerance) == true,
+            CHECK_MESSAGE(Compare<armnn::DataType::Float32>(it.second[i], out[i], tolerance) == true,
                     "Actual output: " << out[i] << ". Expected output:" << it.second[i]);
 
         }
     }
 }
 
-BOOST_AUTO_TEST_CASE(SplitteronXorYPaddingRequiredTest)
+TEST_CASE("SplitteronXorYPaddingRequiredTest")
 {
     using namespace armnn;
 
@@ -618,14 +619,14 @@
         std::vector<float> out = outputStorage.at(it.first);
         for (unsigned int i = 0; i < out.size(); ++i)
         {
-            BOOST_CHECK_MESSAGE(Compare<armnn::DataType::Float32>(it.second[i], out[i], tolerance) == true,
+            CHECK_MESSAGE(Compare<armnn::DataType::Float32>(it.second[i], out[i], tolerance) == true,
                     "Actual output: " << out[i] << ". Expected output:" << it.second[i]);
 
         }
     }
 }
 
-BOOST_AUTO_TEST_CASE(NeonTensorHandleFactoryMemoryManaged)
+TEST_CASE("NeonTensorHandleFactoryMemoryManaged")
 {
     std::shared_ptr<NeonMemoryManager> memoryManager = std::make_shared<NeonMemoryManager>(
         std::make_unique<arm_compute::Allocator>(),
@@ -641,31 +642,31 @@
     memoryManager->Acquire();
     {
         float* buffer = reinterpret_cast<float*>(handle->Map());
-        BOOST_CHECK(buffer != nullptr); // Yields a valid pointer
+        CHECK(buffer != nullptr); // Yields a valid pointer
         buffer[0] = 1.5f;
         buffer[1] = 2.5f;
-        BOOST_CHECK(buffer[0] == 1.5f); // Memory is writable and readable
-        BOOST_CHECK(buffer[1] == 2.5f); // Memory is writable and readable
+        CHECK(buffer[0] == 1.5f); // Memory is writable and readable
+        CHECK(buffer[1] == 2.5f); // Memory is writable and readable
     }
     memoryManager->Release();
 
     memoryManager->Acquire();
     {
         float* buffer = reinterpret_cast<float*>(handle->Map());
-        BOOST_CHECK(buffer != nullptr); // Yields a valid pointer
+        CHECK(buffer != nullptr); // Yields a valid pointer
         buffer[0] = 3.5f;
         buffer[1] = 4.5f;
-        BOOST_CHECK(buffer[0] == 3.5f); // Memory is writable and readable
-        BOOST_CHECK(buffer[1] == 4.5f); // Memory is writable and readable
+        CHECK(buffer[0] == 3.5f); // Memory is writable and readable
+        CHECK(buffer[1] == 4.5f); // Memory is writable and readable
     }
     memoryManager->Release();
 
     float testPtr[2] = { 2.5f, 5.5f };
     // Cannot import as import is disabled
-    BOOST_CHECK_THROW(handle->Import(static_cast<void*>(testPtr), MemorySource::Malloc), MemoryImportException);
+    CHECK_THROWS_AS(handle->Import(static_cast<void*>(testPtr), MemorySource::Malloc), MemoryImportException);
 }
 
-BOOST_AUTO_TEST_CASE(NeonTensorHandleFactoryImport)
+TEST_CASE("NeonTensorHandleFactoryImport")
 {
     std::shared_ptr<NeonMemoryManager> memoryManager = std::make_shared<NeonMemoryManager>(
         std::make_unique<arm_compute::Allocator>(),
@@ -680,25 +681,25 @@
     memoryManager->Acquire();
 
     // No buffer allocated when import is enabled
-    BOOST_CHECK((PolymorphicDowncast<NeonTensorHandle*>(handle.get()))->GetTensor().buffer() == nullptr);
+    CHECK((PolymorphicDowncast<NeonTensorHandle*>(handle.get()))->GetTensor().buffer() == nullptr);
 
     float testPtr[2] = { 2.5f, 5.5f };
     // Correctly import
-    BOOST_CHECK(handle->Import(static_cast<void*>(testPtr), MemorySource::Malloc));
+    CHECK(handle->Import(static_cast<void*>(testPtr), MemorySource::Malloc));
     float* buffer = reinterpret_cast<float*>(handle->Map());
-    BOOST_CHECK(buffer != nullptr); // Yields a valid pointer after import
-    BOOST_CHECK(buffer == testPtr); // buffer is pointing to testPtr
+    CHECK(buffer != nullptr); // Yields a valid pointer after import
+    CHECK(buffer == testPtr); // buffer is pointing to testPtr
     // Memory is writable and readable with correct value
-    BOOST_CHECK(buffer[0] == 2.5f);
-    BOOST_CHECK(buffer[1] == 5.5f);
+    CHECK(buffer[0] == 2.5f);
+    CHECK(buffer[1] == 5.5f);
     buffer[0] = 3.5f;
     buffer[1] = 10.0f;
-    BOOST_CHECK(buffer[0] == 3.5f);
-    BOOST_CHECK(buffer[1] == 10.0f);
+    CHECK(buffer[0] == 3.5f);
+    CHECK(buffer[1] == 10.0f);
     memoryManager->Release();
 }
 
-BOOST_AUTO_TEST_CASE(NeonTensorHandleSupportsInPlaceComputation)
+TEST_CASE("NeonTensorHandleSupportsInPlaceComputation")
 {
     std::shared_ptr<NeonMemoryManager> memoryManager = std::make_shared<NeonMemoryManager>();
     NeonTensorHandleFactory handleFactory(memoryManager);
@@ -707,4 +708,4 @@
     ARMNN_ASSERT(handleFactory.SupportsInPlaceComputation());
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/neon/test/NeonTimerTest.cpp b/src/backends/neon/test/NeonTimerTest.cpp
index df014d5..d2bb97c 100644
--- a/src/backends/neon/test/NeonTimerTest.cpp
+++ b/src/backends/neon/test/NeonTimerTest.cpp
@@ -18,23 +18,23 @@
 #include <backendsCommon/test/TensorCopyUtils.hpp>
 #include <backendsCommon/test/WorkloadTestUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <cstdlib>
 #include <algorithm>
 
 using namespace armnn;
 
-BOOST_AUTO_TEST_SUITE(NeonTimerInstrument)
+TEST_SUITE("NeonTimerInstrument")
+{
 
-
-BOOST_AUTO_TEST_CASE(NeonTimerGetName)
+TEST_CASE("NeonTimerGetName")
 {
     NeonTimer neonTimer;
-    BOOST_CHECK_EQUAL(neonTimer.GetName(), "NeonKernelTimer");
+    CHECK_EQ(std::string(neonTimer.GetName()), "NeonKernelTimer");
 }
 
-BOOST_AUTO_TEST_CASE(NeonTimerMeasure)
+TEST_CASE("NeonTimerMeasure")
 {
     NeonWorkloadFactory workloadFactory =
         NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager());
@@ -95,19 +95,19 @@
 
     std::vector<Measurement> measurements = neonTimer.GetMeasurements();
 
-    BOOST_CHECK(measurements.size() <= 2);
+    CHECK(measurements.size() <= 2);
     if (measurements.size() > 1)
     {
-        BOOST_CHECK_EQUAL(measurements[0].m_Name, "NeonKernelTimer/0: NEFillBorderKernel");
-        BOOST_CHECK(measurements[0].m_Value > 0.0);
+        CHECK_EQ(measurements[0].m_Name, "NeonKernelTimer/0: NEFillBorderKernel");
+        CHECK(measurements[0].m_Value > 0.0);
     }
     std::ostringstream oss_neon;
     std::ostringstream oss_cpu;
     oss_neon << "NeonKernelTimer/" << measurements.size()-1 << ": NEActivationLayerKernel";
     oss_cpu << "NeonKernelTimer/" << measurements.size()-1 << ": CpuActivationKernel";
-    BOOST_CHECK(measurements[measurements.size()-1].m_Name == oss_neon.str() ||
-                measurements[measurements.size()-1].m_Name == oss_cpu.str());
-    BOOST_CHECK(measurements[measurements.size()-1].m_Value > 0.0);
+    CHECK((measurements[measurements.size()-1].m_Name == oss_neon.str() ||
+                measurements[measurements.size()-1].m_Name == oss_cpu.str()));
+    CHECK(measurements[measurements.size()-1].m_Value > 0.0);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
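
Two doctest details show up in NeonTimerTest.cpp. CHECK_EQ compares raw char pointers by address unless DOCTEST_CONFIG_TREAT_CHAR_STAR_AS_STRING is defined, so GetName() is wrapped in std::string to force a value comparison; and CHECK cannot decompose a top-level || or &&, so the compound condition gains an extra pair of parentheses. A small sketch with a hypothetical GetTimerName() stand-in:

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN   // standalone sketch only
#include <doctest/doctest.h>

#include <string>

// Hypothetical stand-in for NeonTimer::GetName(), which returns a C string.
static const char* GetTimerName() { return "NeonKernelTimer"; }

TEST_CASE("TimerNameSketch")
{
    // Wrapping one side in std::string forces a value comparison; by default
    // doctest compares two char pointers by address.
    CHECK_EQ(std::string(GetTimerName()), "NeonKernelTimer");

    std::string name = GetTimerName();
    // A top-level || (or &&) cannot be decomposed by CHECK, so the whole
    // condition is wrapped in an extra pair of parentheses.
    CHECK((name == "NeonKernelTimer" || name == "CpuKernelTimer"));
}
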
diff --git a/src/backends/reference/test/ArgMinMaxTests.cpp b/src/backends/reference/test/ArgMinMaxTests.cpp
index dce15b2..b79a108 100644
--- a/src/backends/reference/test/ArgMinMaxTests.cpp
+++ b/src/backends/reference/test/ArgMinMaxTests.cpp
@@ -5,11 +5,11 @@
 
 #include <reference/workloads/ArgMinMax.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(RefArgMinMax)
-
-BOOST_AUTO_TEST_CASE(ArgMinTest)
+TEST_SUITE("RefArgMinMax")
+{
+TEST_CASE("ArgMinTest")
 {
     const armnn::TensorInfo inputInfo({ 1, 2, 3 } , armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo({ 1, 3 }, armnn::DataType::Signed64);
@@ -25,14 +25,11 @@
                armnn::ArgMinMaxFunction::Min,
                -2);
 
-    BOOST_CHECK_EQUAL_COLLECTIONS(outputValues.begin(),
-                                  outputValues.end(),
-                                  expectedValues.begin(),
-                                  expectedValues.end());
+    CHECK(std::equal(outputValues.begin(), outputValues.end(), expectedValues.begin(), expectedValues.end()));
 
 }
 
-BOOST_AUTO_TEST_CASE(ArgMaxTest)
+TEST_CASE("ArgMaxTest")
 {
     const armnn::TensorInfo inputInfo({ 1, 2, 3 } , armnn::DataType::Float32);
     const armnn::TensorInfo outputInfo({ 1, 3 }, armnn::DataType::Signed64);
@@ -48,11 +45,8 @@
                armnn::ArgMinMaxFunction::Max,
                -2);
 
-    BOOST_CHECK_EQUAL_COLLECTIONS(outputValues.begin(),
-                                  outputValues.end(),
-                                  expectedValues.begin(),
-                                  expectedValues.end());
+    CHECK(std::equal(outputValues.begin(), outputValues.end(), expectedValues.begin(), expectedValues.end()));
 
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
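
BOOST_CHECK_EQUAL_COLLECTIONS has no doctest counterpart, so the comparison collapses into a single CHECK over the four-iterator std::equal (C++14), which also fails on a length mismatch. The per-element diagnostics are lost in the process; if they were wanted, an element-wise loop with CHECK_MESSAGE could keep them, though that is not what the patch does. A sketch of both options, using made-up values:

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN   // standalone sketch only
#include <doctest/doctest.h>

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <vector>

TEST_CASE("ArgMinMaxCollectionSketch")
{
    std::vector<int64_t> outputValues   = { 0, 1, 0 };
    std::vector<int64_t> expectedValues = { 0, 1, 0 };

    // Four-iterator std::equal also returns false when the ranges differ in
    // length, matching BOOST_CHECK_EQUAL_COLLECTIONS semantics.
    CHECK(std::equal(outputValues.begin(), outputValues.end(),
                     expectedValues.begin(), expectedValues.end()));

    // Optional alternative: an element-wise loop keeps per-index diagnostics.
    for (std::size_t i = 0; i < expectedValues.size(); ++i)
    {
        CHECK_MESSAGE(outputValues[i] == expectedValues[i], "mismatch at index " << i);
    }
}
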
diff --git a/src/backends/reference/test/CMakeLists.txt b/src/backends/reference/test/CMakeLists.txt
index c71c9d7..76541cf 100644
--- a/src/backends/reference/test/CMakeLists.txt
+++ b/src/backends/reference/test/CMakeLists.txt
@@ -24,3 +24,4 @@
 target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/backends)
 target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/src/profiling)
 target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/profiling/common/include)
+target_include_directories(armnnRefBackendUnitTests PRIVATE ${PROJECT_SOURCE_DIR}/third-party/doctest)
diff --git a/src/backends/reference/test/RefCreateWorkloadTests.cpp b/src/backends/reference/test/RefCreateWorkloadTests.cpp
index 0f86e7e..4293ef5 100644
--- a/src/backends/reference/test/RefCreateWorkloadTests.cpp
+++ b/src/backends/reference/test/RefCreateWorkloadTests.cpp
@@ -10,6 +10,8 @@
 #include <reference/RefWorkloadFactory.hpp>
 #include <reference/workloads/RefWorkloads.hpp>
 
+#include <doctest/doctest.h>
+
 namespace
 {
 
@@ -19,8 +21,8 @@
     auto queueDescriptor = workload->GetData();
     auto inputHandle  = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo));
-    BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
+    CHECK((inputHandle->GetTensorInfo() == inputInfo));
+    CHECK((outputHandle->GetTensorInfo() == outputInfo));
 }
 
 template <typename Workload>
@@ -33,9 +35,9 @@
     auto inputHandle0     = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto inputHandle1     = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[1]);
     auto outputHandle    = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST((inputHandle0->GetTensorInfo() == inputInfo0));
-    BOOST_TEST((inputHandle1->GetTensorInfo() == inputInfo1));
-    BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
+    CHECK((inputHandle0->GetTensorInfo() == inputInfo0));
+    CHECK((inputHandle1->GetTensorInfo() == inputInfo1));
+    CHECK((outputHandle->GetTensorInfo() == outputInfo));
 }
 
 armnn::RefWorkloadFactory GetFactory()
@@ -47,8 +49,8 @@
 
 }
 
-BOOST_AUTO_TEST_SUITE(CreateWorkloadRef)
-
+TEST_SUITE("CreateWorkloadRef")
+{
 template <typename ActivationWorkloadType, armnn::DataType DataType>
 static void RefCreateActivationWorkloadTest()
 {
@@ -62,12 +64,12 @@
         TensorInfo({ 1, 1 }, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateActivationFloat32Workload)
+TEST_CASE("CreateActivationFloat32Workload")
 {
     RefCreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateActivationUint8Workload)
+TEST_CASE("CreateActivationUint8Workload")
 {
     RefCreateActivationWorkloadTest<RefActivationWorkload, armnn::DataType::QAsymmU8>();
 }
@@ -89,7 +91,7 @@
         TensorInfo({ 2, 3 }, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateSubtractionWorkloadWithBlobTest)
+TEST_CASE("CreateSubtractionWorkloadWithBlobTest")
 {
     Graph graph;
     RefWorkloadFactory factory = GetFactory();
@@ -106,7 +108,7 @@
         TensorInfo({ 2, 3 }, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateAdditionWorkloadWithBlobTest)
+TEST_CASE("CreateAdditionWorkloadWithBlobTest")
 {
     Graph graph;
     RefWorkloadFactory factory = GetFactory();
@@ -122,7 +124,7 @@
         TensorInfo({ 2, 3 }, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateMultiplicationWorkloadWithBlobTest)
+TEST_CASE("CreateMultiplicationWorkloadWithBlobTest")
 {
     Graph              graph;
     RefWorkloadFactory factory  = GetFactory();
@@ -138,7 +140,7 @@
                       TensorInfo({2, 3}, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
+TEST_CASE("CreateAdditionFloatWorkload")
 {
     RefCreateElementwiseWorkloadTest<RefAdditionWorkload<>,
         AdditionQueueDescriptor,
@@ -146,7 +148,7 @@
         armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateAdditionUint8Workload)
+TEST_CASE("CreateAdditionUint8Workload")
 {
     RefCreateElementwiseWorkloadTest<RefAdditionWorkload<>,
         AdditionQueueDescriptor,
@@ -154,7 +156,7 @@
         armnn::DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateAdditionInt16Workload)
+TEST_CASE("CreateAdditionInt16Workload")
 {
     RefCreateElementwiseWorkloadTest<RefAdditionWorkload<>,
         AdditionQueueDescriptor,
@@ -162,7 +164,7 @@
         armnn::DataType::QSymmS16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateAdditionInt32Workload)
+TEST_CASE("CreateAdditionInt32Workload")
 {
     RefCreateElementwiseWorkloadTest<RefAdditionWorkload<int32_t>,
             AdditionQueueDescriptor,
@@ -170,7 +172,7 @@
             armnn::DataType::Signed32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSubtractionFloat32Workload)
+TEST_CASE("CreateSubtractionFloat32Workload")
 {
     RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
         SubtractionQueueDescriptor,
@@ -178,7 +180,7 @@
         armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSubtractionFloat16Workload)
+TEST_CASE("CreateSubtractionFloat16Workload")
 {
     RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
         SubtractionQueueDescriptor,
@@ -186,7 +188,7 @@
         armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSubtractionUint8Workload)
+TEST_CASE("CreateSubtractionUint8Workload")
 {
     RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
         SubtractionQueueDescriptor,
@@ -194,7 +196,7 @@
         armnn::DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSubtractionInt16Workload)
+TEST_CASE("CreateSubtractionInt16Workload")
 {
     RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<>,
         SubtractionQueueDescriptor,
@@ -202,7 +204,7 @@
         armnn::DataType::QSymmS16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSubtractionInt32Workload)
+TEST_CASE("CreateSubtractionInt32Workload")
 {
     RefCreateElementwiseWorkloadTest<RefSubtractionWorkload<int32_t>,
             SubtractionQueueDescriptor,
@@ -210,7 +212,7 @@
             armnn::DataType::Signed32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateMultiplicationFloatWorkload)
+TEST_CASE("CreateMultiplicationFloatWorkload")
 {
     RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<>,
         MultiplicationQueueDescriptor,
@@ -218,7 +220,7 @@
         armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateMultiplicationUint8Workload)
+TEST_CASE("CreateMultiplicationUint8Workload")
 {
     RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<>,
         MultiplicationQueueDescriptor,
@@ -226,7 +228,7 @@
         armnn::DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateMultiplicationInt16Workload)
+TEST_CASE("CreateMultiplicationInt16Workload")
 {
     RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<>,
         MultiplicationQueueDescriptor,
@@ -234,7 +236,7 @@
         armnn::DataType::QSymmS16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateMultiplicationInt32Workload)
+TEST_CASE("CreateMultiplicationInt32Workload")
 {
     RefCreateElementwiseWorkloadTest<RefMultiplicationWorkload<int32_t>,
             MultiplicationQueueDescriptor,
@@ -242,7 +244,7 @@
             armnn::DataType::Signed32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateDivisionFloat32Workload)
+TEST_CASE("CreateDivisionFloat32Workload")
 {
     RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
         DivisionQueueDescriptor,
@@ -250,7 +252,7 @@
         armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateDivisionFloat16Workload)
+TEST_CASE("CreateDivisionFloat16Workload")
 {
     RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
         DivisionQueueDescriptor,
@@ -258,7 +260,7 @@
         armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateDivisionUint8Workload)
+TEST_CASE("CreateDivisionUint8Workload")
 {
     RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
         DivisionQueueDescriptor,
@@ -266,7 +268,7 @@
         armnn::DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateDivisionInt16Workload)
+TEST_CASE("CreateDivisionInt16Workload")
 {
     RefCreateElementwiseWorkloadTest<RefDivisionWorkload<>,
         DivisionQueueDescriptor,
@@ -274,7 +276,7 @@
         armnn::DataType::QSymmS16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateDivisionInt32Workload)
+TEST_CASE("CreateDivisionInt32Workload")
 {
     RefCreateElementwiseWorkloadTest<RefDivisionWorkload<int32_t>,
             DivisionQueueDescriptor,
@@ -311,7 +313,7 @@
     CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationWithBlobFloat32Workload)
+TEST_CASE("CreateBatchNormalizationWithBlobFloat32Workload")
 {
     Graph graph;
     RefWorkloadFactory factory = GetFactory();
@@ -329,55 +331,55 @@
     CheckInputOutput(std::move(workload), TensorInfo(inputShape, dataType), TensorInfo(outputShape, dataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32Workload)
+TEST_CASE("CreateBatchNormalizationFloat32Workload")
 {
     RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload,armnn::DataType::Float32>
             (DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat32WorkloadNhwc)
+TEST_CASE("CreateBatchNormalizationFloat32WorkloadNhwc")
 {
     RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::Float32>
             (DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16Workload)
+TEST_CASE("CreateBatchNormalizationFloat16Workload")
 {
     RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload,armnn::DataType::Float16>
             (DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationFloat16WorkloadNhwc)
+TEST_CASE("CreateBatchNormalizationFloat16WorkloadNhwc")
 {
     RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::Float16>
             (DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8Workload)
+TEST_CASE("CreateBatchNormalizationUint8Workload")
 {
     RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QAsymmU8>
             (DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationUint8WorkloadNhwc)
+TEST_CASE("CreateBatchNormalizationUint8WorkloadNhwc")
 {
     RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QAsymmU8>
             (DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16Workload)
+TEST_CASE("CreateBatchNormalizationInt16Workload")
 {
     RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QSymmS16>
             (DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchNormalizationInt16WorkloadNhwc)
+TEST_CASE("CreateBatchNormalizationInt16WorkloadNhwc")
 {
     RefCreateBatchNormalizationWorkloadTest<RefBatchNormalizationWorkload, armnn::DataType::QSymmS16>
             (DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvertFp16ToFp32Float32Workload)
+TEST_CASE("CreateConvertFp16ToFp32Float32Workload")
 {
     Graph                graph;
     RefWorkloadFactory factory = GetFactory();
@@ -388,7 +390,7 @@
         std::move(workload), TensorInfo({1, 3, 2, 3}, DataType::Float16), TensorInfo({1, 3, 2, 3}, DataType::Float32));
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvertFp32ToFp16Float16Workload)
+TEST_CASE("CreateConvertFp32ToFp16Float16Workload")
 {
     Graph                graph;
     RefWorkloadFactory factory = GetFactory();
@@ -417,17 +419,17 @@
                      TensorInfo(outputShape, DataType::Float32));
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNchwWorkload)
+TEST_CASE("CreateConvolution2dFloatNchwWorkload")
 {
     RefCreateConvolution2dWorkloadTest(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvolution2dFloatNhwcWorkload)
+TEST_CASE("CreateConvolution2dFloatNhwcWorkload")
 {
     RefCreateConvolution2dWorkloadTest(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConvolution2dWithBlobWorkload)
+TEST_CASE("CreateConvolution2dWithBlobWorkload")
 {
     DataLayout dataLayout = DataLayout::NHWC;
     Graph graph;
@@ -464,12 +466,12 @@
                      TensorInfo(outputShape, DataType::Float32));
 }
 
-BOOST_AUTO_TEST_CASE(CreateDepthwiseConvolutionFloat32NhwcWorkload)
+TEST_CASE("CreateDepthwiseConvolutionFloat32NhwcWorkload")
 {
     RefCreateDepthwiseConvolutionWorkloadTest(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(RefCreateFullyConnectedWithBlobWorkloadTest)
+TEST_CASE("RefCreateFullyConnectedWithBlobWorkloadTest")
 {
     Graph graph;
     RefWorkloadFactory factory = GetFactory();
@@ -499,17 +501,17 @@
         TensorInfo({ 3, 7 }, DataType, outputQScale));
 }
 
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadFloat32)
+TEST_CASE("CreateFullyConnectedWorkloadFloat32")
 {
     RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedAsymm8)
+TEST_CASE("CreateFullyConnectedWorkloadQuantisedAsymm8")
 {
     RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateFullyConnectedWorkloadQuantisedSymm16)
+TEST_CASE("CreateFullyConnectedWorkloadQuantisedSymm16")
 {
     RefCreateFullyConnectedWorkloadTest<RefFullyConnectedWorkload, armnn::DataType::QSymmS16>();
 }
@@ -541,32 +543,32 @@
     CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateRefNormalizationFloat32NchwWorkload)
+TEST_CASE("CreateRefNormalizationFloat32NchwWorkload")
 {
     RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateRefNormalizationFloat32NhwcWorkload)
+TEST_CASE("CreateRefNormalizationFloat32NhwcWorkload")
 {
     RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateRefNormalizationUint8NchwWorkload)
+TEST_CASE("CreateRefNormalizationUint8NchwWorkload")
 {
     RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateRefNormalizationUint8NhwcWorkload)
+TEST_CASE("CreateRefNormalizationUint8NhwcWorkload")
 {
     RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateRefNormalizationInt16NchwWorkload)
+TEST_CASE("CreateRefNormalizationInt16NchwWorkload")
 {
     RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateRefNormalizationInt16NhwcWorkload)
+TEST_CASE("CreateRefNormalizationInt16NhwcWorkload")
 {
     RefCreateNormalizationWorkloadTest<RefNormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
 }
@@ -599,32 +601,32 @@
                      TensorInfo(outputShape, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32Workload)
+TEST_CASE("CreatePooling2dFloat32Workload")
 {
     RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePooling2dFloat32NhwcWorkload)
+TEST_CASE("CreatePooling2dFloat32NhwcWorkload")
 {
     RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePooling2dUint8Workload)
+TEST_CASE("CreatePooling2dUint8Workload")
 {
     RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePooling2dUint8NhwcWorkload)
+TEST_CASE("CreatePooling2dUint8NhwcWorkload")
 {
     RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePooling2dInt16Workload)
+TEST_CASE("CreatePooling2dInt16Workload")
 {
     RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePooling2dInt16NhwcWorkload)
+TEST_CASE("CreatePooling2dInt16NhwcWorkload")
 {
     RefCreatePooling2dWorkloadTest<RefPooling2dWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
 }
@@ -655,22 +657,22 @@
         tensorInfo);
 }
 
-BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat32Workload)
+TEST_CASE("CreateSoftmaxFloat32Workload")
 {
     RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSoftmaxFloat16Workload)
+TEST_CASE("CreateSoftmaxFloat16Workload")
 {
     RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedAsymm8Workload)
+TEST_CASE("CreateSoftmaxQuantisedAsymm8Workload")
 {
     RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSoftmaxQuantisedSymm16Workload)
+TEST_CASE("CreateSoftmaxQuantisedSymm16Workload")
 {
     RefCreateSoftmaxWorkloadTest<RefSoftmaxWorkload, armnn::DataType::QSymmS16>();
 }
@@ -685,29 +687,29 @@
     // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
     SplitterQueueDescriptor queueDescriptor = workload->GetData();
     auto inputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo({ 5, 7, 7 }, DataType)));
+    CHECK((inputHandle->GetTensorInfo() == TensorInfo({ 5, 7, 7 }, DataType)));
 
     auto outputHandle0 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST((outputHandle0->GetTensorInfo() == TensorInfo({ 1, 7, 7 }, DataType)));
+    CHECK((outputHandle0->GetTensorInfo() == TensorInfo({ 1, 7, 7 }, DataType)));
 
     auto outputHandle1 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[1]);
-    BOOST_TEST((outputHandle1->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
+    CHECK((outputHandle1->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
 
     auto outputHandle2 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[2]);
-    BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
+    CHECK((outputHandle2->GetTensorInfo() == TensorInfo({ 2, 7, 7 }, DataType)));
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterFloat32Workload)
+TEST_CASE("CreateSplitterFloat32Workload")
 {
     RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterFloat16Workload)
+TEST_CASE("CreateSplitterFloat16Workload")
 {
     RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterUint8Workload)
+TEST_CASE("CreateSplitterUint8Workload")
 {
     RefCreateSplitterWorkloadTest<RefSplitterWorkload, armnn::DataType::QAsymmU8>();
 }
@@ -735,27 +737,27 @@
     armnn::RefTensorHandle* mIn0 = dynamic_cast<armnn::RefTensorHandle*>(wlConcat->GetData().m_Inputs[0]);
     armnn::RefTensorHandle* mIn1 = dynamic_cast<armnn::RefTensorHandle*>(wlConcat->GetData().m_Inputs[1]);
 
-    BOOST_TEST(sOut0);
-    BOOST_TEST(sOut1);
-    BOOST_TEST(mIn0);
-    BOOST_TEST(mIn1);
+    CHECK(sOut0);
+    CHECK(sOut1);
+    CHECK(mIn0);
+    CHECK(mIn1);
 
     bool validDataPointers = (sOut0 == mIn1) && (sOut1 == mIn0);
 
-    BOOST_TEST(validDataPointers);
+    CHECK(validDataPointers);
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat32)
+TEST_CASE("CreateSplitterConcatFloat32")
 {
     RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterConcatFloat16)
+TEST_CASE("CreateSplitterConcatFloat16")
 {
     RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSplitterConcatUint8)
+TEST_CASE("CreateSplitterConcatUint8")
 {
     RefCreateSplitterConcatWorkloadTest<RefSplitterWorkload, RefConcatWorkload, DataType::QAsymmU8>();
 }
@@ -785,26 +787,26 @@
     armnn::RefTensorHandle* activ1_1Im = dynamic_cast<armnn::RefTensorHandle*>(wlActiv1_1->GetData().m_Inputs[0]);
 
 
-    BOOST_TEST(sOut0);
-    BOOST_TEST(sOut1);
-    BOOST_TEST(activ0_0Im);
-    BOOST_TEST(activ0_1Im);
-    BOOST_TEST(activ1_0Im);
-    BOOST_TEST(activ1_1Im);
+    CHECK(sOut0);
+    CHECK(sOut1);
+    CHECK(activ0_0Im);
+    CHECK(activ0_1Im);
+    CHECK(activ1_0Im);
+    CHECK(activ1_1Im);
 
     bool validDataPointers = (sOut0 == activ0_0Im) && (sOut0 == activ0_1Im) &&
                              (sOut1 == activ1_0Im) && (sOut1 == activ1_1Im);
 
-    BOOST_TEST(validDataPointers);
+    CHECK(validDataPointers);
 }
 
-BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsFloat32)
+TEST_CASE("CreateSingleOutputMultipleInputsFloat32")
 {
     RefCreateSingleOutputMultipleInputsTest<RefSplitterWorkload, RefActivationWorkload,
         armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSingleOutputMultipleInputsUint8)
+TEST_CASE("CreateSingleOutputMultipleInputsUint8")
 {
     RefCreateSingleOutputMultipleInputsTest<RefSplitterWorkload, RefActivationWorkload,
         armnn::DataType::QAsymmU8>();
@@ -838,27 +840,27 @@
                      TensorInfo(outputShape, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32)
+TEST_CASE("CreateResizeBilinearFloat32")
 {
     RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat16)
+TEST_CASE("CreateResizeBilinearFloat16")
 {
     RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeBilinearUint8)
+TEST_CASE("CreateResizeBilinearUint8")
 {
     RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeBilinearQuantisedAsymm16)
+TEST_CASE("CreateResizeBilinearQuantisedAsymm16")
 {
     RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateResizeBilinearFloat32Nhwc)
+TEST_CASE("CreateResizeBilinearFloat32Nhwc")
 {
     RefCreateResizeBilinearTest<RefResizeWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
 }
@@ -876,22 +878,22 @@
                      TensorInfo({ 1, 1, 1, 1 }, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdFloat32)
+TEST_CASE("CreateBatchToSpaceNdFloat32")
 {
     RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdFloat16)
+TEST_CASE("CreateBatchToSpaceNdFloat16")
 {
     RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdUint8)
+TEST_CASE("CreateBatchToSpaceNdUint8")
 {
     RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateBatchToSpaceNdQSymm16)
+TEST_CASE("CreateBatchToSpaceNdQSymm16")
 {
     RefCreateBatchToSpaceNdTest<RefBatchToSpaceNdWorkload, armnn::DataType::QSymmS16>();
 }
@@ -924,32 +926,32 @@
     CheckInputOutput(std::move(workload), TensorInfo(inputShape, DataType), TensorInfo(outputShape, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32)
+TEST_CASE("CreateL2NormalizationFloat32")
 {
     RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::Float32>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationFloat32Nhwc)
+TEST_CASE("CreateL2NormalizationFloat32Nhwc")
 {
     RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::Float32>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationInt16)
+TEST_CASE("CreateL2NormalizationInt16")
 {
     RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationInt16Nhwc)
+TEST_CASE("CreateL2NormalizationInt16Nhwc")
 {
     RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QSymmS16>(DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationUint8)
+TEST_CASE("CreateL2NormalizationUint8")
 {
     RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(CreateL2NormalizationUint8Nhwc)
+TEST_CASE("CreateL2NormalizationUint8Nhwc")
 {
     RefCreateL2NormalizationTest<RefL2NormalizationWorkload, armnn::DataType::QAsymmU8>(DataLayout::NHWC);
 }
@@ -968,17 +970,17 @@
         TensorInfo({ 1, 4 }, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadFloat32)
+TEST_CASE("CreateReshapeWorkloadFloat32")
 {
     RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedAsymm8)
+TEST_CASE("CreateReshapeWorkloadQuantisedAsymm8")
 {
     RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateReshapeWorkloadQuantisedSymm16)
+TEST_CASE("CreateReshapeWorkloadQuantisedSymm16")
 {
     RefCreateReshapeWorkloadTest<RefReshapeWorkload, armnn::DataType::QSymmS16>();
 }
@@ -997,52 +999,52 @@
                       TensorInfo(outputShape, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
+TEST_CASE("CreateConcatDim0Float32Workload")
 {
     RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 4, 3, 2, 5 }, 0);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim0Float16Workload)
+TEST_CASE("CreateConcatDim0Float16Workload")
 {
     RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float16>({ 4, 3, 2, 5 }, 0);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint8Workload)
+TEST_CASE("CreateConcatDim0Uint8Workload")
 {
     RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 4, 3, 2, 5 }, 0);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim0Uint16Workload)
+TEST_CASE("CreateConcatDim0Uint16Workload")
 {
     RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QSymmS16>({ 4, 3, 2, 5 }, 0);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim1Float32Workload)
+TEST_CASE("CreateConcatDim1Float32Workload")
 {
     RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 6, 2, 5 }, 1);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim1Uint8Workload)
+TEST_CASE("CreateConcatDim1Uint8Workload")
 {
     RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 6, 2, 5 }, 1);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim2Float32Workload)
+TEST_CASE("CreateConcatDim2Float32Workload")
 {
     RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 4, 5 }, 2);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim2Uint8Workload)
+TEST_CASE("CreateConcatDim2Uint8Workload")
 {
     RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 4, 5 }, 2);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim3Float32Workload)
+TEST_CASE("CreateConcatDim3Float32Workload")
 {
     RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 }, 3);
 }
 
-BOOST_AUTO_TEST_CASE(CreateConcatDim3Uint8Workload)
+TEST_CASE("CreateConcatDim3Uint8Workload")
 {
     RefCreateConcatWorkloadTest<RefConcatWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 }, 3);
 }
@@ -1057,25 +1059,25 @@
     // Check output is as expected
     auto queueDescriptor = workload->GetData();
     auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
+    CHECK((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
 }
 
-BOOST_AUTO_TEST_CASE(CreateConstantUint8Workload)
+TEST_CASE("CreateConstantUint8Workload")
 {
     RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QAsymmU8>({ 2, 3, 2, 10 });
 }
 
-BOOST_AUTO_TEST_CASE(CreateConstantInt16Workload)
+TEST_CASE("CreateConstantInt16Workload")
 {
     RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::QSymmS16>({ 2, 3, 2, 10 });
 }
 
-BOOST_AUTO_TEST_CASE(CreateConstantFloat32Workload)
+TEST_CASE("CreateConstantFloat32Workload")
 {
     RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::Float32>({ 2, 3, 2, 10 });
 }
 
-BOOST_AUTO_TEST_CASE(CreateConstantSigned32Workload)
+TEST_CASE("CreateConstantSigned32Workload")
 {
     RefCreateConstantWorkloadTest<RefConstantWorkload, armnn::DataType::Signed32>({ 2, 3, 2, 10 });
 }
@@ -1097,53 +1099,53 @@
     // Check output is as expected
     auto queueDescriptor = workload->GetData();
     auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, dataType)));
+    CHECK((outputHandle->GetTensorInfo() == TensorInfo(outputShape, dataType)));
 }
 
-BOOST_AUTO_TEST_CASE(CreatePreluFloat32Workload)
+TEST_CASE("CreatePreluFloat32Workload")
 {
     RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::Float32);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePreluFloat16Workload)
+TEST_CASE("CreatePreluFloat16Workload")
 {
     RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::Float16);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePreluUint8Workload)
+TEST_CASE("CreatePreluUint8Workload")
 {
     RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QAsymmU8);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePreluInt16Workload)
+TEST_CASE("CreatePreluInt16Workload")
 {
     RefCreatePreluWorkloadTest({ 1, 4, 1, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 }, armnn::DataType::QSymmS16);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePreluFloat32NoBroadcastWorkload)
+TEST_CASE("CreatePreluFloat32NoBroadcastWorkload")
 {
-    BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
+    CHECK_THROWS_AS(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
                                                  armnn::DataType::Float32),
                       armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePreluFloat16NoBroadcastWorkload)
+TEST_CASE("CreatePreluFloat16NoBroadcastWorkload")
 {
-    BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
+    CHECK_THROWS_AS(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
                                                  armnn::DataType::Float16),
                       armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePreluUint8NoBroadcastWorkload)
+TEST_CASE("CreatePreluUint8NoBroadcastWorkload")
 {
-    BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
+    CHECK_THROWS_AS(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
                                                  armnn::DataType::QAsymmU8),
                       armnn::InvalidArgumentException);
 }
 
-BOOST_AUTO_TEST_CASE(CreatePreluInt16NoBroadcastWorkload)
+TEST_CASE("CreatePreluInt16NoBroadcastWorkload")
 {
-    BOOST_CHECK_THROW(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
+    CHECK_THROWS_AS(RefCreatePreluWorkloadTest({ 1, 4, 7, 2 }, { 5, 4, 3, 1 }, { 5, 4, 3, 2 },
                                                  armnn::DataType::QSymmS16),
                       armnn::InvalidArgumentException);
 }
@@ -1161,22 +1163,22 @@
                      TensorInfo({ 1, 1, 1, 4 }, DataType));
 }
 
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadFloat32)
+TEST_CASE("CreateSpaceToDepthWorkloadFloat32")
 {
     RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::Float32>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadFloat16)
+TEST_CASE("CreateSpaceToDepthWorkloadFloat16")
 {
     RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::Float16>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQASymm8)
+TEST_CASE("CreateSpaceToDepthWorkloadQASymm8")
 {
     RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QAsymmU8>();
 }
 
-BOOST_AUTO_TEST_CASE(CreateSpaceToDepthWorkloadQSymm16)
+TEST_CASE("CreateSpaceToDepthWorkloadQSymm16")
 {
     RefCreateSpaceToDepthWorkloadTest<RefSpaceToDepthWorkload, armnn::DataType::QSymmS16>();
 }
@@ -1201,23 +1203,23 @@
     for (unsigned int i = 0; i < numInputs; ++i)
     {
         auto inputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Inputs[i]);
-        BOOST_TEST((inputHandle->GetTensorInfo() == TensorInfo(inputShape, DataType)));
+        CHECK((inputHandle->GetTensorInfo() == TensorInfo(inputShape, DataType)));
     }
     auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
+    CHECK((outputHandle->GetTensorInfo() == TensorInfo(outputShape, DataType)));
 }
 
-BOOST_AUTO_TEST_CASE(CreateStackFloat32Workload)
+TEST_CASE("CreateStackFloat32Workload")
 {
     RefCreateStackWorkloadTest<armnn::DataType::Float32>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
 }
 
-BOOST_AUTO_TEST_CASE(CreateStackUint8Workload)
+TEST_CASE("CreateStackUint8Workload")
 {
     RefCreateStackWorkloadTest<armnn::DataType::QAsymmU8>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
 }
 
-BOOST_AUTO_TEST_CASE(CreateStackUint16Workload)
+TEST_CASE("CreateStackUint16Workload")
 {
     RefCreateStackWorkloadTest<armnn::DataType::QSymmS16>({ 3, 4, 5 }, { 3, 4, 2, 5 }, 2, 2);
 }
@@ -1241,14 +1243,14 @@
     auto cellStateOutHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[1]);
     auto outputHandle = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor.m_Outputs[2]);
 
-    BOOST_TEST((inputHandle->GetTensorInfo() == inputInfo));
-    BOOST_TEST((cellStateOutHandle->GetTensorInfo() == cellStateInfo));
-    BOOST_TEST((outputHandle->GetTensorInfo() == outputInfo));
+    CHECK((inputHandle->GetTensorInfo() == inputInfo));
+    CHECK((cellStateOutHandle->GetTensorInfo() == cellStateInfo));
+    CHECK((outputHandle->GetTensorInfo() == outputInfo));
 }
 
-BOOST_AUTO_TEST_CASE(CreateQLstmWorkload)
+TEST_CASE("CreateQLstmWorkload")
 {
     RefCreateQLstmWorkloadTest<RefQLstmWorkload>();
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
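
For reference, a minimal standalone sketch of the suite, test-case and exception-assertion mapping applied in the file above. It assumes only that doctest.h is on the include path; the suite name, lambda and exception type are illustrative and not taken from ArmNN.

// Standalone sketch of the Boost.Test -> doctest macro mapping used in this commit.
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>

#include <stdexcept>

TEST_SUITE("ExampleSuite")      // replaces BOOST_AUTO_TEST_SUITE(ExampleSuite)
{                               // the suite body is a real brace-enclosed block,
                                // which is why BOOST_AUTO_TEST_SUITE_END() becomes '}'

TEST_CASE("ThrowsOnInvalidArgument")   // replaces BOOST_AUTO_TEST_CASE(ThrowsOnInvalidArgument)
{
    auto mustBePositive = [](int value)
    {
        if (value <= 0)
        {
            throw std::invalid_argument("value must be > 0");
        }
        return value;
    };

    CHECK(mustBePositive(2) == 2);                               // replaces BOOST_TEST(...)
    CHECK_THROWS_AS(mustBePositive(-1), std::invalid_argument);  // replaces BOOST_CHECK_THROW(...)
}

} // TEST_SUITE("ExampleSuite")
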
diff --git a/src/backends/reference/test/RefDetectionPostProcessTests.cpp b/src/backends/reference/test/RefDetectionPostProcessTests.cpp
index fab6e00..763578b 100644
--- a/src/backends/reference/test/RefDetectionPostProcessTests.cpp
+++ b/src/backends/reference/test/RefDetectionPostProcessTests.cpp
@@ -8,46 +8,46 @@
 #include <armnn/Descriptors.hpp>
 #include <armnn/Types.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(RefDetectionPostProcess)
-
-BOOST_AUTO_TEST_CASE(TopKSortTest)
+TEST_SUITE("RefDetectionPostProcess")
+{
+TEST_CASE("TopKSortTest")
 {
     unsigned int k = 3;
     unsigned int indices[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
     float values[8] = { 0, 7, 6, 5, 4, 3, 2, 500 };
     armnn::TopKSort(k, indices, values, 8);
-    BOOST_TEST(indices[0] == 7);
-    BOOST_TEST(indices[1] == 1);
-    BOOST_TEST(indices[2] == 2);
+    CHECK(indices[0] == 7);
+    CHECK(indices[1] == 1);
+    CHECK(indices[2] == 2);
 }
 
-BOOST_AUTO_TEST_CASE(FullTopKSortTest)
+TEST_CASE("FullTopKSortTest")
 {
     unsigned int k = 8;
     unsigned int indices[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
     float values[8] = { 0, 7, 6, 5, 4, 3, 2, 500 };
     armnn::TopKSort(k, indices, values, 8);
-    BOOST_TEST(indices[0] == 7);
-    BOOST_TEST(indices[1] == 1);
-    BOOST_TEST(indices[2] == 2);
-    BOOST_TEST(indices[3] == 3);
-    BOOST_TEST(indices[4] == 4);
-    BOOST_TEST(indices[5] == 5);
-    BOOST_TEST(indices[6] == 6);
-    BOOST_TEST(indices[7] == 0);
+    CHECK(indices[0] == 7);
+    CHECK(indices[1] == 1);
+    CHECK(indices[2] == 2);
+    CHECK(indices[3] == 3);
+    CHECK(indices[4] == 4);
+    CHECK(indices[5] == 5);
+    CHECK(indices[6] == 6);
+    CHECK(indices[7] == 0);
 }
 
-BOOST_AUTO_TEST_CASE(IouTest)
+TEST_CASE("IouTest")
 {
     float boxI[4] = { 0.0f, 0.0f, 10.0f, 10.0f };
     float boxJ[4] = { 1.0f, 1.0f, 11.0f, 11.0f };
     float iou = armnn::IntersectionOverUnion(boxI, boxJ);
-    BOOST_TEST(iou == 0.68, boost::test_tools::tolerance(0.001));
+    CHECK(iou == doctest::Approx(0.68).epsilon(0.001f));
 }
 
-BOOST_AUTO_TEST_CASE(NmsFunction)
+TEST_CASE("NmsFunction")
 {
     std::vector<float> boxCorners({
         0.0f, 0.0f, 1.0f, 1.0f,
@@ -63,10 +63,10 @@
     std::vector<unsigned int> result =
         armnn::NonMaxSuppression(6, boxCorners, scores, 0.0, 3, 0.5);
 
-    BOOST_TEST(result.size() == 3);
-    BOOST_TEST(result[0] == 3);
-    BOOST_TEST(result[1] == 0);
-    BOOST_TEST(result[2] == 5);
+    CHECK(result.size() == 3);
+    CHECK(result[0] == 3);
+    CHECK(result[1] == 0);
+    CHECK(result[2] == 5);
 }
 
 void DetectionPostProcessTestImpl(bool useRegularNms,
@@ -149,28 +149,20 @@
                                 detectionScores.data(),
                                 numDetections.data());
 
-    BOOST_CHECK_EQUAL_COLLECTIONS(detectionBoxes.begin(),
-                                  detectionBoxes.end(),
-                                  expectedDetectionBoxes.begin(),
-                                  expectedDetectionBoxes.end());
+    CHECK(std::equal(detectionBoxes.begin(), detectionBoxes.end(),
+        expectedDetectionBoxes.begin(), expectedDetectionBoxes.end()));
 
-    BOOST_CHECK_EQUAL_COLLECTIONS(detectionScores.begin(),
-                                  detectionScores.end(),
-                                  expectedDetectionScores.begin(),
-                                  expectedDetectionScores.end());
+    CHECK(std::equal(detectionScores.begin(), detectionScores.end(),
+        expectedDetectionScores.begin(), expectedDetectionScores.end()));
 
-    BOOST_CHECK_EQUAL_COLLECTIONS(detectionClasses.begin(),
-                                  detectionClasses.end(),
-                                  expectedDetectionClasses.begin(),
-                                  expectedDetectionClasses.end());
+    CHECK(std::equal(detectionClasses.begin(), detectionClasses.end(),
+        expectedDetectionClasses.begin(), expectedDetectionClasses.end()));
 
-    BOOST_CHECK_EQUAL_COLLECTIONS(numDetections.begin(),
-                                  numDetections.end(),
-                                  expectedNumDetections.begin(),
-                                  expectedNumDetections.end());
+    CHECK(std::equal(numDetections.begin(), numDetections.end(),
+        expectedNumDetections.begin(), expectedNumDetections.end()));
 }
 
-BOOST_AUTO_TEST_CASE(RegularNmsDetectionPostProcess)
+TEST_CASE("RegularNmsDetectionPostProcess")
 {
     std::vector<float> expectedDetectionBoxes({
         0.0f, 10.0f, 1.0f, 11.0f,
@@ -186,7 +180,7 @@
                                  expectedDetectionScores, expectedNumDetections);
 }
 
-BOOST_AUTO_TEST_CASE(FastNmsDetectionPostProcess)
+TEST_CASE("FastNmsDetectionPostProcess")
 {
     std::vector<float> expectedDetectionBoxes({
         0.0f, 10.0f, 1.0f, 11.0f,
@@ -201,4 +195,4 @@
                                  expectedDetectionScores, expectedNumDetections);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
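
For reference, a minimal standalone sketch of the relative-tolerance comparison used in IouTest above: boost::test_tools::tolerance maps onto doctest::Approx with epsilon(), which is likewise a relative bound. The numeric values below are illustrative only.

// Standalone sketch; assumes doctest.h is available on the include path.
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>

TEST_CASE("ApproxComparison")
{
    float iou = 0.680049f;  // stand-in for the result of a floating-point computation

    // epsilon(0.001) is a relative tolerance, so values within roughly 0.1% of 0.68 pass,
    // mirroring BOOST_TEST(iou == 0.68, boost::test_tools::tolerance(0.001)).
    CHECK(iou == doctest::Approx(0.68).epsilon(0.001));
}
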
diff --git a/src/backends/reference/test/RefEndToEndTests.cpp b/src/backends/reference/test/RefEndToEndTests.cpp
index 910df29..69a2048 100644
--- a/src/backends/reference/test/RefEndToEndTests.cpp
+++ b/src/backends/reference/test/RefEndToEndTests.cpp
@@ -28,14 +28,14 @@
 #include <backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp>
 #include <backendsCommon/test/TransposeConvolution2dEndToEndTestImpl.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(RefEndToEnd)
-
+TEST_SUITE("RefEndToEnd")
+{
 std::vector<armnn::BackendId> defaultBackends = {armnn::Compute::CpuRef};
 
 // Abs
-BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestFloat32)
+TEST_CASE("RefAbsEndToEndTestFloat32")
 {
     std::vector<float> expectedOutput =
     {
@@ -48,7 +48,7 @@
                                                              expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestUint8)
+TEST_CASE("RefAbsEndToEndTestUint8")
 {
     // Note the expected output will be implicitly quantized by the below test function
     std::vector<float> expectedOutput =
@@ -62,7 +62,7 @@
                                                                      expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(RefAbsEndToEndTestInt16)
+TEST_CASE("RefAbsEndToEndTestInt16")
 {
     // Note the expected output will be implicitly quantized by the below test function
     std::vector<float> expectedOutput =
@@ -77,17 +77,17 @@
 }
 
 // Constant
-BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Float32)
+TEST_CASE("ConstantUsage_Ref_Float32")
 {
-    BOOST_TEST(ConstantUsageFloat32Test(defaultBackends));
+    CHECK(ConstantUsageFloat32Test(defaultBackends));
 }
 
-BOOST_AUTO_TEST_CASE(ConstantUsage_Ref_Uint8)
+TEST_CASE("ConstantUsage_Ref_Uint8")
 {
-    BOOST_TEST(ConstantUsageUint8Test(defaultBackends));
+    CHECK(ConstantUsageUint8Test(defaultBackends));
 }
 
-BOOST_AUTO_TEST_CASE(Unsigned8)
+TEST_CASE("Unsigned8")
 {
     using namespace armnn;
 
@@ -122,7 +122,7 @@
     // Loads it into the runtime.
     NetworkId netId;
     auto error = runtime->LoadNetwork(netId, std::move(optNet));
-    BOOST_TEST(error == Status::Success);
+    CHECK(error == Status::Success);
 
     // Creates structures for input & output.
     std::vector<uint8_t> inputData
@@ -144,14 +144,14 @@
     runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
 
     // Checks the results.
-    BOOST_TEST(outputData[0] == 0);
-    BOOST_TEST(outputData[1] == 0);
-    BOOST_TEST(outputData[2] == 0);
-    BOOST_TEST(outputData[3] == 255); // softmax has been saturated.
-    BOOST_TEST(outputData[4] == 0);
+    CHECK(outputData[0] == 0);
+    CHECK(outputData[1] == 0);
+    CHECK(outputData[2] == 0);
+    CHECK(outputData[3] == 255); // softmax has been saturated.
+    CHECK(outputData[4] == 0);
 }
 
-BOOST_AUTO_TEST_CASE(TrivialAdd)
+TEST_CASE("TrivialAdd")
 {
     // This test was designed to match "AddTwo" in android nn/runtime/test/TestTrivialModel.cpp.
 
@@ -211,21 +211,21 @@
     runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
 
     // Checks the results
-    BOOST_TEST(outputData[0] == 101);
-    BOOST_TEST(outputData[1] == 202);
-    BOOST_TEST(outputData[2] == 303);
-    BOOST_TEST(outputData[3] == 404);
-    BOOST_TEST(outputData[4] == 505);
-    BOOST_TEST(outputData[5] == 606);
-    BOOST_TEST(outputData[6] == 707);
-    BOOST_TEST(outputData[7] == 808);
-    BOOST_TEST(outputData[8] == 909);
-    BOOST_TEST(outputData[9] == 1010);
-    BOOST_TEST(outputData[10] == 1111);
-    BOOST_TEST(outputData[11] == 1212);
+    CHECK(outputData[0] == 101);
+    CHECK(outputData[1] == 202);
+    CHECK(outputData[2] == 303);
+    CHECK(outputData[3] == 404);
+    CHECK(outputData[4] == 505);
+    CHECK(outputData[5] == 606);
+    CHECK(outputData[6] == 707);
+    CHECK(outputData[7] == 808);
+    CHECK(outputData[8] == 909);
+    CHECK(outputData[9] == 1010);
+    CHECK(outputData[10] == 1111);
+    CHECK(outputData[11] == 1212);
 }
 
-BOOST_AUTO_TEST_CASE(MultipleOutputs)
+TEST_CASE("MultipleOutputs")
 {
     using namespace armnn;
 
@@ -306,12 +306,12 @@
     runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
 
     // Checks the results.
-    BOOST_TEST(output1Data == std::vector<float>({ 1.f, 1.f, 1.f, 1.f, 1.f, 0.f, -1.f, -1.f, 1.f, 1.f })); // ReLu1
-    BOOST_TEST(output2Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 6.f, 0.f, 0.f, 0.f, 3.f, 3.f })); // ReLu6
-    BOOST_TEST(output3Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 5.f, 2.f, 2.f, 2.f, 3.f, 3.f })); // [2, 5]
+    CHECK(output1Data == std::vector<float>({ 1.f, 1.f, 1.f, 1.f, 1.f, 0.f, -1.f, -1.f, 1.f, 1.f })); // ReLu1
+    CHECK(output2Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 6.f, 0.f, 0.f, 0.f, 3.f, 3.f })); // ReLu6
+    CHECK(output3Data == std::vector<float>({ 3.f, 5.f, 2.f, 3.f, 5.f, 2.f, 2.f, 2.f, 3.f, 3.f })); // [2, 5]
 }
 
-BOOST_AUTO_TEST_CASE(TrivialMin)
+TEST_CASE("TrivialMin")
 {
     using namespace armnn;
 
@@ -369,13 +369,13 @@
     runtime->EnqueueWorkload(netId, inputTensors, outputTensors);
 
     // Checks the results
-    BOOST_TEST(outputData[0] == 1);
-    BOOST_TEST(outputData[1] == 1);
-    BOOST_TEST(outputData[2] == 3);
-    BOOST_TEST(outputData[3] == 2);
+    CHECK(outputData[0] == 1);
+    CHECK(outputData[1] == 1);
+    CHECK(outputData[2] == 3);
+    CHECK(outputData[3] == 2);
 }
 
-BOOST_AUTO_TEST_CASE(RefEqualSimpleEndToEndTest)
+TEST_CASE("RefEqualSimpleEndToEndTest")
 {
     const std::vector<uint8_t> expectedOutput({ 1, 1, 1, 1,  0, 0, 0, 0,
                                                 0, 0, 0, 0,  1, 1, 1, 1 });
@@ -385,7 +385,7 @@
                                                        expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(RefGreaterSimpleEndToEndTest)
+TEST_CASE("RefGreaterSimpleEndToEndTest")
 {
     const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0,  1, 1, 1, 1,
                                                 0, 0, 0, 0,  0, 0, 0, 0 });
@@ -395,7 +395,7 @@
                                                        expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(RefEqualSimpleEndToEndUint8Test)
+TEST_CASE("RefEqualSimpleEndToEndUint8Test")
 {
     const std::vector<uint8_t> expectedOutput({ 1, 1, 1, 1,  0, 0, 0, 0,
                                                 0, 0, 0, 0,  1, 1, 1, 1 });
@@ -405,7 +405,7 @@
                                                                expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(RefGreaterSimpleEndToEndUint8Test)
+TEST_CASE("RefGreaterSimpleEndToEndUint8Test")
 {
     const std::vector<uint8_t> expectedOutput({ 0, 0, 0, 0,  1, 1, 1, 1,
                                                 0, 0, 0, 0,  0, 0, 0, 0 });
@@ -415,7 +415,7 @@
                                                                expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(RefEqualBroadcastEndToEndTest)
+TEST_CASE("RefEqualBroadcastEndToEndTest")
 {
     const std::vector<uint8_t> expectedOutput({ 1, 0, 1, 1, 0, 0,
                                                 0, 0, 0, 0, 0, 0 });
@@ -425,7 +425,7 @@
                                                           expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(RefGreaterBroadcastEndToEndTest)
+TEST_CASE("RefGreaterBroadcastEndToEndTest")
 {
     const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
                                                 1, 1, 1, 1, 1, 1 });
@@ -435,7 +435,7 @@
                                                           expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(RefEqualBroadcastEndToEndUint8Test)
+TEST_CASE("RefEqualBroadcastEndToEndUint8Test")
 {
     const std::vector<uint8_t > expectedOutput({ 1, 0, 1, 1, 0, 0,
                                                  0, 0, 0, 0, 0, 0 });
@@ -445,7 +445,7 @@
                                                                   expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(RefGreaterBroadcastEndToEndUint8Test)
+TEST_CASE("RefGreaterBroadcastEndToEndUint8Test")
 {
     const std::vector<uint8_t> expectedOutput({ 0, 1, 0, 0, 0, 1,
                                                 1, 1, 1, 1, 1, 1 });
@@ -455,249 +455,249 @@
                                                                   expectedOutput);
 }
 
-BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndFloat32NHWCTest)
+TEST_CASE("RefBatchToSpaceNdEndToEndFloat32NHWCTest")
 {
     BatchToSpaceNdEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndUint8NHWCTest)
+TEST_CASE("RefBatchToSpaceNdEndToEndUint8NHWCTest")
 {
     BatchToSpaceNdEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndQSymm16NHWCTest)
+TEST_CASE("RefBatchToSpaceNdEndToEndQSymm16NHWCTest")
 {
     BatchToSpaceNdEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndFloat32NCHWTest)
+TEST_CASE("RefBatchToSpaceNdEndToEndFloat32NCHWTest")
 {
     BatchToSpaceNdEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndUint8NCHWTest)
+TEST_CASE("RefBatchToSpaceNdEndToEndUint8NCHWTest")
 {
     BatchToSpaceNdEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndQSymm16NCHWTest)
+TEST_CASE("RefBatchToSpaceNdEndToEndQSymm16NCHWTest")
 {
     BatchToSpaceNdEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexFloat32NHWCTest)
+TEST_CASE("RefBatchToSpaceNdEndToEndComplexFloat32NHWCTest")
 {
     BatchToSpaceNdComplexEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexUint8NHWCTest)
+TEST_CASE("RefBatchToSpaceNdEndToEndComplexUint8NHWCTest")
 {
     BatchToSpaceNdComplexEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexQSymm16NHWCTest)
+TEST_CASE("RefBatchToSpaceNdEndToEndComplexQSymm16NHWCTest")
 {
     BatchToSpaceNdComplexEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexFloat32NCHWTest)
+TEST_CASE("RefBatchToSpaceNdEndToEndComplexFloat32NCHWTest")
 {
     BatchToSpaceNdComplexEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexUint8NCHWTest)
+TEST_CASE("RefBatchToSpaceNdEndToEndComplexUint8NCHWTest")
 {
     BatchToSpaceNdComplexEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefBatchToSpaceNdEndToEndComplexQSymm16NCHWTest)
+TEST_CASE("RefBatchToSpaceNdEndToEndComplexQSymm16NCHWTest")
 {
     BatchToSpaceNdComplexEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim0Test)
+TEST_CASE("RefConcatEndToEndDim0Test")
 {
     ConcatDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim0Uint8Test)
+TEST_CASE("RefConcatEndToEndDim0Uint8Test")
 {
     ConcatDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim1Test)
+TEST_CASE("RefConcatEndToEndDim1Test")
 {
     ConcatDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim1Uint8Test)
+TEST_CASE("RefConcatEndToEndDim1Uint8Test")
 {
     ConcatDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim2Test)
+TEST_CASE("RefConcatEndToEndDim2Test")
 {
     ConcatDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim2Uint8Test)
+TEST_CASE("RefConcatEndToEndDim2Uint8Test")
 {
     ConcatDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim3Test)
+TEST_CASE("RefConcatEndToEndDim3Test")
 {
     ConcatDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefConcatEndToEndDim3Uint8Test)
+TEST_CASE("RefConcatEndToEndDim3Uint8Test")
 {
     ConcatDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefEluEndToEndTestFloat32)
+TEST_CASE("RefEluEndToEndTestFloat32")
 {
     EluEndToEndTest<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefEluEndToEndTestFloat16)
+TEST_CASE("RefEluEndToEndTestFloat16")
 {
     EluEndToEndTest<armnn::DataType::Float16>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefEluEndToEndTestBFloat16)
+TEST_CASE("RefEluEndToEndTestBFloat16")
 {
     EluEndToEndTest<armnn::DataType::BFloat16>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefEluEndToEndTestQAsymmS8)
+TEST_CASE("RefEluEndToEndTestQAsymmS8")
 {
     EluEndToEndTest<armnn::DataType::QAsymmS8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefEluEndToEndTestQAsymmU8)
+TEST_CASE("RefEluEndToEndTestQAsymmU8")
 {
     EluEndToEndTest<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefEluEndToEndTestQSymmS16)
+TEST_CASE("RefEluEndToEndTestQSymmS16")
 {
     EluEndToEndTest<armnn::DataType::QSymmS16>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefFillEndToEndTest)
+TEST_CASE("RefFillEndToEndTest")
 {
     FillEndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefFillEndToEndTestFloat16)
+TEST_CASE("RefFillEndToEndTestFloat16")
 {
     FillEndToEnd<armnn::DataType::Float16>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefFillEndToEndTestInt32)
+TEST_CASE("RefFillEndToEndTestInt32")
 {
     FillEndToEnd<armnn::DataType::Signed32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefFullyConnectedEndToEndTestInt32)
+TEST_CASE("RefFullyConnectedEndToEndTestInt32")
 {
     FullyConnectedWithDynamicWeightsEndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefGatherFloatTest)
+TEST_CASE("RefGatherFloatTest")
 {
     GatherEndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefGatherUint8Test)
+TEST_CASE("RefGatherUint8Test")
 {
     GatherEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefGatherInt16Test)
+TEST_CASE("RefGatherInt16Test")
 {
     GatherEndToEnd<armnn::DataType::QSymmS16>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefGatherMultiDimFloatTest)
+TEST_CASE("RefGatherMultiDimFloatTest")
 {
     GatherMultiDimEndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefGatherMultiDimUint8Test)
+TEST_CASE("RefGatherMultiDimUint8Test")
 {
     GatherMultiDimEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefGatherMultiDimInt16Test)
+TEST_CASE("RefGatherMultiDimInt16Test")
 {
     GatherMultiDimEndToEnd<armnn::DataType::QSymmS16>(defaultBackends);
 }
 
 // DepthToSpace
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwFloat32)
+TEST_CASE("DephtToSpaceEndToEndNchwFloat32")
 {
     DepthToSpaceEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwFloat16)
+TEST_CASE("DephtToSpaceEndToEndNchwFloat16")
 {
     DepthToSpaceEndToEnd<armnn::DataType::Float16>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwUint8)
+TEST_CASE("DephtToSpaceEndToEndNchwUint8")
 {
     DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNchwInt16)
+TEST_CASE("DephtToSpaceEndToEndNchwInt16")
 {
     DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat32)
+TEST_CASE("DephtToSpaceEndToEndNhwcFloat32")
 {
     DepthToSpaceEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcFloat16)
+TEST_CASE("DephtToSpaceEndToEndNhwcFloat16")
 {
     DepthToSpaceEndToEnd<armnn::DataType::Float16>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcUint8)
+TEST_CASE("DephtToSpaceEndToEndNhwcUint8")
 {
     DepthToSpaceEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(DephtToSpaceEndToEndNhwcInt16)
+TEST_CASE("DephtToSpaceEndToEndNhwcInt16")
 {
     DepthToSpaceEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
 // Dequantize
-BOOST_AUTO_TEST_CASE(DequantizeEndToEndSimpleTest)
+TEST_CASE("DequantizeEndToEndSimpleTest")
 {
     DequantizeEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(DequantizeEndToEndOffsetTest)
+TEST_CASE("DequantizeEndToEndOffsetTest")
 {
     DequantizeEndToEndOffset<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(DequantizeEndToEndSimpleInt16Test)
+TEST_CASE("DequantizeEndToEndSimpleInt16Test")
 {
     DequantizeEndToEndSimple<armnn::DataType::QSymmS16>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(DequantizeEndToEndOffsetInt16Test)
+TEST_CASE("DequantizeEndToEndOffsetInt16Test")
 {
     DequantizeEndToEndOffset<armnn::DataType::QSymmS16>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefDetectionPostProcessRegularNmsTest)
+TEST_CASE("RefDetectionPostProcessRegularNmsTest")
 {
     std::vector<float> boxEncodings({
         0.0f, 0.0f, 0.0f, 0.0f,
@@ -734,7 +734,7 @@
     }
 }
 
-BOOST_AUTO_TEST_CASE(RefDetectionPostProcessRegularNmsUint8Test)
+TEST_CASE("RefDetectionPostProcessRegularNmsUint8Test")
 {
     armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
     armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
@@ -783,7 +783,7 @@
                                                                              1.0f, 1, 0.01f, 0, 0.5f, 0);
 }
 
-BOOST_AUTO_TEST_CASE(RefDetectionPostProcessFastNmsTest)
+TEST_CASE("RefDetectionPostProcessFastNmsTest")
 {
     std::vector<float> boxEncodings({
         0.0f, 0.0f, 0.0f, 0.0f,
@@ -812,7 +812,7 @@
     DetectionPostProcessFastNmsEndToEnd<armnn::DataType::Float32>(defaultBackends, boxEncodings, scores, anchors);
 }
 
-BOOST_AUTO_TEST_CASE(RefDetectionPostProcessFastNmsUint8Test)
+TEST_CASE("RefDetectionPostProcessFastNmsUint8Test")
 {
     armnn::TensorInfo boxEncodingsInfo({ 1, 6, 4 }, armnn::DataType::Float32);
     armnn::TensorInfo scoresInfo({ 1, 6, 3 }, armnn::DataType::Float32);
@@ -862,494 +862,493 @@
 }
 
 // HardSwish
-BOOST_AUTO_TEST_CASE(RefHardSwishEndToEndTestFloat32)
+TEST_CASE("RefHardSwishEndToEndTestFloat32")
 {
     HardSwishEndToEndTest<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefHardSwishEndToEndTestFloat16)
+TEST_CASE("RefHardSwishEndToEndTestFloat16")
 {
     HardSwishEndToEndTest<armnn::DataType::Float16>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefHardSwishEndToEndTestBFloat16)
+TEST_CASE("RefHardSwishEndToEndTestBFloat16")
 {
-HardSwishEndToEndTest<armnn::DataType::BFloat16>(defaultBackends);
+    HardSwishEndToEndTest<armnn::DataType::BFloat16>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefHardSwishEndToEndTestQAsymmS8)
+TEST_CASE("RefHardSwishEndToEndTestQAsymmS8")
 {
     HardSwishEndToEndTest<armnn::DataType::QAsymmS8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefHardSwishEndToEndTestQAsymmU8)
+TEST_CASE("RefHardSwishEndToEndTestQAsymmU8")
 {
     HardSwishEndToEndTest<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefHardSwishEndToEndTestQSymmS16)
+TEST_CASE("RefHardSwishEndToEndTestQSymmS16")
 {
     HardSwishEndToEndTest<armnn::DataType::QSymmS16>(defaultBackends);
 }
 
 // LogSoftmax
-BOOST_AUTO_TEST_CASE(RefLogSoftmaxEndToEndTest)
+TEST_CASE("RefLogSoftmaxEndToEndTest")
 {
     LogSoftmaxEndToEndTest(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefPreluEndToEndTestFloat32)
+TEST_CASE("RefPreluEndToEndTestFloat32")
 {
     PreluEndToEndNegativeTest<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefPreluEndToEndTestUint8)
+TEST_CASE("RefPreluEndToEndTestUint8")
 {
     PreluEndToEndPositiveTest<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefPreluEndToEndTestQSymm16)
+TEST_CASE("RefPreluEndToEndTestQSymm16")
 {
     PreluEndToEndPositiveTest<armnn::DataType::QSymmS16>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSpaceToDepthNhwcEndToEndTest1)
+TEST_CASE("RefSpaceToDepthNhwcEndToEndTest1")
 {
     SpaceToDepthNhwcEndToEndTest1(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSpaceToDepthNchwEndToEndTest1)
+TEST_CASE("RefSpaceToDepthNchwEndToEndTest1")
 {
     SpaceToDepthNchwEndToEndTest1(defaultBackends);
-
 }
 
-BOOST_AUTO_TEST_CASE(RefSpaceToDepthNhwcEndToEndTest2)
+TEST_CASE("RefSpaceToDepthNhwcEndToEndTest2")
 {
     SpaceToDepthNhwcEndToEndTest2(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSpaceToDepthNchwEndToEndTest2)
+TEST_CASE("RefSpaceToDepthNchwEndToEndTest2")
 {
     SpaceToDepthNchwEndToEndTest2(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter1dEndToEndTest)
+TEST_CASE("RefSplitter1dEndToEndTest")
 {
     Splitter1dEndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter1dEndToEndUint8Test)
+TEST_CASE("RefSplitter1dEndToEndUint8Test")
 {
     Splitter1dEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter2dDim0EndToEndTest)
+TEST_CASE("RefSplitter2dDim0EndToEndTest")
 {
     Splitter2dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter2dDim1EndToEndTest)
+TEST_CASE("RefSplitter2dDim1EndToEndTest")
 {
     Splitter2dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter2dDim0EndToEndUint8Test)
+TEST_CASE("RefSplitter2dDim0EndToEndUint8Test")
 {
     Splitter2dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter2dDim1EndToEndUint8Test)
+TEST_CASE("RefSplitter2dDim1EndToEndUint8Test")
 {
     Splitter2dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter3dDim0EndToEndTest)
+TEST_CASE("RefSplitter3dDim0EndToEndTest")
 {
     Splitter3dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter3dDim1EndToEndTest)
+TEST_CASE("RefSplitter3dDim1EndToEndTest")
 {
     Splitter3dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter3dDim2EndToEndTest)
+TEST_CASE("RefSplitter3dDim2EndToEndTest")
 {
     Splitter3dDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter3dDim0EndToEndUint8Test)
+TEST_CASE("RefSplitter3dDim0EndToEndUint8Test")
 {
     Splitter3dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter3dDim1EndToEndUint8Test)
+TEST_CASE("RefSplitter3dDim1EndToEndUint8Test")
 {
     Splitter3dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter3dDim2EndToEndUint8Test)
+TEST_CASE("RefSplitter3dDim2EndToEndUint8Test")
 {
     Splitter3dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter4dDim0EndToEndTest)
+TEST_CASE("RefSplitter4dDim0EndToEndTest")
 {
     Splitter4dDim0EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter4dDim1EndToEndTest)
+TEST_CASE("RefSplitter4dDim1EndToEndTest")
 {
     Splitter4dDim1EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter4dDim2EndToEndTest)
+TEST_CASE("RefSplitter4dDim2EndToEndTest")
 {
     Splitter4dDim2EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter4dDim3EndToEndTest)
+TEST_CASE("RefSplitter4dDim3EndToEndTest")
 {
     Splitter4dDim3EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter4dDim0EndToEndUint8Test)
+TEST_CASE("RefSplitter4dDim0EndToEndUint8Test")
 {
     Splitter4dDim0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter4dDim1EndToEndUint8Test)
+TEST_CASE("RefSplitter4dDim1EndToEndUint8Test")
 {
     Splitter4dDim1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter4dDim2EndToEndUint8Test)
+TEST_CASE("RefSplitter4dDim2EndToEndUint8Test")
 {
     Splitter4dDim2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefSplitter4dDim3EndToEndUint8Test)
+TEST_CASE("RefSplitter4dDim3EndToEndUint8Test")
 {
     Splitter4dDim3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
 // TransposeConvolution2d
-BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndFloatNchwTest)
+TEST_CASE("RefTransposeConvolution2dEndToEndFloatNchwTest")
 {
     TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
         defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndUint8NchwTest)
+TEST_CASE("RefTransposeConvolution2dEndToEndUint8NchwTest")
 {
     TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
         defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndInt16NchwTest)
+TEST_CASE("RefTransposeConvolution2dEndToEndInt16NchwTest")
 {
     TransposeConvolution2dEndToEnd<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
         defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndFloatNhwcTest)
+TEST_CASE("RefTransposeConvolution2dEndToEndFloatNhwcTest")
 {
     TransposeConvolution2dEndToEnd<armnn::DataType::Float32, armnn::DataType::Float32>(
         defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndUint8NhwcTest)
+TEST_CASE("RefTransposeConvolution2dEndToEndUint8NhwcTest")
 {
     TransposeConvolution2dEndToEnd<armnn::DataType::QAsymmU8, armnn::DataType::Signed32>(
         defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(RefTransposeConvolution2dEndToEndInt16NhwcTest)
+TEST_CASE("RefTransposeConvolution2dEndToEndInt16NhwcTest")
 {
     TransposeConvolution2dEndToEnd<armnn::DataType::QSymmS16, armnn::DataType::Signed32>(
         defaultBackends, armnn::DataLayout::NHWC);
 }
 
 // Resize Bilinear
-BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndFloatNchwTest)
+TEST_CASE("RefResizeBilinearEndToEndFloatNchwTest")
 {
     ResizeBilinearEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndUint8NchwTest)
+TEST_CASE("RefResizeBilinearEndToEndUint8NchwTest")
 {
     ResizeBilinearEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndInt16NchwTest)
+TEST_CASE("RefResizeBilinearEndToEndInt16NchwTest")
 {
     ResizeBilinearEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndFloatNhwcTest)
+TEST_CASE("RefResizeBilinearEndToEndFloatNhwcTest")
 {
     ResizeBilinearEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndUint8NhwcTest)
+TEST_CASE("RefResizeBilinearEndToEndUint8NhwcTest")
 {
     ResizeBilinearEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(RefResizeBilinearEndToEndInt16NhwcTest)
+TEST_CASE("RefResizeBilinearEndToEndInt16NhwcTest")
 {
     ResizeBilinearEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
 // Resize NearestNeighbor
-BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndFloatNchwTest)
+TEST_CASE("RefResizeNearestNeighborEndToEndFloatNchwTest")
 {
     ResizeNearestNeighborEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndUint8NchwTest)
+TEST_CASE("RefResizeNearestNeighborEndToEndUint8NchwTest")
 {
     ResizeNearestNeighborEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndInt16NchwTest)
+TEST_CASE("RefResizeNearestNeighborEndToEndInt16NchwTest")
 {
     ResizeNearestNeighborEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NCHW);
 }
 
-BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndFloatNhwcTest)
+TEST_CASE("RefResizeNearestNeighborEndToEndFloatNhwcTest")
 {
     ResizeNearestNeighborEndToEnd<armnn::DataType::Float32>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndUint8NhwcTest)
+TEST_CASE("RefResizeNearestNeighborEndToEndUint8NhwcTest")
 {
     ResizeNearestNeighborEndToEnd<armnn::DataType::QAsymmU8>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
-BOOST_AUTO_TEST_CASE(RefResizeNearestNeighborEndToEndInt16NhwcTest)
+TEST_CASE("RefResizeNearestNeighborEndToEndInt16NhwcTest")
 {
     ResizeNearestNeighborEndToEnd<armnn::DataType::QSymmS16>(defaultBackends, armnn::DataLayout::NHWC);
 }
 
 // InstanceNormalization
-BOOST_AUTO_TEST_CASE(RefInstanceNormalizationNhwcEndToEndTest1)
+TEST_CASE("RefInstanceNormalizationNhwcEndToEndTest1")
 {
     InstanceNormalizationNhwcEndToEndTest1(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefInstanceNormalizationNchwEndToEndTest1)
+TEST_CASE("RefInstanceNormalizationNchwEndToEndTest1")
 {
     InstanceNormalizationNchwEndToEndTest1(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefInstanceNormalizationNhwcEndToEndTest2)
+TEST_CASE("RefInstanceNormalizationNhwcEndToEndTest2")
 {
     InstanceNormalizationNhwcEndToEndTest2(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefInstanceNormalizationNchwEndToEndTest2)
+TEST_CASE("RefInstanceNormalizationNchwEndToEndTest2")
 {
     InstanceNormalizationNchwEndToEndTest2(defaultBackends);
 }
 
 // ArgMinMax
-BOOST_AUTO_TEST_CASE(RefArgMaxSimpleTest)
+TEST_CASE("RefArgMaxSimpleTest")
 {
     ArgMaxEndToEndSimple<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMaxSimpleUint8Test)
+TEST_CASE("RefArgMaxSimpleUint8Test")
 {
     ArgMaxEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMinSimpleTest)
+TEST_CASE("RefArgMinSimpleTest")
 {
     ArgMinEndToEndSimple<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMinSimpleUint8Test)
+TEST_CASE("RefArgMinSimpleUint8Test")
 {
     ArgMinEndToEndSimple<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMaxAxis0Test)
+TEST_CASE("RefArgMaxAxis0Test")
 {
     ArgMaxAxis0EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMaxAxis0Uint8Test)
+TEST_CASE("RefArgMaxAxis0Uint8Test")
 {
     ArgMaxAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMinAxis0Test)
+TEST_CASE("RefArgMinAxis0Test")
 {
     ArgMinAxis0EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMinAxis0Uint8Test)
+TEST_CASE("RefArgMinAxis0Uint8Test")
 {
 
     ArgMinAxis0EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMaxAxis1Test)
+TEST_CASE("RefArgMaxAxis1Test")
 {
     ArgMaxAxis1EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMaxAxis1Uint8Test)
+TEST_CASE("RefArgMaxAxis1Uint8Test")
 {
     ArgMaxAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMinAxis1Test)
+TEST_CASE("RefArgMinAxis1Test")
 {
     ArgMinAxis1EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMinAxis1Uint8Test)
+TEST_CASE("RefArgMinAxis1Uint8Test")
 {
 
     ArgMinAxis1EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMaxAxis2Test)
+TEST_CASE("RefArgMaxAxis2Test")
 {
     ArgMaxAxis2EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMaxAxis2Uint8Test)
+TEST_CASE("RefArgMaxAxis2Uint8Test")
 {
     ArgMaxAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMinAxis2Test)
+TEST_CASE("RefArgMinAxis2Test")
 {
     ArgMinAxis2EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMinAxis2Uint8Test)
+TEST_CASE("RefArgMinAxis2Uint8Test")
 {
 
     ArgMinAxis2EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMaxAxis3Test)
+TEST_CASE("RefArgMaxAxis3Test")
 {
     ArgMaxAxis3EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMaxAxis3Uint8Test)
+TEST_CASE("RefArgMaxAxis3Uint8Test")
 {
     ArgMaxAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMinAxis3Test)
+TEST_CASE("RefArgMinAxis3Test")
 {
     ArgMinAxis3EndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefArgMinAxis3Uint8Test)
+TEST_CASE("RefArgMinAxis3Uint8Test")
 {
 
     ArgMinAxis3EndToEnd<armnn::DataType::QAsymmU8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefQLstmEndToEndTest)
+TEST_CASE("RefQLstmEndToEndTest")
 {
     QLstmEndToEnd(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefRankEndToEndTest)
+TEST_CASE("RefRankEndToEndTest")
 {
     RankEndToEnd<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefRankEndToEndTestFloat16)
+TEST_CASE("RefRankEndToEndTestFloat16")
 {
     RankEndToEnd<armnn::DataType::Float16>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefRankEndToEndTestInt32)
+TEST_CASE("RefRankEndToEndTestInt32")
 {
     RankEndToEnd<armnn::DataType::Signed32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefRankEndToEndTestQAsymmS8)
+TEST_CASE("RefRankEndToEndTestQAsymmS8")
 {
     RankEndToEnd<armnn::DataType::QAsymmS8>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefRankEndToEndTestQSymmS16)
+TEST_CASE("RefRankEndToEndTestQSymmS16")
 {
     RankEndToEnd<armnn::DataType::QSymmS16>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefRankEndToEndTestQSymmS8)
+TEST_CASE("RefRankEndToEndTestQSymmS8")
 {
     RankEndToEnd<armnn::DataType::QSymmS8>(defaultBackends);
 }
 
 #if !defined(__ANDROID__)
 // Only run these tests on non Android platforms
-BOOST_AUTO_TEST_CASE(RefImportNonAlignedPointerTest)
+TEST_CASE("RefImportNonAlignedPointerTest")
 {
     ImportNonAlignedInputPointerTest(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefExportNonAlignedPointerTest)
+TEST_CASE("RefExportNonAlignedPointerTest")
 {
     ExportNonAlignedOutputPointerTest(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefImportAlignedPointerTest)
+TEST_CASE("RefImportAlignedPointerTest")
 {
     ImportAlignedPointerTest(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefImportOnlyWorkload)
+TEST_CASE("RefImportOnlyWorkload")
 {
     ImportOnlyWorkload(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefExportOnlyWorkload)
+TEST_CASE("RefExportOnlyWorkload")
 {
     ExportOnlyWorkload(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefImportAndExportWorkload)
+TEST_CASE("RefImportAndExportWorkload")
 {
     ImportAndExportWorkload(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefExportOutputWithSeveralOutputSlotConnectionsTest)
+TEST_CASE("RefExportOutputWithSeveralOutputSlotConnectionsTest")
 {
     ExportOutputWithSeveralOutputSlotConnectionsTest(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefStridedSliceInvalidSliceEndToEndTest)
+TEST_CASE("RefStridedSliceInvalidSliceEndToEndTest")
 {
     StridedSliceInvalidSliceEndToEndTest(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefThreadSafeFP32StridedSlicedEndToEndTest)
+TEST_CASE("RefThreadSafeFP32StridedSlicedEndToEndTest")
 {
     armnn::experimental::StridedSlicedEndToEndTest<armnn::DataType::Float32>(defaultBackends, 1);
 }
 
-BOOST_AUTO_TEST_CASE(RefAsyncFP32StridedSlicedMultiThreadedEndToEndTest)
+TEST_CASE("RefAsyncFP32StridedSlicedMultiThreadedEndToEndTest")
 {
     armnn::experimental::StridedSlicedMultiThreadedEndToEndTest<armnn::DataType::Float32>(defaultBackends);
 }
 
-BOOST_AUTO_TEST_CASE(RefAsyncFP32StridedSlicedScheduledMultiThreadedEndToEndTest)
+TEST_CASE("RefAsyncFP32StridedSlicedScheduledMultiThreadedEndToEndTest")
 {
     armnn::experimental::StridedSlicedEndToEndTest<armnn::DataType::Float32>(defaultBackends, 3);
 }
 #endif
 
-BOOST_AUTO_TEST_SUITE_END()
+}
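
For reference, a minimal standalone sketch (not part of this patch; the suite, case, and variable names are made up for illustration) of the structural mapping applied throughout these files: the BOOST_AUTO_TEST_SUITE / BOOST_AUTO_TEST_SUITE_END pair becomes a braced TEST_SUITE("...") { ... } block, BOOST_AUTO_TEST_CASE becomes TEST_CASE("..."), and BOOST_TEST / BOOST_CHECK become CHECK:

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN // makes this sketch standalone; not how the Arm NN test binary is necessarily configured
#include <doctest/doctest.h>

// BOOST_AUTO_TEST_SUITE(MigrationSketch) ... BOOST_AUTO_TEST_SUITE_END() becomes one braced block.
TEST_SUITE("MigrationSketch")
{
TEST_CASE("BasicAssertions")   // was BOOST_AUTO_TEST_CASE(BasicAssertions)
{
    int value = 42;
    CHECK(value == 42);        // BOOST_TEST / BOOST_CHECK -> CHECK
    CHECK_NOTHROW(value += 1); // BOOST_CHECK_NO_THROW     -> CHECK_NOTHROW
}
} // this closing brace replaces BOOST_AUTO_TEST_SUITE_END()
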
diff --git a/src/backends/reference/test/RefJsonPrinterTests.cpp b/src/backends/reference/test/RefJsonPrinterTests.cpp
index ff604a7..15b591a 100644
--- a/src/backends/reference/test/RefJsonPrinterTests.cpp
+++ b/src/backends/reference/test/RefJsonPrinterTests.cpp
@@ -7,16 +7,16 @@
 
 #include <backendsCommon/test/JsonPrinterTestImpl.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <vector>
 
-BOOST_AUTO_TEST_SUITE(RefJsonPrinter)
-
-BOOST_AUTO_TEST_CASE(SoftmaxProfilerJsonPrinterCpuRefTest)
+TEST_SUITE("RefJsonPrinter")
+{
+TEST_CASE("SoftmaxProfilerJsonPrinterCpuRefTest")
 {
     std::vector<armnn::BackendId> backends = {armnn::Compute::CpuRef};
     RunSoftmaxProfilerJsonPrinterTest(backends);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
diff --git a/src/backends/reference/test/RefLayerSupportTests.cpp b/src/backends/reference/test/RefLayerSupportTests.cpp
index a148706..1adc54e 100644
--- a/src/backends/reference/test/RefLayerSupportTests.cpp
+++ b/src/backends/reference/test/RefLayerSupportTests.cpp
@@ -13,7 +13,7 @@
 #include <backendsCommon/test/LayerTests.hpp>
 #include <backendsCommon/test/IsLayerSupportedTestImpl.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <string>
 
@@ -27,13 +27,14 @@
 
 } // anonymous namespace
 
-BOOST_AUTO_TEST_SUITE(RefLayerSupported)
-
-BOOST_AUTO_TEST_CASE(IsLayerSupportedLayerTypeMatches)
+TEST_SUITE("RefLayerSupported")
+{
+TEST_CASE("IsLayerSupportedLayerTypeMatches")
 {
     LayerTypeMatchesTest();
 }
-BOOST_AUTO_TEST_CASE(IsLayerSupportedReferenceAddition)
+
+TEST_CASE("IsLayerSupportedReferenceAddition")
 {
     armnn::TensorShape shape0 = {1,1,3,4};
     armnn::TensorShape shape1 = {4};
@@ -44,232 +45,232 @@
 
     armnn::RefLayerSupport supportChecker;
     std::string reasonNotSupported;
-    BOOST_CHECK(supportChecker.IsAdditionSupported(in0, in1, out, reasonNotSupported));
+    CHECK(supportChecker.IsAdditionSupported(in0, in1, out, reasonNotSupported));
 }
 
-BOOST_AUTO_TEST_CASE(IsLayerSupportedBFloat16Reference)
+TEST_CASE("IsLayerSupportedBFloat16Reference")
 {
     armnn::RefWorkloadFactory factory;
     IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::BFloat16>(&factory);
 }
 
-BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat16Reference)
+TEST_CASE("IsLayerSupportedFloat16Reference")
 {
     armnn::RefWorkloadFactory factory;
     IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::Float16>(&factory);
 }
 
-BOOST_AUTO_TEST_CASE(IsLayerSupportedFloat32Reference)
+TEST_CASE("IsLayerSupportedFloat32Reference")
 {
     armnn::RefWorkloadFactory factory;
     IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::Float32>(&factory);
 }
 
-BOOST_AUTO_TEST_CASE(IsLayerSupportedUint8Reference)
+TEST_CASE("IsLayerSupportedUint8Reference")
 {
     armnn::RefWorkloadFactory factory;
     IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QAsymmU8>(&factory);
 }
 
-BOOST_AUTO_TEST_CASE(IsLayerSupportedInt8Reference)
+TEST_CASE("IsLayerSupportedInt8Reference")
 {
     armnn::RefWorkloadFactory factory;
     IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QSymmS8>(&factory);
 }
 
-BOOST_AUTO_TEST_CASE(IsLayerSupportedInt16Reference)
+TEST_CASE("IsLayerSupportedInt16Reference")
 {
     armnn::RefWorkloadFactory factory;
     IsLayerSupportedTests<armnn::RefWorkloadFactory, armnn::DataType::QSymmS16>(&factory);
 }
 
-BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedReference)
+TEST_CASE("IsConvertFp16ToFp32SupportedReference")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp16ToFp32Layer,
       armnn::DataType::Float16, armnn::DataType::Float32>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedFp32InputReference)
+TEST_CASE("IsConvertFp16ToFp32SupportedFp32InputReference")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp16ToFp32Layer,
       armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float32 data type input");
+    CHECK(!result);
+    CHECK_EQ(reasonIfUnsupported, "Layer is not supported with float32 data type input");
 }
 
-BOOST_AUTO_TEST_CASE(IsConvertFp16ToFp32SupportedFp16OutputReference)
+TEST_CASE("IsConvertFp16ToFp32SupportedFp16OutputReference")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp16ToFp32Layer,
       armnn::DataType::Float16, armnn::DataType::Float16>(reasonIfUnsupported);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float16 data type output");
+    CHECK(!result);
+    CHECK_EQ(reasonIfUnsupported, "Layer is not supported with float16 data type output");
 }
 
-BOOST_AUTO_TEST_CASE(IsConvertBf16ToFp32SupportedReference)
+TEST_CASE("IsConvertBf16ToFp32SupportedReference")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertBf16ToFp32Layer,
       armnn::DataType::BFloat16, armnn::DataType::Float32>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_AUTO_TEST_CASE(IsConvertBf16ToFp32SupportedFp32InputReference)
+TEST_CASE("IsConvertBf16ToFp32SupportedFp32InputReference")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertBf16ToFp32Layer,
       armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Reference for ConvertBf16ToFp32 layer: input type not supported\n");
+    CHECK(!result);
+    CHECK_EQ(reasonIfUnsupported, "Reference for ConvertBf16ToFp32 layer: input type not supported\n");
 }
 
-BOOST_AUTO_TEST_CASE(IsConvertBf16ToFp32SupportedBf16OutputReference)
+TEST_CASE("IsConvertBf16ToFp32SupportedBf16OutputReference")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertBf16ToFp32Layer,
       armnn::DataType::BFloat16, armnn::DataType::BFloat16>(reasonIfUnsupported);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Reference for ConvertBf16ToFp32 layer: output type not supported\n");
+    CHECK(!result);
+    CHECK_EQ(reasonIfUnsupported, "Reference for ConvertBf16ToFp32 layer: output type not supported\n");
 }
 
-BOOST_AUTO_TEST_CASE(IsConvertFp32ToBf16SupportedReference)
+TEST_CASE("IsConvertFp32ToBf16SupportedReference")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToBf16Layer,
       armnn::DataType::Float32, armnn::DataType::BFloat16>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_AUTO_TEST_CASE(IsConvertFp32ToBf16SupportedBf16InputReference)
+TEST_CASE("IsConvertFp32ToBf16SupportedBf16InputReference")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToBf16Layer,
       armnn::DataType::BFloat16, armnn::DataType::BFloat16>(reasonIfUnsupported);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Reference for ConvertFp32ToBf16 layer: input type not supported\n");
+    CHECK(!result);
+    CHECK_EQ(reasonIfUnsupported, "Reference for ConvertFp32ToBf16 layer: input type not supported\n");
 }
 
-BOOST_AUTO_TEST_CASE(IsConvertFp32ToBf16SupportedFp32OutputReference)
+TEST_CASE("IsConvertFp32ToBf16SupportedFp32OutputReference")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToBf16Layer,
       armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Reference for ConvertFp32ToBf16 layer: output type not supported\n");
+    CHECK(!result);
+    CHECK_EQ(reasonIfUnsupported, "Reference for ConvertFp32ToBf16 layer: output type not supported\n");
 }
 
-BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedReference)
+TEST_CASE("IsConvertFp32ToFp16SupportedReference")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToFp16Layer,
       armnn::DataType::Float32, armnn::DataType::Float16>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedFp16InputReference)
+TEST_CASE("IsConvertFp32ToFp16SupportedFp16InputReference")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToFp16Layer,
       armnn::DataType::Float16, armnn::DataType::Float16>(reasonIfUnsupported);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float16 data type input");
+    CHECK(!result);
+    CHECK_EQ(reasonIfUnsupported, "Layer is not supported with float16 data type input");
 }
 
-BOOST_AUTO_TEST_CASE(IsConvertFp32ToFp16SupportedFp32OutputReference)
+TEST_CASE("IsConvertFp32ToFp16SupportedFp32OutputReference")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConvertLayerSupportedTests<armnn::RefWorkloadFactory, armnn::ConvertFp32ToFp16Layer,
       armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK_EQUAL(reasonIfUnsupported, "Layer is not supported with float32 data type output");
+    CHECK(!result);
+    CHECK_EQ(reasonIfUnsupported, "Layer is not supported with float32 data type output");
 }
 
-BOOST_AUTO_TEST_CASE(IsLayerSupportedMeanDimensionsReference)
+TEST_CASE("IsLayerSupportedMeanDimensionsReference")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsMeanLayerSupportedTests<armnn::RefWorkloadFactory,
             armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);
 
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_AUTO_TEST_CASE(IsLayerNotSupportedMeanDimensionsReference)
+TEST_CASE("IsLayerNotSupportedMeanDimensionsReference")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsMeanLayerNotSupportedTests<armnn::RefWorkloadFactory,
             armnn::DataType::Float32, armnn::DataType::Float32>(reasonIfUnsupported);
 
-    BOOST_CHECK(!result);
+    CHECK(!result);
 
-    BOOST_CHECK(reasonIfUnsupported.find(
+    CHECK(reasonIfUnsupported.find(
         "Reference Mean: Expected 4 dimensions but got 2 dimensions instead, for the 'output' tensor.")
         != std::string::npos);
 }
 
-BOOST_AUTO_TEST_CASE(IsConstantSupportedRef)
+TEST_CASE("IsConstantSupportedRef")
 {
     std::string reasonIfUnsupported;
 
     bool result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
             armnn::DataType::Float16>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
             armnn::DataType::Float32>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
             armnn::DataType::QAsymmU8>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
             armnn::DataType::Boolean>(reasonIfUnsupported);
-    BOOST_CHECK(!result);
+    CHECK(!result);
 
     result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
             armnn::DataType::QSymmS16>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
             armnn::DataType::QSymmS8>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
             armnn::DataType::QAsymmS8>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 
     result = IsConstantLayerSupportedTests<armnn::RefWorkloadFactory,
             armnn::DataType::BFloat16>(reasonIfUnsupported);
-    BOOST_CHECK(result);
+    CHECK(result);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
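
The layer-support conversions above also swap BOOST_CHECK_EQUAL for CHECK_EQ and BOOST_CHECK_THROW for CHECK_THROWS_AS. A minimal standalone sketch (not part of this patch; the string value and the willThrow lambda are hypothetical) of those two mappings:

#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN // standalone sketch only
#include <doctest/doctest.h>

#include <stdexcept>
#include <string>

TEST_CASE("AssertionMappingSketch")
{
    // BOOST_CHECK_EQUAL -> CHECK_EQ: compares with operator== and reports both
    // operands on failure, so mismatched reason strings are printed in full.
    std::string reason = "Layer is not supported with float32 data type input";
    CHECK_EQ(reason, "Layer is not supported with float32 data type input");

    // BOOST_CHECK_THROW -> CHECK_THROWS_AS: first argument is the expression to
    // evaluate, second is the expected exception type.
    auto willThrow = []() { throw std::runtime_error("boom"); };
    CHECK_THROWS_AS(willThrow(), std::runtime_error);
}
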
diff --git a/src/backends/reference/test/RefLayerTests.cpp b/src/backends/reference/test/RefLayerTests.cpp
index df48877..6bc6f8a 100644
--- a/src/backends/reference/test/RefLayerTests.cpp
+++ b/src/backends/reference/test/RefLayerTests.cpp
@@ -11,9 +11,8 @@
 
 #include <test/UnitTests.hpp>
 
-#include <boost/test/unit_test.hpp>
-
-BOOST_AUTO_TEST_SUITE(Compute_Reference)
+TEST_SUITE("Compute_Reference")
+{
 
 using namespace armnn;
 
@@ -1437,15 +1436,15 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(PermuteQSymm16ValueSet3Test, PermuteValueSet3Test<DataType::QSymmS16>)
 
 // Lstm
-BOOST_AUTO_TEST_CASE(LstmUtilsZeroVector) {
+TEST_CASE("LstmUtilsZeroVector") {
                               LstmUtilsZeroVectorTest(); }
-BOOST_AUTO_TEST_CASE(LstmUtilsMeanStddevNormalization) {
+TEST_CASE("LstmUtilsMeanStddevNormalization") {
                               LstmUtilsMeanStddevNormalizationNoneZeroInputTest();
                               LstmUtilsMeanStddevNormalizationAllZeroInputTest();
                               LstmUtilsMeanStddevNormalizationMixedZeroInputTest(); }
-BOOST_AUTO_TEST_CASE(LstmUtilsVectorBatchVectorCwiseProduct) {
+TEST_CASE("LstmUtilsVectorBatchVectorCwiseProduct") {
                               LstmUtilsVectorBatchVectorCwiseProductTest(); }
-BOOST_AUTO_TEST_CASE(LstmUtilsVectorBatchVectorAdd) {
+TEST_CASE("LstmUtilsVectorBatchVectorAdd") {
                               LstmUtilsVectorBatchVectorAddTest(); }
 
 ARMNN_AUTO_TEST_CASE_WITH_THF(LstmLayerFloat32WithCifgWithPeepholeNoProjection,
@@ -1873,43 +1872,37 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(Abs3dQuantisedSymm16, Abs3dTest<DataType::QSymmS16>)
 
 // Detection PostProcess
-BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsFloat)
+TEST_CASE("DetectionPostProcessRegularNmsFloat")
 {
     DetectionPostProcessRegularNmsFloatTest<RefWorkloadFactory>();
 }
-BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsFloat)
+TEST_CASE("DetectionPostProcessFastNmsFloat")
 {
     DetectionPostProcessFastNmsFloatTest<RefWorkloadFactory>();
 }
-BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsInt8)
+TEST_CASE("DetectionPostProcessRegularNmsInt8")
 {
-    DetectionPostProcessRegularNmsQuantizedTest<
-        RefWorkloadFactory, DataType::QAsymmS8>();
+    DetectionPostProcessRegularNmsQuantizedTest<RefWorkloadFactory, DataType::QAsymmS8>();
 }
-BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsInt8)
+TEST_CASE("DetectionPostProcessFastNmsInt8")
 {
-    DetectionPostProcessRegularNmsQuantizedTest<
-        RefWorkloadFactory, DataType::QAsymmS8>();
+    DetectionPostProcessRegularNmsQuantizedTest<RefWorkloadFactory, DataType::QAsymmS8>();
 }
-BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsUint8)
+TEST_CASE("DetectionPostProcessRegularNmsUint8")
 {
-    DetectionPostProcessRegularNmsQuantizedTest<
-        RefWorkloadFactory, DataType::QAsymmU8>();
+    DetectionPostProcessRegularNmsQuantizedTest<RefWorkloadFactory, DataType::QAsymmU8>();
 }
-BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsUint8)
+TEST_CASE("DetectionPostProcessFastNmsUint8")
 {
-    DetectionPostProcessRegularNmsQuantizedTest<
-        RefWorkloadFactory, DataType::QAsymmU8>();
+    DetectionPostProcessRegularNmsQuantizedTest<RefWorkloadFactory, DataType::QAsymmU8>();
 }
-BOOST_AUTO_TEST_CASE(DetectionPostProcessRegularNmsInt16)
+TEST_CASE("DetectionPostProcessRegularNmsInt16")
 {
-    DetectionPostProcessRegularNmsQuantizedTest<
-        RefWorkloadFactory, DataType::QSymmS16>();
+    DetectionPostProcessRegularNmsQuantizedTest<RefWorkloadFactory, DataType::QSymmS16>();
 }
-BOOST_AUTO_TEST_CASE(DetectionPostProcessFastNmsInt16)
+TEST_CASE("DetectionPostProcessFastNmsInt16")
 {
-    DetectionPostProcessFastNmsQuantizedTest<
-        RefWorkloadFactory, DataType::QSymmS16>();
+    DetectionPostProcessFastNmsQuantizedTest<RefWorkloadFactory, DataType::QSymmS16>();
 }
 
 // Dequantize
@@ -2271,4 +2264,4 @@
 ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceMinFloat32, ReduceMinSimpleTest<DataType::Float32>)
 ARMNN_AUTO_TEST_CASE_WITH_THF(ReduceMinNegativeAxisFloat32, ReduceMinNegativeAxisTest<DataType::Float32>)
 
-BOOST_AUTO_TEST_SUITE_END()
+}
\ No newline at end of file
diff --git a/src/backends/reference/test/RefMemoryManagerTests.cpp b/src/backends/reference/test/RefMemoryManagerTests.cpp
index 15b7c2a..960e7cc 100644
--- a/src/backends/reference/test/RefMemoryManagerTests.cpp
+++ b/src/backends/reference/test/RefMemoryManagerTests.cpp
@@ -5,47 +5,48 @@
 
 #include <reference/RefMemoryManager.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(RefMemoryManagerTests)
+TEST_SUITE("RefMemoryManagerTests")
+{
 using namespace armnn;
 using Pool = RefMemoryManager::Pool;
 
-BOOST_AUTO_TEST_CASE(ManageOneThing)
+TEST_CASE("ManageOneThing")
 {
     RefMemoryManager memoryManager;
 
     Pool* pool = memoryManager.Manage(10);
 
-    BOOST_CHECK(pool);
+    CHECK(pool);
 
     memoryManager.Acquire();
 
-    BOOST_CHECK(memoryManager.GetPointer(pool) != nullptr); // Yields a valid pointer
+    CHECK(memoryManager.GetPointer(pool) != nullptr); // Yields a valid pointer
 
     memoryManager.Release();
 }
 
-BOOST_AUTO_TEST_CASE(ManageTwoThings)
+TEST_CASE("ManageTwoThings")
 {
     RefMemoryManager memoryManager;
 
     Pool* pool1 = memoryManager.Manage(10);
     Pool* pool2 = memoryManager.Manage(5);
 
-    BOOST_CHECK(pool1);
-    BOOST_CHECK(pool2);
+    CHECK(pool1);
+    CHECK(pool2);
 
     memoryManager.Acquire();
 
     void *p1 = memoryManager.GetPointer(pool1);
     void *p2 = memoryManager.GetPointer(pool2);
 
-    BOOST_CHECK(p1);
-    BOOST_CHECK(p2);
-    BOOST_CHECK(p1 != p2);
+    CHECK(p1);
+    CHECK(p2);
+    CHECK(p1 != p2);
 
     memoryManager.Release();
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/reference/test/RefOptimizedNetworkTests.cpp b/src/backends/reference/test/RefOptimizedNetworkTests.cpp
index 2f25b6c..578d667 100644
--- a/src/backends/reference/test/RefOptimizedNetworkTests.cpp
+++ b/src/backends/reference/test/RefOptimizedNetworkTests.cpp
@@ -7,13 +7,13 @@
 #include <Network.hpp>
 
 #include <reference/RefWorkloadFactory.hpp>
-
-#include <boost/test/unit_test.hpp>
 #include <test/GraphUtils.hpp>
 
-BOOST_AUTO_TEST_SUITE(RefOptimizedNetwork)
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_CASE(OptimizeValidateCpuRefWorkloads)
+TEST_SUITE("RefOptimizedNetwork")
+{
+TEST_CASE("OptimizeValidateCpuRefWorkloads")
 {
     const armnn::TensorInfo desc({3, 5}, armnn::DataType::Float32);
 
@@ -73,17 +73,17 @@
     armnn::IOptimizedNetworkPtr optNet = armnn::Optimize(*net, backends, runtime->GetDeviceSpec());
     armnn::Graph& graph = GetGraphForTesting(optNet.get());
     graph.AllocateDynamicBuffers();
-    BOOST_CHECK(optNet);
+    CHECK(optNet);
 
     // Validates workloads.
     armnn::RefWorkloadFactory fact;
     for (auto&& layer : graph)
     {
-        BOOST_CHECK_NO_THROW(layer->CreateWorkload(fact));
+        CHECK_NOTHROW(layer->CreateWorkload(fact));
     }
 }
 
-BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsCpuRefPermuteLayer)
+TEST_CASE("OptimizeValidateWorkloadsCpuRefPermuteLayer")
 {
     // Create runtime in which test will run
     armnn::IRuntime::CreationOptions options;
@@ -115,11 +115,11 @@
 
     for (auto&& layer : graph)
     {
-        BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
+        CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
     }
 }
 
-BOOST_AUTO_TEST_CASE(OptimizeValidateWorkloadsCpuRefMeanLayer)
+TEST_CASE("OptimizeValidateWorkloadsCpuRefMeanLayer")
 {
     // Create runtime in which test will run
     armnn::IRuntime::CreationOptions options;
@@ -149,11 +149,11 @@
     graph.AllocateDynamicBuffers();
     for (auto&& layer : graph)
     {
-        BOOST_CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
+        CHECK(layer->GetBackendId() == armnn::Compute::CpuRef);
     }
 }
 
-BOOST_AUTO_TEST_CASE(DebugTestOnCpuRef)
+TEST_CASE("DebugTestOnCpuRef")
 {
     // build up the structure of the network
     armnn::INetworkPtr net(armnn::INetwork::Create());
@@ -192,14 +192,14 @@
     graph.AllocateDynamicBuffers();
 
     // Tests that all layers are present in the graph.
-    BOOST_TEST(graph.GetNumLayers() == 5);
+    CHECK(graph.GetNumLayers() == 5);
 
     // Tests that the vertices exist and have correct names.
-    BOOST_TEST(GraphHasNamedLayer(graph, "InputLayer"));
-    BOOST_TEST(GraphHasNamedLayer(graph, "DebugLayerAfterInputLayer_0"));
-    BOOST_TEST(GraphHasNamedLayer(graph, "ActivationLayer"));
-    BOOST_TEST(GraphHasNamedLayer(graph, "DebugLayerAfterActivationLayer_0"));
-    BOOST_TEST(GraphHasNamedLayer(graph, "OutputLayer"));
+    CHECK(GraphHasNamedLayer(graph, "InputLayer"));
+    CHECK(GraphHasNamedLayer(graph, "DebugLayerAfterInputLayer_0"));
+    CHECK(GraphHasNamedLayer(graph, "ActivationLayer"));
+    CHECK(GraphHasNamedLayer(graph, "DebugLayerAfterActivationLayer_0"));
+    CHECK(GraphHasNamedLayer(graph, "OutputLayer"));
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/backends/reference/test/RefRuntimeTests.cpp b/src/backends/reference/test/RefRuntimeTests.cpp
index 17d5816..6fd4910 100644
--- a/src/backends/reference/test/RefRuntimeTests.cpp
+++ b/src/backends/reference/test/RefRuntimeTests.cpp
@@ -9,14 +9,15 @@
 
 #include <backendsCommon/test/RuntimeTestImpl.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(RefRuntime)
 
 #ifdef ARMNN_LEAK_CHECKING_ENABLED
-BOOST_AUTO_TEST_CASE(RuntimeMemoryLeaksCpuRef)
+TEST_SUITE("RefRuntime")
 {
-    BOOST_TEST(ARMNN_LEAK_CHECKER_IS_ACTIVE());
+TEST_CASE("RuntimeMemoryLeaksCpuRef")
+{
+    CHECK(ARMNN_LEAK_CHECKER_IS_ACTIVE());
 
     armnn::IRuntime::CreationOptions options;
     armnn::RuntimeImpl runtime(options);
@@ -31,16 +32,16 @@
 
     {
         ARMNN_SCOPED_LEAK_CHECKER("LoadAndUnloadNetworkCpuRef");
-        BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
+        CHECK(ARMNN_NO_LEAKS_IN_SCOPE());
         // In the second run we check for all remaining memory
         // in use after the network was unloaded. If there is any
         // then it will be treated as a memory leak.
         CreateAndDropDummyNetwork(backends, runtime);
-        BOOST_TEST(ARMNN_NO_LEAKS_IN_SCOPE());
-        BOOST_TEST(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
-        BOOST_TEST(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
+        CHECK(ARMNN_NO_LEAKS_IN_SCOPE());
+        CHECK(ARMNN_BYTES_LEAKED_IN_SCOPE() == 0);
+        CHECK(ARMNN_OBJECTS_LEAKED_IN_SCOPE() == 0);
     }
 }
+}
 #endif
 
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
diff --git a/src/backends/reference/test/RefTensorHandleTests.cpp b/src/backends/reference/test/RefTensorHandleTests.cpp
index dadd1de..39f5a2a 100644
--- a/src/backends/reference/test/RefTensorHandleTests.cpp
+++ b/src/backends/reference/test/RefTensorHandleTests.cpp
@@ -5,12 +5,13 @@
 #include <reference/RefTensorHandle.hpp>
 #include <reference/RefTensorHandleFactory.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(RefTensorHandleTests)
+TEST_SUITE("RefTensorHandleTests")
+{
 using namespace armnn;
 
-BOOST_AUTO_TEST_CASE(AcquireAndRelease)
+TEST_CASE("AcquireAndRelease")
 {
     std::shared_ptr<RefMemoryManager> memoryManager = std::make_shared<RefMemoryManager>();
 
@@ -24,11 +25,11 @@
     {
         float* buffer = reinterpret_cast<float*>(handle.Map());
 
-        BOOST_CHECK(buffer != nullptr); // Yields a valid pointer
+        CHECK(buffer != nullptr); // Yields a valid pointer
 
         buffer[0] = 2.5f;
 
-        BOOST_CHECK(buffer[0] == 2.5f); // Memory is writable and readable
+        CHECK(buffer[0] == 2.5f); // Memory is writable and readable
 
     }
     memoryManager->Release();
@@ -37,16 +38,16 @@
     {
         float* buffer = reinterpret_cast<float*>(handle.Map());
 
-        BOOST_CHECK(buffer != nullptr); // Yields a valid pointer
+        CHECK(buffer != nullptr); // Yields a valid pointer
 
         buffer[0] = 3.5f;
 
-        BOOST_CHECK(buffer[0] == 3.5f); // Memory is writable and readable
+        CHECK(buffer[0] == 3.5f); // Memory is writable and readable
     }
     memoryManager->Release();
 }
 
-BOOST_AUTO_TEST_CASE(RefTensorHandleFactoryMemoryManaged)
+TEST_CASE("RefTensorHandleFactoryMemoryManaged")
 {
     std::shared_ptr<RefMemoryManager> memoryManager = std::make_shared<RefMemoryManager>();
     RefTensorHandleFactory handleFactory(memoryManager);
@@ -60,31 +61,31 @@
     memoryManager->Acquire();
     {
         float* buffer = reinterpret_cast<float*>(handle->Map());
-        BOOST_CHECK(buffer != nullptr); // Yields a valid pointer
+        CHECK(buffer != nullptr); // Yields a valid pointer
         buffer[0] = 1.5f;
         buffer[1] = 2.5f;
-        BOOST_CHECK(buffer[0] == 1.5f); // Memory is writable and readable
-        BOOST_CHECK(buffer[1] == 2.5f); // Memory is writable and readable
+        CHECK(buffer[0] == 1.5f); // Memory is writable and readable
+        CHECK(buffer[1] == 2.5f); // Memory is writable and readable
     }
     memoryManager->Release();
 
     memoryManager->Acquire();
     {
         float* buffer = reinterpret_cast<float*>(handle->Map());
-        BOOST_CHECK(buffer != nullptr); // Yields a valid pointer
+        CHECK(buffer != nullptr); // Yields a valid pointer
         buffer[0] = 3.5f;
         buffer[1] = 4.5f;
-        BOOST_CHECK(buffer[0] == 3.5f); // Memory is writable and readable
-        BOOST_CHECK(buffer[1] == 4.5f); // Memory is writable and readable
+        CHECK(buffer[0] == 3.5f); // Memory is writable and readable
+        CHECK(buffer[1] == 4.5f); // Memory is writable and readable
     }
     memoryManager->Release();
 
     float testPtr[2] = { 2.5f, 5.5f };
     // Cannot import as import is disabled
-    BOOST_CHECK(!handle->Import(static_cast<void*>(testPtr), MemorySource::Malloc));
+    CHECK(!handle->Import(static_cast<void*>(testPtr), MemorySource::Malloc));
 }
 
-BOOST_AUTO_TEST_CASE(RefTensorHandleFactoryImport)
+TEST_CASE("RefTensorHandleFactoryImport")
 {
     std::shared_ptr<RefMemoryManager> memoryManager = std::make_shared<RefMemoryManager>();
     RefTensorHandleFactory handleFactory(memoryManager);
@@ -97,25 +98,25 @@
     memoryManager->Acquire();
 
     // No buffer allocated when import is enabled
-    BOOST_CHECK_THROW(handle->Map(), armnn::NullPointerException);
+    CHECK_THROWS_AS(handle->Map(), armnn::NullPointerException);
 
     float testPtr[2] = { 2.5f, 5.5f };
     // Correctly import
-    BOOST_CHECK(handle->Import(static_cast<void*>(testPtr), MemorySource::Malloc));
+    CHECK(handle->Import(static_cast<void*>(testPtr), MemorySource::Malloc));
     float* buffer = reinterpret_cast<float*>(handle->Map());
-    BOOST_CHECK(buffer != nullptr); // Yields a valid pointer after import
-    BOOST_CHECK(buffer == testPtr); // buffer is pointing to testPtr
+    CHECK(buffer != nullptr); // Yields a valid pointer after import
+    CHECK(buffer == testPtr); // buffer is pointing to testPtr
     // Memory is writable and readable with correct value
-    BOOST_CHECK(buffer[0] == 2.5f);
-    BOOST_CHECK(buffer[1] == 5.5f);
+    CHECK(buffer[0] == 2.5f);
+    CHECK(buffer[1] == 5.5f);
     buffer[0] = 3.5f;
     buffer[1] = 10.0f;
-    BOOST_CHECK(buffer[0] == 3.5f);
-    BOOST_CHECK(buffer[1] == 10.0f);
+    CHECK(buffer[0] == 3.5f);
+    CHECK(buffer[1] == 10.0f);
     memoryManager->Release();
 }
 
-BOOST_AUTO_TEST_CASE(RefTensorHandleImport)
+TEST_CASE("RefTensorHandleImport")
 {
     TensorInfo info({ 1, 1, 2, 1 }, DataType::Float32);
     RefTensorHandle handle(info, static_cast<unsigned int>(MemorySource::Malloc));
@@ -124,24 +125,24 @@
     handle.Allocate();
 
     // No buffer allocated when import is enabled
-    BOOST_CHECK_THROW(handle.Map(), armnn::NullPointerException);
+    CHECK_THROWS_AS(handle.Map(), armnn::NullPointerException);
 
     float testPtr[2] = { 2.5f, 5.5f };
     // Correctly import
-    BOOST_CHECK(handle.Import(static_cast<void*>(testPtr), MemorySource::Malloc));
+    CHECK(handle.Import(static_cast<void*>(testPtr), MemorySource::Malloc));
     float* buffer = reinterpret_cast<float*>(handle.Map());
-    BOOST_CHECK(buffer != nullptr); // Yields a valid pointer after import
-    BOOST_CHECK(buffer == testPtr); // buffer is pointing to testPtr
+    CHECK(buffer != nullptr); // Yields a valid pointer after import
+    CHECK(buffer == testPtr); // buffer is pointing to testPtr
     // Memory is writable and readable with correct value
-    BOOST_CHECK(buffer[0] == 2.5f);
-    BOOST_CHECK(buffer[1] == 5.5f);
+    CHECK(buffer[0] == 2.5f);
+    CHECK(buffer[1] == 5.5f);
     buffer[0] = 3.5f;
     buffer[1] = 10.0f;
-    BOOST_CHECK(buffer[0] == 3.5f);
-    BOOST_CHECK(buffer[1] == 10.0f);
+    CHECK(buffer[0] == 3.5f);
+    CHECK(buffer[1] == 10.0f);
 }
 
-BOOST_AUTO_TEST_CASE(RefTensorHandleGetCapabilities)
+TEST_CASE("RefTensorHandleGetCapabilities")
 {
     std::shared_ptr<RefMemoryManager> memoryManager = std::make_shared<RefMemoryManager>();
     RefTensorHandleFactory handleFactory(memoryManager);
@@ -155,10 +156,10 @@
     std::vector<Capability> capabilities = handleFactory.GetCapabilities(input,
                                                                          output,
                                                                          CapabilityClass::PaddingRequired);
-    BOOST_CHECK(capabilities.empty());
+    CHECK(capabilities.empty());
 }
 
-BOOST_AUTO_TEST_CASE(RefTensorHandleSupportsInPlaceComputation)
+TEST_CASE("RefTensorHandleSupportsInPlaceComputation")
 {
     std::shared_ptr<RefMemoryManager> memoryManager = std::make_shared<RefMemoryManager>();
     RefTensorHandleFactory handleFactory(memoryManager);
@@ -167,7 +168,7 @@
     ARMNN_ASSERT(!(handleFactory.SupportsInPlaceComputation()));
 }
 
-BOOST_AUTO_TEST_CASE(TestManagedConstTensorHandle)
+TEST_CASE("TestManagedConstTensorHandle")
 {
     // Initialize arguments
     void* mem = nullptr;
@@ -178,31 +179,31 @@
 
     // Test managed handle is initialized with m_Mapped unset, and that it is set once Map() is called
     ManagedConstTensorHandle managedHandle(passThroughHandle);
-    BOOST_CHECK(!managedHandle.IsMapped());
+    CHECK(!managedHandle.IsMapped());
     managedHandle.Map();
-    BOOST_CHECK(managedHandle.IsMapped());
+    CHECK(managedHandle.IsMapped());
 
     // Test it can then be unmapped
     managedHandle.Unmap();
-    BOOST_CHECK(!managedHandle.IsMapped());
+    CHECK(!managedHandle.IsMapped());
 
     // Test member function
-    BOOST_CHECK(managedHandle.GetTensorInfo() == info);
+    CHECK(managedHandle.GetTensorInfo() == info);
 
     // Test that nullptr tensor handle doesn't get mapped
     ManagedConstTensorHandle managedHandleNull(nullptr);
-    BOOST_CHECK(!managedHandleNull.IsMapped());
-    BOOST_CHECK_THROW(managedHandleNull.Map(), armnn::Exception);
-    BOOST_CHECK(!managedHandleNull.IsMapped());
+    CHECK(!managedHandleNull.IsMapped());
+    CHECK_THROWS_AS(managedHandleNull.Map(), armnn::Exception);
+    CHECK(!managedHandleNull.IsMapped());
 
     // Check Unmap() when m_Mapped already false
     managedHandleNull.Unmap();
-    BOOST_CHECK(!managedHandleNull.IsMapped());
+    CHECK(!managedHandleNull.IsMapped());
 }
 
 #if !defined(__ANDROID__)
 // Only run these tests on non Android platforms
-BOOST_AUTO_TEST_CASE(CheckSourceType)
+TEST_CASE("CheckSourceType")
 {
     TensorInfo info({1}, DataType::Float32);
     RefTensorHandle handle(info, static_cast<unsigned int>(MemorySource::Malloc));
@@ -210,18 +211,18 @@
     int* testPtr = new int(4);
 
     // Not supported
-    BOOST_CHECK(!handle.Import(static_cast<void *>(testPtr), MemorySource::DmaBuf));
+    CHECK(!handle.Import(static_cast<void *>(testPtr), MemorySource::DmaBuf));
 
     // Not supported
-    BOOST_CHECK(!handle.Import(static_cast<void *>(testPtr), MemorySource::DmaBufProtected));
+    CHECK(!handle.Import(static_cast<void *>(testPtr), MemorySource::DmaBufProtected));
 
     // Supported
-    BOOST_CHECK(handle.Import(static_cast<void *>(testPtr), MemorySource::Malloc));
+    CHECK(handle.Import(static_cast<void *>(testPtr), MemorySource::Malloc));
 
     delete testPtr;
 }
 
-BOOST_AUTO_TEST_CASE(ReusePointer)
+TEST_CASE("ReusePointer")
 {
     TensorInfo info({1}, DataType::Float32);
     RefTensorHandle handle(info, static_cast<unsigned int>(MemorySource::Malloc));
@@ -231,12 +232,12 @@
     handle.Import(static_cast<void *>(testPtr), MemorySource::Malloc);
 
     // Reusing previously Imported pointer
-    BOOST_CHECK(handle.Import(static_cast<void *>(testPtr), MemorySource::Malloc));
+    CHECK(handle.Import(static_cast<void *>(testPtr), MemorySource::Malloc));
 
     delete testPtr;
 }
 
-BOOST_AUTO_TEST_CASE(MisalignedPointer)
+TEST_CASE("MisalignedPointer")
 {
     TensorInfo info({2}, DataType::Float32);
     RefTensorHandle handle(info, static_cast<unsigned int>(MemorySource::Malloc));
@@ -247,11 +248,11 @@
     // Increment pointer by 1 byte
     void* misalignedPtr = static_cast<void*>(reinterpret_cast<char*>(testPtr) + 1);
 
-    BOOST_CHECK(!handle.Import(misalignedPtr, MemorySource::Malloc));
+    CHECK(!handle.Import(misalignedPtr, MemorySource::Malloc));
 
     delete[] testPtr;
 }
 
 #endif
 
-BOOST_AUTO_TEST_SUITE_END()
+}
diff --git a/src/profiling/test/BufferTests.cpp b/src/profiling/test/BufferTests.cpp
index 7a92ee1..e3abe86 100644
--- a/src/profiling/test/BufferTests.cpp
+++ b/src/profiling/test/BufferTests.cpp
@@ -11,17 +11,17 @@
 
 #include <armnn/Exceptions.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 using namespace armnn::profiling;
 
-BOOST_AUTO_TEST_SUITE(BufferTests)
-
-BOOST_AUTO_TEST_CASE(PacketBufferTest0)
+TEST_SUITE("BufferTests")
+{
+TEST_CASE("PacketBufferTest0")
 {
     IPacketBufferPtr packetBuffer = std::make_unique<PacketBuffer>(512);
 
-    BOOST_TEST(packetBuffer->GetSize() == 0);
+    CHECK(packetBuffer->GetSize() == 0);
 
     // Write data to the buffer
     WriteUint32(packetBuffer, 0, 10);
@@ -33,7 +33,7 @@
     packetBuffer->Commit(16);
 
     // Size of buffer is equal to committed data
-    BOOST_TEST(packetBuffer->GetSize() == 16);
+    CHECK(packetBuffer->GetSize() == 16);
 
     // Read data from the buffer
     auto readBuffer = packetBuffer->GetReadableData();
@@ -43,23 +43,23 @@
     uint32_t readData3 = ReadUint32(readBuffer, 12);
 
     // Check that data is correct
-    BOOST_TEST(readData0 == 10);
-    BOOST_TEST(readData1 == 20);
-    BOOST_TEST(readData2 == 30);
-    BOOST_TEST(readData3 == 40);
+    CHECK(readData0 == 10);
+    CHECK(readData1 == 20);
+    CHECK(readData2 == 30);
+    CHECK(readData3 == 40);
 
     // Mark read
     packetBuffer->MarkRead();
 
     // Size of buffer becomes 0 after being marked as read
-    BOOST_TEST(packetBuffer->GetSize() == 0);
+    CHECK(packetBuffer->GetSize() == 0);
 }
 
-BOOST_AUTO_TEST_CASE(PacketBufferTest1)
+TEST_CASE("PacketBufferTest1")
 {
     IPacketBufferPtr packetBuffer = std::make_unique<PacketBuffer>(512);
 
-    BOOST_TEST(packetBuffer->GetSize() == 0);
+    CHECK(packetBuffer->GetSize() == 0);
 
     // Write data to the buffer using GetWritableData
     auto writeBuffer = packetBuffer->GetWritableData();
@@ -70,7 +70,7 @@
 
     packetBuffer->Commit(16);
 
-    BOOST_TEST(packetBuffer->GetSize() == 16);
+    CHECK(packetBuffer->GetSize() == 16);
 
     // Read data from the buffer
     auto readBuffer = packetBuffer->GetReadableData();
@@ -79,20 +79,21 @@
     uint32_t readData2 = ReadUint32(readBuffer, 8);
     uint32_t readData3 = ReadUint32(readBuffer, 12);
 
-    BOOST_TEST(readData0 == 10);
-    BOOST_TEST(readData1 == 20);
-    BOOST_TEST(readData2 == 30);
-    BOOST_TEST(readData3 == 40);
+    CHECK(readData0 == 10);
+    CHECK(readData1 == 20);
+    CHECK(readData2 == 30);
+    CHECK(readData3 == 40);
 
     packetBuffer->MarkRead();
 
-    BOOST_TEST(packetBuffer->GetSize() == 0);
+    CHECK(packetBuffer->GetSize() == 0);
 }
 
-BOOST_AUTO_TEST_CASE(PacketBufferReleaseTest) {
+TEST_CASE("PacketBufferReleaseTest")
+{
     IPacketBufferPtr packetBuffer = std::make_unique<PacketBuffer>(512);
 
-    BOOST_TEST(packetBuffer->GetSize() == 0);
+    CHECK(packetBuffer->GetSize() == 0);
 
     auto writeBuffer = packetBuffer->GetWritableData();
 
@@ -104,48 +105,48 @@
     packetBuffer->Release();
 
     // Size of buffer becomes 0 after release
-    BOOST_TEST(packetBuffer->GetSize() == 0);
+    CHECK(packetBuffer->GetSize() == 0);
 }
 
-BOOST_AUTO_TEST_CASE(PacketBufferCommitErrorTest)
+TEST_CASE("PacketBufferCommitErrorTest")
 {
     IPacketBufferPtr packetBuffer = std::make_unique<PacketBuffer>(8);
 
     // Cannot commit data bigger than the max size of the buffer
-    BOOST_CHECK_THROW(packetBuffer->Commit(16);, armnn::RuntimeException);
+    CHECK_THROWS_AS(packetBuffer->Commit(16), armnn::RuntimeException);
 }
 
-BOOST_AUTO_TEST_CASE(BufferReserveTest)
+TEST_CASE("BufferReserveTest")
 {
     BufferManager bufferManager(1, 512);
     unsigned int reservedSize = 0;
     auto packetBuffer = bufferManager.Reserve(512, reservedSize);
 
     // Successfully reserved the buffer with requested size
-    BOOST_TEST(reservedSize == 512);
-    BOOST_TEST(packetBuffer.get());
+    CHECK(reservedSize == 512);
+    CHECK(packetBuffer.get());
 }
 
-BOOST_AUTO_TEST_CASE(BufferReserveExceedingSpaceTest)
+TEST_CASE("BufferReserveExceedingSpaceTest")
 {
     BufferManager bufferManager(1, 512);
     unsigned int reservedSize = 0;
 
     // Cannot reserve buffer bigger than maximum buffer size
     auto reservedBuffer = bufferManager.Reserve(1024, reservedSize);
-    BOOST_TEST(reservedSize == 0);
-    BOOST_TEST(!reservedBuffer.get());
+    CHECK(reservedSize == 0);
+    CHECK(!reservedBuffer.get());
 }
 
-BOOST_AUTO_TEST_CASE(BufferExhaustionTest)
+TEST_CASE("BufferExhaustionTest")
 {
     BufferManager bufferManager(1, 512);
     unsigned int reservedSize = 0;
     auto packetBuffer = bufferManager.Reserve(512, reservedSize);
 
     // Successfully reserved the buffer with requested size
-    BOOST_TEST(reservedSize == 512);
-    BOOST_TEST(packetBuffer.get());
+    CHECK(reservedSize == 512);
+    CHECK(packetBuffer.get());
 
     // Cannot reserve buffer when buffer is not available
     // NOTE: because the buffer manager now has surge capacity of
@@ -154,43 +155,43 @@
     packetBuffer = bufferManager.Reserve(512, reservedSize);
 
     // Successfully reserved the second buffer with requested size
-    BOOST_TEST(reservedSize == 512);
-    BOOST_TEST(packetBuffer.get());
+    CHECK(reservedSize == 512);
+    CHECK(packetBuffer.get());
 
     packetBuffer = bufferManager.Reserve(512, reservedSize);
 
     // Successfully reserved the third buffer with requested size
-    BOOST_TEST(reservedSize == 512);
-    BOOST_TEST(packetBuffer.get());
+    CHECK(reservedSize == 512);
+    CHECK(packetBuffer.get());
 
     auto reservedBuffer = bufferManager.Reserve(512, reservedSize);
-    BOOST_TEST(reservedSize == 0);
-    BOOST_TEST(!reservedBuffer.get());
+    CHECK(reservedSize == 0);
+    CHECK(!reservedBuffer.get());
 }
 
-BOOST_AUTO_TEST_CASE(BufferReserveMultipleTest)
+TEST_CASE("BufferReserveMultipleTest")
 {
     BufferManager bufferManager(3, 512);
     unsigned int reservedSize0 = 0;
     auto packetBuffer0 = bufferManager.Reserve(512, reservedSize0);
 
     // Successfully reserved the buffer with requested size
-    BOOST_TEST(reservedSize0 == 512);
-    BOOST_TEST(packetBuffer0.get());
+    CHECK(reservedSize0 == 512);
+    CHECK(packetBuffer0.get());
 
     unsigned int reservedSize1 = 0;
     auto packetBuffer1 = bufferManager.Reserve(128, reservedSize1);
 
     // Successfully reserved the buffer with requested size
-    BOOST_TEST(reservedSize1 == 128);
-    BOOST_TEST(packetBuffer1.get());
+    CHECK(reservedSize1 == 128);
+    CHECK(packetBuffer1.get());
 
     unsigned int reservedSize2 = 0;
     auto packetBuffer2 = bufferManager.Reserve(512, reservedSize2);
 
     // Successfully reserved the buffer with requested size
-    BOOST_TEST(reservedSize2 == 512);
-    BOOST_TEST(packetBuffer2.get());
+    CHECK(reservedSize2 == 512);
+    CHECK(packetBuffer2.get());
 
     // NOTE: the buffer now has a surge capacity of initial size * 3
     //       so we can grab 9 of them prior to exhaustion now
@@ -201,33 +202,33 @@
         auto packetBuffer = bufferManager.Reserve(512, reservedSize);
 
         // Successfully reserved the third buffer with requested size
-        BOOST_TEST(reservedSize == 512);
-        BOOST_TEST(packetBuffer.get());
+        CHECK(reservedSize == 512);
+        CHECK(packetBuffer.get());
     }
 
     // Cannot reserve when buffer is not available
     unsigned int reservedSize3 = 0;
     auto reservedBuffer = bufferManager.Reserve(512, reservedSize3);
-    BOOST_TEST(reservedSize3 == 0);
-    BOOST_TEST(!reservedBuffer.get());
+    CHECK(reservedSize3 == 0);
+    CHECK(!reservedBuffer.get());
 }
 
-BOOST_AUTO_TEST_CASE(BufferReleaseTest)
+TEST_CASE("BufferReleaseTest")
 {
     BufferManager bufferManager(2, 512);
     unsigned int reservedSize0 = 0;
     auto packetBuffer0 = bufferManager.Reserve(512, reservedSize0);
 
     // Successfully reserved the buffer with requested size
-    BOOST_TEST(reservedSize0 == 512);
-    BOOST_TEST(packetBuffer0.get());
+    CHECK(reservedSize0 == 512);
+    CHECK(packetBuffer0.get());
 
     unsigned int reservedSize1 = 0;
     auto packetBuffer1 = bufferManager.Reserve(128, reservedSize1);
 
     // Successfully reserved the buffer with requested size
-    BOOST_TEST(reservedSize1 == 128);
-    BOOST_TEST(packetBuffer1.get());
+    CHECK(reservedSize1 == 128);
+    CHECK(packetBuffer1.get());
 
     // NOTE: now that we have a surge capacity of up to
     //       initial size * 3 we need to allocate four more
@@ -239,39 +240,39 @@
         auto packetBuffer = bufferManager.Reserve(512, reservedSize);
 
         // Successfully reserved the third buffer with requested size
-        BOOST_TEST(reservedSize == 512);
-        BOOST_TEST(packetBuffer.get());
+        CHECK(reservedSize == 512);
+        CHECK(packetBuffer.get());
     }
 
     // Cannot reserve when buffer is not available
     unsigned int reservedSize2 = 0;
     auto reservedBuffer = bufferManager.Reserve(512, reservedSize2);
-    BOOST_TEST(reservedSize2 == 0);
-    BOOST_TEST(!reservedBuffer.get());
+    CHECK(reservedSize2 == 0);
+    CHECK(!reservedBuffer.get());
 
     bufferManager.Release(packetBuffer0);
 
     // Buffer should become available after release
     auto packetBuffer2 = bufferManager.Reserve(128, reservedSize2);
 
-    BOOST_TEST(reservedSize2 == 128);
-    BOOST_TEST(packetBuffer2.get());
+    CHECK(reservedSize2 == 128);
+    CHECK(packetBuffer2.get());
 }
 
-BOOST_AUTO_TEST_CASE(BufferCommitTest)
+TEST_CASE("BufferCommitTest")
 {
     BufferManager bufferManager(2, 512);
     unsigned int reservedSize0 = 0;
     auto packetBuffer0 = bufferManager.Reserve(512, reservedSize0);
 
-    BOOST_TEST(reservedSize0 == 512);
-    BOOST_TEST(packetBuffer0.get());
+    CHECK(reservedSize0 == 512);
+    CHECK(packetBuffer0.get());
 
     unsigned int reservedSize1 = 0;
     auto packetBuffer1 = bufferManager.Reserve(128, reservedSize1);
 
-    BOOST_TEST(reservedSize1 == 128);
-    BOOST_TEST(packetBuffer1.get());
+    CHECK(reservedSize1 == 128);
+    CHECK(packetBuffer1.get());
 
     // NOTE: now that we have a surge capacity of up to
     //       initial size * 3 we need to allocate four more
@@ -283,43 +284,43 @@
         auto packetBuffer = bufferManager.Reserve(512, reservedSize);
 
         // Successfully reserved the third buffer with requested size
-        BOOST_TEST(reservedSize == 512);
-        BOOST_TEST(packetBuffer.get());
+        CHECK(reservedSize == 512);
+        CHECK(packetBuffer.get());
     }
 
     unsigned int reservedSize2 = 0;
     auto reservedBuffer = bufferManager.Reserve(512, reservedSize2);
-    BOOST_TEST(reservedSize2 == 0);
-    BOOST_TEST(!reservedBuffer.get());
+    CHECK(reservedSize2 == 0);
+    CHECK(!reservedBuffer.get());
 
     bufferManager.Commit(packetBuffer0, 256);
 
     // Buffer should become readable after commit
     auto packetBuffer2 = bufferManager.GetReadableBuffer();
-    BOOST_TEST(packetBuffer2.get());
-    BOOST_TEST(packetBuffer2->GetSize() == 256);
+    CHECK(packetBuffer2.get());
+    CHECK(packetBuffer2->GetSize() == 256);
 
     // Buffer not set back to available list after commit
     unsigned int reservedSize = 0;
     reservedBuffer = bufferManager.Reserve(512, reservedSize);
-    BOOST_TEST(reservedSize == 0);
-    BOOST_TEST(!reservedBuffer.get());
+    CHECK(reservedSize == 0);
+    CHECK(!reservedBuffer.get());
 }
 
-BOOST_AUTO_TEST_CASE(BufferMarkReadTest)
+TEST_CASE("BufferMarkReadTest")
 {
     BufferManager bufferManager(2, 512);
     unsigned int reservedSize0 = 0;
     auto packetBuffer0 = bufferManager.Reserve(512, reservedSize0);
 
-    BOOST_TEST(reservedSize0 == 512);
-    BOOST_TEST(packetBuffer0.get());
+    CHECK(reservedSize0 == 512);
+    CHECK(packetBuffer0.get());
 
     unsigned int reservedSize1 = 0;
     auto packetBuffer1 = bufferManager.Reserve(128, reservedSize1);
 
-    BOOST_TEST(reservedSize1 == 128);
-    BOOST_TEST(packetBuffer1.get());
+    CHECK(reservedSize1 == 128);
+    CHECK(packetBuffer1.get());
 
     // NOTE: now that we have a surge capacity of up to
     //       initial size * 3 we need to allocate four more
@@ -331,45 +332,45 @@
         auto packetBuffer = bufferManager.Reserve(512, reservedSize);
 
         // Successfully reserved the third buffer with requested size
-        BOOST_TEST(reservedSize == 512);
-        BOOST_TEST(packetBuffer.get());
+        CHECK(reservedSize == 512);
+        CHECK(packetBuffer.get());
     }
 
     // Cannot reserve when buffer is not available
     unsigned int reservedSize2 = 0;
     auto reservedBuffer = bufferManager.Reserve(512, reservedSize2);
-    BOOST_TEST(reservedSize2 == 0);
-    BOOST_TEST(!reservedBuffer.get());
+    CHECK(reservedSize2 == 0);
+    CHECK(!reservedBuffer.get());
 
     bufferManager.Commit(packetBuffer0, 256);
 
     // Buffer should become readable after commit
     auto packetBuffer2 = bufferManager.GetReadableBuffer();
-    BOOST_TEST(packetBuffer2.get());
-    BOOST_TEST(packetBuffer2->GetSize() == 256);
+    CHECK(packetBuffer2.get());
+    CHECK(packetBuffer2->GetSize() == 256);
 
     // Buffer not set back to available list after commit
     reservedBuffer = bufferManager.Reserve(512, reservedSize2);
-    BOOST_TEST(reservedSize2 == 0);
-    BOOST_TEST(!reservedBuffer.get());
+    CHECK(reservedSize2 == 0);
+    CHECK(!reservedBuffer.get());
 
     bufferManager.MarkRead(packetBuffer2);
 
     //Buffer should set back to available list after marked read and can be reserved
     auto readBuffer = bufferManager.GetReadableBuffer();
-    BOOST_TEST(!readBuffer);
+    CHECK(!readBuffer);
     unsigned int reservedSize3 = 0;
     auto packetBuffer3 = bufferManager.Reserve(56, reservedSize3);
 
-    BOOST_TEST(reservedSize3 == 56);
-    BOOST_TEST(packetBuffer3.get());
+    CHECK(reservedSize3 == 56);
+    CHECK(packetBuffer3.get());
 }
 
-BOOST_AUTO_TEST_CASE(ReadSwTraceMessageExceptionTest0)
+TEST_CASE("ReadSwTraceMessageExceptionTest0")
 {
     IPacketBufferPtr packetBuffer = std::make_unique<PacketBuffer>(512);
 
-    BOOST_TEST(packetBuffer->GetSize() == 0);
+    CHECK(packetBuffer->GetSize() == 0);
 
     // Write zero data to the buffer
     WriteUint32(packetBuffer, 0, 0);
@@ -382,16 +383,16 @@
 
     unsigned int uint32_t_size = sizeof(uint32_t);
     unsigned int offset = uint32_t_size;
-    BOOST_CHECK_THROW(arm::pipe::ReadSwTraceMessage(packetBuffer->GetReadableData(), offset, packetBuffer->GetSize()),
+    CHECK_THROWS_AS(arm::pipe::ReadSwTraceMessage(packetBuffer->GetReadableData(), offset, packetBuffer->GetSize()),
                       arm::pipe::ProfilingException);
 
 }
 
-BOOST_AUTO_TEST_CASE(ReadSwTraceMessageExceptionTest1)
+TEST_CASE("ReadSwTraceMessageExceptionTest1")
 {
     IPacketBufferPtr packetBuffer = std::make_unique<PacketBuffer>(512);
 
-    BOOST_TEST(packetBuffer->GetSize() == 0);
+    CHECK(packetBuffer->GetSize() == 0);
 
     // Write data to the buffer
     WriteUint32(packetBuffer, 0, 10);
@@ -404,9 +405,9 @@
 
     unsigned int uint32_t_size = sizeof(uint32_t);
     unsigned int offset = uint32_t_size;
-    BOOST_CHECK_THROW(arm::pipe::ReadSwTraceMessage(packetBuffer->GetReadableData(), offset, packetBuffer->GetSize()),
+    CHECK_THROWS_AS(arm::pipe::ReadSwTraceMessage(packetBuffer->GetReadableData(), offset, packetBuffer->GetSize()),
                       arm::pipe::ProfilingException);
 
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
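
For reference, the conversions in BufferTests.cpp follow a one-to-one macro mapping: BOOST_TEST/BOOST_CHECK become CHECK, BOOST_CHECK_THROW becomes CHECK_THROWS_AS, BOOST_CHECK_NO_THROW becomes CHECK_NOTHROW, and the BOOST_AUTO_TEST_SUITE/BOOST_AUTO_TEST_SUITE_END pair becomes a braced TEST_SUITE block. A minimal, self-contained sketch of the same pattern (using a hypothetical TinyBuffer type, not the ArmNN PacketBuffer):

    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
    #include <doctest/doctest.h>

    #include <cstddef>
    #include <stdexcept>
    #include <vector>

    // Hypothetical stand-in for a fixed-capacity packet buffer.
    class TinyBuffer
    {
    public:
        explicit TinyBuffer(std::size_t capacity) : m_Data(capacity) {}

        void Commit(std::size_t size)
        {
            if (size > m_Data.size())
            {
                throw std::runtime_error("commit exceeds capacity");
            }
            m_Size = size;
        }

        std::size_t GetSize() const { return m_Size; }

    private:
        std::vector<unsigned char> m_Data;
        std::size_t m_Size = 0;
    };

    TEST_SUITE("TinyBufferTests")
    {
    TEST_CASE("CommitWithinCapacity")
    {
        TinyBuffer buffer(8);
        CHECK_NOTHROW(buffer.Commit(8));   // replaces BOOST_CHECK_NO_THROW
        CHECK(buffer.GetSize() == 8);      // replaces BOOST_TEST / BOOST_CHECK
    }

    TEST_CASE("CommitBeyondCapacityThrows")
    {
        TinyBuffer buffer(8);
        // Replaces BOOST_CHECK_THROW(expr, exception_type).
        CHECK_THROWS_AS(buffer.Commit(16), std::runtime_error);
    }
    }
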
diff --git a/src/profiling/test/FileOnlyProfilingDecoratorTests.cpp b/src/profiling/test/FileOnlyProfilingDecoratorTests.cpp
index 813bb49..5827c0d 100644
--- a/src/profiling/test/FileOnlyProfilingDecoratorTests.cpp
+++ b/src/profiling/test/FileOnlyProfilingDecoratorTests.cpp
@@ -11,7 +11,7 @@
 #include <Runtime.hpp>
 #include "TestTimelinePacketHandler.hpp"
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <cstdio>
 #include <sstream>
@@ -33,9 +33,9 @@
     armnn::profiling::ProfilingService m_ProfilingService;
 };
 
-BOOST_AUTO_TEST_SUITE(FileOnlyProfilingDecoratorTests)
-
-BOOST_AUTO_TEST_CASE(TestFileOnlyProfiling)
+TEST_SUITE("FileOnlyProfilingDecoratorTests")
+{
+TEST_CASE("TestFileOnlyProfiling")
 {
     // Get all registered backends
     std::vector<BackendId> suitableBackends = GetSuitableBackendRegistered();
@@ -79,7 +79,7 @@
 
         // Load it into the runtime. It should succeed.
         armnn::NetworkId netId;
-        BOOST_TEST(runtime.LoadNetwork(netId, std::move(optNet)) == Status::Success);
+        CHECK(runtime.LoadNetwork(netId, std::move(optNet)) == Status::Success);
 
         // Creates structures for input & output.
         std::vector<float> inputData(16);
@@ -109,7 +109,7 @@
         for (auto &error : model.GetErrors()) {
             std::cout << error.what() << std::endl;
         }
-        BOOST_TEST(model.GetErrors().empty());
+        CHECK(model.GetErrors().empty());
         std::vector<std::string> desc = GetModelDescription(model);
         std::vector<std::string> expectedOutput;
         expectedOutput.push_back("Entity [0] name = input type = layer");
@@ -147,11 +147,11 @@
         expectedOutput.push_back("Entity [55] type = workload_execution");
         expectedOutput.push_back("   event: [59] class [start_of_life]");
         expectedOutput.push_back("   event: [61] class [end_of_life]");
-        BOOST_TEST(CompareOutput(desc, expectedOutput));
+        CHECK(CompareOutput(desc, expectedOutput));
     }
 }
 
-BOOST_AUTO_TEST_CASE(DumpOutgoingValidFileEndToEnd)
+TEST_CASE("DumpOutgoingValidFileEndToEnd")
 {
     // Get all registered backends
     std::vector<BackendId> suitableBackends = GetSuitableBackendRegistered();
@@ -162,7 +162,7 @@
         // Create a temporary file name.
         fs::path tempPath = armnnUtils::Filesystem::NamedTempFile("DumpOutgoingValidFileEndToEnd_CaptureFile.txt");
         // Make sure the file does not exist at this point
-        BOOST_CHECK(!fs::exists(tempPath));
+        CHECK(!fs::exists(tempPath));
 
         armnn::IRuntime::CreationOptions options;
         options.m_ProfilingOptions.m_EnableProfiling     = true;
@@ -202,7 +202,7 @@
 
         // Load it into the runtime. It should succeed.
         armnn::NetworkId netId;
-        BOOST_TEST(runtime.LoadNetwork(netId, std::move(optNet)) == Status::Success);
+        CHECK(runtime.LoadNetwork(netId, std::move(optNet)) == Status::Success);
 
         // Creates structures for input & output.
         std::vector<float> inputData(16);
@@ -231,13 +231,13 @@
         GetProfilingService(&runtime).ResetExternalProfilingOptions(options.m_ProfilingOptions, true);
 
         // The output file size should be greater than 0.
-        BOOST_CHECK(fs::file_size(tempPath) > 0);
+        CHECK(fs::file_size(tempPath) > 0);
 
         // NOTE: would be an interesting exercise to take this file and decode it
 
         // Delete the tmp file.
-        BOOST_CHECK(fs::remove(tempPath));
+        CHECK(fs::remove(tempPath));
     }
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
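
DumpOutgoingValidFileEndToEnd keeps the same temp-file lifecycle under doctest: assert the capture file does not exist, run the profiled network, then CHECK that the file is non-empty and can be removed. A minimal sketch of that lifecycle using plain C++17 <filesystem> (the real test uses armnnUtils::Filesystem::NamedTempFile and the ArmNN runtime; the file name below is made up):

    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
    #include <doctest/doctest.h>

    #include <filesystem>
    #include <fstream>

    namespace fs = std::filesystem;

    TEST_CASE("CaptureFileLifecycleSketch")
    {
        // Hypothetical capture file in the system temp directory.
        const fs::path tempPath = fs::temp_directory_path() / "doctest_capture_sketch.txt";
        fs::remove(tempPath);
        CHECK(!fs::exists(tempPath));   // replaces BOOST_CHECK(!fs::exists(...))

        {
            // Stands in for the profiling service dumping the outgoing stream.
            std::ofstream capture(tempPath);
            capture << "stream metadata placeholder";
        }

        CHECK(fs::file_size(tempPath) > 0);   // the output file should be non-empty
        CHECK(fs::remove(tempPath));          // clean up the temporary file
    }
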
diff --git a/src/profiling/test/ProfilingConnectionDumpToFileDecoratorTests.cpp b/src/profiling/test/ProfilingConnectionDumpToFileDecoratorTests.cpp
index 2ebba5d..c2fcf1c 100644
--- a/src/profiling/test/ProfilingConnectionDumpToFileDecoratorTests.cpp
+++ b/src/profiling/test/ProfilingConnectionDumpToFileDecoratorTests.cpp
@@ -12,7 +12,7 @@
 #include <fstream>
 #include <sstream>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 using namespace armnn::profiling;
 
@@ -74,27 +74,27 @@
 
 } // anonymous namespace
 
-BOOST_AUTO_TEST_SUITE(ProfilingConnectionDumpToFileDecoratorTests)
-
-BOOST_AUTO_TEST_CASE(DumpIncomingInvalidFile)
+TEST_SUITE("ProfilingConnectionDumpToFileDecoratorTests")
+{
+TEST_CASE("DumpIncomingInvalidFile")
 {
     armnn::IRuntime::CreationOptions::ExternalProfilingOptions options;
     options.m_IncomingCaptureFile = "/";
     options.m_OutgoingCaptureFile =  "";
     ProfilingConnectionDumpToFileDecorator decorator(std::make_unique<DummyProfilingConnection>(), options, false);
-    BOOST_CHECK_THROW(decorator.ReadPacket(0), armnn::RuntimeException);
+    CHECK_THROWS_AS(decorator.ReadPacket(0), armnn::RuntimeException);
 }
 
-BOOST_AUTO_TEST_CASE(DumpIncomingInvalidFileIgnoreErrors)
+TEST_CASE("DumpIncomingInvalidFileIgnoreErrors")
 {
     armnn::IRuntime::CreationOptions::ExternalProfilingOptions options;
     options.m_IncomingCaptureFile = "/";
     options.m_OutgoingCaptureFile =  "";
     ProfilingConnectionDumpToFileDecorator decorator(std::make_unique<DummyProfilingConnection>(), options, true);
-    BOOST_CHECK_NO_THROW(decorator.ReadPacket(0));
+    CHECK_NOTHROW(decorator.ReadPacket(0));
 }
 
-BOOST_AUTO_TEST_CASE(DumpIncomingValidFile)
+TEST_CASE("DumpIncomingValidFile")
 {
     fs::path fileName = armnnUtils::Filesystem::NamedTempFile("Armnn-DumpIncomingValidFileTest-TempFile");
 
@@ -106,7 +106,7 @@
 
     // NOTE: unique_ptr is needed here because operator=() is deleted for Packet
     std::unique_ptr<arm::pipe::Packet> packet;
-    BOOST_CHECK_NO_THROW(packet = std::make_unique<arm::pipe::Packet>(decorator.ReadPacket(0)));
+    CHECK_NOTHROW(packet = std::make_unique<arm::pipe::Packet>(decorator.ReadPacket(0)));
 
     decorator.Close();
 
@@ -116,33 +116,33 @@
     // check if the data read back from the dump file matches the original
     constexpr unsigned int bytesToSkip = 2u * sizeof(uint32_t); // skip header and packet length
     int diff = std::strncmp(data.data() + bytesToSkip, packetData, g_DataLength);
-    BOOST_CHECK(diff == 0);
+    CHECK(diff == 0);
     fs::remove(fileName);
 }
 
-BOOST_AUTO_TEST_CASE(DumpOutgoingInvalidFile)
+TEST_CASE("DumpOutgoingInvalidFile")
 {
     armnn::IRuntime::CreationOptions::ExternalProfilingOptions options;
     options.m_IncomingCaptureFile = "";
     options.m_OutgoingCaptureFile = "/";
     ProfilingConnectionDumpToFileDecorator decorator(std::make_unique<DummyProfilingConnection>(), options, false);
-    BOOST_CHECK_THROW(decorator.WritePacket(g_DataPtr, g_DataLength), armnn::RuntimeException);
+    CHECK_THROWS_AS(decorator.WritePacket(g_DataPtr, g_DataLength), armnn::RuntimeException);
 }
 
-BOOST_AUTO_TEST_CASE(DumpOutgoingInvalidFileIgnoreErrors)
+TEST_CASE("DumpOutgoingInvalidFileIgnoreErrors")
 {
     armnn::IRuntime::CreationOptions::ExternalProfilingOptions options;
     options.m_IncomingCaptureFile = "";
     options.m_OutgoingCaptureFile = "/";
 
     ProfilingConnectionDumpToFileDecorator decorator(std::make_unique<DummyProfilingConnection>(), options, true);
-    BOOST_CHECK_NO_THROW(decorator.WritePacket(g_DataPtr, g_DataLength));
+    CHECK_NOTHROW(decorator.WritePacket(g_DataPtr, g_DataLength));
 
     bool success = decorator.WritePacket(g_DataPtr, g_DataLength);
-    BOOST_CHECK(!success);
+    CHECK(!success);
 }
 
-BOOST_AUTO_TEST_CASE(DumpOutgoingValidFile)
+TEST_CASE("DumpOutgoingValidFile")
 {
     fs::path fileName = armnnUtils::Filesystem::NamedTempFile("Armnn-DumpOutgoingValidFileTest-TempFile");
 
@@ -153,8 +153,8 @@
     ProfilingConnectionDumpToFileDecorator decorator(std::make_unique<DummyProfilingConnection>(), options, false);
 
     bool success = false;
-    BOOST_CHECK_NO_THROW(success = decorator.WritePacket(g_DataPtr, g_DataLength));
-    BOOST_CHECK(success);
+    CHECK_NOTHROW(success = decorator.WritePacket(g_DataPtr, g_DataLength));
+    CHECK(success);
 
     decorator.Close();
 
@@ -162,8 +162,8 @@
 
     // check if the data read back from the dump file matches the original
     int diff = std::strncmp(data.data(), g_Data.data(), g_DataLength);
-    BOOST_CHECK(diff == 0);
+    CHECK(diff == 0);
     fs::remove(fileName);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
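
One detail worth noting in ProfilingConnectionDumpToFileDecoratorTests.cpp: CHECK_NOTHROW evaluates its whole argument, so the pattern CHECK_NOTHROW(success = decorator.WritePacket(...)) still performs the assignment, exactly as BOOST_CHECK_NO_THROW did. A small sketch with a hypothetical writer that either throws or swallows errors, mirroring the ignore-errors flag:

    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
    #include <doctest/doctest.h>

    #include <cstddef>
    #include <stdexcept>
    #include <string>

    // Hypothetical writer: an invalid path throws unless errors are ignored.
    class DumpWriter
    {
    public:
        DumpWriter(std::string path, bool ignoreErrors)
            : m_Valid(path != "/"), m_IgnoreErrors(ignoreErrors) {}

        bool WritePacket(const char* /*data*/, std::size_t length)
        {
            if (!m_Valid)
            {
                if (m_IgnoreErrors)
                {
                    return false;   // report failure without throwing
                }
                throw std::runtime_error("cannot open dump file");
            }
            return length > 0;
        }

    private:
        bool m_Valid;
        bool m_IgnoreErrors;
    };

    TEST_CASE("InvalidFileThrowsUnlessErrorsAreIgnored")
    {
        DumpWriter strict("/", /*ignoreErrors=*/false);
        CHECK_THROWS_AS(strict.WritePacket("abc", 3), std::runtime_error);

        DumpWriter lenient("/", /*ignoreErrors=*/true);
        bool success = true;
        // The assignment inside CHECK_NOTHROW still takes effect.
        CHECK_NOTHROW(success = lenient.WritePacket("abc", 3));
        CHECK(!success);
    }
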
diff --git a/src/profiling/test/ProfilingGuidTest.cpp b/src/profiling/test/ProfilingGuidTest.cpp
index d70e0d5..07a0985 100644
--- a/src/profiling/test/ProfilingGuidTest.cpp
+++ b/src/profiling/test/ProfilingGuidTest.cpp
@@ -8,75 +8,75 @@
 #include <armnn/Types.hpp>
 
 #include <set>
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 #include <fmt/format.h>
 #include <thread>
 
 using namespace armnn::profiling;
 
-BOOST_AUTO_TEST_SUITE(ProfilingGuidTests)
-
-BOOST_AUTO_TEST_CASE(GuidTest)
+TEST_SUITE("ProfilingGuidTests")
+{
+TEST_CASE("GuidTest")
 {
     ProfilingGuid guid0(0);
     ProfilingGuid guid1(1);
     ProfilingGuid guid2(1);
 
-    BOOST_TEST(guid0 != guid1);
-    BOOST_TEST(guid1 == guid2);
-    BOOST_TEST(guid0 < guid1);
-    BOOST_TEST(guid0 <= guid1);
-    BOOST_TEST(guid1 <= guid2);
-    BOOST_TEST(guid1 > guid0);
-    BOOST_TEST(guid1 >= guid0);
-    BOOST_TEST(guid1 >= guid2);
+    CHECK(guid0 != guid1);
+    CHECK(guid1 == guid2);
+    CHECK(guid0 < guid1);
+    CHECK(guid0 <= guid1);
+    CHECK(guid1 <= guid2);
+    CHECK(guid1 > guid0);
+    CHECK(guid1 >= guid0);
+    CHECK(guid1 >= guid2);
 }
 
-BOOST_AUTO_TEST_CASE(StaticGuidTest)
+TEST_CASE("StaticGuidTest")
 {
     ProfilingStaticGuid guid0(0);
     ProfilingStaticGuid guid1(1);
     ProfilingStaticGuid guid2(1);
 
-    BOOST_TEST(guid0 != guid1);
-    BOOST_TEST(guid1 == guid2);
-    BOOST_TEST(guid0 < guid1);
-    BOOST_TEST(guid0 <= guid1);
-    BOOST_TEST(guid1 <= guid2);
-    BOOST_TEST(guid1 > guid0);
-    BOOST_TEST(guid1 >= guid0);
-    BOOST_TEST(guid1 >= guid2);
+    CHECK(guid0 != guid1);
+    CHECK(guid1 == guid2);
+    CHECK(guid0 < guid1);
+    CHECK(guid0 <= guid1);
+    CHECK(guid1 <= guid2);
+    CHECK(guid1 > guid0);
+    CHECK(guid1 >= guid0);
+    CHECK(guid1 >= guid2);
 }
 
-BOOST_AUTO_TEST_CASE(DynamicGuidTest)
+TEST_CASE("DynamicGuidTest")
 {
     ProfilingDynamicGuid guid0(0);
     ProfilingDynamicGuid guid1(1);
     ProfilingDynamicGuid guid2(1);
 
-    BOOST_TEST(guid0 != guid1);
-    BOOST_TEST(guid1 == guid2);
-    BOOST_TEST(guid0 < guid1);
-    BOOST_TEST(guid0 <= guid1);
-    BOOST_TEST(guid1 <= guid2);
-    BOOST_TEST(guid1 > guid0);
-    BOOST_TEST(guid1 >= guid0);
-    BOOST_TEST(guid1 >= guid2);
+    CHECK(guid0 != guid1);
+    CHECK(guid1 == guid2);
+    CHECK(guid0 < guid1);
+    CHECK(guid0 <= guid1);
+    CHECK(guid1 <= guid2);
+    CHECK(guid1 > guid0);
+    CHECK(guid1 >= guid0);
+    CHECK(guid1 >= guid2);
 }
 
 void CheckStaticGuid(uint64_t guid, uint64_t expectedGuid)
 {
-    BOOST_TEST(guid == expectedGuid);
-    BOOST_TEST(guid >= MIN_STATIC_GUID);
+    CHECK(guid == expectedGuid);
+    CHECK(guid >= MIN_STATIC_GUID);
 }
 
 void CheckDynamicGuid(uint64_t guid, uint64_t expectedGuid)
 {
-    BOOST_TEST(guid == expectedGuid);
-    BOOST_TEST(guid < MIN_STATIC_GUID);
+    CHECK(guid == expectedGuid);
+    CHECK(guid < MIN_STATIC_GUID);
 }
 
-BOOST_AUTO_TEST_CASE(StaticGuidGeneratorCollisionTest)
+TEST_CASE("StaticGuidGeneratorCollisionTest")
 {
     ProfilingGuidGenerator generator;
     std::set<uint64_t> guids;
@@ -93,11 +93,11 @@
             // message rather than error in this case.
             if (guid == ProfilingGuid(armnn::profiling::MIN_STATIC_GUID))
             {
-                BOOST_WARN("MIN_STATIC_GUID returned more than once from GenerateStaticId.");
+                WARN("MIN_STATIC_GUID returned more than once from GenerateStaticId.");
             } 
             else
             {
-                BOOST_ERROR(fmt::format("GUID collision occurred: {} -> {}", str, guid));
+                FAIL(fmt::format("GUID collision occurred: {} -> {}", str, guid));
             }
             break;
         }
@@ -105,24 +105,24 @@
     }
 }
 
-BOOST_AUTO_TEST_CASE(StaticGuidGeneratorTest)
+TEST_CASE("StaticGuidGeneratorTest")
 {
     ProfilingGuidGenerator generator;
 
     ProfilingStaticGuid staticGuid0 = generator.GenerateStaticId("name");
     CheckStaticGuid(staticGuid0, LabelsAndEventClasses::NAME_GUID);
-    BOOST_TEST(staticGuid0 != generator.GenerateStaticId("Name"));
+    CHECK(staticGuid0 != generator.GenerateStaticId("Name"));
 
     ProfilingStaticGuid staticGuid1 = generator.GenerateStaticId("type");
     CheckStaticGuid(staticGuid1, LabelsAndEventClasses::TYPE_GUID);
-    BOOST_TEST(staticGuid1 != generator.GenerateStaticId("Type"));
+    CHECK(staticGuid1 != generator.GenerateStaticId("Type"));
 
     ProfilingStaticGuid staticGuid2 = generator.GenerateStaticId("index");
     CheckStaticGuid(staticGuid2, LabelsAndEventClasses::INDEX_GUID);
-    BOOST_TEST(staticGuid2 != generator.GenerateStaticId("Index"));
+    CHECK(staticGuid2 != generator.GenerateStaticId("Index"));
 }
 
-BOOST_AUTO_TEST_CASE(DynamicGuidGeneratorTest)
+TEST_CASE("DynamicGuidGeneratorTest")
 {
     ProfilingGuidGenerator generator;
 
@@ -133,7 +133,7 @@
     }
 }
 
-BOOST_AUTO_TEST_CASE (ProfilingGuidThreadTest)
+TEST_CASE("ProfilingGuidThreadTest")
 {
     ProfilingGuidGenerator profilingGuidGenerator;
 
@@ -154,7 +154,7 @@
     t3.join();
 
     uint64_t guid = profilingGuidGenerator.NextGuid();
-    BOOST_CHECK(guid == 3000u);
+    CHECK(guid == 3000u);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
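
In ProfilingGuidTest.cpp the logging macros map as BOOST_TEST_MESSAGE -> MESSAGE, BOOST_WARN -> WARN and BOOST_ERROR -> FAIL. In doctest, MESSAGE always logs, FAIL unconditionally fails and leaves the current test case, and WARN is a warning-level assertion that reports only when its expression evaluates false and never fails the run (so a non-empty string literal, as in the collision test, passes silently, much as BOOST_WARN did). A minimal sketch of the collision-test shape with a hypothetical sequential id source:

    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
    #include <doctest/doctest.h>

    #include <set>

    // Hypothetical sequential id source standing in for ProfilingGuidGenerator.
    static unsigned long long NextGuid()
    {
        static unsigned long long counter = 0;
        return counter++;
    }

    TEST_CASE("GuidUniquenessSketch")
    {
        std::set<unsigned long long> seen;
        for (int i = 0; i < 100; ++i)
        {
            const unsigned long long guid = NextGuid();
            if (!seen.insert(guid).second)
            {
                // Fails and exits the test case, like BOOST_ERROR.
                FAIL("GUID collision occurred: " << guid);
            }
        }

        // Warning-level: reported if false, but never fails the run.
        WARN(seen.size() == 100);

        // Always logged, like BOOST_TEST_MESSAGE.
        MESSAGE("generated " << seen.size() << " unique guids");

        CHECK(seen.size() == 100);
    }
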
diff --git a/src/profiling/test/ProfilingTestUtils.cpp b/src/profiling/test/ProfilingTestUtils.cpp
index faa86e5..0d8e9ef 100644
--- a/src/profiling/test/ProfilingTestUtils.cpp
+++ b/src/profiling/test/ProfilingTestUtils.cpp
@@ -7,6 +7,7 @@
 #include "ProfilingUtils.hpp"
 
 #include <armnn/Descriptors.hpp>
+#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/NumericCast.hpp>
 
 #include <LabelsAndEventClasses.hpp>
@@ -16,7 +17,7 @@
 
 #include <test/TestUtils.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 uint32_t GetStreamMetaDataPacketSize()
 {
@@ -84,16 +85,16 @@
     uint32_t timelineBinaryPacketClass       = (timelineBinaryPacketHeaderWord0 >> 19) & 0x0000007F;
     uint32_t timelineBinaryPacketType        = (timelineBinaryPacketHeaderWord0 >> 16) & 0x00000007;
     uint32_t timelineBinaryPacketStreamId    = (timelineBinaryPacketHeaderWord0 >>  0) & 0x00000007;
-    BOOST_CHECK(timelineBinaryPacketFamily   == 1);
-    BOOST_CHECK(timelineBinaryPacketClass    == 0);
-    BOOST_CHECK(timelineBinaryPacketType     == 1);
-    BOOST_CHECK(timelineBinaryPacketStreamId == 0);
+    CHECK(timelineBinaryPacketFamily   == 1);
+    CHECK(timelineBinaryPacketClass    == 0);
+    CHECK(timelineBinaryPacketType     == 1);
+    CHECK(timelineBinaryPacketStreamId == 0);
     offset += uint32_t_size;
     uint32_t timelineBinaryPacketHeaderWord1   = ReadUint32(readableData, offset);
     uint32_t timelineBinaryPacketSequenceNumber = (timelineBinaryPacketHeaderWord1 >> 24) & 0x00000001;
     uint32_t timelineBinaryPacketDataLength     = (timelineBinaryPacketHeaderWord1 >>  0) & 0x00FFFFFF;
-    BOOST_CHECK(timelineBinaryPacketSequenceNumber == 0);
-    BOOST_CHECK(timelineBinaryPacketDataLength     == packetDataLength);
+    CHECK(timelineBinaryPacketSequenceNumber == 0);
+    CHECK(timelineBinaryPacketDataLength     == packetDataLength);
     offset += uint32_t_size;
 }
 
@@ -111,27 +112,27 @@
 
     // Check the decl id
     uint32_t eventClassDeclId = ReadUint32(readableData, offset);
-    BOOST_CHECK(eventClassDeclId == 0);
+    CHECK(eventClassDeclId == 0);
 
     // Check the profiling GUID
     offset += uint32_t_size;
     uint64_t readProfilingGuid = ReadUint64(readableData, offset);
     if (guid.has_value())
     {
-        BOOST_CHECK(readProfilingGuid == guid.value());
+        CHECK(readProfilingGuid == guid.value());
     }
     else
     {
         armnn::profiling::ProfilingService profilingService;
-        BOOST_CHECK(readProfilingGuid == profilingService.GetStaticId(label));
+        CHECK(readProfilingGuid == profilingService.GetStaticId(label));
     }
 
     // Check the SWTrace label
     offset += uint64_t_size;
     uint32_t swTraceLabelLength = ReadUint32(readableData, offset);
-    BOOST_CHECK(swTraceLabelLength == label_size + 1);               // Label length including the null-terminator
+    CHECK(swTraceLabelLength == label_size + 1);               // Label length including the null-terminator
     offset += uint32_t_size;
-    BOOST_CHECK(std::memcmp(readableData + offset,                  // Offset to the label in the buffer
+    CHECK(std::memcmp(readableData + offset,                  // Offset to the label in the buffer
                                label.data(),                           // The original label
                                swTraceLabelLength - 1) == 0);          // The length of the label
 
@@ -155,16 +156,16 @@
 
     // Check the decl id
     uint32_t eventClassDeclId = ReadUint32(readableData, offset);
-    BOOST_CHECK(eventClassDeclId == 2);
+    CHECK(eventClassDeclId == 2);
 
     // Check the profiling GUID
     offset += uint32_t_size;
     uint64_t readProfilingGuid = ReadUint64(readableData, offset);
-    BOOST_CHECK(readProfilingGuid == guid);
+    CHECK(readProfilingGuid == guid);
 
     offset += uint64_t_size;
     uint64_t readProfiilngNameGuid = ReadUint64(readableData, offset);
-    BOOST_CHECK(readProfiilngNameGuid == nameGuid);
+    CHECK(readProfiilngNameGuid == nameGuid);
 
     // Update the offset to allow parsing to be continued after this function returns
     offset += uint64_t_size;
@@ -196,7 +197,7 @@
             relationshipTypeUint = 3;
             break;
         default:
-            BOOST_ERROR("Unknown relationship type");
+            FAIL("Unknown relationship type");
     }
 
     // Utils
@@ -205,23 +206,23 @@
 
     // Check the decl id
     uint32_t eventClassDeclId = ReadUint32(readableData, offset);
-    BOOST_CHECK(eventClassDeclId == 3);
+    CHECK(eventClassDeclId == 3);
 
     // Check the relationship type
     offset += uint32_t_size;
     uint32_t readRelationshipTypeUint = ReadUint32(readableData, offset);
-    BOOST_CHECK(readRelationshipTypeUint == relationshipTypeUint);
+    CHECK(readRelationshipTypeUint == relationshipTypeUint);
 
     // Check the relationship GUID
     offset += uint32_t_size;
     uint64_t readRelationshipGuid = ReadUint64(readableData, offset);
     if (relationshipGuid.has_value())
     {
-        BOOST_CHECK(readRelationshipGuid == relationshipGuid.value());
+        CHECK(readRelationshipGuid == relationshipGuid.value());
     }
     else
     {
-        BOOST_CHECK(readRelationshipGuid != ProfilingGuid(0));
+        CHECK(readRelationshipGuid != ProfilingGuid(0));
     }
 
     // Check the head GUID of the relationship
@@ -229,11 +230,11 @@
     uint64_t readHeadRelationshipGuid = ReadUint64(readableData, offset);
     if (headGuid.has_value())
     {
-        BOOST_CHECK(readHeadRelationshipGuid == headGuid.value());
+        CHECK(readHeadRelationshipGuid == headGuid.value());
     }
     else
     {
-        BOOST_CHECK(readHeadRelationshipGuid != ProfilingGuid(0));
+        CHECK(readHeadRelationshipGuid != ProfilingGuid(0));
     }
 
     // Check the tail GUID of the relationship
@@ -241,11 +242,11 @@
     uint64_t readTailRelationshipGuid = ReadUint64(readableData, offset);
     if (tailGuid.has_value())
     {
-        BOOST_CHECK(readTailRelationshipGuid == tailGuid.value());
+        CHECK(readTailRelationshipGuid == tailGuid.value());
     }
     else
     {
-        BOOST_CHECK(readTailRelationshipGuid != ProfilingGuid(0));
+        CHECK(readTailRelationshipGuid != ProfilingGuid(0));
     }
 
     // Check the attribute GUID of the relationship
@@ -253,11 +254,11 @@
     uint64_t readAttributeRelationshipGuid = ReadUint64(readableData, offset);
     if (attributeGuid.has_value())
     {
-        BOOST_CHECK(readAttributeRelationshipGuid == attributeGuid.value());
+        CHECK(readAttributeRelationshipGuid == attributeGuid.value());
     }
     else
     {
-        BOOST_CHECK(readAttributeRelationshipGuid == ProfilingGuid(0));
+        CHECK(readAttributeRelationshipGuid == ProfilingGuid(0));
     }
 
     // Update the offset to allow parsing to be continued after this function returns
@@ -277,7 +278,7 @@
     // Reading TimelineEntityClassBinaryPacket
     // Check the decl_id
     uint32_t entityDeclId = ReadUint32(readableData, offset);
-    BOOST_CHECK(entityDeclId == 1);
+    CHECK(entityDeclId == 1);
 
     // Check the profiling GUID
     offset += uint32_t_size;
@@ -285,11 +286,11 @@
 
     if (guid.has_value())
     {
-        BOOST_CHECK(readProfilingGuid == guid.value());
+        CHECK(readProfilingGuid == guid.value());
     }
     else
     {
-        BOOST_CHECK(readProfilingGuid != ProfilingGuid(0));
+        CHECK(readProfilingGuid != ProfilingGuid(0));
     }
 
     offset += uint64_t_size;
@@ -313,18 +314,18 @@
     // Reading TimelineEventBinaryPacket
     // Check the decl_id
     uint32_t entityDeclId = ReadUint32(readableData, offset);
-    BOOST_CHECK(entityDeclId == 4);
+    CHECK(entityDeclId == 4);
 
     // Check the timestamp
     offset += uint32_t_size;
     uint64_t readTimestamp = ReadUint64(readableData, offset);
     if (timestamp.has_value())
     {
-        BOOST_CHECK(readTimestamp == timestamp.value());
+        CHECK(readTimestamp == timestamp.value());
     }
     else
     {
-        BOOST_CHECK(readTimestamp != 0);
+        CHECK(readTimestamp != 0);
     }
 
     // Check the thread id
@@ -333,11 +334,11 @@
     ReadBytes(readableData, offset, ThreadIdSize, readThreadId.data());
     if (threadId.has_value())
     {
-        BOOST_CHECK(readThreadId == threadId.value());
+        CHECK(readThreadId == threadId.value());
     }
     else
     {
-        BOOST_CHECK(readThreadId == armnnUtils::Threads::GetCurrentThreadId());
+        CHECK(readThreadId == armnnUtils::Threads::GetCurrentThreadId());
     }
 
     // Check the event GUID
@@ -345,11 +346,11 @@
     uint64_t readEventGuid = ReadUint64(readableData, offset);
     if (eventGuid.has_value())
     {
-        BOOST_CHECK(readEventGuid == eventGuid.value());
+        CHECK(readEventGuid == eventGuid.value());
     }
     else
     {
-        BOOST_CHECK(readEventGuid != ProfilingGuid(0));
+        CHECK(readEventGuid != ProfilingGuid(0));
     }
 
     offset += uint64_t_size;
@@ -436,28 +437,28 @@
 
     // Load it into the runtime. It should success.
     armnn::NetworkId netId;
-    BOOST_TEST(runtime.LoadNetwork(netId, std::move(optNet)) == Status::Success);
+    CHECK(runtime.LoadNetwork(netId, std::move(optNet)) == Status::Success);
 
     profiling::BufferManager& bufferManager = profilingServiceHelper.GetProfilingBufferManager();
     auto readableBuffer = bufferManager.GetReadableBuffer();
 
     // Profiling is enabled, the post-optimisation structure should be created
-    BOOST_CHECK(readableBuffer != nullptr);
+    CHECK(readableBuffer != nullptr);
     unsigned int size = readableBuffer->GetSize();
 
     const unsigned char* readableData = readableBuffer->GetReadableData();
-    BOOST_CHECK(readableData != nullptr);
+    CHECK(readableData != nullptr);
 
     unsigned int offset = 0;
 
     // Verify Header
     VerifyTimelineHeaderBinary(readableData, offset, size - 8);
-    BOOST_TEST_MESSAGE("HEADER OK");
+    MESSAGE("HEADER OK");
 
     // Post-optimisation network
     // Network entity
     VerifyTimelineEntityBinaryPacketData(optNetGuid, readableData, offset);
-    BOOST_TEST_MESSAGE("NETWORK ENTITY OK");
+    MESSAGE("NETWORK ENTITY OK");
 
     // Entity - Type relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -467,7 +468,7 @@
                                                LabelsAndEventClasses::TYPE_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("NETWORK TYPE RELATIONSHIP OK");
+    MESSAGE("NETWORK TYPE RELATIONSHIP OK");
 
     // Network - START OF LIFE
     ProfilingGuid networkSolEventGuid = VerifyTimelineEventBinaryPacket(EmptyOptional(),
@@ -475,7 +476,7 @@
                                                                         EmptyOptional(),
                                                                         readableData,
                                                                         offset);
-    BOOST_TEST_MESSAGE("NETWORK START OF LIFE EVENT OK");
+    MESSAGE("NETWORK START OF LIFE EVENT OK");
 
     // Network - START OF LIFE event relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
@@ -485,7 +486,7 @@
                                                LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("NETWORK START OF LIFE RELATIONSHIP OK");
+    MESSAGE("NETWORK START OF LIFE RELATIONSHIP OK");
 
     // Process ID Label
     int processID = armnnUtils::Processes::GetCurrentId();
@@ -493,7 +494,7 @@
     ss << processID;
     std::string processIdLabel = ss.str();
     VerifyTimelineLabelBinaryPacketData(EmptyOptional(), processIdLabel, readableData, offset);
-    BOOST_TEST_MESSAGE("PROCESS ID LABEL OK");
+    MESSAGE("PROCESS ID LABEL OK");
 
     // Entity - Process ID relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -503,16 +504,16 @@
                                                LabelsAndEventClasses::PROCESS_ID_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("NETWORK PROCESS ID RELATIONSHIP OK");
+    MESSAGE("NETWORK PROCESS ID RELATIONSHIP OK");
 
     // Input layer
     // Input layer entity
     VerifyTimelineEntityBinaryPacketData(input->GetGuid(), readableData, offset);
-    BOOST_TEST_MESSAGE("INPUT ENTITY OK");
+    MESSAGE("INPUT ENTITY OK");
 
     // Name Entity
     ProfilingGuid inputLabelGuid = VerifyTimelineLabelBinaryPacketData(EmptyOptional(), "input", readableData, offset);
-    BOOST_TEST_MESSAGE("INPUT NAME LABEL OK");
+    MESSAGE("INPUT NAME LABEL OK");
 
     // Entity - Name relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -522,7 +523,7 @@
                                                LabelsAndEventClasses::NAME_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INPUT NAME RELATIONSHIP OK");
+    MESSAGE("INPUT NAME RELATIONSHIP OK");
 
     // Entity - Type relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -532,7 +533,7 @@
                                                LabelsAndEventClasses::TYPE_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INPUT TYPE RELATIONSHIP OK");
+    MESSAGE("INPUT TYPE RELATIONSHIP OK");
 
     // Network - Input layer relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -542,7 +543,7 @@
                                                LabelsAndEventClasses::CHILD_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("NETWORK - INPUT CHILD RELATIONSHIP OK");
+    MESSAGE("NETWORK - INPUT CHILD RELATIONSHIP OK");
 
     // Conv2d layer
     // Conv2d layer entity
@@ -551,7 +552,7 @@
     // Name entity
     ProfilingGuid conv2dNameLabelGuid = VerifyTimelineLabelBinaryPacketData(
         EmptyOptional(), "<Unnamed>", readableData, offset);
-    BOOST_TEST_MESSAGE("CONV2D NAME LABEL OK");
+    MESSAGE("CONV2D NAME LABEL OK");
 
     // Entity - Name relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -561,7 +562,7 @@
                                                LabelsAndEventClasses::NAME_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("CONV2D NAME RELATIONSHIP OK");
+    MESSAGE("CONV2D NAME RELATIONSHIP OK");
 
     // Entity - Type relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -571,7 +572,7 @@
                                                LabelsAndEventClasses::TYPE_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("CONV2D TYPE RELATIONSHIP OK");
+    MESSAGE("CONV2D TYPE RELATIONSHIP OK");
 
     // Network - Conv2d layer relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -581,7 +582,7 @@
                                                LabelsAndEventClasses::CHILD_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("NETWORK - CONV2D CHILD RELATIONSHIP OK");
+    MESSAGE("NETWORK - CONV2D CHILD RELATIONSHIP OK");
 
     // Input layer - Conv2d layer relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -591,12 +592,12 @@
                                                LabelsAndEventClasses::CONNECTION_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INPUT - CONV2D LAYER CONNECTION OK");
+    MESSAGE("INPUT - CONV2D LAYER CONNECTION OK");
 
     // Conv2d workload
     // Conv2d workload entity
     ProfilingGuid conv2DWorkloadGuid = VerifyTimelineEntityBinaryPacketData(EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("CONV2D WORKLOAD ENTITY OK");
+    MESSAGE("CONV2D WORKLOAD ENTITY OK");
 
     // Entity - Type relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -606,7 +607,7 @@
                                                LabelsAndEventClasses::TYPE_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("CONV2D WORKLOAD TYPE RELATIONSHIP OK");
+    MESSAGE("CONV2D WORKLOAD TYPE RELATIONSHIP OK");
 
     // BackendId entity
     ProfilingGuid backendIdLabelGuid = VerifyTimelineLabelBinaryPacketData(
@@ -620,7 +621,7 @@
                                                LabelsAndEventClasses::BACKENDID_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("CONV2D WORKLOAD BACKEND ID RELATIONSHIP OK");
+    MESSAGE("CONV2D WORKLOAD BACKEND ID RELATIONSHIP OK");
 
 
     // Conv2d layer - Conv2d workload relationship
@@ -631,17 +632,17 @@
                                                LabelsAndEventClasses::CHILD_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("CONV2D LAYER - WORKLOAD CHILD RELATIONSHIP OK");
+    MESSAGE("CONV2D LAYER - WORKLOAD CHILD RELATIONSHIP OK");
 
     // Abs layer
     // Abs layer entity
     VerifyTimelineEntityBinaryPacketData(abs->GetGuid(), readableData, offset);
-    BOOST_TEST_MESSAGE("ABS ENTITY OK");
+    MESSAGE("ABS ENTITY OK");
 
     // Name entity
     ProfilingGuid absLabelGuid = VerifyTimelineLabelBinaryPacketData(
         EmptyOptional(), "abs", readableData, offset);
-    BOOST_TEST_MESSAGE("ABS NAME LABEL OK");
+    MESSAGE("ABS NAME LABEL OK");
 
     // Entity - Name relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -651,7 +652,7 @@
                                                LabelsAndEventClasses::NAME_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("ABS LAYER - NAME RELATIONSHIP OK");
+    MESSAGE("ABS LAYER - NAME RELATIONSHIP OK");
 
     // Entity - Type relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -661,7 +662,7 @@
                                                LabelsAndEventClasses::TYPE_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("ABS LAYER TYPE RELATIONSHIP OK");
+    MESSAGE("ABS LAYER TYPE RELATIONSHIP OK");
 
     // Network - Abs layer relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -671,7 +672,7 @@
                                                LabelsAndEventClasses::CHILD_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("NETWORK - ABS LAYER CHILD RELATIONSHIP OK");
+    MESSAGE("NETWORK - ABS LAYER CHILD RELATIONSHIP OK");
 
     // Conv2d layer - Abs layer relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -681,12 +682,12 @@
                                                LabelsAndEventClasses::CONNECTION_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("CONV2D LAYER - ABS LAYER CONNECTION OK");
+    MESSAGE("CONV2D LAYER - ABS LAYER CONNECTION OK");
 
     // Abs workload
     // Abs workload entity
     ProfilingGuid absWorkloadGuid = VerifyTimelineEntityBinaryPacketData(EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("ABS WORKLOAD ENTITY OK");
+    MESSAGE("ABS WORKLOAD ENTITY OK");
 
     // Entity - Type relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -696,11 +697,11 @@
                                                LabelsAndEventClasses::TYPE_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("ABS WORKLAD TYPE RELATIONSHIP OK");
+    MESSAGE("ABS WORKLAD TYPE RELATIONSHIP OK");
 
     // BackendId entity
     VerifyTimelineLabelBinaryPacketData(EmptyOptional(), backendId.Get(), readableData, offset);
-    BOOST_TEST_MESSAGE("BACKEND ID LABEL OK");
+    MESSAGE("BACKEND ID LABEL OK");
 
     // Entity - BackendId relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -710,7 +711,7 @@
                                                LabelsAndEventClasses::BACKENDID_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("ABS WORKLOAD BACKEND ID RELATIONSHIP OK");
+    MESSAGE("ABS WORKLOAD BACKEND ID RELATIONSHIP OK");
 
     // Abs layer - Abs workload relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -720,12 +721,12 @@
                                                LabelsAndEventClasses::CHILD_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("ABS LAYER - WORKLOAD CHILD RELATIONSHIP OK");
+    MESSAGE("ABS LAYER - WORKLOAD CHILD RELATIONSHIP OK");
 
     // Output layer
     // Output layer entity
     VerifyTimelineEntityBinaryPacketData(output->GetGuid(), readableData, offset);
-    BOOST_TEST_MESSAGE("OUTPUT LAYER ENTITY OK");
+    MESSAGE("OUTPUT LAYER ENTITY OK");
 
     // Name entity
     ProfilingGuid outputLabelGuid = VerifyTimelineLabelBinaryPacketData(
@@ -739,7 +740,7 @@
                                                LabelsAndEventClasses::NAME_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("OUTPUT LAYER NAME RELATIONSHIP OK");
+    MESSAGE("OUTPUT LAYER NAME RELATIONSHIP OK");
 
     // Entity - Type relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -749,7 +750,7 @@
                                                LabelsAndEventClasses::TYPE_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("OUTPUT LAYER TYPE RELATIONSHIP OK");
+    MESSAGE("OUTPUT LAYER TYPE RELATIONSHIP OK");
 
     // Network - Output layer relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -759,7 +760,7 @@
                                                LabelsAndEventClasses::CHILD_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("NETWORK - OUTPUT LAYER CHILD RELATIONSHIP OK");
+    MESSAGE("NETWORK - OUTPUT LAYER CHILD RELATIONSHIP OK");
 
     // Abs layer - Output layer relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -769,7 +770,7 @@
                                                LabelsAndEventClasses::CONNECTION_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("ABS LAYER - OUTPUT LAYER CONNECTION OK");
+    MESSAGE("ABS LAYER - OUTPUT LAYER CONNECTION OK");
 
     bufferManager.MarkRead(readableBuffer);
 
@@ -791,33 +792,33 @@
 
     // Get readable buffer for input workload
     auto inputReadableBuffer = bufferManager.GetReadableBuffer();
-    BOOST_CHECK(inputReadableBuffer != nullptr);
+    CHECK(inputReadableBuffer != nullptr);
 
     // Get readable buffer for output workload
     auto outputReadableBuffer = bufferManager.GetReadableBuffer();
-    BOOST_CHECK(outputReadableBuffer != nullptr);
+    CHECK(outputReadableBuffer != nullptr);
 
     // Get readable buffer for inference timeline
     auto inferenceReadableBuffer = bufferManager.GetReadableBuffer();
-    BOOST_CHECK(inferenceReadableBuffer != nullptr);
+    CHECK(inferenceReadableBuffer != nullptr);
 
     // Validate input workload data
     size = inputReadableBuffer->GetSize();
-    BOOST_CHECK(size == 164);
+    CHECK(size == 164);
 
     readableData = inputReadableBuffer->GetReadableData();
-    BOOST_CHECK(readableData != nullptr);
+    CHECK(readableData != nullptr);
 
     offset = 0;
 
     // Verify Header
     VerifyTimelineHeaderBinary(readableData, offset, 156);
-    BOOST_TEST_MESSAGE("INPUT WORKLOAD HEADER OK");
+    MESSAGE("INPUT WORKLOAD HEADER OK");
 
     // Input workload
     // Input workload entity
     ProfilingGuid inputWorkloadGuid = VerifyTimelineEntityBinaryPacketData(EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("INPUT WORKLOAD TYPE RELATIONSHIP OK");
+    MESSAGE("INPUT WORKLOAD TYPE RELATIONSHIP OK");
 
     // Entity - Type relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -827,7 +828,7 @@
                                                LabelsAndEventClasses::TYPE_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INPUT WORKLOAD TYPE RELATIONSHIP OK");
+    MESSAGE("INPUT WORKLOAD TYPE RELATIONSHIP OK");
 
     // BackendId entity
     VerifyTimelineLabelBinaryPacketData(EmptyOptional(), backendId.Get(), readableData, offset);
@@ -840,7 +841,7 @@
                                                LabelsAndEventClasses::BACKENDID_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INPUT WORKLOAD BACKEND ID RELATIONSHIP OK");
+    MESSAGE("INPUT WORKLOAD BACKEND ID RELATIONSHIP OK");
 
     // Input layer - Input workload relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -850,27 +851,27 @@
                                                LabelsAndEventClasses::CHILD_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INPUT LAYER - INPUT WORKLOAD CHILD RELATIONSHIP OK");
+    MESSAGE("INPUT LAYER - INPUT WORKLOAD CHILD RELATIONSHIP OK");
 
     bufferManager.MarkRead(inputReadableBuffer);
 
     // Validate output workload data
     size = outputReadableBuffer->GetSize();
-    BOOST_CHECK(size == 164);
+    CHECK(size == 164);
 
     readableData = outputReadableBuffer->GetReadableData();
-    BOOST_CHECK(readableData != nullptr);
+    CHECK(readableData != nullptr);
 
     offset = 0;
 
     // Verify Header
     VerifyTimelineHeaderBinary(readableData, offset, 156);
-    BOOST_TEST_MESSAGE("OUTPUT WORKLOAD HEADER OK");
+    MESSAGE("OUTPUT WORKLOAD HEADER OK");
 
     // Output workload
     // Output workload entity
     ProfilingGuid outputWorkloadGuid = VerifyTimelineEntityBinaryPacketData(EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("OUTPUT WORKLOAD ENTITY OK");
+    MESSAGE("OUTPUT WORKLOAD ENTITY OK");
 
     // Entity - Type relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -880,11 +881,11 @@
                                                LabelsAndEventClasses::TYPE_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("OUTPUT WORKLOAD TYPE RELATIONSHIP OK");
+    MESSAGE("OUTPUT WORKLOAD TYPE RELATIONSHIP OK");
 
     // BackendId entity
     VerifyTimelineLabelBinaryPacketData(EmptyOptional(), backendId.Get(), readableData, offset);
-    BOOST_TEST_MESSAGE("OUTPUT WORKLOAD LABEL OK");
+    MESSAGE("OUTPUT WORKLOAD LABEL OK");
 
     // Entity - BackendId relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -894,7 +895,7 @@
                                                LabelsAndEventClasses::BACKENDID_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("OUTPUT WORKLOAD BACKEND ID RELATIONSHIP OK");
+    MESSAGE("OUTPUT WORKLOAD BACKEND ID RELATIONSHIP OK");
 
     // Output layer - Output workload relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -904,28 +905,28 @@
                                                LabelsAndEventClasses::CHILD_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("OUTPUT LAYER - OUTPUT WORKLOAD CHILD RELATIONSHIP OK");
+    MESSAGE("OUTPUT LAYER - OUTPUT WORKLOAD CHILD RELATIONSHIP OK");
 
     bufferManager.MarkRead(outputReadableBuffer);
 
     // Validate inference data
     size = inferenceReadableBuffer->GetSize();
 
-    BOOST_CHECK(size == 1228 + 10 * ThreadIdSize);
+    CHECK(size == 1228 + 10 * ThreadIdSize);
 
     readableData = inferenceReadableBuffer->GetReadableData();
-    BOOST_CHECK(readableData != nullptr);
+    CHECK(readableData != nullptr);
 
     offset = 0;
 
     // Verify Header
     VerifyTimelineHeaderBinary(readableData, offset, 1220 + 10 * ThreadIdSize);
-    BOOST_TEST_MESSAGE("INFERENCE HEADER OK");
+    MESSAGE("INFERENCE HEADER OK");
 
     // Inference timeline trace
     // Inference entity
     ProfilingGuid inferenceGuid = VerifyTimelineEntityBinaryPacketData(EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("INFERENCE ENTITY OK");
+    MESSAGE("INFERENCE ENTITY OK");
 
     // Entity - Type relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -935,7 +936,7 @@
                                                LabelsAndEventClasses::TYPE_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INFERENCE TYPE RELATIONSHIP OK");
+    MESSAGE("INFERENCE TYPE RELATIONSHIP OK");
 
     // Network - Inference relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -945,13 +946,13 @@
                                                LabelsAndEventClasses::EXECUTION_OF_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("NETWORK - INFERENCE EXECUTION_OF RELATIONSHIP OK");
+    MESSAGE("NETWORK - INFERENCE EXECUTION_OF RELATIONSHIP OK");
 
     // Start Inference life
     // Event packet - timeline, threadId, eventGuid
     ProfilingGuid inferenceEventGuid = VerifyTimelineEventBinaryPacket(
         EmptyOptional(), EmptyOptional(), EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("INFERENCE START OF LIFE EVENT OK");
+    MESSAGE("INFERENCE START OF LIFE EVENT OK");
 
     // Inference - event relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
@@ -961,14 +962,14 @@
                                                LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INFERENCE START OF LIFE RELATIONSHIP OK");
+    MESSAGE("INFERENCE START OF LIFE RELATIONSHIP OK");
 
     // Execution
     // Input workload execution
     // Input workload execution entity
     ProfilingGuid inputWorkloadExecutionGuid = VerifyTimelineEntityBinaryPacketData(
         EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("INPUT WORKLOAD EXECUTION ENTITY OK");
+    MESSAGE("INPUT WORKLOAD EXECUTION ENTITY OK");
 
     // Entity - Type relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -978,7 +979,7 @@
                                                LabelsAndEventClasses::TYPE_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INPUT WORKLOAD EXECUTION TYPE RELATIONSHIP OK");
+    MESSAGE("INPUT WORKLOAD EXECUTION TYPE RELATIONSHIP OK");
 
     // Inference - Workload execution relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -988,7 +989,7 @@
                                                LabelsAndEventClasses::CHILD_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INPUT WORKLOAD - INPUT WORKLOAD EXECUTION RELATIONSHIP OK");
+    MESSAGE("INPUT WORKLOAD - INPUT WORKLOAD EXECUTION RELATIONSHIP OK");
 
     // Workload - Workload execution relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -998,13 +999,13 @@
                                                LabelsAndEventClasses::EXECUTION_OF_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INPUT WORKLOAD - INPUT WORKLOAD EXECUTION RELATIONSHIP OK");
+    MESSAGE("INPUT WORKLOAD - INPUT WORKLOAD EXECUTION RELATIONSHIP OK");
 
     // Start Input workload execution life
     // Event packet - timeline, threadId, eventGuid
     ProfilingGuid inputWorkloadExecutionSOLEventId = VerifyTimelineEventBinaryPacket(
         EmptyOptional(), EmptyOptional(), EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("INPUT WORKLOAD EXECUTION - START OF LIFE EVENT OK");
+    MESSAGE("INPUT WORKLOAD EXECUTION - START OF LIFE EVENT OK");
 
     // Input workload execution - event relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
@@ -1014,13 +1015,13 @@
                                                LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INPUT WORKLOAD EXECUTION - START OF LIFE EVENT RELATIONSHIP OK");
+    MESSAGE("INPUT WORKLOAD EXECUTION - START OF LIFE EVENT RELATIONSHIP OK");
 
     // End of Input workload execution life
     // Event packet - timeline, threadId, eventGuid
     ProfilingGuid inputWorkloadExecutionEOLEventId = VerifyTimelineEventBinaryPacket(
         EmptyOptional(), EmptyOptional(), EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("INPUT WORKLOAD EXECUTION - END OF LIFE EVENT OK");
+    MESSAGE("INPUT WORKLOAD EXECUTION - END OF LIFE EVENT OK");
 
     // Input workload execution - event relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
@@ -1030,13 +1031,13 @@
                                                LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INPUT WORKLOAD EXECUTION - END OF LIFE EVENT RELATIONSHIP OK");
+    MESSAGE("INPUT WORKLOAD EXECUTION - END OF LIFE EVENT RELATIONSHIP OK");
 
     // Conv2d workload execution
     // Conv2d workload execution entity
     ProfilingGuid conv2DWorkloadExecutionGuid = VerifyTimelineEntityBinaryPacketData(
         EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("CONV2D WORKLOAD EXECUTION ENTITY OK");
+    MESSAGE("CONV2D WORKLOAD EXECUTION ENTITY OK");
 
     // Entity - Type relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -1046,7 +1047,7 @@
                                                LabelsAndEventClasses::TYPE_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("CONV2D WORKLOAD EXECUTION TYPE RELATIONSHIP OK");
+    MESSAGE("CONV2D WORKLOAD EXECUTION TYPE RELATIONSHIP OK");
 
     // Inference - Workload execution relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -1056,7 +1057,7 @@
                                                LabelsAndEventClasses::CHILD_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INFERENCE - CONV2D WORKLOAD EXECUTION CHILD RELATIONSHIP OK");
+    MESSAGE("INFERENCE - CONV2D WORKLOAD EXECUTION CHILD RELATIONSHIP OK");
 
     // Workload - Workload execution relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -1066,13 +1067,13 @@
                                                LabelsAndEventClasses::EXECUTION_OF_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("CONV2D WORKLOAD - CONV2D WORKLOAD EXECUTION RELATIONSHIP OK");
+    MESSAGE("CONV2D WORKLOAD - CONV2D WORKLOAD EXECUTION RELATIONSHIP OK");
 
     // Start Conv2d workload execution life
     // Event packet - timeline, threadId, eventGuid
     ProfilingGuid conv2DWorkloadExecutionSOLEventGuid = VerifyTimelineEventBinaryPacket(
         EmptyOptional(), EmptyOptional(), EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("CONV2D WORKLOAD EXECUTION START OF LIFE EVENT OK");
+    MESSAGE("CONV2D WORKLOAD EXECUTION START OF LIFE EVENT OK");
 
     // Conv2d workload execution - event relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
@@ -1082,13 +1083,13 @@
                                                LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("CONV2D WORKLOAD EXECUTION START OF LIFE RELATIONSHIP OK");
+    MESSAGE("CONV2D WORKLOAD EXECUTION START OF LIFE RELATIONSHIP OK");
 
     // End of Conv2d workload execution life
     // Event packet - timeline, threadId, eventGuid
     ProfilingGuid conv2DWorkloadExecutionEOLEventGuid = VerifyTimelineEventBinaryPacket(
         EmptyOptional(), EmptyOptional(), EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("CONV2D WORKLOAD EXECUTION END OF LIFE EVENT OK");
+    MESSAGE("CONV2D WORKLOAD EXECUTION END OF LIFE EVENT OK");
 
     // Conv2d workload execution - event relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
@@ -1098,13 +1099,13 @@
                                                LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("CONV2D WORKLOAD EXECUTION END OF LIFE RELATIONSHIP OK");
+    MESSAGE("CONV2D WORKLOAD EXECUTION END OF LIFE RELATIONSHIP OK");
 
     // Abs workload execution
     // Abs workload execution entity
     ProfilingGuid absWorkloadExecutionGuid = VerifyTimelineEntityBinaryPacketData(
         EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("ABS WORKLOAD EXECUTION ENTITY OK");
+    MESSAGE("ABS WORKLOAD EXECUTION ENTITY OK");
 
     // Entity - Type relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -1114,7 +1115,7 @@
                                                LabelsAndEventClasses::TYPE_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("ABS WORKLOAD EXECUTION TYPE RELATIONSHIP OK");
+    MESSAGE("ABS WORKLOAD EXECUTION TYPE RELATIONSHIP OK");
 
     // Inference - Workload execution relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -1124,7 +1125,7 @@
                                                LabelsAndEventClasses::CHILD_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INFERENCE - ABS WORKLOAD EXECUTION CHILD RELATIONSHIP OK");
+    MESSAGE("INFERENCE - ABS WORKLOAD EXECUTION CHILD RELATIONSHIP OK");
 
     // Workload - Workload execution relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -1134,13 +1135,13 @@
                                                LabelsAndEventClasses::EXECUTION_OF_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("ABS WORKLOAD - ABS WORKLOAD EXECUTION RELATIONSHIP OK");
+    MESSAGE("ABS WORKLOAD - ABS WORKLOAD EXECUTION RELATIONSHIP OK");
 
     // Start Abs workload execution life
     // Event packet - timeline, threadId, eventGuid
     ProfilingGuid absWorkloadExecutionSOLEventGuid = VerifyTimelineEventBinaryPacket(
         EmptyOptional(), EmptyOptional(), EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("ABS WORKLOAD EXECUTION START OF LIFE EVENT OK");
+    MESSAGE("ABS WORKLOAD EXECUTION START OF LIFE EVENT OK");
 
     // Abs workload execution - event relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
@@ -1150,13 +1151,13 @@
                                                LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("ABS WORKLOAD EXECUTION START OF LIFE RELATIONSHIP OK");
+    MESSAGE("ABS WORKLOAD EXECUTION START OF LIFE RELATIONSHIP OK");
 
     // End of Abs workload execution life
     // Event packet - timeline, threadId, eventGuid
     ProfilingGuid absWorkloadExecutionEOLEventGuid = VerifyTimelineEventBinaryPacket(
         EmptyOptional(), EmptyOptional(), EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("ABS WORKLOAD EXECUTION END OF LIFE EVENT OK");
+    MESSAGE("ABS WORKLOAD EXECUTION END OF LIFE EVENT OK");
 
     // Abs workload execution - event relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
@@ -1166,13 +1167,13 @@
                                                LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("ABS WORKLOAD EXECUTION END OF LIFE RELATIONSHIP OK");
+    MESSAGE("ABS WORKLOAD EXECUTION END OF LIFE RELATIONSHIP OK");
 
     // Output workload execution
     // Output workload execution entity
     ProfilingGuid outputWorkloadExecutionGuid = VerifyTimelineEntityBinaryPacketData(
         EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("OUTPUT WORKLOAD EXECUTION ENTITY OK");
+    MESSAGE("OUTPUT WORKLOAD EXECUTION ENTITY OK");
 
     // Entity - Type relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::LabelLink,
@@ -1182,7 +1183,7 @@
                                                LabelsAndEventClasses::TYPE_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("OUTPUT WORKLOAD EXECUTION TYPE RELATIONSHIP OK");
+    MESSAGE("OUTPUT WORKLOAD EXECUTION TYPE RELATIONSHIP OK");
 
     // Inference - Workload execution relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -1192,7 +1193,7 @@
                                                LabelsAndEventClasses::CHILD_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INFERENCE - OUTPUT WORKLOAD EXECUTION CHILD RELATIONSHIP OK");
+    MESSAGE("INFERENCE - OUTPUT WORKLOAD EXECUTION CHILD RELATIONSHIP OK");
 
     // Workload - Workload execution relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::RetentionLink,
@@ -1202,13 +1203,13 @@
                                                LabelsAndEventClasses::EXECUTION_OF_GUID,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("OUTPUT WORKLOAD - OUTPUT WORKLOAD EXECUTION EXECUTION_OF RELATIONSHIP OK");
+    MESSAGE("OUTPUT WORKLOAD - OUTPUT WORKLOAD EXECUTION EXECUTION_OF RELATIONSHIP OK");
 
     // Start Output workload execution life
     // Event packet - timeline, threadId, eventGuid
     ProfilingGuid outputWorkloadExecutionSOLEventGuid = VerifyTimelineEventBinaryPacket(
         EmptyOptional(), EmptyOptional(), EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("OUTPUT WORKLOAD EXECUTION START OF LIFE EVENT OK");
+    MESSAGE("OUTPUT WORKLOAD EXECUTION START OF LIFE EVENT OK");
 
     // Output workload execution - event relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
@@ -1218,13 +1219,13 @@
                                                LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("OUTPUT WORKLOAD EXECUTION - START OF LIFE EVENT RELATIONSHIP OK");
+    MESSAGE("OUTPUT WORKLOAD EXECUTION - START OF LIFE EVENT RELATIONSHIP OK");
 
     // End of Output workload execution life
     // Event packet - timeline, threadId, eventGuid
     ProfilingGuid outputWorkloadExecutionEOLEventGuid = VerifyTimelineEventBinaryPacket(
         EmptyOptional(), EmptyOptional(), EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("OUTPUT WORKLOAD EXECUTION END OF LIFE EVENT OK");
+    MESSAGE("OUTPUT WORKLOAD EXECUTION END OF LIFE EVENT OK");
 
     // Output workload execution - event relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
@@ -1234,13 +1235,13 @@
                                                LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("OUTPUT WORKLOAD EXECUTION - END OF LIFE EVENT RELATIONSHIP OK");
+    MESSAGE("OUTPUT WORKLOAD EXECUTION - END OF LIFE EVENT RELATIONSHIP OK");
 
     // End of Inference life
     // Event packet - timeline, threadId, eventGuid
     ProfilingGuid inferenceEOLEventGuid = VerifyTimelineEventBinaryPacket(
         EmptyOptional(), EmptyOptional(), EmptyOptional(), readableData, offset);
-    BOOST_TEST_MESSAGE("INFERENCE END OF LIFE EVENT OK");
+    MESSAGE("INFERENCE END OF LIFE EVENT OK");
 
     // Inference - event relationship
     VerifyTimelineRelationshipBinaryPacketData(ProfilingRelationshipType::ExecutionLink,
@@ -1250,7 +1251,7 @@
                                                LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS,
                                                readableData,
                                                offset);
-    BOOST_TEST_MESSAGE("INFERENCE - END OF LIFE EVENT RELATIONSHIP OK");
+    MESSAGE("INFERENCE - END OF LIFE EVENT RELATIONSHIP OK");
 
     bufferManager.MarkRead(inferenceReadableBuffer);
 }
diff --git a/src/profiling/test/ProfilingTests.cpp b/src/profiling/test/ProfilingTests.cpp
index a4a25a8..e0629b3 100644
--- a/src/profiling/test/ProfilingTests.cpp
+++ b/src/profiling/test/ProfilingTests.cpp
@@ -40,6 +40,8 @@
 #include <common/include/SocketConnectionException.hpp>
 #include <common/include/Packet.hpp>
 
+#include <doctest/doctest.h>
+
 #include <cstdint>
 #include <cstring>
 #include <iostream>
@@ -51,9 +53,9 @@
 using namespace armnn::profiling;
 using PacketType = MockProfilingConnection::PacketType;
 
-BOOST_AUTO_TEST_SUITE(ExternalProfiling)
-
-BOOST_AUTO_TEST_CASE(CheckCommandHandlerKeyComparisons)
+TEST_SUITE("ExternalProfiling")
+{
+TEST_CASE("CheckCommandHandlerKeyComparisons")
 {
     arm::pipe::CommandHandlerKey testKey1_0(1, 1, 1);
     arm::pipe::CommandHandlerKey testKey1_1(1, 1, 1);
@@ -66,28 +68,28 @@
     arm::pipe::CommandHandlerKey testKey4(0, 2, 2);
     arm::pipe::CommandHandlerKey testKey5(0, 0, 2);
 
-    BOOST_CHECK(testKey1_0 > testKey0);
-    BOOST_CHECK(testKey1_0 == testKey1_1);
-    BOOST_CHECK(testKey1_0 < testKey1_2);
+    CHECK(testKey1_0 > testKey0);
+    CHECK(testKey1_0 == testKey1_1);
+    CHECK(testKey1_0 < testKey1_2);
 
-    BOOST_CHECK(testKey1 < testKey4);
-    BOOST_CHECK(testKey1 > testKey3);
-    BOOST_CHECK(testKey1 <= testKey4);
-    BOOST_CHECK(testKey1 >= testKey3);
-    BOOST_CHECK(testKey1 <= testKey2);
-    BOOST_CHECK(testKey1 >= testKey2);
-    BOOST_CHECK(testKey1 == testKey2);
-    BOOST_CHECK(testKey1 == testKey1);
+    CHECK(testKey1 < testKey4);
+    CHECK(testKey1 > testKey3);
+    CHECK(testKey1 <= testKey4);
+    CHECK(testKey1 >= testKey3);
+    CHECK(testKey1 <= testKey2);
+    CHECK(testKey1 >= testKey2);
+    CHECK(testKey1 == testKey2);
+    CHECK(testKey1 == testKey1);
 
-    BOOST_CHECK(!(testKey1 == testKey5));
-    BOOST_CHECK(!(testKey1 != testKey1));
-    BOOST_CHECK(testKey1 != testKey5);
+    CHECK(!(testKey1 == testKey5));
+    CHECK(!(testKey1 != testKey1));
+    CHECK(testKey1 != testKey5);
 
-    BOOST_CHECK(testKey1 == testKey2 && testKey2 == testKey1);
-    BOOST_CHECK(testKey0 == testKey1 && testKey1 == testKey2 && testKey0 == testKey2);
+    CHECK((testKey1 == testKey2 && testKey2 == testKey1));
+    CHECK((testKey0 == testKey1 && testKey1 == testKey2 && testKey0 == testKey2));
 
-    BOOST_CHECK(testKey1.GetPacketId() == 1);
-    BOOST_CHECK(testKey1.GetVersion() == 1);
+    CHECK(testKey1.GetPacketId() == 1);
+    CHECK(testKey1.GetVersion() == 1);
 
     std::vector<arm::pipe::CommandHandlerKey> vect = {
         arm::pipe::CommandHandlerKey(0, 0, 1), arm::pipe::CommandHandlerKey(0, 2, 0),
@@ -103,10 +105,10 @@
         arm::pipe::CommandHandlerKey(0, 1, 1), arm::pipe::CommandHandlerKey(0, 2, 0),
         arm::pipe::CommandHandlerKey(0, 2, 0), arm::pipe::CommandHandlerKey(0, 2, 1) };
 
-    BOOST_CHECK(vect == expectedVect);
+    CHECK(vect == expectedVect);
 }
 
-BOOST_AUTO_TEST_CASE(CheckPacketKeyComparisons)
+TEST_CASE("CheckPacketKeyComparisons")
 {
     arm::pipe::PacketKey key0(0, 0);
     arm::pipe::PacketKey key1(0, 0);
@@ -116,22 +118,22 @@
     arm::pipe::PacketKey key5(1, 0);
     arm::pipe::PacketKey key6(1, 1);
 
-    BOOST_CHECK(!(key0 < key1));
-    BOOST_CHECK(!(key0 > key1));
-    BOOST_CHECK(key0 <= key1);
-    BOOST_CHECK(key0 >= key1);
-    BOOST_CHECK(key0 == key1);
-    BOOST_CHECK(key0 < key2);
-    BOOST_CHECK(key2 < key3);
-    BOOST_CHECK(key3 > key0);
-    BOOST_CHECK(key4 == key5);
-    BOOST_CHECK(key4 > key0);
-    BOOST_CHECK(key5 < key6);
-    BOOST_CHECK(key5 <= key6);
-    BOOST_CHECK(key5 != key6);
+    CHECK(!(key0 < key1));
+    CHECK(!(key0 > key1));
+    CHECK(key0 <= key1);
+    CHECK(key0 >= key1);
+    CHECK(key0 == key1);
+    CHECK(key0 < key2);
+    CHECK(key2 < key3);
+    CHECK(key3 > key0);
+    CHECK(key4 == key5);
+    CHECK(key4 > key0);
+    CHECK(key5 < key6);
+    CHECK(key5 <= key6);
+    CHECK(key5 != key6);
 }
 
-BOOST_AUTO_TEST_CASE(CheckCommandHandler)
+TEST_CASE("CheckCommandHandler")
 {
     arm::pipe::PacketVersionResolver packetVersionResolver;
     ProfilingStateMachine profilingStateMachine;
@@ -175,7 +177,7 @@
         std::this_thread::sleep_for(std::chrono::milliseconds(2));
     }
 
-    BOOST_CHECK(profilingStateMachine.GetCurrentState() == ProfilingState::Active);
+    CHECK(profilingStateMachine.GetCurrentState() == ProfilingState::Active);
 
     // Close the thread again.
     commandHandler0.Stop();
@@ -206,7 +208,7 @@
     {
         if (timeSlept >= timeout)
         {
-            BOOST_FAIL("Timeout: The command handler loop did not stop after the timeout");
+            FAIL("Timeout: The command handler loop did not stop after the timeout");
         }
         std::this_thread::sleep_for(std::chrono::milliseconds(1));
         timeSlept ++;
@@ -214,14 +216,14 @@
 
     commandHandler1.Stop();
     // The state machine should never have received the ack so will still be in WaitingForAck.
-    BOOST_CHECK(profilingStateMachine.GetCurrentState() == ProfilingState::WaitingForAck);
+    CHECK(profilingStateMachine.GetCurrentState() == ProfilingState::WaitingForAck);
 
     // Now try sending a bad connection acknowledged packet
     TestProfilingConnectionBadAckPacket testProfilingConnectionBadAckPacket;
     commandHandler1.Start(testProfilingConnectionBadAckPacket);
     commandHandler1.Stop();
     // This should also not change the state machine
-    BOOST_CHECK(profilingStateMachine.GetCurrentState() == ProfilingState::WaitingForAck);
+    CHECK(profilingStateMachine.GetCurrentState() == ProfilingState::WaitingForAck);
 
     // Disable stop after timeout and now commandHandler1 should persist after a timeout
     commandHandler1.SetStopAfterTimeout(false);
@@ -240,7 +242,7 @@
     commandHandler1.Stop();
 
     // Even after the 3 exceptions the ack packet should have transitioned the command handler to active.
-    BOOST_CHECK(profilingStateMachine.GetCurrentState() == ProfilingState::Active);
+    CHECK(profilingStateMachine.GetCurrentState() == ProfilingState::Active);
 
     // A command handler that gets exceptions other than timeouts should keep going.
     CommandHandler commandHandler2(1, false, commandHandlerRegistry, packetVersionResolver);
@@ -257,41 +259,41 @@
         std::this_thread::sleep_for(std::chrono::milliseconds(2));
     }
 
-    BOOST_CHECK(commandHandler2.IsRunning());
+    CHECK(commandHandler2.IsRunning());
     commandHandler2.Stop();
 }
 
-BOOST_AUTO_TEST_CASE(CheckEncodeVersion)
+TEST_CASE("CheckEncodeVersion")
 {
     arm::pipe::Version version1(12);
 
-    BOOST_CHECK(version1.GetMajor() == 0);
-    BOOST_CHECK(version1.GetMinor() == 0);
-    BOOST_CHECK(version1.GetPatch() == 12);
+    CHECK(version1.GetMajor() == 0);
+    CHECK(version1.GetMinor() == 0);
+    CHECK(version1.GetPatch() == 12);
 
     arm::pipe::Version version2(4108);
 
-    BOOST_CHECK(version2.GetMajor() == 0);
-    BOOST_CHECK(version2.GetMinor() == 1);
-    BOOST_CHECK(version2.GetPatch() == 12);
+    CHECK(version2.GetMajor() == 0);
+    CHECK(version2.GetMinor() == 1);
+    CHECK(version2.GetPatch() == 12);
 
     arm::pipe::Version version3(4198412);
 
-    BOOST_CHECK(version3.GetMajor() == 1);
-    BOOST_CHECK(version3.GetMinor() == 1);
-    BOOST_CHECK(version3.GetPatch() == 12);
+    CHECK(version3.GetMajor() == 1);
+    CHECK(version3.GetMinor() == 1);
+    CHECK(version3.GetPatch() == 12);
 
     arm::pipe::Version version4(0);
 
-    BOOST_CHECK(version4.GetMajor() == 0);
-    BOOST_CHECK(version4.GetMinor() == 0);
-    BOOST_CHECK(version4.GetPatch() == 0);
+    CHECK(version4.GetMajor() == 0);
+    CHECK(version4.GetMinor() == 0);
+    CHECK(version4.GetPatch() == 0);
 
     arm::pipe::Version version5(1, 0, 0);
-    BOOST_CHECK(version5.GetEncodedValue() == 4194304);
+    CHECK(version5.GetEncodedValue() == 4194304);
 }
 
-BOOST_AUTO_TEST_CASE(CheckPacketClass)
+TEST_CASE("CheckPacketClass")
 {
     uint32_t length                              = 4;
     std::unique_ptr<unsigned char[]> packetData0 = std::make_unique<unsigned char[]>(length);
@@ -300,35 +302,35 @@
 
     arm::pipe::Packet packetTest0(472580096, length, packetData0);
 
-    BOOST_CHECK(packetTest0.GetHeader() == 472580096);
-    BOOST_CHECK(packetTest0.GetPacketFamily() == 7);
-    BOOST_CHECK(packetTest0.GetPacketId() == 43);
-    BOOST_CHECK(packetTest0.GetLength() == length);
-    BOOST_CHECK(packetTest0.GetPacketType() == 3);
-    BOOST_CHECK(packetTest0.GetPacketClass() == 5);
+    CHECK(packetTest0.GetHeader() == 472580096);
+    CHECK(packetTest0.GetPacketFamily() == 7);
+    CHECK(packetTest0.GetPacketId() == 43);
+    CHECK(packetTest0.GetLength() == length);
+    CHECK(packetTest0.GetPacketType() == 3);
+    CHECK(packetTest0.GetPacketClass() == 5);
 
-    BOOST_CHECK_THROW(arm::pipe::Packet packetTest1(472580096, 0, packetData1), arm::pipe::InvalidArgumentException);
-    BOOST_CHECK_NO_THROW(arm::pipe::Packet packetTest2(472580096, 0, nullPacketData));
+    CHECK_THROWS_AS(arm::pipe::Packet packetTest1(472580096, 0, packetData1), arm::pipe::InvalidArgumentException);
+    CHECK_NOTHROW(arm::pipe::Packet packetTest2(472580096, 0, nullPacketData));
 
     arm::pipe::Packet packetTest3(472580096, 0, nullPacketData);
-    BOOST_CHECK(packetTest3.GetLength() == 0);
-    BOOST_CHECK(packetTest3.GetData() == nullptr);
+    CHECK(packetTest3.GetLength() == 0);
+    CHECK(packetTest3.GetData() == nullptr);
 
     const unsigned char* packetTest0Data = packetTest0.GetData();
     arm::pipe::Packet packetTest4(std::move(packetTest0));
 
-    BOOST_CHECK(packetTest0.GetData() == nullptr);
-    BOOST_CHECK(packetTest4.GetData() == packetTest0Data);
+    CHECK(packetTest0.GetData() == nullptr);
+    CHECK(packetTest4.GetData() == packetTest0Data);
 
-    BOOST_CHECK(packetTest4.GetHeader() == 472580096);
-    BOOST_CHECK(packetTest4.GetPacketFamily() == 7);
-    BOOST_CHECK(packetTest4.GetPacketId() == 43);
-    BOOST_CHECK(packetTest4.GetLength() == length);
-    BOOST_CHECK(packetTest4.GetPacketType() == 3);
-    BOOST_CHECK(packetTest4.GetPacketClass() == 5);
+    CHECK(packetTest4.GetHeader() == 472580096);
+    CHECK(packetTest4.GetPacketFamily() == 7);
+    CHECK(packetTest4.GetPacketId() == 43);
+    CHECK(packetTest4.GetLength() == length);
+    CHECK(packetTest4.GetPacketType() == 3);
+    CHECK(packetTest4.GetPacketClass() == 5);
 }
 
-BOOST_AUTO_TEST_CASE(CheckCommandHandlerFunctor)
+TEST_CASE("CheckCommandHandlerFunctor")
 {
     // Hard code the version as it will be the same during a single profiling session
     uint32_t version = 1;
@@ -353,11 +355,11 @@
 
     // Check the order of the map is correct
     auto it = registry.begin();
-    BOOST_CHECK(it->first == keyC);    // familyId == 5
+    CHECK(it->first == keyC);    // familyId == 5
     it++;
-    BOOST_CHECK(it->first == keyA);    // familyId == 7
+    CHECK(it->first == keyA);    // familyId == 7
     it++;
-    BOOST_CHECK(it->first == keyB);    // familyId == 8
+    CHECK(it->first == keyB);    // familyId == 8
 
     std::unique_ptr<unsigned char[]> packetDataA;
     std::unique_ptr<unsigned char[]> packetDataB;
@@ -370,24 +372,24 @@
     // Check the correct operator of derived class is called
     registry.at(arm::pipe::CommandHandlerKey(
         packetA.GetPacketFamily(), packetA.GetPacketId(), version))->operator()(packetA);
-    BOOST_CHECK(testFunctorA.GetCount() == 1);
-    BOOST_CHECK(testFunctorB.GetCount() == 0);
-    BOOST_CHECK(testFunctorC.GetCount() == 0);
+    CHECK(testFunctorA.GetCount() == 1);
+    CHECK(testFunctorB.GetCount() == 0);
+    CHECK(testFunctorC.GetCount() == 0);
 
     registry.at(arm::pipe::CommandHandlerKey(
         packetB.GetPacketFamily(), packetB.GetPacketId(), version))->operator()(packetB);
-    BOOST_CHECK(testFunctorA.GetCount() == 1);
-    BOOST_CHECK(testFunctorB.GetCount() == 1);
-    BOOST_CHECK(testFunctorC.GetCount() == 0);
+    CHECK(testFunctorA.GetCount() == 1);
+    CHECK(testFunctorB.GetCount() == 1);
+    CHECK(testFunctorC.GetCount() == 0);
 
     registry.at(arm::pipe::CommandHandlerKey(
         packetC.GetPacketFamily(), packetC.GetPacketId(), version))->operator()(packetC);
-    BOOST_CHECK(testFunctorA.GetCount() == 1);
-    BOOST_CHECK(testFunctorB.GetCount() == 1);
-    BOOST_CHECK(testFunctorC.GetCount() == 1);
+    CHECK(testFunctorA.GetCount() == 1);
+    CHECK(testFunctorB.GetCount() == 1);
+    CHECK(testFunctorC.GetCount() == 1);
 }
 
-BOOST_AUTO_TEST_CASE(CheckCommandHandlerRegistry)
+TEST_CASE("CheckCommandHandlerRegistry")
 {
     // Hard code the version as it will be the same during a single profiling session
     uint32_t version = 1;
@@ -414,32 +416,32 @@
 
     // Check the correct operator of derived class is called
     registry.GetFunctor(packetA.GetPacketFamily(), packetA.GetPacketId(), version)->operator()(packetA);
-    BOOST_CHECK(testFunctorA.GetCount() == 1);
-    BOOST_CHECK(testFunctorB.GetCount() == 0);
-    BOOST_CHECK(testFunctorC.GetCount() == 0);
+    CHECK(testFunctorA.GetCount() == 1);
+    CHECK(testFunctorB.GetCount() == 0);
+    CHECK(testFunctorC.GetCount() == 0);
 
     registry.GetFunctor(packetB.GetPacketFamily(), packetB.GetPacketId(), version)->operator()(packetB);
-    BOOST_CHECK(testFunctorA.GetCount() == 1);
-    BOOST_CHECK(testFunctorB.GetCount() == 1);
-    BOOST_CHECK(testFunctorC.GetCount() == 0);
+    CHECK(testFunctorA.GetCount() == 1);
+    CHECK(testFunctorB.GetCount() == 1);
+    CHECK(testFunctorC.GetCount() == 0);
 
     registry.GetFunctor(packetC.GetPacketFamily(), packetC.GetPacketId(), version)->operator()(packetC);
-    BOOST_CHECK(testFunctorA.GetCount() == 1);
-    BOOST_CHECK(testFunctorB.GetCount() == 1);
-    BOOST_CHECK(testFunctorC.GetCount() == 1);
+    CHECK(testFunctorA.GetCount() == 1);
+    CHECK(testFunctorB.GetCount() == 1);
+    CHECK(testFunctorC.GetCount() == 1);
 
     // Re-register an existing key with a new function
     registry.RegisterFunctor(&testFunctorC, testFunctorA.GetFamilyId(), testFunctorA.GetPacketId(), version);
     registry.GetFunctor(packetA.GetPacketFamily(), packetA.GetPacketId(), version)->operator()(packetC);
-    BOOST_CHECK(testFunctorA.GetCount() == 1);
-    BOOST_CHECK(testFunctorB.GetCount() == 1);
-    BOOST_CHECK(testFunctorC.GetCount() == 2);
+    CHECK(testFunctorA.GetCount() == 1);
+    CHECK(testFunctorB.GetCount() == 1);
+    CHECK(testFunctorC.GetCount() == 2);
 
     // Check that looking up a non-existent key throws a ProfilingException
-    BOOST_CHECK_THROW(registry.GetFunctor(0, 0, 0), arm::pipe::ProfilingException);
+    CHECK_THROWS_AS(registry.GetFunctor(0, 0, 0), arm::pipe::ProfilingException);
 }
 
-BOOST_AUTO_TEST_CASE(CheckPacketVersionResolver)
+TEST_CASE("CheckPacketVersionResolver")
 {
     // Set up random number generator for generating packetId values
     std::random_device device;
@@ -460,7 +462,7 @@
         const uint32_t packetId = distribution(generator);
         arm::pipe::Version resolvedVersion = packetVersionResolver.ResolvePacketVersion(familyId, packetId);
 
-        BOOST_TEST(resolvedVersion == expectedVersion);
+        CHECK(resolvedVersion == expectedVersion);
     }
 }
 
@@ -471,64 +473,64 @@
     states.TransitionToState(newState);
 }
 
-BOOST_AUTO_TEST_CASE(CheckProfilingStateMachine)
+TEST_CASE("CheckProfilingStateMachine")
 {
     ProfilingStateMachine profilingState1(ProfilingState::Uninitialised);
     profilingState1.TransitionToState(ProfilingState::Uninitialised);
-    BOOST_CHECK(profilingState1.GetCurrentState() == ProfilingState::Uninitialised);
+    CHECK(profilingState1.GetCurrentState() == ProfilingState::Uninitialised);
 
     ProfilingStateMachine profilingState2(ProfilingState::Uninitialised);
     profilingState2.TransitionToState(ProfilingState::NotConnected);
-    BOOST_CHECK(profilingState2.GetCurrentState() == ProfilingState::NotConnected);
+    CHECK(profilingState2.GetCurrentState() == ProfilingState::NotConnected);
 
     ProfilingStateMachine profilingState3(ProfilingState::NotConnected);
     profilingState3.TransitionToState(ProfilingState::NotConnected);
-    BOOST_CHECK(profilingState3.GetCurrentState() == ProfilingState::NotConnected);
+    CHECK(profilingState3.GetCurrentState() == ProfilingState::NotConnected);
 
     ProfilingStateMachine profilingState4(ProfilingState::NotConnected);
     profilingState4.TransitionToState(ProfilingState::WaitingForAck);
-    BOOST_CHECK(profilingState4.GetCurrentState() == ProfilingState::WaitingForAck);
+    CHECK(profilingState4.GetCurrentState() == ProfilingState::WaitingForAck);
 
     ProfilingStateMachine profilingState5(ProfilingState::WaitingForAck);
     profilingState5.TransitionToState(ProfilingState::WaitingForAck);
-    BOOST_CHECK(profilingState5.GetCurrentState() == ProfilingState::WaitingForAck);
+    CHECK(profilingState5.GetCurrentState() == ProfilingState::WaitingForAck);
 
     ProfilingStateMachine profilingState6(ProfilingState::WaitingForAck);
     profilingState6.TransitionToState(ProfilingState::Active);
-    BOOST_CHECK(profilingState6.GetCurrentState() == ProfilingState::Active);
+    CHECK(profilingState6.GetCurrentState() == ProfilingState::Active);
 
     ProfilingStateMachine profilingState7(ProfilingState::Active);
     profilingState7.TransitionToState(ProfilingState::NotConnected);
-    BOOST_CHECK(profilingState7.GetCurrentState() == ProfilingState::NotConnected);
+    CHECK(profilingState7.GetCurrentState() == ProfilingState::NotConnected);
 
     ProfilingStateMachine profilingState8(ProfilingState::Active);
     profilingState8.TransitionToState(ProfilingState::Active);
-    BOOST_CHECK(profilingState8.GetCurrentState() == ProfilingState::Active);
+    CHECK(profilingState8.GetCurrentState() == ProfilingState::Active);
 
     ProfilingStateMachine profilingState9(ProfilingState::Uninitialised);
-    BOOST_CHECK_THROW(profilingState9.TransitionToState(ProfilingState::WaitingForAck), armnn::Exception);
+    CHECK_THROWS_AS(profilingState9.TransitionToState(ProfilingState::WaitingForAck), armnn::Exception);
 
     ProfilingStateMachine profilingState10(ProfilingState::Uninitialised);
-    BOOST_CHECK_THROW(profilingState10.TransitionToState(ProfilingState::Active), armnn::Exception);
+    CHECK_THROWS_AS(profilingState10.TransitionToState(ProfilingState::Active), armnn::Exception);
 
     ProfilingStateMachine profilingState11(ProfilingState::NotConnected);
-    BOOST_CHECK_THROW(profilingState11.TransitionToState(ProfilingState::Uninitialised), armnn::Exception);
+    CHECK_THROWS_AS(profilingState11.TransitionToState(ProfilingState::Uninitialised), armnn::Exception);
 
     ProfilingStateMachine profilingState12(ProfilingState::NotConnected);
-    BOOST_CHECK_THROW(profilingState12.TransitionToState(ProfilingState::Active), armnn::Exception);
+    CHECK_THROWS_AS(profilingState12.TransitionToState(ProfilingState::Active), armnn::Exception);
 
     ProfilingStateMachine profilingState13(ProfilingState::WaitingForAck);
-    BOOST_CHECK_THROW(profilingState13.TransitionToState(ProfilingState::Uninitialised), armnn::Exception);
+    CHECK_THROWS_AS(profilingState13.TransitionToState(ProfilingState::Uninitialised), armnn::Exception);
 
     ProfilingStateMachine profilingState14(ProfilingState::WaitingForAck);
     profilingState14.TransitionToState(ProfilingState::NotConnected);
-    BOOST_CHECK(profilingState14.GetCurrentState() == ProfilingState::NotConnected);
+    CHECK(profilingState14.GetCurrentState() == ProfilingState::NotConnected);
 
     ProfilingStateMachine profilingState15(ProfilingState::Active);
-    BOOST_CHECK_THROW(profilingState15.TransitionToState(ProfilingState::Uninitialised), armnn::Exception);
+    CHECK_THROWS_AS(profilingState15.TransitionToState(ProfilingState::Uninitialised), armnn::Exception);
 
     ProfilingStateMachine profilingState16(armnn::profiling::ProfilingState::Active);
-    BOOST_CHECK_THROW(profilingState16.TransitionToState(ProfilingState::WaitingForAck), armnn::Exception);
+    CHECK_THROWS_AS(profilingState16.TransitionToState(ProfilingState::WaitingForAck), armnn::Exception);
 
     ProfilingStateMachine profilingState17(ProfilingState::Uninitialised);
 
@@ -544,7 +546,7 @@
     thread4.join();
     thread5.join();
 
-    BOOST_TEST((profilingState17.GetCurrentState() == ProfilingState::NotConnected));
+    CHECK((profilingState17.GetCurrentState() == ProfilingState::NotConnected));
 }
 
 void CaptureDataWriteThreadImpl(Holder& holder, uint32_t capturePeriod, const std::vector<uint16_t>& counterIds)
@@ -557,7 +559,7 @@
     captureData = holder.GetCaptureData();
 }
 
-BOOST_AUTO_TEST_CASE(CheckCaptureDataHolder)
+TEST_CASE("CheckCaptureDataHolder")
 {
     std::map<uint32_t, std::vector<uint16_t>> periodIdMap;
     std::vector<uint16_t> counterIds;
@@ -571,14 +573,14 @@
     // Verify the read and write threads set the holder correctly
     // and retrieve the expected values
     Holder holder;
-    BOOST_CHECK((holder.GetCaptureData()).GetCapturePeriod() == 0);
-    BOOST_CHECK(((holder.GetCaptureData()).GetCounterIds()).empty());
+    CHECK((holder.GetCaptureData()).GetCapturePeriod() == 0);
+    CHECK(((holder.GetCaptureData()).GetCounterIds()).empty());
 
     // Check Holder functions
     std::thread thread1(CaptureDataWriteThreadImpl, std::ref(holder), 2, std::ref(periodIdMap[2]));
     thread1.join();
-    BOOST_CHECK((holder.GetCaptureData()).GetCapturePeriod() == 2);
-    BOOST_CHECK((holder.GetCaptureData()).GetCounterIds() == periodIdMap[2]);
+    CHECK((holder.GetCaptureData()).GetCapturePeriod() == 2);
+    CHECK((holder.GetCaptureData()).GetCounterIds() == periodIdMap[2]);
     // NOTE: now that we have some initial values in the holder we don't have to worry
     //       in the multi-threaded section below about a read thread accessing the holder
     //       before any write thread has gotten to it so we read period = 0, counterIds empty
@@ -588,8 +590,8 @@
     CaptureData captureData;
     std::thread thread2(CaptureDataReadThreadImpl, std::ref(holder), std::ref(captureData));
     thread2.join();
-    BOOST_CHECK(captureData.GetCapturePeriod() == 2);
-    BOOST_CHECK(captureData.GetCounterIds() == periodIdMap[2]);
+    CHECK(captureData.GetCapturePeriod() == 2);
+    CHECK(captureData.GetCounterIds() == periodIdMap[2]);
 
     std::map<uint32_t, CaptureData> captureDataIdMap;
     for (uint32_t i = 0; i < numThreads; ++i)
@@ -606,8 +608,8 @@
             std::thread(CaptureDataWriteThreadImpl, std::ref(holder), i, std::ref(periodIdMap[i])));
 
         // Verify that the CaptureData goes into the thread in a virgin state
-        BOOST_CHECK(captureDataIdMap.at(i).GetCapturePeriod() == 0);
-        BOOST_CHECK(captureDataIdMap.at(i).GetCounterIds().empty());
+        CHECK(captureDataIdMap.at(i).GetCapturePeriod() == 0);
+        CHECK(captureDataIdMap.at(i).GetCounterIds().empty());
         readThreadsVect.emplace_back(
             std::thread(CaptureDataReadThreadImpl, std::ref(holder), std::ref(captureDataIdMap.at(i))));
     }
@@ -623,70 +625,70 @@
     for (uint32_t i = 0; i < numThreads; ++i)
     {
         CaptureData perThreadCaptureData = captureDataIdMap.at(i);
-        BOOST_CHECK(perThreadCaptureData.GetCounterIds() == periodIdMap.at(perThreadCaptureData.GetCapturePeriod()));
+        CHECK(perThreadCaptureData.GetCounterIds() == periodIdMap.at(perThreadCaptureData.GetCapturePeriod()));
     }
 }
 
-BOOST_AUTO_TEST_CASE(CaptureDataMethods)
+TEST_CASE("CaptureDataMethods")
 {
     // Check CaptureData setter and getter functions
     std::vector<uint16_t> counterIds = { 42, 29, 13 };
     CaptureData captureData;
-    BOOST_CHECK(captureData.GetCapturePeriod() == 0);
-    BOOST_CHECK((captureData.GetCounterIds()).empty());
+    CHECK(captureData.GetCapturePeriod() == 0);
+    CHECK((captureData.GetCounterIds()).empty());
     captureData.SetCapturePeriod(150);
     captureData.SetCounterIds(counterIds);
-    BOOST_CHECK(captureData.GetCapturePeriod() == 150);
-    BOOST_CHECK(captureData.GetCounterIds() == counterIds);
+    CHECK(captureData.GetCapturePeriod() == 150);
+    CHECK(captureData.GetCounterIds() == counterIds);
 
     // Check assignment operator
     CaptureData secondCaptureData;
 
     secondCaptureData = captureData;
-    BOOST_CHECK(secondCaptureData.GetCapturePeriod() == 150);
-    BOOST_CHECK(secondCaptureData.GetCounterIds() == counterIds);
+    CHECK(secondCaptureData.GetCapturePeriod() == 150);
+    CHECK(secondCaptureData.GetCounterIds() == counterIds);
 
     // Check copy constructor
     CaptureData copyConstructedCaptureData(captureData);
 
-    BOOST_CHECK(copyConstructedCaptureData.GetCapturePeriod() == 150);
-    BOOST_CHECK(copyConstructedCaptureData.GetCounterIds() == counterIds);
+    CHECK(copyConstructedCaptureData.GetCapturePeriod() == 150);
+    CHECK(copyConstructedCaptureData.GetCounterIds() == counterIds);
 }
 
-BOOST_AUTO_TEST_CASE(CheckProfilingServiceDisabled)
+TEST_CASE("CheckProfilingServiceDisabled")
 {
     armnn::IRuntime::CreationOptions::ExternalProfilingOptions options;
     armnn::profiling::ProfilingService profilingService;
     profilingService.ResetExternalProfilingOptions(options, true);
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
     profilingService.Update();
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
 }
 
-BOOST_AUTO_TEST_CASE(CheckProfilingServiceCounterDirectory)
+TEST_CASE("CheckProfilingServiceCounterDirectory")
 {
     armnn::IRuntime::CreationOptions::ExternalProfilingOptions options;
     armnn::profiling::ProfilingService profilingService;
     profilingService.ResetExternalProfilingOptions(options, true);
 
     const ICounterDirectory& counterDirectory0 = profilingService.GetCounterDirectory();
-    BOOST_CHECK(counterDirectory0.GetCounterCount() == 0);
+    CHECK(counterDirectory0.GetCounterCount() == 0);
     profilingService.Update();
-    BOOST_CHECK(counterDirectory0.GetCounterCount() == 0);
+    CHECK(counterDirectory0.GetCounterCount() == 0);
 
     options.m_EnableProfiling = true;
     profilingService.ResetExternalProfilingOptions(options);
 
     const ICounterDirectory& counterDirectory1 = profilingService.GetCounterDirectory();
-    BOOST_CHECK(counterDirectory1.GetCounterCount() == 0);
+    CHECK(counterDirectory1.GetCounterCount() == 0);
     profilingService.Update();
-    BOOST_CHECK(counterDirectory1.GetCounterCount() != 0);
+    CHECK(counterDirectory1.GetCounterCount() != 0);
     // Reset the profiling service to stop any running thread
     options.m_EnableProfiling = false;
     profilingService.ResetExternalProfilingOptions(options, true);
 }
 
-BOOST_AUTO_TEST_CASE(CheckProfilingServiceCounterValues)
+TEST_CASE("CheckProfilingServiceCounterValues")
 {
     armnn::IRuntime::CreationOptions::ExternalProfilingOptions options;
     options.m_EnableProfiling          = true;
@@ -696,11 +698,11 @@
     profilingService.Update();
     const ICounterDirectory& counterDirectory = profilingService.GetCounterDirectory();
     const Counters& counters                  = counterDirectory.GetCounters();
-    BOOST_CHECK(!counters.empty());
+    CHECK(!counters.empty());
 
     std::vector<std::thread> writers;
 
-    BOOST_CHECK(!counters.empty());
+    CHECK(!counters.empty());
 
     // Test GetAbsoluteCounterValue
     for (int i = 0; i < 4; ++i)
@@ -735,13 +737,13 @@
 
     uint32_t absoluteCounterValue = 0;
 
-    BOOST_CHECK_NO_THROW(absoluteCounterValue = profilingService.GetAbsoluteCounterValue(INFERENCES_RUN));
-    BOOST_CHECK(absoluteCounterValue = 5000);
+    CHECK_NOTHROW(absoluteCounterValue = profilingService.GetAbsoluteCounterValue(INFERENCES_RUN));
+    CHECK(absoluteCounterValue == 5000);
 
     // Test SetCounterValue
-    BOOST_CHECK_NO_THROW(profilingService.SetCounterValue(INFERENCES_RUN, 0));
-    BOOST_CHECK_NO_THROW(absoluteCounterValue = profilingService.GetAbsoluteCounterValue(INFERENCES_RUN));
-    BOOST_CHECK(absoluteCounterValue == 0);
+    CHECK_NOTHROW(profilingService.SetCounterValue(INFERENCES_RUN, 0));
+    CHECK_NOTHROW(absoluteCounterValue = profilingService.GetAbsoluteCounterValue(INFERENCES_RUN));
+    CHECK(absoluteCounterValue == 0);
 
     // Test GetDeltaCounterValue
     writers.clear();
@@ -788,402 +790,402 @@
 
     // Do one last read in case the reader stopped early
     deltaCounterValue += profilingService.GetDeltaCounterValue(INFERENCES_RUN);
-    BOOST_CHECK(deltaCounterValue == 5000);
+    CHECK(deltaCounterValue == 5000);
 
     // Reset the profiling service to stop any running thread
     options.m_EnableProfiling = false;
     profilingService.ResetExternalProfilingOptions(options, true);
 }
 
-BOOST_AUTO_TEST_CASE(CheckProfilingObjectUids)
+TEST_CASE("CheckProfilingObjectUids")
 {
     uint16_t uid = 0;
-    BOOST_CHECK_NO_THROW(uid = GetNextUid());
-    BOOST_CHECK(uid >= 1);
+    CHECK_NOTHROW(uid = GetNextUid());
+    CHECK(uid >= 1);
 
     uint16_t nextUid = 0;
-    BOOST_CHECK_NO_THROW(nextUid = GetNextUid());
-    BOOST_CHECK(nextUid > uid);
+    CHECK_NOTHROW(nextUid = GetNextUid());
+    CHECK(nextUid > uid);
 
     std::vector<uint16_t> counterUids;
-    BOOST_CHECK_NO_THROW(counterUids = GetNextCounterUids(uid,0));
-    BOOST_CHECK(counterUids.size() == 1);
+    CHECK_NOTHROW(counterUids = GetNextCounterUids(uid,0));
+    CHECK(counterUids.size() == 1);
 
     std::vector<uint16_t> nextCounterUids;
-    BOOST_CHECK_NO_THROW(nextCounterUids = GetNextCounterUids(nextUid, 2));
-    BOOST_CHECK(nextCounterUids.size() == 2);
-    BOOST_CHECK(nextCounterUids[0] > counterUids[0]);
+    CHECK_NOTHROW(nextCounterUids = GetNextCounterUids(nextUid, 2));
+    CHECK(nextCounterUids.size() == 2);
+    CHECK(nextCounterUids[0] > counterUids[0]);
 
     std::vector<uint16_t> counterUidsMultiCore;
     uint16_t thirdUid = nextCounterUids[0];
     uint16_t numberOfCores = 13;
-    BOOST_CHECK_NO_THROW(counterUidsMultiCore = GetNextCounterUids(thirdUid, numberOfCores));
-    BOOST_CHECK(counterUidsMultiCore.size() == numberOfCores);
-    BOOST_CHECK(counterUidsMultiCore.front() >= nextCounterUids[0]);
+    CHECK_NOTHROW(counterUidsMultiCore = GetNextCounterUids(thirdUid, numberOfCores));
+    CHECK(counterUidsMultiCore.size() == numberOfCores);
+    CHECK(counterUidsMultiCore.front() >= nextCounterUids[0]);
     for (size_t i = 1; i < numberOfCores; i++)
     {
-        BOOST_CHECK(counterUidsMultiCore[i] == counterUidsMultiCore[i - 1] + 1);
+        CHECK(counterUidsMultiCore[i] == counterUidsMultiCore[i - 1] + 1);
     }
-    BOOST_CHECK(counterUidsMultiCore.back() == counterUidsMultiCore.front() + numberOfCores - 1);
+    CHECK(counterUidsMultiCore.back() == counterUidsMultiCore.front() + numberOfCores - 1);
 }
 
-BOOST_AUTO_TEST_CASE(CheckCounterDirectoryRegisterCategory)
+TEST_CASE("CheckCounterDirectoryRegisterCategory")
 {
     CounterDirectory counterDirectory;
-    BOOST_CHECK(counterDirectory.GetCategoryCount() == 0);
-    BOOST_CHECK(counterDirectory.GetDeviceCount() == 0);
-    BOOST_CHECK(counterDirectory.GetCounterSetCount() == 0);
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 0);
+    CHECK(counterDirectory.GetCategoryCount() == 0);
+    CHECK(counterDirectory.GetDeviceCount() == 0);
+    CHECK(counterDirectory.GetCounterSetCount() == 0);
+    CHECK(counterDirectory.GetCounterCount() == 0);
 
     // Register a category with an invalid name
     const Category* noCategory = nullptr;
-    BOOST_CHECK_THROW(noCategory = counterDirectory.RegisterCategory(""), armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetCategoryCount() == 0);
-    BOOST_CHECK(!noCategory);
+    CHECK_THROWS_AS(noCategory = counterDirectory.RegisterCategory(""), armnn::InvalidArgumentException);
+    CHECK(counterDirectory.GetCategoryCount() == 0);
+    CHECK(!noCategory);
 
     // Register a category with an invalid name
-    BOOST_CHECK_THROW(noCategory = counterDirectory.RegisterCategory("invalid category"),
+    CHECK_THROWS_AS(noCategory = counterDirectory.RegisterCategory("invalid category"),
                       armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetCategoryCount() == 0);
-    BOOST_CHECK(!noCategory);
+    CHECK(counterDirectory.GetCategoryCount() == 0);
+    CHECK(!noCategory);
 
     // Register a new category
     const std::string categoryName = "some_category";
     const Category* category       = nullptr;
-    BOOST_CHECK_NO_THROW(category = counterDirectory.RegisterCategory(categoryName));
-    BOOST_CHECK(counterDirectory.GetCategoryCount() == 1);
-    BOOST_CHECK(category);
-    BOOST_CHECK(category->m_Name == categoryName);
-    BOOST_CHECK(category->m_Counters.empty());
+    CHECK_NOTHROW(category = counterDirectory.RegisterCategory(categoryName));
+    CHECK(counterDirectory.GetCategoryCount() == 1);
+    CHECK(category);
+    CHECK(category->m_Name == categoryName);
+    CHECK(category->m_Counters.empty());
 
     // Get the registered category
     const Category* registeredCategory = counterDirectory.GetCategory(categoryName);
-    BOOST_CHECK(counterDirectory.GetCategoryCount() == 1);
-    BOOST_CHECK(registeredCategory);
-    BOOST_CHECK(registeredCategory == category);
+    CHECK(counterDirectory.GetCategoryCount() == 1);
+    CHECK(registeredCategory);
+    CHECK(registeredCategory == category);
 
     // Try to get a category not registered
     const Category* notRegisteredCategory = counterDirectory.GetCategory("not_registered_category");
-    BOOST_CHECK(counterDirectory.GetCategoryCount() == 1);
-    BOOST_CHECK(!notRegisteredCategory);
+    CHECK(counterDirectory.GetCategoryCount() == 1);
+    CHECK(!notRegisteredCategory);
 
     // Register a category already registered
     const Category* anotherCategory = nullptr;
-    BOOST_CHECK_THROW(anotherCategory = counterDirectory.RegisterCategory(categoryName),
+    CHECK_THROWS_AS(anotherCategory = counterDirectory.RegisterCategory(categoryName),
                       armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetCategoryCount() == 1);
-    BOOST_CHECK(!anotherCategory);
+    CHECK(counterDirectory.GetCategoryCount() == 1);
+    CHECK(!anotherCategory);
 
     // Register a device for testing
     const std::string deviceName = "some_device";
     const Device* device         = nullptr;
-    BOOST_CHECK_NO_THROW(device = counterDirectory.RegisterDevice(deviceName));
-    BOOST_CHECK(counterDirectory.GetDeviceCount() == 1);
-    BOOST_CHECK(device);
-    BOOST_CHECK(device->m_Uid >= 1);
-    BOOST_CHECK(device->m_Name == deviceName);
-    BOOST_CHECK(device->m_Cores == 0);
+    CHECK_NOTHROW(device = counterDirectory.RegisterDevice(deviceName));
+    CHECK(counterDirectory.GetDeviceCount() == 1);
+    CHECK(device);
+    CHECK(device->m_Uid >= 1);
+    CHECK(device->m_Name == deviceName);
+    CHECK(device->m_Cores == 0);
 
     // Register a new category not associated to any device
     const std::string categoryWoDeviceName = "some_category_without_device";
     const Category* categoryWoDevice       = nullptr;
-    BOOST_CHECK_NO_THROW(categoryWoDevice = counterDirectory.RegisterCategory(categoryWoDeviceName));
-    BOOST_CHECK(counterDirectory.GetCategoryCount() == 2);
-    BOOST_CHECK(categoryWoDevice);
-    BOOST_CHECK(categoryWoDevice->m_Name == categoryWoDeviceName);
-    BOOST_CHECK(categoryWoDevice->m_Counters.empty());
+    CHECK_NOTHROW(categoryWoDevice = counterDirectory.RegisterCategory(categoryWoDeviceName));
+    CHECK(counterDirectory.GetCategoryCount() == 2);
+    CHECK(categoryWoDevice);
+    CHECK(categoryWoDevice->m_Name == categoryWoDeviceName);
+    CHECK(categoryWoDevice->m_Counters.empty());
 
     // Register a new category associated to an invalid device name (already exist)
     const Category* categoryInvalidDeviceName = nullptr;
-    BOOST_CHECK_THROW(categoryInvalidDeviceName =
+    CHECK_THROWS_AS(categoryInvalidDeviceName =
                           counterDirectory.RegisterCategory(categoryWoDeviceName),
                       armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetCategoryCount() == 2);
-    BOOST_CHECK(!categoryInvalidDeviceName);
+    CHECK(counterDirectory.GetCategoryCount() == 2);
+    CHECK(!categoryInvalidDeviceName);
 
     // Register a new category associated to a valid device
     const std::string categoryWValidDeviceName = "some_category_with_valid_device";
     const Category* categoryWValidDevice       = nullptr;
-    BOOST_CHECK_NO_THROW(categoryWValidDevice =
+    CHECK_NOTHROW(categoryWValidDevice =
                              counterDirectory.RegisterCategory(categoryWValidDeviceName));
-    BOOST_CHECK(counterDirectory.GetCategoryCount() == 3);
-    BOOST_CHECK(categoryWValidDevice);
-    BOOST_CHECK(categoryWValidDevice != category);
-    BOOST_CHECK(categoryWValidDevice->m_Name == categoryWValidDeviceName);
+    CHECK(counterDirectory.GetCategoryCount() == 3);
+    CHECK(categoryWValidDevice);
+    CHECK(categoryWValidDevice != category);
+    CHECK(categoryWValidDevice->m_Name == categoryWValidDeviceName);
 
     // Register a counter set for testing
     const std::string counterSetName = "some_counter_set";
     const CounterSet* counterSet     = nullptr;
-    BOOST_CHECK_NO_THROW(counterSet = counterDirectory.RegisterCounterSet(counterSetName));
-    BOOST_CHECK(counterDirectory.GetCounterSetCount() == 1);
-    BOOST_CHECK(counterSet);
-    BOOST_CHECK(counterSet->m_Uid >= 1);
-    BOOST_CHECK(counterSet->m_Name == counterSetName);
-    BOOST_CHECK(counterSet->m_Count == 0);
+    CHECK_NOTHROW(counterSet = counterDirectory.RegisterCounterSet(counterSetName));
+    CHECK(counterDirectory.GetCounterSetCount() == 1);
+    CHECK(counterSet);
+    CHECK(counterSet->m_Uid >= 1);
+    CHECK(counterSet->m_Name == counterSetName);
+    CHECK(counterSet->m_Count == 0);
 
     // Register a new category not associated to any counter set
     const std::string categoryWoCounterSetName = "some_category_without_counter_set";
     const Category* categoryWoCounterSet       = nullptr;
-    BOOST_CHECK_NO_THROW(categoryWoCounterSet =
+    CHECK_NOTHROW(categoryWoCounterSet =
                              counterDirectory.RegisterCategory(categoryWoCounterSetName));
-    BOOST_CHECK(counterDirectory.GetCategoryCount() == 4);
-    BOOST_CHECK(categoryWoCounterSet);
-    BOOST_CHECK(categoryWoCounterSet->m_Name == categoryWoCounterSetName);
+    CHECK(counterDirectory.GetCategoryCount() == 4);
+    CHECK(categoryWoCounterSet);
+    CHECK(categoryWoCounterSet->m_Name == categoryWoCounterSetName);
 
     // Register a new category associated to a valid counter set
     const std::string categoryWValidCounterSetName = "some_category_with_valid_counter_set";
     const Category* categoryWValidCounterSet       = nullptr;
-    BOOST_CHECK_NO_THROW(categoryWValidCounterSet = counterDirectory.RegisterCategory(categoryWValidCounterSetName));
-    BOOST_CHECK(counterDirectory.GetCategoryCount() == 5);
-    BOOST_CHECK(categoryWValidCounterSet);
-    BOOST_CHECK(categoryWValidCounterSet != category);
-    BOOST_CHECK(categoryWValidCounterSet->m_Name == categoryWValidCounterSetName);
+    CHECK_NOTHROW(categoryWValidCounterSet = counterDirectory.RegisterCategory(categoryWValidCounterSetName));
+    CHECK(counterDirectory.GetCategoryCount() == 5);
+    CHECK(categoryWValidCounterSet);
+    CHECK(categoryWValidCounterSet != category);
+    CHECK(categoryWValidCounterSet->m_Name == categoryWValidCounterSetName);
 
     // Register a new category associated to a valid device and counter set
     const std::string categoryWValidDeviceAndValidCounterSetName = "some_category_with_valid_device_and_counter_set";
     const Category* categoryWValidDeviceAndValidCounterSet       = nullptr;
-    BOOST_CHECK_NO_THROW(categoryWValidDeviceAndValidCounterSet = counterDirectory.RegisterCategory(
+    CHECK_NOTHROW(categoryWValidDeviceAndValidCounterSet = counterDirectory.RegisterCategory(
                              categoryWValidDeviceAndValidCounterSetName));
-    BOOST_CHECK(counterDirectory.GetCategoryCount() == 6);
-    BOOST_CHECK(categoryWValidDeviceAndValidCounterSet);
-    BOOST_CHECK(categoryWValidDeviceAndValidCounterSet != category);
-    BOOST_CHECK(categoryWValidDeviceAndValidCounterSet->m_Name == categoryWValidDeviceAndValidCounterSetName);
+    CHECK(counterDirectory.GetCategoryCount() == 6);
+    CHECK(categoryWValidDeviceAndValidCounterSet);
+    CHECK(categoryWValidDeviceAndValidCounterSet != category);
+    CHECK(categoryWValidDeviceAndValidCounterSet->m_Name == categoryWValidDeviceAndValidCounterSetName);
 }
 
-BOOST_AUTO_TEST_CASE(CheckCounterDirectoryRegisterDevice)
+TEST_CASE("CheckCounterDirectoryRegisterDevice")
 {
     CounterDirectory counterDirectory;
-    BOOST_CHECK(counterDirectory.GetCategoryCount() == 0);
-    BOOST_CHECK(counterDirectory.GetDeviceCount() == 0);
-    BOOST_CHECK(counterDirectory.GetCounterSetCount() == 0);
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 0);
+    CHECK(counterDirectory.GetCategoryCount() == 0);
+    CHECK(counterDirectory.GetDeviceCount() == 0);
+    CHECK(counterDirectory.GetCounterSetCount() == 0);
+    CHECK(counterDirectory.GetCounterCount() == 0);
 
     // Register a device with an invalid name
     const Device* noDevice = nullptr;
-    BOOST_CHECK_THROW(noDevice = counterDirectory.RegisterDevice(""), armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetDeviceCount() == 0);
-    BOOST_CHECK(!noDevice);
+    CHECK_THROWS_AS(noDevice = counterDirectory.RegisterDevice(""), armnn::InvalidArgumentException);
+    CHECK(counterDirectory.GetDeviceCount() == 0);
+    CHECK(!noDevice);
 
     // Register a device with an invalid name
-    BOOST_CHECK_THROW(noDevice = counterDirectory.RegisterDevice("inv@lid nam€"), armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetDeviceCount() == 0);
-    BOOST_CHECK(!noDevice);
+    CHECK_THROWS_AS(noDevice = counterDirectory.RegisterDevice("inv@lid nam€"), armnn::InvalidArgumentException);
+    CHECK(counterDirectory.GetDeviceCount() == 0);
+    CHECK(!noDevice);
 
     // Register a new device with no cores or parent category
     const std::string deviceName = "some_device";
     const Device* device         = nullptr;
-    BOOST_CHECK_NO_THROW(device = counterDirectory.RegisterDevice(deviceName));
-    BOOST_CHECK(counterDirectory.GetDeviceCount() == 1);
-    BOOST_CHECK(device);
-    BOOST_CHECK(device->m_Name == deviceName);
-    BOOST_CHECK(device->m_Uid >= 1);
-    BOOST_CHECK(device->m_Cores == 0);
+    CHECK_NOTHROW(device = counterDirectory.RegisterDevice(deviceName));
+    CHECK(counterDirectory.GetDeviceCount() == 1);
+    CHECK(device);
+    CHECK(device->m_Name == deviceName);
+    CHECK(device->m_Uid >= 1);
+    CHECK(device->m_Cores == 0);
 
     // Try getting an unregistered device
     const Device* unregisteredDevice = counterDirectory.GetDevice(9999);
-    BOOST_CHECK(!unregisteredDevice);
+    CHECK(!unregisteredDevice);
 
     // Get the registered device
     const Device* registeredDevice = counterDirectory.GetDevice(device->m_Uid);
-    BOOST_CHECK(counterDirectory.GetDeviceCount() == 1);
-    BOOST_CHECK(registeredDevice);
-    BOOST_CHECK(registeredDevice == device);
+    CHECK(counterDirectory.GetDeviceCount() == 1);
+    CHECK(registeredDevice);
+    CHECK(registeredDevice == device);
 
     // Register a device with the name of a device already registered
     const Device* deviceSameName = nullptr;
-    BOOST_CHECK_THROW(deviceSameName = counterDirectory.RegisterDevice(deviceName), armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetDeviceCount() == 1);
-    BOOST_CHECK(!deviceSameName);
+    CHECK_THROWS_AS(deviceSameName = counterDirectory.RegisterDevice(deviceName), armnn::InvalidArgumentException);
+    CHECK(counterDirectory.GetDeviceCount() == 1);
+    CHECK(!deviceSameName);
 
     // Register a new device with cores and no parent category
     const std::string deviceWCoresName = "some_device_with_cores";
     const Device* deviceWCores         = nullptr;
-    BOOST_CHECK_NO_THROW(deviceWCores = counterDirectory.RegisterDevice(deviceWCoresName, 2));
-    BOOST_CHECK(counterDirectory.GetDeviceCount() == 2);
-    BOOST_CHECK(deviceWCores);
-    BOOST_CHECK(deviceWCores->m_Name == deviceWCoresName);
-    BOOST_CHECK(deviceWCores->m_Uid >= 1);
-    BOOST_CHECK(deviceWCores->m_Uid > device->m_Uid);
-    BOOST_CHECK(deviceWCores->m_Cores == 2);
+    CHECK_NOTHROW(deviceWCores = counterDirectory.RegisterDevice(deviceWCoresName, 2));
+    CHECK(counterDirectory.GetDeviceCount() == 2);
+    CHECK(deviceWCores);
+    CHECK(deviceWCores->m_Name == deviceWCoresName);
+    CHECK(deviceWCores->m_Uid >= 1);
+    CHECK(deviceWCores->m_Uid > device->m_Uid);
+    CHECK(deviceWCores->m_Cores == 2);
 
     // Get the registered device
     const Device* registeredDeviceWCores = counterDirectory.GetDevice(deviceWCores->m_Uid);
-    BOOST_CHECK(counterDirectory.GetDeviceCount() == 2);
-    BOOST_CHECK(registeredDeviceWCores);
-    BOOST_CHECK(registeredDeviceWCores == deviceWCores);
-    BOOST_CHECK(registeredDeviceWCores != device);
+    CHECK(counterDirectory.GetDeviceCount() == 2);
+    CHECK(registeredDeviceWCores);
+    CHECK(registeredDeviceWCores == deviceWCores);
+    CHECK(registeredDeviceWCores != device);
 
     // Register a new device with cores and invalid parent category
     const std::string deviceWCoresWInvalidParentCategoryName = "some_device_with_cores_with_invalid_parent_category";
     const Device* deviceWCoresWInvalidParentCategory         = nullptr;
-    BOOST_CHECK_THROW(deviceWCoresWInvalidParentCategory =
+    CHECK_THROWS_AS(deviceWCoresWInvalidParentCategory =
                           counterDirectory.RegisterDevice(deviceWCoresWInvalidParentCategoryName, 3, std::string("")),
                       armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetDeviceCount() == 2);
-    BOOST_CHECK(!deviceWCoresWInvalidParentCategory);
+    CHECK(counterDirectory.GetDeviceCount() == 2);
+    CHECK(!deviceWCoresWInvalidParentCategory);
 
     // Register a new device with cores and invalid parent category
     const std::string deviceWCoresWInvalidParentCategoryName2 = "some_device_with_cores_with_invalid_parent_category2";
     const Device* deviceWCoresWInvalidParentCategory2         = nullptr;
-    BOOST_CHECK_THROW(deviceWCoresWInvalidParentCategory2 = counterDirectory.RegisterDevice(
+    CHECK_THROWS_AS(deviceWCoresWInvalidParentCategory2 = counterDirectory.RegisterDevice(
                           deviceWCoresWInvalidParentCategoryName2, 3, std::string("invalid_parent_category")),
                       armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetDeviceCount() == 2);
-    BOOST_CHECK(!deviceWCoresWInvalidParentCategory2);
+    CHECK(counterDirectory.GetDeviceCount() == 2);
+    CHECK(!deviceWCoresWInvalidParentCategory2);
 
     // Register a category for testing
     const std::string categoryName = "some_category";
     const Category* category       = nullptr;
-    BOOST_CHECK_NO_THROW(category = counterDirectory.RegisterCategory(categoryName));
-    BOOST_CHECK(counterDirectory.GetCategoryCount() == 1);
-    BOOST_CHECK(category);
-    BOOST_CHECK(category->m_Name == categoryName);
-    BOOST_CHECK(category->m_Counters.empty());
+    CHECK_NOTHROW(category = counterDirectory.RegisterCategory(categoryName));
+    CHECK(counterDirectory.GetCategoryCount() == 1);
+    CHECK(category);
+    CHECK(category->m_Name == categoryName);
+    CHECK(category->m_Counters.empty());
 
     // Register a new device with cores and valid parent category
     const std::string deviceWCoresWValidParentCategoryName = "some_device_with_cores_with_valid_parent_category";
     const Device* deviceWCoresWValidParentCategory         = nullptr;
-    BOOST_CHECK_NO_THROW(deviceWCoresWValidParentCategory =
+    CHECK_NOTHROW(deviceWCoresWValidParentCategory =
                              counterDirectory.RegisterDevice(deviceWCoresWValidParentCategoryName, 4, categoryName));
-    BOOST_CHECK(counterDirectory.GetDeviceCount() == 3);
-    BOOST_CHECK(deviceWCoresWValidParentCategory);
-    BOOST_CHECK(deviceWCoresWValidParentCategory->m_Name == deviceWCoresWValidParentCategoryName);
-    BOOST_CHECK(deviceWCoresWValidParentCategory->m_Uid >= 1);
-    BOOST_CHECK(deviceWCoresWValidParentCategory->m_Uid > device->m_Uid);
-    BOOST_CHECK(deviceWCoresWValidParentCategory->m_Uid > deviceWCores->m_Uid);
-    BOOST_CHECK(deviceWCoresWValidParentCategory->m_Cores == 4);
+    CHECK(counterDirectory.GetDeviceCount() == 3);
+    CHECK(deviceWCoresWValidParentCategory);
+    CHECK(deviceWCoresWValidParentCategory->m_Name == deviceWCoresWValidParentCategoryName);
+    CHECK(deviceWCoresWValidParentCategory->m_Uid >= 1);
+    CHECK(deviceWCoresWValidParentCategory->m_Uid > device->m_Uid);
+    CHECK(deviceWCoresWValidParentCategory->m_Uid > deviceWCores->m_Uid);
+    CHECK(deviceWCoresWValidParentCategory->m_Cores == 4);
 }
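
One structural difference from Boost.Test worth noting: TEST_SUITE opens an ordinary brace scope instead of being paired with an END macro, which is why each converted suite gains a closing brace at the end of the file. A stand-alone sketch, assuming the doctest single header is on the include path (the suite and case names here are made up):

    // Hypothetical stand-alone translation unit, only to show the suite layout.
    // Exactly one TU defines DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN (or supplies its
    // own main); the ArmNN unit test binary already does this elsewhere.
    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
    #include <doctest/doctest.h>

    TEST_SUITE("StandaloneSuiteSketch")
    {

    TEST_CASE("TrivialCase")
    {
        CHECK(1 + 1 == 2);
    }

    }   // no BOOST_AUTO_TEST_SUITE_END() equivalent; the closing brace ends the suite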
 
-BOOST_AUTO_TEST_CASE(CheckCounterDirectoryRegisterCounterSet)
+TEST_CASE("CheckCounterDirectoryRegisterCounterSet")
 {
     CounterDirectory counterDirectory;
-    BOOST_CHECK(counterDirectory.GetCategoryCount() == 0);
-    BOOST_CHECK(counterDirectory.GetDeviceCount() == 0);
-    BOOST_CHECK(counterDirectory.GetCounterSetCount() == 0);
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 0);
+    CHECK(counterDirectory.GetCategoryCount() == 0);
+    CHECK(counterDirectory.GetDeviceCount() == 0);
+    CHECK(counterDirectory.GetCounterSetCount() == 0);
+    CHECK(counterDirectory.GetCounterCount() == 0);
 
     // Register a counter set with an invalid name
     const CounterSet* noCounterSet = nullptr;
-    BOOST_CHECK_THROW(noCounterSet = counterDirectory.RegisterCounterSet(""), armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetCounterSetCount() == 0);
-    BOOST_CHECK(!noCounterSet);
+    CHECK_THROWS_AS(noCounterSet = counterDirectory.RegisterCounterSet(""), armnn::InvalidArgumentException);
+    CHECK(counterDirectory.GetCounterSetCount() == 0);
+    CHECK(!noCounterSet);
 
     // Register a counter set with an invalid name
-    BOOST_CHECK_THROW(noCounterSet = counterDirectory.RegisterCounterSet("invalid name"),
+    CHECK_THROWS_AS(noCounterSet = counterDirectory.RegisterCounterSet("invalid name"),
                       armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetCounterSetCount() == 0);
-    BOOST_CHECK(!noCounterSet);
+    CHECK(counterDirectory.GetCounterSetCount() == 0);
+    CHECK(!noCounterSet);
 
     // Register a new counter set with no count or parent category
     const std::string counterSetName = "some_counter_set";
     const CounterSet* counterSet     = nullptr;
-    BOOST_CHECK_NO_THROW(counterSet = counterDirectory.RegisterCounterSet(counterSetName));
-    BOOST_CHECK(counterDirectory.GetCounterSetCount() == 1);
-    BOOST_CHECK(counterSet);
-    BOOST_CHECK(counterSet->m_Name == counterSetName);
-    BOOST_CHECK(counterSet->m_Uid >= 1);
-    BOOST_CHECK(counterSet->m_Count == 0);
+    CHECK_NOTHROW(counterSet = counterDirectory.RegisterCounterSet(counterSetName));
+    CHECK(counterDirectory.GetCounterSetCount() == 1);
+    CHECK(counterSet);
+    CHECK(counterSet->m_Name == counterSetName);
+    CHECK(counterSet->m_Uid >= 1);
+    CHECK(counterSet->m_Count == 0);
 
     // Try getting an unregistered counter set
     const CounterSet* unregisteredCounterSet = counterDirectory.GetCounterSet(9999);
-    BOOST_CHECK(!unregisteredCounterSet);
+    CHECK(!unregisteredCounterSet);
 
     // Get the registered counter set
     const CounterSet* registeredCounterSet = counterDirectory.GetCounterSet(counterSet->m_Uid);
-    BOOST_CHECK(counterDirectory.GetCounterSetCount() == 1);
-    BOOST_CHECK(registeredCounterSet);
-    BOOST_CHECK(registeredCounterSet == counterSet);
+    CHECK(counterDirectory.GetCounterSetCount() == 1);
+    CHECK(registeredCounterSet);
+    CHECK(registeredCounterSet == counterSet);
 
     // Register a counter set with the name of a counter set already registered
     const CounterSet* counterSetSameName = nullptr;
-    BOOST_CHECK_THROW(counterSetSameName = counterDirectory.RegisterCounterSet(counterSetName),
+    CHECK_THROWS_AS(counterSetSameName = counterDirectory.RegisterCounterSet(counterSetName),
                       armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetCounterSetCount() == 1);
-    BOOST_CHECK(!counterSetSameName);
+    CHECK(counterDirectory.GetCounterSetCount() == 1);
+    CHECK(!counterSetSameName);
 
     // Register a new counter set with count and no parent category
     const std::string counterSetWCountName = "some_counter_set_with_count";
     const CounterSet* counterSetWCount     = nullptr;
-    BOOST_CHECK_NO_THROW(counterSetWCount = counterDirectory.RegisterCounterSet(counterSetWCountName, 37));
-    BOOST_CHECK(counterDirectory.GetCounterSetCount() == 2);
-    BOOST_CHECK(counterSetWCount);
-    BOOST_CHECK(counterSetWCount->m_Name == counterSetWCountName);
-    BOOST_CHECK(counterSetWCount->m_Uid >= 1);
-    BOOST_CHECK(counterSetWCount->m_Uid > counterSet->m_Uid);
-    BOOST_CHECK(counterSetWCount->m_Count == 37);
+    CHECK_NOTHROW(counterSetWCount = counterDirectory.RegisterCounterSet(counterSetWCountName, 37));
+    CHECK(counterDirectory.GetCounterSetCount() == 2);
+    CHECK(counterSetWCount);
+    CHECK(counterSetWCount->m_Name == counterSetWCountName);
+    CHECK(counterSetWCount->m_Uid >= 1);
+    CHECK(counterSetWCount->m_Uid > counterSet->m_Uid);
+    CHECK(counterSetWCount->m_Count == 37);
 
     // Get the registered counter set
     const CounterSet* registeredCounterSetWCount = counterDirectory.GetCounterSet(counterSetWCount->m_Uid);
-    BOOST_CHECK(counterDirectory.GetCounterSetCount() == 2);
-    BOOST_CHECK(registeredCounterSetWCount);
-    BOOST_CHECK(registeredCounterSetWCount == counterSetWCount);
-    BOOST_CHECK(registeredCounterSetWCount != counterSet);
+    CHECK(counterDirectory.GetCounterSetCount() == 2);
+    CHECK(registeredCounterSetWCount);
+    CHECK(registeredCounterSetWCount == counterSetWCount);
+    CHECK(registeredCounterSetWCount != counterSet);
 
     // Register a new counter set with count and invalid parent category
     const std::string counterSetWCountWInvalidParentCategoryName = "some_counter_set_with_count_"
                                                                    "with_invalid_parent_category";
     const CounterSet* counterSetWCountWInvalidParentCategory = nullptr;
-    BOOST_CHECK_THROW(counterSetWCountWInvalidParentCategory = counterDirectory.RegisterCounterSet(
+    CHECK_THROWS_AS(counterSetWCountWInvalidParentCategory = counterDirectory.RegisterCounterSet(
                           counterSetWCountWInvalidParentCategoryName, 42, std::string("")),
                       armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetCounterSetCount() == 2);
-    BOOST_CHECK(!counterSetWCountWInvalidParentCategory);
+    CHECK(counterDirectory.GetCounterSetCount() == 2);
+    CHECK(!counterSetWCountWInvalidParentCategory);
 
     // Register a new counter set with count and invalid parent category
     const std::string counterSetWCountWInvalidParentCategoryName2 = "some_counter_set_with_count_"
                                                                     "with_invalid_parent_category2";
     const CounterSet* counterSetWCountWInvalidParentCategory2 = nullptr;
-    BOOST_CHECK_THROW(counterSetWCountWInvalidParentCategory2 = counterDirectory.RegisterCounterSet(
+    CHECK_THROWS_AS(counterSetWCountWInvalidParentCategory2 = counterDirectory.RegisterCounterSet(
                           counterSetWCountWInvalidParentCategoryName2, 42, std::string("invalid_parent_category")),
                       armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetCounterSetCount() == 2);
-    BOOST_CHECK(!counterSetWCountWInvalidParentCategory2);
+    CHECK(counterDirectory.GetCounterSetCount() == 2);
+    CHECK(!counterSetWCountWInvalidParentCategory2);
 
     // Register a category for testing
     const std::string categoryName = "some_category";
     const Category* category       = nullptr;
-    BOOST_CHECK_NO_THROW(category = counterDirectory.RegisterCategory(categoryName));
-    BOOST_CHECK(counterDirectory.GetCategoryCount() == 1);
-    BOOST_CHECK(category);
-    BOOST_CHECK(category->m_Name == categoryName);
-    BOOST_CHECK(category->m_Counters.empty());
+    CHECK_NOTHROW(category = counterDirectory.RegisterCategory(categoryName));
+    CHECK(counterDirectory.GetCategoryCount() == 1);
+    CHECK(category);
+    CHECK(category->m_Name == categoryName);
+    CHECK(category->m_Counters.empty());
 
     // Register a new counter set with count and valid parent category
     const std::string counterSetWCountWValidParentCategoryName = "some_counter_set_with_count_"
                                                                  "with_valid_parent_category";
     const CounterSet* counterSetWCountWValidParentCategory = nullptr;
-    BOOST_CHECK_NO_THROW(counterSetWCountWValidParentCategory = counterDirectory.RegisterCounterSet(
+    CHECK_NOTHROW(counterSetWCountWValidParentCategory = counterDirectory.RegisterCounterSet(
                              counterSetWCountWValidParentCategoryName, 42, categoryName));
-    BOOST_CHECK(counterDirectory.GetCounterSetCount() == 3);
-    BOOST_CHECK(counterSetWCountWValidParentCategory);
-    BOOST_CHECK(counterSetWCountWValidParentCategory->m_Name == counterSetWCountWValidParentCategoryName);
-    BOOST_CHECK(counterSetWCountWValidParentCategory->m_Uid >= 1);
-    BOOST_CHECK(counterSetWCountWValidParentCategory->m_Uid > counterSet->m_Uid);
-    BOOST_CHECK(counterSetWCountWValidParentCategory->m_Uid > counterSetWCount->m_Uid);
-    BOOST_CHECK(counterSetWCountWValidParentCategory->m_Count == 42);
+    CHECK(counterDirectory.GetCounterSetCount() == 3);
+    CHECK(counterSetWCountWValidParentCategory);
+    CHECK(counterSetWCountWValidParentCategory->m_Name == counterSetWCountWValidParentCategoryName);
+    CHECK(counterSetWCountWValidParentCategory->m_Uid >= 1);
+    CHECK(counterSetWCountWValidParentCategory->m_Uid > counterSet->m_Uid);
+    CHECK(counterSetWCountWValidParentCategory->m_Uid > counterSetWCount->m_Uid);
+    CHECK(counterSetWCountWValidParentCategory->m_Count == 42);
 
     // Register a counter set associated to a category with invalid name
     const std::string counterSetSameCategoryName = "some_counter_set_with_invalid_parent_category";
     const std::string invalidCategoryName = "";
     const CounterSet* counterSetSameCategory     = nullptr;
-    BOOST_CHECK_THROW(counterSetSameCategory =
+    CHECK_THROWS_AS(counterSetSameCategory =
                           counterDirectory.RegisterCounterSet(counterSetSameCategoryName, 0, invalidCategoryName),
                       armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetCounterSetCount() == 3);
-    BOOST_CHECK(!counterSetSameCategory);
+    CHECK(counterDirectory.GetCounterSetCount() == 3);
+    CHECK(!counterSetSameCategory);
 }
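
The throwing cases above all follow the same pattern: the assignment sits inside CHECK_THROWS_AS, so a thrown exception leaves the pointer null, which the next CHECK then verifies. A small self-contained sketch of that pattern, with hypothetical names (FakeDirectory, RegisterThing) standing in for the real CounterDirectory API:

    // FakeDirectory and RegisterThing are stand-ins for the sketch only;
    // the point is the assertion pattern, not the ArmNN API.
    #include <doctest/doctest.h>

    #include <stdexcept>
    #include <string>

    struct FakeDirectory
    {
        const int* RegisterThing(const std::string& name)
        {
            if (name.empty())
            {
                throw std::invalid_argument("invalid name");
            }
            static const int registered = 42;
            return &registered;
        }
    };

    TEST_CASE("ThrowingRegistrationLeavesPointerNull")
    {
        FakeDirectory directory;
        const int* thing = nullptr;

        // The assignment is part of the checked expression: when RegisterThing
        // throws, 'thing' is never written, so the follow-up CHECK(!thing) holds.
        CHECK_THROWS_AS(thing = directory.RegisterThing(""), std::invalid_argument);
        CHECK(!thing);

        CHECK_NOTHROW(thing = directory.RegisterThing("valid_name"));
        CHECK(thing);
    }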
 
-BOOST_AUTO_TEST_CASE(CheckCounterDirectoryRegisterCounter)
+TEST_CASE("CheckCounterDirectoryRegisterCounter")
 {
     CounterDirectory counterDirectory;
-    BOOST_CHECK(counterDirectory.GetCategoryCount() == 0);
-    BOOST_CHECK(counterDirectory.GetDeviceCount() == 0);
-    BOOST_CHECK(counterDirectory.GetCounterSetCount() == 0);
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 0);
+    CHECK(counterDirectory.GetCategoryCount() == 0);
+    CHECK(counterDirectory.GetDeviceCount() == 0);
+    CHECK(counterDirectory.GetCounterSetCount() == 0);
+    CHECK(counterDirectory.GetCounterCount() == 0);
 
     // Register a counter with an invalid parent category name
     const Counter* noCounter = nullptr;
-    BOOST_CHECK_THROW(noCounter =
+    CHECK_THROWS_AS(noCounter =
                           counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
                                                            0,
                                                            "",
@@ -1193,11 +1195,11 @@
                                                            "valid ",
                                                            "name"),
                       armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 0);
-    BOOST_CHECK(!noCounter);
+    CHECK(counterDirectory.GetCounterCount() == 0);
+    CHECK(!noCounter);
 
     // Register a counter with an invalid parent category name
-    BOOST_CHECK_THROW(noCounter = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
+    CHECK_THROWS_AS(noCounter = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
                                                                    1,
                                                                    "invalid parent category",
                                                                    0,
@@ -1206,11 +1208,11 @@
                                                                    "valid name",
                                                                    "valid description"),
                       armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 0);
-    BOOST_CHECK(!noCounter);
+    CHECK(counterDirectory.GetCounterCount() == 0);
+    CHECK(!noCounter);
 
     // Register a counter with an invalid class
-    BOOST_CHECK_THROW(noCounter = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
+    CHECK_THROWS_AS(noCounter = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
                                                                    2,
                                                                    "valid_parent_category",
                                                                    2,
@@ -1220,11 +1222,11 @@
                                                                    "name",
                                                                    "valid description"),
                       armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 0);
-    BOOST_CHECK(!noCounter);
+    CHECK(counterDirectory.GetCounterCount() == 0);
+    CHECK(!noCounter);
 
     // Register a counter with an invalid interpolation
-    BOOST_CHECK_THROW(noCounter = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
+    CHECK_THROWS_AS(noCounter = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
                                                                    4,
                                                                    "valid_parent_category",
                                                                    0,
@@ -1234,11 +1236,11 @@
                                                                    "name",
                                                                    "valid description"),
                       armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 0);
-    BOOST_CHECK(!noCounter);
+    CHECK(counterDirectory.GetCounterCount() == 0);
+    CHECK(!noCounter);
 
     // Register a counter with an invalid multiplier
-    BOOST_CHECK_THROW(noCounter = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
+    CHECK_THROWS_AS(noCounter = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
                                                                    5,
                                                                    "valid_parent_category",
                                                                    0,
@@ -1248,11 +1250,11 @@
                                                                    "name",
                                                                    "valid description"),
                       armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 0);
-    BOOST_CHECK(!noCounter);
+    CHECK(counterDirectory.GetCounterCount() == 0);
+    CHECK(!noCounter);
 
     // Register a counter with an invalid name
-    BOOST_CHECK_THROW(
+    CHECK_THROWS_AS(
         noCounter = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
                                                      6,
                                                      "valid_parent_category",
@@ -1262,11 +1264,11 @@
                                                      "",
                                                      "valid description"),
         armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 0);
-    BOOST_CHECK(!noCounter);
+    CHECK(counterDirectory.GetCounterCount() == 0);
+    CHECK(!noCounter);
 
     // Register a counter with an invalid name
-    BOOST_CHECK_THROW(noCounter = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
+    CHECK_THROWS_AS(noCounter = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
                                                                    7,
                                                                    "valid_parent_category",
                                                                    0,
@@ -1275,11 +1277,11 @@
                                                                    "invalid nam€",
                                                                    "valid description"),
                       armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 0);
-    BOOST_CHECK(!noCounter);
+    CHECK(counterDirectory.GetCounterCount() == 0);
+    CHECK(!noCounter);
 
     // Register a counter with an invalid description
-    BOOST_CHECK_THROW(noCounter =
+    CHECK_THROWS_AS(noCounter =
                           counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
                                                            8,
                                                            "valid_parent_category",
@@ -1289,11 +1291,11 @@
                                                            "valid name",
                                                            ""),
                       armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 0);
-    BOOST_CHECK(!noCounter);
+    CHECK(counterDirectory.GetCounterCount() == 0);
+    CHECK(!noCounter);
 
     // Register a counter with an invalid description
-    BOOST_CHECK_THROW(noCounter = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
+    CHECK_THROWS_AS(noCounter = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
                                                                    9,
                                                                    "valid_parent_category",
                                                                    0,
@@ -1303,11 +1305,11 @@
                                                                    "name",
                                                                    "inv@lid description"),
                       armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 0);
-    BOOST_CHECK(!noCounter);
+    CHECK(counterDirectory.GetCounterCount() == 0);
+    CHECK(!noCounter);
 
     // Register a counter with an invalid unit2
-    BOOST_CHECK_THROW(noCounter = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
+    CHECK_THROWS_AS(noCounter = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
                                                                    10,
                                                                    "valid_parent_category",
                                                                    0,
@@ -1317,11 +1319,11 @@
                                                                    "valid description",
                                                                    std::string("Mb/s2")),
                       armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 0);
-    BOOST_CHECK(!noCounter);
+    CHECK(counterDirectory.GetCounterCount() == 0);
+    CHECK(!noCounter);
 
     // Register a counter with a non-existing parent category name
-    BOOST_CHECK_THROW(noCounter = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
+    CHECK_THROWS_AS(noCounter = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
                                                                    11,
                                                                    "invalid_parent_category",
                                                                    0,
@@ -1330,25 +1332,25 @@
                                                                    "valid name",
                                                                    "valid description"),
                       armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 0);
-    BOOST_CHECK(!noCounter);
+    CHECK(counterDirectory.GetCounterCount() == 0);
+    CHECK(!noCounter);
 
     // Try getting an unregistered counter
     const Counter* unregisteredCounter = counterDirectory.GetCounter(9999);
-    BOOST_CHECK(!unregisteredCounter);
+    CHECK(!unregisteredCounter);
 
     // Register a category for testing
     const std::string categoryName = "some_category";
     const Category* category       = nullptr;
-    BOOST_CHECK_NO_THROW(category = counterDirectory.RegisterCategory(categoryName));
-    BOOST_CHECK(counterDirectory.GetCategoryCount() == 1);
-    BOOST_CHECK(category);
-    BOOST_CHECK(category->m_Name == categoryName);
-    BOOST_CHECK(category->m_Counters.empty());
+    CHECK_NOTHROW(category = counterDirectory.RegisterCategory(categoryName));
+    CHECK(counterDirectory.GetCategoryCount() == 1);
+    CHECK(category);
+    CHECK(category->m_Name == categoryName);
+    CHECK(category->m_Counters.empty());
 
     // Register a counter with a valid parent category name
     const Counter* counter = nullptr;
-    BOOST_CHECK_NO_THROW(
+    CHECK_NOTHROW(
         counter = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
                                                    12,
                                                    categoryName,
@@ -1357,23 +1359,23 @@
                                                    123.45f,
                                                    "valid name",
                                                    "valid description"));
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 1);
-    BOOST_CHECK(counter);
-    BOOST_CHECK(counter->m_MaxCounterUid == counter->m_Uid);
-    BOOST_CHECK(counter->m_Class == 0);
-    BOOST_CHECK(counter->m_Interpolation == 1);
-    BOOST_CHECK(counter->m_Multiplier == 123.45f);
-    BOOST_CHECK(counter->m_Name == "valid name");
-    BOOST_CHECK(counter->m_Description == "valid description");
-    BOOST_CHECK(counter->m_Units == "");
-    BOOST_CHECK(counter->m_DeviceUid == 0);
-    BOOST_CHECK(counter->m_CounterSetUid == 0);
-    BOOST_CHECK(category->m_Counters.size() == 1);
-    BOOST_CHECK(category->m_Counters.back() == counter->m_Uid);
+    CHECK(counterDirectory.GetCounterCount() == 1);
+    CHECK(counter);
+    CHECK(counter->m_MaxCounterUid == counter->m_Uid);
+    CHECK(counter->m_Class == 0);
+    CHECK(counter->m_Interpolation == 1);
+    CHECK(counter->m_Multiplier == 123.45f);
+    CHECK(counter->m_Name == "valid name");
+    CHECK(counter->m_Description == "valid description");
+    CHECK(counter->m_Units == "");
+    CHECK(counter->m_DeviceUid == 0);
+    CHECK(counter->m_CounterSetUid == 0);
+    CHECK(category->m_Counters.size() == 1);
+    CHECK(category->m_Counters.back() == counter->m_Uid);
 
     // Register a counter with a name of a counter already registered for the given parent category name
     const Counter* counterSameName = nullptr;
-    BOOST_CHECK_THROW(counterSameName =
+    CHECK_THROWS_AS(counterSameName =
                           counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
                                                            13,
                                                            categoryName,
@@ -1384,12 +1386,12 @@
                                                            "valid description",
                                                            std::string("description")),
                       armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 1);
-    BOOST_CHECK(!counterSameName);
+    CHECK(counterDirectory.GetCounterCount() == 1);
+    CHECK(!counterSameName);
 
     // Register a counter with a valid parent category name and units
     const Counter* counterWUnits = nullptr;
-    BOOST_CHECK_NO_THROW(counterWUnits = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
+    CHECK_NOTHROW(counterWUnits = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
                                                                              14,
                                                                              categoryName,
                                                                              0,
@@ -1398,24 +1400,24 @@
                                                                              "valid name 2",
                                                                              "valid description",
                                                                              std::string("Mnnsq2")));    // Units
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 2);
-    BOOST_CHECK(counterWUnits);
-    BOOST_CHECK(counterWUnits->m_Uid > counter->m_Uid);
-    BOOST_CHECK(counterWUnits->m_MaxCounterUid == counterWUnits->m_Uid);
-    BOOST_CHECK(counterWUnits->m_Class == 0);
-    BOOST_CHECK(counterWUnits->m_Interpolation == 1);
-    BOOST_CHECK(counterWUnits->m_Multiplier == 123.45f);
-    BOOST_CHECK(counterWUnits->m_Name == "valid name 2");
-    BOOST_CHECK(counterWUnits->m_Description == "valid description");
-    BOOST_CHECK(counterWUnits->m_Units == "Mnnsq2");
-    BOOST_CHECK(counterWUnits->m_DeviceUid == 0);
-    BOOST_CHECK(counterWUnits->m_CounterSetUid == 0);
-    BOOST_CHECK(category->m_Counters.size() == 2);
-    BOOST_CHECK(category->m_Counters.back() == counterWUnits->m_Uid);
+    CHECK(counterDirectory.GetCounterCount() == 2);
+    CHECK(counterWUnits);
+    CHECK(counterWUnits->m_Uid > counter->m_Uid);
+    CHECK(counterWUnits->m_MaxCounterUid == counterWUnits->m_Uid);
+    CHECK(counterWUnits->m_Class == 0);
+    CHECK(counterWUnits->m_Interpolation == 1);
+    CHECK(counterWUnits->m_Multiplier == 123.45f);
+    CHECK(counterWUnits->m_Name == "valid name 2");
+    CHECK(counterWUnits->m_Description == "valid description");
+    CHECK(counterWUnits->m_Units == "Mnnsq2");
+    CHECK(counterWUnits->m_DeviceUid == 0);
+    CHECK(counterWUnits->m_CounterSetUid == 0);
+    CHECK(category->m_Counters.size() == 2);
+    CHECK(category->m_Counters.back() == counterWUnits->m_Uid);
 
     // Register a counter with a valid parent category name and not associated with a device
     const Counter* counterWoDevice = nullptr;
-    BOOST_CHECK_NO_THROW(counterWoDevice = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
+    CHECK_NOTHROW(counterWoDevice = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
                                                                             26,
                                                                             categoryName,
                                                                             0,
@@ -1426,23 +1428,23 @@
                                                                             armnn::EmptyOptional(),// Units
                                                                             armnn::EmptyOptional(),// Number of cores
                                                                             0));                   // Device UID
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 3);
-    BOOST_CHECK(counterWoDevice);
-    BOOST_CHECK(counterWoDevice->m_Uid > counter->m_Uid);
-    BOOST_CHECK(counterWoDevice->m_MaxCounterUid == counterWoDevice->m_Uid);
-    BOOST_CHECK(counterWoDevice->m_Class == 0);
-    BOOST_CHECK(counterWoDevice->m_Interpolation == 1);
-    BOOST_CHECK(counterWoDevice->m_Multiplier == 123.45f);
-    BOOST_CHECK(counterWoDevice->m_Name == "valid name 3");
-    BOOST_CHECK(counterWoDevice->m_Description == "valid description");
-    BOOST_CHECK(counterWoDevice->m_Units == "");
-    BOOST_CHECK(counterWoDevice->m_DeviceUid == 0);
-    BOOST_CHECK(counterWoDevice->m_CounterSetUid == 0);
-    BOOST_CHECK(category->m_Counters.size() == 3);
-    BOOST_CHECK(category->m_Counters.back() == counterWoDevice->m_Uid);
+    CHECK(counterDirectory.GetCounterCount() == 3);
+    CHECK(counterWoDevice);
+    CHECK(counterWoDevice->m_Uid > counter->m_Uid);
+    CHECK(counterWoDevice->m_MaxCounterUid == counterWoDevice->m_Uid);
+    CHECK(counterWoDevice->m_Class == 0);
+    CHECK(counterWoDevice->m_Interpolation == 1);
+    CHECK(counterWoDevice->m_Multiplier == 123.45f);
+    CHECK(counterWoDevice->m_Name == "valid name 3");
+    CHECK(counterWoDevice->m_Description == "valid description");
+    CHECK(counterWoDevice->m_Units == "");
+    CHECK(counterWoDevice->m_DeviceUid == 0);
+    CHECK(counterWoDevice->m_CounterSetUid == 0);
+    CHECK(category->m_Counters.size() == 3);
+    CHECK(category->m_Counters.back() == counterWoDevice->m_Uid);
 
     // Register a counter with a valid parent category name and associated to an invalid device
-    BOOST_CHECK_THROW(noCounter = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
+    CHECK_THROWS_AS(noCounter = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
                                                                    15,
                                                                    categoryName,
                                                                    0,
@@ -1454,22 +1456,22 @@
                                                                    armnn::EmptyOptional(),    // Number of cores
                                                                    100),                      // Device UID
                       armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 3);
-    BOOST_CHECK(!noCounter);
+    CHECK(counterDirectory.GetCounterCount() == 3);
+    CHECK(!noCounter);
 
     // Register a device for testing
     const std::string deviceName = "some_device";
     const Device* device         = nullptr;
-    BOOST_CHECK_NO_THROW(device = counterDirectory.RegisterDevice(deviceName));
-    BOOST_CHECK(counterDirectory.GetDeviceCount() == 1);
-    BOOST_CHECK(device);
-    BOOST_CHECK(device->m_Name == deviceName);
-    BOOST_CHECK(device->m_Uid >= 1);
-    BOOST_CHECK(device->m_Cores == 0);
+    CHECK_NOTHROW(device = counterDirectory.RegisterDevice(deviceName));
+    CHECK(counterDirectory.GetDeviceCount() == 1);
+    CHECK(device);
+    CHECK(device->m_Name == deviceName);
+    CHECK(device->m_Uid >= 1);
+    CHECK(device->m_Cores == 0);
 
     // Register a counter with a valid parent category name and associated to a device
     const Counter* counterWDevice = nullptr;
-    BOOST_CHECK_NO_THROW(counterWDevice = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
+    CHECK_NOTHROW(counterWDevice = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
                                                                            16,
                                                                            categoryName,
                                                                            0,
@@ -1480,24 +1482,24 @@
                                                                            armnn::EmptyOptional(), // Units
                                                                            armnn::EmptyOptional(), // Number of cores
                                                                            device->m_Uid));        // Device UID
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 4);
-    BOOST_CHECK(counterWDevice);
-    BOOST_CHECK(counterWDevice->m_Uid > counter->m_Uid);
-    BOOST_CHECK(counterWDevice->m_MaxCounterUid == counterWDevice->m_Uid);
-    BOOST_CHECK(counterWDevice->m_Class == 0);
-    BOOST_CHECK(counterWDevice->m_Interpolation == 1);
-    BOOST_CHECK(counterWDevice->m_Multiplier == 123.45f);
-    BOOST_CHECK(counterWDevice->m_Name == "valid name 5");
-    BOOST_CHECK(counterWDevice->m_Description == "valid description");
-    BOOST_CHECK(counterWDevice->m_Units == "");
-    BOOST_CHECK(counterWDevice->m_DeviceUid == device->m_Uid);
-    BOOST_CHECK(counterWDevice->m_CounterSetUid == 0);
-    BOOST_CHECK(category->m_Counters.size() == 4);
-    BOOST_CHECK(category->m_Counters.back() == counterWDevice->m_Uid);
+    CHECK(counterDirectory.GetCounterCount() == 4);
+    CHECK(counterWDevice);
+    CHECK(counterWDevice->m_Uid > counter->m_Uid);
+    CHECK(counterWDevice->m_MaxCounterUid == counterWDevice->m_Uid);
+    CHECK(counterWDevice->m_Class == 0);
+    CHECK(counterWDevice->m_Interpolation == 1);
+    CHECK(counterWDevice->m_Multiplier == 123.45f);
+    CHECK(counterWDevice->m_Name == "valid name 5");
+    CHECK(counterWDevice->m_Description == "valid description");
+    CHECK(counterWDevice->m_Units == "");
+    CHECK(counterWDevice->m_DeviceUid == device->m_Uid);
+    CHECK(counterWDevice->m_CounterSetUid == 0);
+    CHECK(category->m_Counters.size() == 4);
+    CHECK(category->m_Counters.back() == counterWDevice->m_Uid);
 
     // Register a counter with a valid parent category name and not associated with a counter set
     const Counter* counterWoCounterSet = nullptr;
-    BOOST_CHECK_NO_THROW(counterWoCounterSet = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
+    CHECK_NOTHROW(counterWoCounterSet = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
                                                                                 17,
                                                                                 categoryName,
                                                                                 0,
@@ -1509,23 +1511,23 @@
                                                                                 armnn::EmptyOptional(),// No of cores
                                                                                 armnn::EmptyOptional(),// Device UID
                                                                                 0));               // CounterSet UID
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 5);
-    BOOST_CHECK(counterWoCounterSet);
-    BOOST_CHECK(counterWoCounterSet->m_Uid > counter->m_Uid);
-    BOOST_CHECK(counterWoCounterSet->m_MaxCounterUid == counterWoCounterSet->m_Uid);
-    BOOST_CHECK(counterWoCounterSet->m_Class == 0);
-    BOOST_CHECK(counterWoCounterSet->m_Interpolation == 1);
-    BOOST_CHECK(counterWoCounterSet->m_Multiplier == 123.45f);
-    BOOST_CHECK(counterWoCounterSet->m_Name == "valid name 6");
-    BOOST_CHECK(counterWoCounterSet->m_Description == "valid description");
-    BOOST_CHECK(counterWoCounterSet->m_Units == "");
-    BOOST_CHECK(counterWoCounterSet->m_DeviceUid == 0);
-    BOOST_CHECK(counterWoCounterSet->m_CounterSetUid == 0);
-    BOOST_CHECK(category->m_Counters.size() == 5);
-    BOOST_CHECK(category->m_Counters.back() == counterWoCounterSet->m_Uid);
+    CHECK(counterDirectory.GetCounterCount() == 5);
+    CHECK(counterWoCounterSet);
+    CHECK(counterWoCounterSet->m_Uid > counter->m_Uid);
+    CHECK(counterWoCounterSet->m_MaxCounterUid == counterWoCounterSet->m_Uid);
+    CHECK(counterWoCounterSet->m_Class == 0);
+    CHECK(counterWoCounterSet->m_Interpolation == 1);
+    CHECK(counterWoCounterSet->m_Multiplier == 123.45f);
+    CHECK(counterWoCounterSet->m_Name == "valid name 6");
+    CHECK(counterWoCounterSet->m_Description == "valid description");
+    CHECK(counterWoCounterSet->m_Units == "");
+    CHECK(counterWoCounterSet->m_DeviceUid == 0);
+    CHECK(counterWoCounterSet->m_CounterSetUid == 0);
+    CHECK(category->m_Counters.size() == 5);
+    CHECK(category->m_Counters.back() == counterWoCounterSet->m_Uid);
 
     // Register a counter with a valid parent category name and associated to an invalid counter set
-    BOOST_CHECK_THROW(noCounter = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
+    CHECK_THROWS_AS(noCounter = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
                                                                    18,
                                                                    categoryName,
                                                                    0,
@@ -1538,92 +1540,92 @@
                                                                    armnn::EmptyOptional(),    // Number of cores
                                                                    100),            // Counter set UID
                       armnn::InvalidArgumentException);
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 5);
-    BOOST_CHECK(!noCounter);
+    CHECK(counterDirectory.GetCounterCount() == 5);
+    CHECK(!noCounter);
 
     // Register a counter with a valid parent category name and with a given number of cores
     const Counter* counterWNumberOfCores = nullptr;
     uint16_t numberOfCores               = 15;
-    BOOST_CHECK_NO_THROW(counterWNumberOfCores = counterDirectory.RegisterCounter(
+    CHECK_NOTHROW(counterWNumberOfCores = counterDirectory.RegisterCounter(
                              armnn::profiling::BACKEND_ID, 50,
                              categoryName, 0, 1, 123.45f, "valid name 8", "valid description",
                              armnn::EmptyOptional(),      // Units
                              numberOfCores,               // Number of cores
                              armnn::EmptyOptional(),      // Device UID
                              armnn::EmptyOptional()));    // Counter set UID
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 20);
-    BOOST_CHECK(counterWNumberOfCores);
-    BOOST_CHECK(counterWNumberOfCores->m_Uid > counter->m_Uid);
-    BOOST_CHECK(counterWNumberOfCores->m_MaxCounterUid == counterWNumberOfCores->m_Uid + numberOfCores - 1);
-    BOOST_CHECK(counterWNumberOfCores->m_Class == 0);
-    BOOST_CHECK(counterWNumberOfCores->m_Interpolation == 1);
-    BOOST_CHECK(counterWNumberOfCores->m_Multiplier == 123.45f);
-    BOOST_CHECK(counterWNumberOfCores->m_Name == "valid name 8");
-    BOOST_CHECK(counterWNumberOfCores->m_Description == "valid description");
-    BOOST_CHECK(counterWNumberOfCores->m_Units == "");
-    BOOST_CHECK(counterWNumberOfCores->m_DeviceUid == 0);
-    BOOST_CHECK(counterWNumberOfCores->m_CounterSetUid == 0);
-    BOOST_CHECK(category->m_Counters.size() == 20);
+    CHECK(counterDirectory.GetCounterCount() == 20);
+    CHECK(counterWNumberOfCores);
+    CHECK(counterWNumberOfCores->m_Uid > counter->m_Uid);
+    CHECK(counterWNumberOfCores->m_MaxCounterUid == counterWNumberOfCores->m_Uid + numberOfCores - 1);
+    CHECK(counterWNumberOfCores->m_Class == 0);
+    CHECK(counterWNumberOfCores->m_Interpolation == 1);
+    CHECK(counterWNumberOfCores->m_Multiplier == 123.45f);
+    CHECK(counterWNumberOfCores->m_Name == "valid name 8");
+    CHECK(counterWNumberOfCores->m_Description == "valid description");
+    CHECK(counterWNumberOfCores->m_Units == "");
+    CHECK(counterWNumberOfCores->m_DeviceUid == 0);
+    CHECK(counterWNumberOfCores->m_CounterSetUid == 0);
+    CHECK(category->m_Counters.size() == 20);
     for (size_t i = 0; i < numberOfCores; i++)
     {
-        BOOST_CHECK(category->m_Counters[category->m_Counters.size() - numberOfCores + i] ==
+        CHECK(category->m_Counters[category->m_Counters.size() - numberOfCores + i] ==
                     counterWNumberOfCores->m_Uid + i);
     }
 
     // Register a multi-core device for testing
     const std::string multiCoreDeviceName = "some_multi_core_device";
     const Device* multiCoreDevice         = nullptr;
-    BOOST_CHECK_NO_THROW(multiCoreDevice = counterDirectory.RegisterDevice(multiCoreDeviceName, 4));
-    BOOST_CHECK(counterDirectory.GetDeviceCount() == 2);
-    BOOST_CHECK(multiCoreDevice);
-    BOOST_CHECK(multiCoreDevice->m_Name == multiCoreDeviceName);
-    BOOST_CHECK(multiCoreDevice->m_Uid >= 1);
-    BOOST_CHECK(multiCoreDevice->m_Cores == 4);
+    CHECK_NOTHROW(multiCoreDevice = counterDirectory.RegisterDevice(multiCoreDeviceName, 4));
+    CHECK(counterDirectory.GetDeviceCount() == 2);
+    CHECK(multiCoreDevice);
+    CHECK(multiCoreDevice->m_Name == multiCoreDeviceName);
+    CHECK(multiCoreDevice->m_Uid >= 1);
+    CHECK(multiCoreDevice->m_Cores == 4);
 
     // Register a counter with a valid parent category name and associated to the multi-core device
     const Counter* counterWMultiCoreDevice = nullptr;
-    BOOST_CHECK_NO_THROW(counterWMultiCoreDevice = counterDirectory.RegisterCounter(
+    CHECK_NOTHROW(counterWMultiCoreDevice = counterDirectory.RegisterCounter(
                              armnn::profiling::BACKEND_ID, 19, categoryName, 0, 1,
                              123.45f, "valid name 9", "valid description",
                              armnn::EmptyOptional(),      // Units
                              armnn::EmptyOptional(),      // Number of cores
                              multiCoreDevice->m_Uid,      // Device UID
                              armnn::EmptyOptional()));    // Counter set UID
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 24);
-    BOOST_CHECK(counterWMultiCoreDevice);
-    BOOST_CHECK(counterWMultiCoreDevice->m_Uid > counter->m_Uid);
-    BOOST_CHECK(counterWMultiCoreDevice->m_MaxCounterUid ==
+    CHECK(counterDirectory.GetCounterCount() == 24);
+    CHECK(counterWMultiCoreDevice);
+    CHECK(counterWMultiCoreDevice->m_Uid > counter->m_Uid);
+    CHECK(counterWMultiCoreDevice->m_MaxCounterUid ==
                 counterWMultiCoreDevice->m_Uid + multiCoreDevice->m_Cores - 1);
-    BOOST_CHECK(counterWMultiCoreDevice->m_Class == 0);
-    BOOST_CHECK(counterWMultiCoreDevice->m_Interpolation == 1);
-    BOOST_CHECK(counterWMultiCoreDevice->m_Multiplier == 123.45f);
-    BOOST_CHECK(counterWMultiCoreDevice->m_Name == "valid name 9");
-    BOOST_CHECK(counterWMultiCoreDevice->m_Description == "valid description");
-    BOOST_CHECK(counterWMultiCoreDevice->m_Units == "");
-    BOOST_CHECK(counterWMultiCoreDevice->m_DeviceUid == multiCoreDevice->m_Uid);
-    BOOST_CHECK(counterWMultiCoreDevice->m_CounterSetUid == 0);
-    BOOST_CHECK(category->m_Counters.size() == 24);
+    CHECK(counterWMultiCoreDevice->m_Class == 0);
+    CHECK(counterWMultiCoreDevice->m_Interpolation == 1);
+    CHECK(counterWMultiCoreDevice->m_Multiplier == 123.45f);
+    CHECK(counterWMultiCoreDevice->m_Name == "valid name 9");
+    CHECK(counterWMultiCoreDevice->m_Description == "valid description");
+    CHECK(counterWMultiCoreDevice->m_Units == "");
+    CHECK(counterWMultiCoreDevice->m_DeviceUid == multiCoreDevice->m_Uid);
+    CHECK(counterWMultiCoreDevice->m_CounterSetUid == 0);
+    CHECK(category->m_Counters.size() == 24);
     for (size_t i = 0; i < 4; i++)
     {
-        BOOST_CHECK(category->m_Counters[category->m_Counters.size() - 4 + i] == counterWMultiCoreDevice->m_Uid + i);
+        CHECK(category->m_Counters[category->m_Counters.size() - 4 + i] == counterWMultiCoreDevice->m_Uid + i);
     }
 
     // Register a multi-core device associated to a parent category for testing
     const std::string multiCoreDeviceNameWParentCategory = "some_multi_core_device_with_parent_category";
     const Device* multiCoreDeviceWParentCategory         = nullptr;
-    BOOST_CHECK_NO_THROW(multiCoreDeviceWParentCategory =
+    CHECK_NOTHROW(multiCoreDeviceWParentCategory =
                              counterDirectory.RegisterDevice(multiCoreDeviceNameWParentCategory, 2, categoryName));
-    BOOST_CHECK(counterDirectory.GetDeviceCount() == 3);
-    BOOST_CHECK(multiCoreDeviceWParentCategory);
-    BOOST_CHECK(multiCoreDeviceWParentCategory->m_Name == multiCoreDeviceNameWParentCategory);
-    BOOST_CHECK(multiCoreDeviceWParentCategory->m_Uid >= 1);
-    BOOST_CHECK(multiCoreDeviceWParentCategory->m_Cores == 2);
+    CHECK(counterDirectory.GetDeviceCount() == 3);
+    CHECK(multiCoreDeviceWParentCategory);
+    CHECK(multiCoreDeviceWParentCategory->m_Name == multiCoreDeviceNameWParentCategory);
+    CHECK(multiCoreDeviceWParentCategory->m_Uid >= 1);
+    CHECK(multiCoreDeviceWParentCategory->m_Cores == 2);
 
     // Register a counter with a valid parent category name, taking the number of cores from the multi-core device
     // associated to that category
     const Counter* counterWMultiCoreDeviceWParentCategory = nullptr;
     uint16_t numberOfCourse = multiCoreDeviceWParentCategory->m_Cores;
-    BOOST_CHECK_NO_THROW(counterWMultiCoreDeviceWParentCategory =
+    CHECK_NOTHROW(counterWMultiCoreDeviceWParentCategory =
                                                 counterDirectory.RegisterCounter(
                                                     armnn::profiling::BACKEND_ID,
                                                     100,
@@ -1637,117 +1639,117 @@
                                                     numberOfCourse,          // Number of cores
                                                     armnn::EmptyOptional(),  // Device UID
                                                     armnn::EmptyOptional()));// Counter set UID
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 26);
-    BOOST_CHECK(counterWMultiCoreDeviceWParentCategory);
-    BOOST_CHECK(counterWMultiCoreDeviceWParentCategory->m_Uid > counter->m_Uid);
-    BOOST_CHECK(counterWMultiCoreDeviceWParentCategory->m_MaxCounterUid ==
+    CHECK(counterDirectory.GetCounterCount() == 26);
+    CHECK(counterWMultiCoreDeviceWParentCategory);
+    CHECK(counterWMultiCoreDeviceWParentCategory->m_Uid > counter->m_Uid);
+    CHECK(counterWMultiCoreDeviceWParentCategory->m_MaxCounterUid ==
                 counterWMultiCoreDeviceWParentCategory->m_Uid + multiCoreDeviceWParentCategory->m_Cores - 1);
-    BOOST_CHECK(counterWMultiCoreDeviceWParentCategory->m_Class == 0);
-    BOOST_CHECK(counterWMultiCoreDeviceWParentCategory->m_Interpolation == 1);
-    BOOST_CHECK(counterWMultiCoreDeviceWParentCategory->m_Multiplier == 123.45f);
-    BOOST_CHECK(counterWMultiCoreDeviceWParentCategory->m_Name == "valid name 10");
-    BOOST_CHECK(counterWMultiCoreDeviceWParentCategory->m_Description == "valid description");
-    BOOST_CHECK(counterWMultiCoreDeviceWParentCategory->m_Units == "");
-    BOOST_CHECK(category->m_Counters.size() == 26);
+    CHECK(counterWMultiCoreDeviceWParentCategory->m_Class == 0);
+    CHECK(counterWMultiCoreDeviceWParentCategory->m_Interpolation == 1);
+    CHECK(counterWMultiCoreDeviceWParentCategory->m_Multiplier == 123.45f);
+    CHECK(counterWMultiCoreDeviceWParentCategory->m_Name == "valid name 10");
+    CHECK(counterWMultiCoreDeviceWParentCategory->m_Description == "valid description");
+    CHECK(counterWMultiCoreDeviceWParentCategory->m_Units == "");
+    CHECK(category->m_Counters.size() == 26);
     for (size_t i = 0; i < 2; i++)
     {
-        BOOST_CHECK(category->m_Counters[category->m_Counters.size() - 2 + i] ==
+        CHECK(category->m_Counters[category->m_Counters.size() - 2 + i] ==
                     counterWMultiCoreDeviceWParentCategory->m_Uid + i);
     }
 
     // Register a counter set for testing
     const std::string counterSetName = "some_counter_set";
     const CounterSet* counterSet     = nullptr;
-    BOOST_CHECK_NO_THROW(counterSet = counterDirectory.RegisterCounterSet(counterSetName));
-    BOOST_CHECK(counterDirectory.GetCounterSetCount() == 1);
-    BOOST_CHECK(counterSet);
-    BOOST_CHECK(counterSet->m_Name == counterSetName);
-    BOOST_CHECK(counterSet->m_Uid >= 1);
-    BOOST_CHECK(counterSet->m_Count == 0);
+    CHECK_NOTHROW(counterSet = counterDirectory.RegisterCounterSet(counterSetName));
+    CHECK(counterDirectory.GetCounterSetCount() == 1);
+    CHECK(counterSet);
+    CHECK(counterSet->m_Name == counterSetName);
+    CHECK(counterSet->m_Uid >= 1);
+    CHECK(counterSet->m_Count == 0);
 
     // Register a counter with a valid parent category name and associated to a counter set
     const Counter* counterWCounterSet = nullptr;
-    BOOST_CHECK_NO_THROW(counterWCounterSet = counterDirectory.RegisterCounter(
+    CHECK_NOTHROW(counterWCounterSet = counterDirectory.RegisterCounter(
                              armnn::profiling::BACKEND_ID, 300,
                              categoryName, 0, 1, 123.45f, "valid name 11", "valid description",
                              armnn::EmptyOptional(),    // Units
                              0,                         // Number of cores
                              armnn::EmptyOptional(),    // Device UID
                              counterSet->m_Uid));       // Counter set UID
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 27);
-    BOOST_CHECK(counterWCounterSet);
-    BOOST_CHECK(counterWCounterSet->m_Uid > counter->m_Uid);
-    BOOST_CHECK(counterWCounterSet->m_MaxCounterUid == counterWCounterSet->m_Uid);
-    BOOST_CHECK(counterWCounterSet->m_Class == 0);
-    BOOST_CHECK(counterWCounterSet->m_Interpolation == 1);
-    BOOST_CHECK(counterWCounterSet->m_Multiplier == 123.45f);
-    BOOST_CHECK(counterWCounterSet->m_Name == "valid name 11");
-    BOOST_CHECK(counterWCounterSet->m_Description == "valid description");
-    BOOST_CHECK(counterWCounterSet->m_Units == "");
-    BOOST_CHECK(counterWCounterSet->m_DeviceUid == 0);
-    BOOST_CHECK(counterWCounterSet->m_CounterSetUid == counterSet->m_Uid);
-    BOOST_CHECK(category->m_Counters.size() == 27);
-    BOOST_CHECK(category->m_Counters.back() == counterWCounterSet->m_Uid);
+    CHECK(counterDirectory.GetCounterCount() == 27);
+    CHECK(counterWCounterSet);
+    CHECK(counterWCounterSet->m_Uid > counter->m_Uid);
+    CHECK(counterWCounterSet->m_MaxCounterUid == counterWCounterSet->m_Uid);
+    CHECK(counterWCounterSet->m_Class == 0);
+    CHECK(counterWCounterSet->m_Interpolation == 1);
+    CHECK(counterWCounterSet->m_Multiplier == 123.45f);
+    CHECK(counterWCounterSet->m_Name == "valid name 11");
+    CHECK(counterWCounterSet->m_Description == "valid description");
+    CHECK(counterWCounterSet->m_Units == "");
+    CHECK(counterWCounterSet->m_DeviceUid == 0);
+    CHECK(counterWCounterSet->m_CounterSetUid == counterSet->m_Uid);
+    CHECK(category->m_Counters.size() == 27);
+    CHECK(category->m_Counters.back() == counterWCounterSet->m_Uid);
 
     // Register a counter with a valid parent category name and associated to a device and a counter set
     const Counter* counterWDeviceWCounterSet = nullptr;
-    BOOST_CHECK_NO_THROW(counterWDeviceWCounterSet = counterDirectory.RegisterCounter(
+    CHECK_NOTHROW(counterWDeviceWCounterSet = counterDirectory.RegisterCounter(
                              armnn::profiling::BACKEND_ID, 23,
                              categoryName, 0, 1, 123.45f, "valid name 12", "valid description",
                              armnn::EmptyOptional(),    // Units
                              1,                         // Number of cores
                              device->m_Uid,             // Device UID
                              counterSet->m_Uid));       // Counter set UID
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 28);
-    BOOST_CHECK(counterWDeviceWCounterSet);
-    BOOST_CHECK(counterWDeviceWCounterSet->m_Uid > counter->m_Uid);
-    BOOST_CHECK(counterWDeviceWCounterSet->m_MaxCounterUid == counterWDeviceWCounterSet->m_Uid);
-    BOOST_CHECK(counterWDeviceWCounterSet->m_Class == 0);
-    BOOST_CHECK(counterWDeviceWCounterSet->m_Interpolation == 1);
-    BOOST_CHECK(counterWDeviceWCounterSet->m_Multiplier == 123.45f);
-    BOOST_CHECK(counterWDeviceWCounterSet->m_Name == "valid name 12");
-    BOOST_CHECK(counterWDeviceWCounterSet->m_Description == "valid description");
-    BOOST_CHECK(counterWDeviceWCounterSet->m_Units == "");
-    BOOST_CHECK(counterWDeviceWCounterSet->m_DeviceUid == device->m_Uid);
-    BOOST_CHECK(counterWDeviceWCounterSet->m_CounterSetUid == counterSet->m_Uid);
-    BOOST_CHECK(category->m_Counters.size() == 28);
-    BOOST_CHECK(category->m_Counters.back() == counterWDeviceWCounterSet->m_Uid);
+    CHECK(counterDirectory.GetCounterCount() == 28);
+    CHECK(counterWDeviceWCounterSet);
+    CHECK(counterWDeviceWCounterSet->m_Uid > counter->m_Uid);
+    CHECK(counterWDeviceWCounterSet->m_MaxCounterUid == counterWDeviceWCounterSet->m_Uid);
+    CHECK(counterWDeviceWCounterSet->m_Class == 0);
+    CHECK(counterWDeviceWCounterSet->m_Interpolation == 1);
+    CHECK(counterWDeviceWCounterSet->m_Multiplier == 123.45f);
+    CHECK(counterWDeviceWCounterSet->m_Name == "valid name 12");
+    CHECK(counterWDeviceWCounterSet->m_Description == "valid description");
+    CHECK(counterWDeviceWCounterSet->m_Units == "");
+    CHECK(counterWDeviceWCounterSet->m_DeviceUid == device->m_Uid);
+    CHECK(counterWDeviceWCounterSet->m_CounterSetUid == counterSet->m_Uid);
+    CHECK(category->m_Counters.size() == 28);
+    CHECK(category->m_Counters.back() == counterWDeviceWCounterSet->m_Uid);
 
     // Register another category for testing
     const std::string anotherCategoryName = "some_other_category";
     const Category* anotherCategory       = nullptr;
-    BOOST_CHECK_NO_THROW(anotherCategory = counterDirectory.RegisterCategory(anotherCategoryName));
-    BOOST_CHECK(counterDirectory.GetCategoryCount() == 2);
-    BOOST_CHECK(anotherCategory);
-    BOOST_CHECK(anotherCategory != category);
-    BOOST_CHECK(anotherCategory->m_Name == anotherCategoryName);
-    BOOST_CHECK(anotherCategory->m_Counters.empty());
+    CHECK_NOTHROW(anotherCategory = counterDirectory.RegisterCategory(anotherCategoryName));
+    CHECK(counterDirectory.GetCategoryCount() == 2);
+    CHECK(anotherCategory);
+    CHECK(anotherCategory != category);
+    CHECK(anotherCategory->m_Name == anotherCategoryName);
+    CHECK(anotherCategory->m_Counters.empty());
 
     // Register a counter to the other category
     const Counter* anotherCounter = nullptr;
-    BOOST_CHECK_NO_THROW(anotherCounter = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID, 24,
+    CHECK_NOTHROW(anotherCounter = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID, 24,
                                                                            anotherCategoryName, 1, 0, .00043f,
                                                                            "valid name", "valid description",
                                                                            armnn::EmptyOptional(), // Units
                                                                            armnn::EmptyOptional(), // Number of cores
                                                                            device->m_Uid,          // Device UID
                                                                            counterSet->m_Uid));    // Counter set UID
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 29);
-    BOOST_CHECK(anotherCounter);
-    BOOST_CHECK(anotherCounter->m_MaxCounterUid == anotherCounter->m_Uid);
-    BOOST_CHECK(anotherCounter->m_Class == 1);
-    BOOST_CHECK(anotherCounter->m_Interpolation == 0);
-    BOOST_CHECK(anotherCounter->m_Multiplier == .00043f);
-    BOOST_CHECK(anotherCounter->m_Name == "valid name");
-    BOOST_CHECK(anotherCounter->m_Description == "valid description");
-    BOOST_CHECK(anotherCounter->m_Units == "");
-    BOOST_CHECK(anotherCounter->m_DeviceUid == device->m_Uid);
-    BOOST_CHECK(anotherCounter->m_CounterSetUid == counterSet->m_Uid);
-    BOOST_CHECK(anotherCategory->m_Counters.size() == 1);
-    BOOST_CHECK(anotherCategory->m_Counters.back() == anotherCounter->m_Uid);
+    CHECK(counterDirectory.GetCounterCount() == 29);
+    CHECK(anotherCounter);
+    CHECK(anotherCounter->m_MaxCounterUid == anotherCounter->m_Uid);
+    CHECK(anotherCounter->m_Class == 1);
+    CHECK(anotherCounter->m_Interpolation == 0);
+    CHECK(anotherCounter->m_Multiplier == .00043f);
+    CHECK(anotherCounter->m_Name == "valid name");
+    CHECK(anotherCounter->m_Description == "valid description");
+    CHECK(anotherCounter->m_Units == "");
+    CHECK(anotherCounter->m_DeviceUid == device->m_Uid);
+    CHECK(anotherCounter->m_CounterSetUid == counterSet->m_Uid);
+    CHECK(anotherCategory->m_Counters.size() == 1);
+    CHECK(anotherCategory->m_Counters.back() == anotherCounter->m_Uid);
 }
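
This patch's conversion boils down to a one-to-one macro mapping from Boost.Test to doctest. The snippet below is a standalone illustration of that mapping; the suite name, test name and values are invented for the example and are not part of the patch. Note that a doctest TEST_SUITE is an ordinary brace-enclosed block rather than a begin/end macro pair.

#include <doctest/doctest.h>

#include <stdexcept>

TEST_SUITE("MigrationExampleSuite")                      // was BOOST_AUTO_TEST_SUITE(...)
{

TEST_CASE("MigrationExample")                            // was BOOST_AUTO_TEST_CASE(...)
{
    int value = 0;
    CHECK_NOTHROW(value = 42);                           // was BOOST_CHECK_NO_THROW(...)
    CHECK(value == 42);                                  // was BOOST_CHECK(...) / BOOST_TEST(...)

    auto thrower = []() { throw std::runtime_error("boom"); };
    CHECK_THROWS_AS(thrower(), std::runtime_error);      // was BOOST_CHECK_THROW(...)

    if (value != 42)
    {
        FAIL("unexpected value");                        // was BOOST_FAIL(...)
    }
}

}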
 
-BOOST_AUTO_TEST_CASE(CounterSelectionCommandHandlerParseData)
+TEST_CASE("CounterSelectionCommandHandlerParseData")
 {
     ProfilingStateMachine profilingStateMachine;
 
@@ -1819,20 +1821,20 @@
                                                           readCounterValues, sendCounterPacket, profilingStateMachine);
 
     profilingStateMachine.TransitionToState(ProfilingState::Uninitialised);
-    BOOST_CHECK_THROW(commandHandler(packetA), armnn::RuntimeException);
+    CHECK_THROWS_AS(commandHandler(packetA), armnn::RuntimeException);
     profilingStateMachine.TransitionToState(ProfilingState::NotConnected);
-    BOOST_CHECK_THROW(commandHandler(packetA), armnn::RuntimeException);
+    CHECK_THROWS_AS(commandHandler(packetA), armnn::RuntimeException);
     profilingStateMachine.TransitionToState(ProfilingState::WaitingForAck);
-    BOOST_CHECK_THROW(commandHandler(packetA), armnn::RuntimeException);
+    CHECK_THROWS_AS(commandHandler(packetA), armnn::RuntimeException);
     profilingStateMachine.TransitionToState(ProfilingState::Active);
-    BOOST_CHECK_NO_THROW(commandHandler(packetA));
+    CHECK_NOTHROW(commandHandler(packetA));
 
     const std::vector<uint16_t> counterIdsA = holder.GetCaptureData().GetCounterIds();
 
-    BOOST_TEST(holder.GetCaptureData().GetCapturePeriod() == period1);
-    BOOST_TEST(counterIdsA.size() == 2);
-    BOOST_TEST(counterIdsA[0] == 4000);
-    BOOST_TEST(counterIdsA[1] == 5000);
+    CHECK(holder.GetCaptureData().GetCapturePeriod() == period1);
+    CHECK(counterIdsA.size() == 2);
+    CHECK(counterIdsA[0] == 4000);
+    CHECK(counterIdsA[1] == 5000);
 
     auto readBuffer = mockBuffer.GetReadableBuffer();
 
@@ -1844,18 +1846,18 @@
     offset += sizeOfUint32;
     uint32_t period = ReadUint32(readBuffer, offset);
 
-    BOOST_TEST(((headerWord0 >> 26) & 0x3F) == 0);             // packet family
-    BOOST_TEST(((headerWord0 >> 16) & 0x3FF) == 4);            // packet id
-    BOOST_TEST(headerWord1 == 8);                              // data length
-    BOOST_TEST(period ==  armnn::LOWEST_CAPTURE_PERIOD);       // capture period
+    CHECK(((headerWord0 >> 26) & 0x3F) == 0);             // packet family
+    CHECK(((headerWord0 >> 16) & 0x3FF) == 4);            // packet id
+    CHECK(headerWord1 == 8);                              // data length
+    CHECK(period ==  armnn::LOWEST_CAPTURE_PERIOD);       // capture period
 
     uint16_t counterId = 0;
     offset += sizeOfUint32;
     counterId = ReadUint16(readBuffer, offset);
-    BOOST_TEST(counterId == 4000);
+    CHECK(counterId == 4000);
     offset += sizeOfUint16;
     counterId = ReadUint16(readBuffer, offset);
-    BOOST_TEST(counterId == 5000);
+    CHECK(counterId == 5000);
 
     mockBuffer.MarkRead(readBuffer);
 
@@ -1874,8 +1876,8 @@
     const std::vector<uint16_t> counterIdsB = holder.GetCaptureData().GetCounterIds();
 
     // Value should have been pulled up from 9000 to LOWEST_CAPTURE_PERIOD.
-    BOOST_TEST(holder.GetCaptureData().GetCapturePeriod() ==  armnn::LOWEST_CAPTURE_PERIOD);
-    BOOST_TEST(counterIdsB.size() == 0);
+    CHECK(holder.GetCaptureData().GetCapturePeriod() ==  armnn::LOWEST_CAPTURE_PERIOD);
+    CHECK(counterIdsB.size() == 0);
 
     readBuffer = mockBuffer.GetReadableBuffer();
 
@@ -1887,13 +1889,13 @@
     offset += sizeOfUint32;
     period = ReadUint32(readBuffer, offset);
 
-    BOOST_TEST(((headerWord0 >> 26) & 0x3F) == 0);         // packet family
-    BOOST_TEST(((headerWord0 >> 16) & 0x3FF) == 4);        // packet id
-    BOOST_TEST(headerWord1 == 4);                          // data length
-    BOOST_TEST(period == armnn::LOWEST_CAPTURE_PERIOD);    // capture period
+    CHECK(((headerWord0 >> 26) & 0x3F) == 0);         // packet family
+    CHECK(((headerWord0 >> 16) & 0x3FF) == 4);        // packet id
+    CHECK(headerWord1 == 4);                          // data length
+    CHECK(period == armnn::LOWEST_CAPTURE_PERIOD);    // capture period
 }
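
The header assertions in this test, and in several tests below, decode the same two 32-bit header words: bits 31:26 of the first word hold the packet family, bits 25:16 hold the packet id, and the second word is the payload length in bytes. A small helper along these lines could express those checks once; the sketch is illustrative only, with the field positions taken from the shifts and masks used above. (For periodic counter capture packets the id field is further split into a packet class, bits 25:19, and a packet type, bits 18:16, as the capture-thread test below checks.)

#include <cstdint>

// Illustrative decode of the two packet header words checked above.
struct PacketHeaderFields
{
    uint32_t packetFamily;   // headerWord0, bits 31:26
    uint32_t packetId;       // headerWord0, bits 25:16
    uint32_t dataLength;     // headerWord1, payload length in bytes
};

inline PacketHeaderFields DecodePacketHeader(uint32_t headerWord0, uint32_t headerWord1)
{
    return { (headerWord0 >> 26) & 0x3F, (headerWord0 >> 16) & 0x3FF, headerWord1 };
}

// A test body could then read:
//     const auto header = DecodePacketHeader(headerWord0, headerWord1);
//     CHECK(header.packetFamily == 0);
//     CHECK(header.packetId == 4);
//     CHECK(header.dataLength == 8);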
 
-BOOST_AUTO_TEST_CASE(CheckTimelineActivationAndDeactivation)
+TEST_CASE("CheckTimelineActivationAndDeactivation")
 {
     class TestReportStructure : public IReportStructure
     {
@@ -1946,23 +1948,23 @@
     // Create the ActivateTimelineReportingPacket
     arm::pipe::Packet ActivateTimelineReportingPacket(packetHeader1); // Length == 0
 
-    BOOST_CHECK_THROW(
+    CHECK_THROWS_AS(
             activateTimelineReportingCommandHandler.operator()(ActivateTimelineReportingPacket), armnn::Exception);
 
     stateMachine.TransitionToState(ProfilingState::NotConnected);
-    BOOST_CHECK_THROW(
+    CHECK_THROWS_AS(
             activateTimelineReportingCommandHandler.operator()(ActivateTimelineReportingPacket), armnn::Exception);
 
     stateMachine.TransitionToState(ProfilingState::WaitingForAck);
-    BOOST_CHECK_THROW(
+    CHECK_THROWS_AS(
             activateTimelineReportingCommandHandler.operator()(ActivateTimelineReportingPacket), armnn::Exception);
 
     stateMachine.TransitionToState(ProfilingState::Active);
     activateTimelineReportingCommandHandler.operator()(ActivateTimelineReportingPacket);
 
-    BOOST_CHECK(testReportStructure.m_ReportStructureCalled);
-    BOOST_CHECK(testNotifyBackends.m_TestNotifyBackendsCalled);
-    BOOST_CHECK(testNotifyBackends.m_timelineReporting.load());
+    CHECK(testReportStructure.m_ReportStructureCalled);
+    CHECK(testNotifyBackends.m_TestNotifyBackendsCalled);
+    CHECK(testNotifyBackends.m_timelineReporting.load());
 
     DeactivateTimelineReportingCommandHandler deactivateTimelineReportingCommandHandler(0,
                                                   7,
@@ -1979,25 +1981,25 @@
     arm::pipe::Packet deactivateTimelineReportingPacket(packetHeader2); // Length == 0
 
     stateMachine.Reset();
-    BOOST_CHECK_THROW(
+    CHECK_THROWS_AS(
             deactivateTimelineReportingCommandHandler.operator()(deactivateTimelineReportingPacket), armnn::Exception);
 
     stateMachine.TransitionToState(ProfilingState::NotConnected);
-    BOOST_CHECK_THROW(
+    CHECK_THROWS_AS(
             deactivateTimelineReportingCommandHandler.operator()(deactivateTimelineReportingPacket), armnn::Exception);
 
     stateMachine.TransitionToState(ProfilingState::WaitingForAck);
-    BOOST_CHECK_THROW(
+    CHECK_THROWS_AS(
             deactivateTimelineReportingCommandHandler.operator()(deactivateTimelineReportingPacket), armnn::Exception);
 
     stateMachine.TransitionToState(ProfilingState::Active);
     deactivateTimelineReportingCommandHandler.operator()(deactivateTimelineReportingPacket);
 
-    BOOST_CHECK(!testNotifyBackends.m_TestNotifyBackendsCalled);
-    BOOST_CHECK(!testNotifyBackends.m_timelineReporting.load());
+    CHECK(!testNotifyBackends.m_TestNotifyBackendsCalled);
+    CHECK(!testNotifyBackends.m_timelineReporting.load());
 }
 
-BOOST_AUTO_TEST_CASE(CheckProfilingServiceNotActive)
+TEST_CASE("CheckProfilingServiceNotActive")
 {
     using namespace armnn;
     using namespace armnn::profiling;
@@ -2016,10 +2018,10 @@
     auto readableBuffer = bufferManager.GetReadableBuffer();
 
     // Profiling is enabled but the service is not active, so nothing should have been written to the buffer
-    BOOST_CHECK(readableBuffer == nullptr);
+    CHECK(readableBuffer == nullptr);
 }
 
-BOOST_AUTO_TEST_CASE(CheckConnectionAcknowledged)
+TEST_CASE("CheckConnectionAcknowledged")
 {
     const uint32_t packetFamilyId     = 0;
     const uint32_t connectionPacketId = 0x10000;
@@ -2045,7 +2047,7 @@
     arm::pipe::Packet packetA(connectionPacketId, dataLength1, uniqueData1);
 
     ProfilingStateMachine profilingState(ProfilingState::Uninitialised);
-    BOOST_CHECK(profilingState.GetCurrentState() == ProfilingState::Uninitialised);
+    CHECK(profilingState.GetCurrentState() == ProfilingState::Uninitialised);
     CounterDirectory counterDirectory;
     MockBufferManager mockBuffer(1024);
     SendCounterPacket sendCounterPacket(mockBuffer);
@@ -2063,22 +2065,22 @@
                                                         mockProfilingServiceStatus);
 
     // command handler received packet on ProfilingState::Uninitialised
-    BOOST_CHECK_THROW(commandHandler(packetA), armnn::Exception);
+    CHECK_THROWS_AS(commandHandler(packetA), armnn::Exception);
 
     profilingState.TransitionToState(ProfilingState::NotConnected);
-    BOOST_CHECK(profilingState.GetCurrentState() == ProfilingState::NotConnected);
+    CHECK(profilingState.GetCurrentState() == ProfilingState::NotConnected);
     // command handler received packet on ProfilingState::NotConnected
-    BOOST_CHECK_THROW(commandHandler(packetA), armnn::Exception);
+    CHECK_THROWS_AS(commandHandler(packetA), armnn::Exception);
 
     profilingState.TransitionToState(ProfilingState::WaitingForAck);
-    BOOST_CHECK(profilingState.GetCurrentState() == ProfilingState::WaitingForAck);
+    CHECK(profilingState.GetCurrentState() == ProfilingState::WaitingForAck);
     // command handler received packet on ProfilingState::WaitingForAck
-    BOOST_CHECK_NO_THROW(commandHandler(packetA));
-    BOOST_CHECK(profilingState.GetCurrentState() == ProfilingState::Active);
+    CHECK_NOTHROW(commandHandler(packetA));
+    CHECK(profilingState.GetCurrentState() == ProfilingState::Active);
 
     // command handler received packet on ProfilingState::Active
-    BOOST_CHECK_NO_THROW(commandHandler(packetA));
-    BOOST_CHECK(profilingState.GetCurrentState() == ProfilingState::Active);
+    CHECK_NOTHROW(commandHandler(packetA));
+    CHECK(profilingState.GetCurrentState() == ProfilingState::Active);
 
     // command handler received different packet
     const uint32_t differentPacketId = 0x40000;
@@ -2093,16 +2095,16 @@
                                                                  sendTimelinePacket,
                                                                  profilingState,
                                                                  mockProfilingServiceStatus);
-    BOOST_CHECK_THROW(differentCommandHandler(packetB), armnn::Exception);
+    CHECK_THROWS_AS(differentCommandHandler(packetB), armnn::Exception);
 }
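
The two raw headers used in this test are just a packet id placed in bits 25:16 of the header word: 0x10000 encodes packet id 1 (the id this command handler accepts) and 0x40000 encodes packet id 4 (which it rejects). A tiny constexpr helper makes that encoding explicit; it is illustrative only and not part of the patch.

#include <cstdint>

// The raw header values above simply place the packet id in bits 25:16 of the
// header word, leaving every other field at zero.
constexpr uint32_t MakePacketIdHeader(uint32_t packetId)
{
    return (packetId & 0x3FFu) << 16;
}

static_assert(MakePacketIdHeader(1) == 0x10000, "connectionPacketId used above");
static_assert(MakePacketIdHeader(4) == 0x40000, "differentPacketId used above");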
 
-BOOST_AUTO_TEST_CASE(CheckSocketConnectionException)
+TEST_CASE("CheckSocketConnectionException")
 {
     // Check that creating a SocketProfilingConnection results in an exception, as the Gator UDS doesn't exist.
-    BOOST_CHECK_THROW(new SocketProfilingConnection(), arm::pipe::SocketConnectionException);
+    CHECK_THROWS_AS(new SocketProfilingConnection(), arm::pipe::SocketConnectionException);
 }
 
-BOOST_AUTO_TEST_CASE(CheckSocketConnectionException2)
+TEST_CASE("CheckSocketConnectionException2")
 {
     try
     {
@@ -2110,128 +2112,128 @@
     }
     catch (const arm::pipe::SocketConnectionException& ex)
     {
-        BOOST_CHECK(ex.GetSocketFd() == 0);
-        BOOST_CHECK(ex.GetErrorNo() == ECONNREFUSED);
-        BOOST_CHECK(ex.what()
+        CHECK(ex.GetSocketFd() == 0);
+        CHECK(ex.GetErrorNo() == ECONNREFUSED);
+        CHECK(ex.what()
                     == std::string("SocketProfilingConnection: Cannot connect to stream socket: Connection refused"));
     }
 }
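
The try/catch in CheckSocketConnectionException2 is needed because the test inspects the socket fd and errno as well as the message. Where only the exception type and its text matter, recent doctest releases also provide CHECK_THROWS_WITH_AS, shown below as an alternative formulation; the lambda and message are illustrative and this is not what the patch uses.

#include <doctest/doctest.h>

#include <stdexcept>

TEST_CASE("ThrowsWithMessageExample")
{
    auto connect = []()
    {
        throw std::runtime_error("Cannot connect to stream socket: Connection refused");
    };

    // Verifies the exception type and its what() text in a single assertion.
    CHECK_THROWS_WITH_AS(connect(),
                         "Cannot connect to stream socket: Connection refused",
                         std::runtime_error);
}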
 
-BOOST_AUTO_TEST_CASE(SwTraceIsValidCharTest)
+TEST_CASE("SwTraceIsValidCharTest")
 {
     // Only ASCII 7-bit encoding supported
     for (unsigned char c = 0; c < 128; c++)
     {
-        BOOST_CHECK(arm::pipe::SwTraceCharPolicy::IsValidChar(c));
+        CHECK(arm::pipe::SwTraceCharPolicy::IsValidChar(c));
     }
 
     // Not ASCII
     for (unsigned char c = 255; c >= 128; c--)
     {
-        BOOST_CHECK(!arm::pipe::SwTraceCharPolicy::IsValidChar(c));
+        CHECK(!arm::pipe::SwTraceCharPolicy::IsValidChar(c));
     }
 }
 
-BOOST_AUTO_TEST_CASE(SwTraceIsValidNameCharTest)
+TEST_CASE("SwTraceIsValidNameCharTest")
 {
     // Only alpha-numeric and underscore ASCII 7-bit encoding supported
     const unsigned char validChars[] = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
     for (unsigned char i = 0; i < sizeof(validChars) / sizeof(validChars[0]) - 1; i++)
     {
-        BOOST_CHECK(arm::pipe::SwTraceNameCharPolicy::IsValidChar(validChars[i]));
+        CHECK(arm::pipe::SwTraceNameCharPolicy::IsValidChar(validChars[i]));
     }
 
     // Non alpha-numeric chars
     for (unsigned char c = 0; c < 48; c++)
     {
-        BOOST_CHECK(!arm::pipe::SwTraceNameCharPolicy::IsValidChar(c));
+        CHECK(!arm::pipe::SwTraceNameCharPolicy::IsValidChar(c));
     }
     for (unsigned char c = 58; c < 65; c++)
     {
-        BOOST_CHECK(!arm::pipe::SwTraceNameCharPolicy::IsValidChar(c));
+        CHECK(!arm::pipe::SwTraceNameCharPolicy::IsValidChar(c));
     }
     for (unsigned char c = 91; c < 95; c++)
     {
-        BOOST_CHECK(!arm::pipe::SwTraceNameCharPolicy::IsValidChar(c));
+        CHECK(!arm::pipe::SwTraceNameCharPolicy::IsValidChar(c));
     }
     for (unsigned char c = 96; c < 97; c++)
     {
-        BOOST_CHECK(!arm::pipe::SwTraceNameCharPolicy::IsValidChar(c));
+        CHECK(!arm::pipe::SwTraceNameCharPolicy::IsValidChar(c));
     }
     for (unsigned char c = 123; c < 128; c++)
     {
-        BOOST_CHECK(!arm::pipe::SwTraceNameCharPolicy::IsValidChar(c));
+        CHECK(!arm::pipe::SwTraceNameCharPolicy::IsValidChar(c));
     }
 
     // Not ASCII
     for (unsigned char c = 255; c >= 128; c--)
     {
-        BOOST_CHECK(!arm::pipe::SwTraceNameCharPolicy::IsValidChar(c));
+        CHECK(!arm::pipe::SwTraceNameCharPolicy::IsValidChar(c));
     }
 }
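
Taken together, the accepted and rejected ranges above say that a SWTrace name character must be a 7-bit ASCII letter, digit or underscore. A minimal stand-in for that predicate is sketched below, for illustration only; the real check is arm::pipe::SwTraceNameCharPolicy::IsValidChar. Its boundaries line up with the rejected ranges in the loops: 0-47, 58-64, 91-94, 96 and 123-255.

// Illustrative equivalent of the name-character policy exercised above:
// ASCII alphanumerics and '_' are valid, everything else is rejected.
inline bool IsAsciiNameChar(unsigned char c)
{
    return (c >= '0' && c <= '9') ||
           (c >= 'A' && c <= 'Z') ||
           (c >= 'a' && c <= 'z') ||
           c == '_';
}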
 
-BOOST_AUTO_TEST_CASE(IsValidSwTraceStringTest)
+TEST_CASE("IsValidSwTraceStringTest")
 {
     // Valid SWTrace strings
-    BOOST_CHECK(arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceCharPolicy>(""));
-    BOOST_CHECK(arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceCharPolicy>("_"));
-    BOOST_CHECK(arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceCharPolicy>("0123"));
-    BOOST_CHECK(arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceCharPolicy>("valid_string"));
-    BOOST_CHECK(arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceCharPolicy>("VALID_string_456"));
-    BOOST_CHECK(arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceCharPolicy>(" "));
-    BOOST_CHECK(arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceCharPolicy>("valid string"));
-    BOOST_CHECK(arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceCharPolicy>("!$%"));
-    BOOST_CHECK(arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceCharPolicy>("valid|\\~string#123"));
+    CHECK(arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceCharPolicy>(""));
+    CHECK(arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceCharPolicy>("_"));
+    CHECK(arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceCharPolicy>("0123"));
+    CHECK(arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceCharPolicy>("valid_string"));
+    CHECK(arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceCharPolicy>("VALID_string_456"));
+    CHECK(arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceCharPolicy>(" "));
+    CHECK(arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceCharPolicy>("valid string"));
+    CHECK(arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceCharPolicy>("!$%"));
+    CHECK(arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceCharPolicy>("valid|\\~string#123"));
 
     // Invalid SWTrace strings
-    BOOST_CHECK(!arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceCharPolicy>("€£"));
-    BOOST_CHECK(!arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceCharPolicy>("invalid‡string"));
-    BOOST_CHECK(!arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceCharPolicy>("12Ž34"));
+    CHECK(!arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceCharPolicy>("€£"));
+    CHECK(!arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceCharPolicy>("invalid‡string"));
+    CHECK(!arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceCharPolicy>("12Ž34"));
 }
 
-BOOST_AUTO_TEST_CASE(IsValidSwTraceNameStringTest)
+TEST_CASE("IsValidSwTraceNameStringTest")
 {
     // Valid SWTrace name strings
-    BOOST_CHECK(arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceNameCharPolicy>(""));
-    BOOST_CHECK(arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceNameCharPolicy>("_"));
-    BOOST_CHECK(arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceNameCharPolicy>("0123"));
-    BOOST_CHECK(arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceNameCharPolicy>("valid_string"));
-    BOOST_CHECK(arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceNameCharPolicy>("VALID_string_456"));
+    CHECK(arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceNameCharPolicy>(""));
+    CHECK(arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceNameCharPolicy>("_"));
+    CHECK(arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceNameCharPolicy>("0123"));
+    CHECK(arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceNameCharPolicy>("valid_string"));
+    CHECK(arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceNameCharPolicy>("VALID_string_456"));
 
     // Invalid SWTrace name strings
-    BOOST_CHECK(!arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceNameCharPolicy>(" "));
-    BOOST_CHECK(!arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceNameCharPolicy>("invalid string"));
-    BOOST_CHECK(!arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceNameCharPolicy>("!$%"));
-    BOOST_CHECK(!arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceNameCharPolicy>("invalid|\\~string#123"));
-    BOOST_CHECK(!arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceNameCharPolicy>("€£"));
-    BOOST_CHECK(!arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceNameCharPolicy>("invalid‡string"));
-    BOOST_CHECK(!arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceNameCharPolicy>("12Ž34"));
+    CHECK(!arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceNameCharPolicy>(" "));
+    CHECK(!arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceNameCharPolicy>("invalid string"));
+    CHECK(!arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceNameCharPolicy>("!$%"));
+    CHECK(!arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceNameCharPolicy>("invalid|\\~string#123"));
+    CHECK(!arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceNameCharPolicy>("€£"));
+    CHECK(!arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceNameCharPolicy>("invalid‡string"));
+    CHECK(!arm::pipe::IsValidSwTraceString<arm::pipe::SwTraceNameCharPolicy>("12Ž34"));
 }
 
 template <typename SwTracePolicy>
 void StringToSwTraceStringTestHelper(const std::string& testString, std::vector<uint32_t> buffer, size_t expectedSize)
 {
     // Convert the test string to a SWTrace string
-    BOOST_CHECK(arm::pipe::StringToSwTraceString<SwTracePolicy>(testString, buffer));
+    CHECK(arm::pipe::StringToSwTraceString<SwTracePolicy>(testString, buffer));
 
     // The buffer must contain at least the length of the string
-    BOOST_CHECK(!buffer.empty());
+    CHECK(!buffer.empty());
 
     // The buffer must be of the expected size (in words)
-    BOOST_CHECK(buffer.size() == expectedSize);
+    CHECK(buffer.size() == expectedSize);
 
     // The first word of the buffer must be the length of the string including the null-terminator
-    BOOST_CHECK(buffer[0] == testString.size() + 1);
+    CHECK(buffer[0] == testString.size() + 1);
 
     // The contents of the buffer must match the test string
-    BOOST_CHECK(std::memcmp(testString.data(), buffer.data() + 1, testString.size()) == 0);
+    CHECK(std::memcmp(testString.data(), buffer.data() + 1, testString.size()) == 0);
 
     // The buffer must include the null-terminator at the end of the string
     size_t nullTerminatorIndex = sizeof(uint32_t) + testString.size();
-    BOOST_CHECK(reinterpret_cast<unsigned char*>(buffer.data())[nullTerminatorIndex] == '\0');
+    CHECK(reinterpret_cast<unsigned char*>(buffer.data())[nullTerminatorIndex] == '\0');
 }
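
The helper above pins down the SWTrace string encoding: word 0 holds the string length including the null terminator, the characters follow immediately after that word, and the result is padded out to whole 32-bit words. The expectedSize values passed by the callers below can therefore be computed as in this sketch; for example "VALID_string_456" has 16 characters, giving 1 + ceil(17 / 4) = 6 words.

#include <cstddef>
#include <cstdint>
#include <string>

// Expected size, in 32-bit words, of a SWTrace-encoded string: one word for
// the length, plus the characters and the null terminator rounded up to words.
inline size_t ExpectedSwTraceStringSizeInWords(const std::string& s)
{
    const size_t bytesIncludingNull = s.size() + 1;
    return 1 + (bytesIncludingNull + sizeof(uint32_t) - 1) / sizeof(uint32_t);
}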
 
-BOOST_AUTO_TEST_CASE(StringToSwTraceStringTest)
+TEST_CASE("StringToSwTraceStringTest")
 {
     std::vector<uint32_t> buffer;
 
@@ -2247,15 +2249,15 @@
     StringToSwTraceStringTestHelper<arm::pipe::SwTraceCharPolicy>("valid|\\~string#123", buffer, 6);
 
     // Invalid SWTrace strings
-    BOOST_CHECK(!arm::pipe::StringToSwTraceString<arm::pipe::SwTraceCharPolicy>("€£", buffer));
-    BOOST_CHECK(buffer.empty());
-    BOOST_CHECK(!arm::pipe::StringToSwTraceString<arm::pipe::SwTraceCharPolicy>("invalid‡string", buffer));
-    BOOST_CHECK(buffer.empty());
-    BOOST_CHECK(!arm::pipe::StringToSwTraceString<arm::pipe::SwTraceCharPolicy>("12Ž34", buffer));
-    BOOST_CHECK(buffer.empty());
+    CHECK(!arm::pipe::StringToSwTraceString<arm::pipe::SwTraceCharPolicy>("€£", buffer));
+    CHECK(buffer.empty());
+    CHECK(!arm::pipe::StringToSwTraceString<arm::pipe::SwTraceCharPolicy>("invalid‡string", buffer));
+    CHECK(buffer.empty());
+    CHECK(!arm::pipe::StringToSwTraceString<arm::pipe::SwTraceCharPolicy>("12Ž34", buffer));
+    CHECK(buffer.empty());
 }
 
-BOOST_AUTO_TEST_CASE(StringToSwTraceNameStringTest)
+TEST_CASE("StringToSwTraceNameStringTest")
 {
     std::vector<uint32_t> buffer;
 
@@ -2267,23 +2269,23 @@
     StringToSwTraceStringTestHelper<arm::pipe::SwTraceNameCharPolicy>("VALID_string_456", buffer, 6);
 
     // Invalid SWTrace name strings
-    BOOST_CHECK(!arm::pipe::StringToSwTraceString<arm::pipe::SwTraceNameCharPolicy>(" ", buffer));
-    BOOST_CHECK(buffer.empty());
-    BOOST_CHECK(!arm::pipe::StringToSwTraceString<arm::pipe::SwTraceNameCharPolicy>("invalid string", buffer));
-    BOOST_CHECK(buffer.empty());
-    BOOST_CHECK(!arm::pipe::StringToSwTraceString<arm::pipe::SwTraceNameCharPolicy>("!$%", buffer));
-    BOOST_CHECK(buffer.empty());
-    BOOST_CHECK(!arm::pipe::StringToSwTraceString<arm::pipe::SwTraceNameCharPolicy>("invalid|\\~string#123", buffer));
-    BOOST_CHECK(buffer.empty());
-    BOOST_CHECK(!arm::pipe::StringToSwTraceString<arm::pipe::SwTraceNameCharPolicy>("€£", buffer));
-    BOOST_CHECK(buffer.empty());
-    BOOST_CHECK(!arm::pipe::StringToSwTraceString<arm::pipe::SwTraceNameCharPolicy>("invalid‡string", buffer));
-    BOOST_CHECK(buffer.empty());
-    BOOST_CHECK(!arm::pipe::StringToSwTraceString<arm::pipe::SwTraceNameCharPolicy>("12Ž34", buffer));
-    BOOST_CHECK(buffer.empty());
+    CHECK(!arm::pipe::StringToSwTraceString<arm::pipe::SwTraceNameCharPolicy>(" ", buffer));
+    CHECK(buffer.empty());
+    CHECK(!arm::pipe::StringToSwTraceString<arm::pipe::SwTraceNameCharPolicy>("invalid string", buffer));
+    CHECK(buffer.empty());
+    CHECK(!arm::pipe::StringToSwTraceString<arm::pipe::SwTraceNameCharPolicy>("!$%", buffer));
+    CHECK(buffer.empty());
+    CHECK(!arm::pipe::StringToSwTraceString<arm::pipe::SwTraceNameCharPolicy>("invalid|\\~string#123", buffer));
+    CHECK(buffer.empty());
+    CHECK(!arm::pipe::StringToSwTraceString<arm::pipe::SwTraceNameCharPolicy>("€£", buffer));
+    CHECK(buffer.empty());
+    CHECK(!arm::pipe::StringToSwTraceString<arm::pipe::SwTraceNameCharPolicy>("invalid‡string", buffer));
+    CHECK(buffer.empty());
+    CHECK(!arm::pipe::StringToSwTraceString<arm::pipe::SwTraceNameCharPolicy>("12Ž34", buffer));
+    CHECK(buffer.empty());
 }
 
-BOOST_AUTO_TEST_CASE(CheckPeriodicCounterCaptureThread)
+TEST_CASE("CheckPeriodicCounterCaptureThread")
 {
     class CaptureReader : public IReadCounterValues
     {
@@ -2312,7 +2314,7 @@
         {
             if (counterUid > m_CounterSize)
             {
-                BOOST_FAIL("Invalid counter Uid");
+                FAIL("Invalid counter Uid");
             }
             return m_Data.at(counterUid).load();
         }
@@ -2321,7 +2323,7 @@
         {
             if (counterUid > m_CounterSize)
             {
-                BOOST_FAIL("Invalid counter Uid");
+                FAIL("Invalid counter Uid");
             }
             return m_Data.at(counterUid).load();
         }
@@ -2330,7 +2332,7 @@
         {
             if (counterUid > m_CounterSize)
             {
-                BOOST_FAIL("Invalid counter Uid");
+                FAIL("Invalid counter Uid");
             }
             m_Data.at(counterUid).store(value);
         }
@@ -2378,29 +2380,29 @@
     uint32_t headerWord0 = ReadUint32(buffer, 0);
     uint32_t headerWord1 = ReadUint32(buffer, 4);
 
-    BOOST_TEST(((headerWord0 >> 26) & 0x0000003F) == 3);    // packet family
-    BOOST_TEST(((headerWord0 >> 19) & 0x0000007F) == 0);    // packet class
-    BOOST_TEST(((headerWord0 >> 16) & 0x00000007) == 0);    // packet type
-    BOOST_TEST(headerWord1 == 20);
+    CHECK(((headerWord0 >> 26) & 0x0000003F) == 3);    // packet family
+    CHECK(((headerWord0 >> 19) & 0x0000007F) == 0);    // packet class
+    CHECK(((headerWord0 >> 16) & 0x00000007) == 0);    // packet type
+    CHECK(headerWord1 == 20);
 
     uint32_t offset    = 16;
     uint16_t readIndex = ReadUint16(buffer, offset);
-    BOOST_TEST(0 == readIndex);
+    CHECK(0 == readIndex);
 
     offset += 2;
     uint32_t readValue = ReadUint32(buffer, offset);
-    BOOST_TEST((valueA * numSteps) == readValue);
+    CHECK((valueA * numSteps) == readValue);
 
     offset += 4;
     readIndex = ReadUint16(buffer, offset);
-    BOOST_TEST(1 == readIndex);
+    CHECK(1 == readIndex);
 
     offset += 2;
     readValue = ReadUint32(buffer, offset);
-    BOOST_TEST((valueB * numSteps) == readValue);
+    CHECK((valueB * numSteps) == readValue);
 }
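
The offsets used in this test imply the following periodic counter capture layout: an 8-byte packet header, 8 bytes that the test skips before reading at offset 16 (presumably a 64-bit timestamp; that interpretation is an assumption, since the test never reads those bytes), and then a run of (uint16 counter index, uint32 counter value) pairs, which with two counters gives the data length of 20 asserted above. A sketch of walking those pairs follows; the little-endian byte readers are an assumption of the sketch, standing in for the ReadUint16/ReadUint32 helpers the tests use.

#include <cstdint>
#include <utility>
#include <vector>

// Minimal byte readers for the sketch; the little-endian byte order is an
// assumption here, not something the test above establishes.
inline uint16_t ReadU16(const unsigned char* buffer, unsigned int offset)
{
    return static_cast<uint16_t>(buffer[offset] | (buffer[offset + 1] << 8));
}

inline uint32_t ReadU32(const unsigned char* buffer, unsigned int offset)
{
    return static_cast<uint32_t>(buffer[offset]) |
           (static_cast<uint32_t>(buffer[offset + 1]) << 8) |
           (static_cast<uint32_t>(buffer[offset + 2]) << 16) |
           (static_cast<uint32_t>(buffer[offset + 3]) << 24);
}

// Walk the (counter index, counter value) pairs of a periodic counter capture
// packet. dataLength is headerWord1; the pairs start 16 bytes into the packet,
// after the packet header and the 8 bytes assumed to be a timestamp.
inline std::vector<std::pair<uint16_t, uint32_t>> ReadCounterSamples(const unsigned char* buffer,
                                                                     uint32_t dataLength)
{
    std::vector<std::pair<uint16_t, uint32_t>> samples;
    unsigned int offset    = 8u + 8u;          // packet header + timestamp
    const unsigned int end = 8u + dataLength;  // packet header + payload
    while (offset + 6u <= end)
    {
        samples.emplace_back(ReadU16(buffer, offset), ReadU32(buffer, offset + 2u));
        offset += 6u;
    }
    return samples;
}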
 
-BOOST_AUTO_TEST_CASE(RequestCounterDirectoryCommandHandlerTest1)
+TEST_CASE("RequestCounterDirectoryCommandHandlerTest1")
 {
     const uint32_t familyId = 0;
     const uint32_t packetId = 3;
@@ -2421,19 +2423,19 @@
     arm::pipe::Packet wrongPacket(wrongHeader);
 
     profilingStateMachine.TransitionToState(ProfilingState::Uninitialised);
-    BOOST_CHECK_THROW(commandHandler(wrongPacket), armnn::RuntimeException); // Wrong profiling state
+    CHECK_THROWS_AS(commandHandler(wrongPacket), armnn::RuntimeException); // Wrong profiling state
     profilingStateMachine.TransitionToState(ProfilingState::NotConnected);
-    BOOST_CHECK_THROW(commandHandler(wrongPacket), armnn::RuntimeException); // Wrong profiling state
+    CHECK_THROWS_AS(commandHandler(wrongPacket), armnn::RuntimeException); // Wrong profiling state
     profilingStateMachine.TransitionToState(ProfilingState::WaitingForAck);
-    BOOST_CHECK_THROW(commandHandler(wrongPacket), armnn::RuntimeException); // Wrong profiling state
+    CHECK_THROWS_AS(commandHandler(wrongPacket), armnn::RuntimeException); // Wrong profiling state
     profilingStateMachine.TransitionToState(ProfilingState::Active);
-    BOOST_CHECK_THROW(commandHandler(wrongPacket), armnn::InvalidArgumentException); // Wrong packet
+    CHECK_THROWS_AS(commandHandler(wrongPacket), armnn::InvalidArgumentException); // Wrong packet
 
     const uint32_t rightHeader = (packetId & 0x000003FF) << 16;
 
     arm::pipe::Packet rightPacket(rightHeader);
 
-    BOOST_CHECK_NO_THROW(commandHandler(rightPacket)); // Right packet
+    CHECK_NOTHROW(commandHandler(rightPacket)); // Right packet
 
     auto readBuffer1 = mockBuffer1.GetReadableBuffer();
 
@@ -2441,13 +2443,13 @@
     uint32_t header1Word1 = ReadUint32(readBuffer1, 4);
 
     // Counter directory packet
-    BOOST_TEST(((header1Word0 >> 26) & 0x0000003F) == 0); // packet family
-    BOOST_TEST(((header1Word0 >> 16) & 0x000003FF) == 2); // packet id
-    BOOST_TEST(header1Word1 == 24);                       // data length
+    CHECK(((header1Word0 >> 26) & 0x0000003F) == 0); // packet family
+    CHECK(((header1Word0 >> 16) & 0x000003FF) == 2); // packet id
+    CHECK(header1Word1 == 24);                       // data length
 
     uint32_t bodyHeader1Word0   = ReadUint32(readBuffer1, 8);
     uint16_t deviceRecordCount = armnn::numeric_cast<uint16_t>(bodyHeader1Word0 >> 16);
-    BOOST_TEST(deviceRecordCount == 0); // device_records_count
+    CHECK(deviceRecordCount == 0); // device_records_count
 
     auto readBuffer2 = mockBuffer2.GetReadableBuffer();
 
@@ -2455,12 +2457,12 @@
     uint32_t header2Word1 = ReadUint32(readBuffer2, 4);
 
     // Timeline message directory packet
-    BOOST_TEST(((header2Word0 >> 26) & 0x0000003F) == 1); // packet family
-    BOOST_TEST(((header2Word0 >> 16) & 0x000003FF) == 0); // packet id
-    BOOST_TEST(header2Word1 == 443);                      // data length
+    CHECK(((header2Word0 >> 26) & 0x0000003F) == 1); // packet family
+    CHECK(((header2Word0 >> 16) & 0x000003FF) == 0); // packet id
+    CHECK(header2Word1 == 443);                      // data length
 }
 
-BOOST_AUTO_TEST_CASE(RequestCounterDirectoryCommandHandlerTest2)
+TEST_CASE("RequestCounterDirectoryCommandHandlerTest2")
 {
     const uint32_t familyId = 0;
     const uint32_t packetId = 3;
@@ -2478,9 +2480,9 @@
     const arm::pipe::Packet packet(header);
 
     const Device* device = counterDirectory.RegisterDevice("deviceA", 1);
-    BOOST_CHECK(device != nullptr);
+    CHECK(device != nullptr);
     const CounterSet* counterSet = counterDirectory.RegisterCounterSet("countersetA");
-    BOOST_CHECK(counterSet != nullptr);
+    CHECK(counterSet != nullptr);
     counterDirectory.RegisterCategory("categoryA");
     counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID, 24,
                                      "categoryA", 0, 1, 2.0f, "counterA", "descA");
@@ -2488,22 +2490,22 @@
                                      "categoryA", 1, 1, 3.0f, "counterB", "descB");
 
     profilingStateMachine.TransitionToState(ProfilingState::Uninitialised);
-    BOOST_CHECK_THROW(commandHandler(packet), armnn::RuntimeException);    // Wrong profiling state
+    CHECK_THROWS_AS(commandHandler(packet), armnn::RuntimeException);    // Wrong profiling state
     profilingStateMachine.TransitionToState(ProfilingState::NotConnected);
-    BOOST_CHECK_THROW(commandHandler(packet), armnn::RuntimeException);    // Wrong profiling state
+    CHECK_THROWS_AS(commandHandler(packet), armnn::RuntimeException);    // Wrong profiling state
     profilingStateMachine.TransitionToState(ProfilingState::WaitingForAck);
-    BOOST_CHECK_THROW(commandHandler(packet), armnn::RuntimeException);    // Wrong profiling state
+    CHECK_THROWS_AS(commandHandler(packet), armnn::RuntimeException);    // Wrong profiling state
     profilingStateMachine.TransitionToState(ProfilingState::Active);
-    BOOST_CHECK_NO_THROW(commandHandler(packet));
+    CHECK_NOTHROW(commandHandler(packet));
 
     auto readBuffer1 = mockBuffer1.GetReadableBuffer();
 
     const uint32_t header1Word0 = ReadUint32(readBuffer1, 0);
     const uint32_t header1Word1 = ReadUint32(readBuffer1, 4);
 
-    BOOST_TEST(((header1Word0 >> 26) & 0x0000003F) == 0); // packet family
-    BOOST_TEST(((header1Word0 >> 16) & 0x000003FF) == 2); // packet id
-    BOOST_TEST(header1Word1 == 236);                      // data length
+    CHECK(((header1Word0 >> 26) & 0x0000003F) == 0); // packet family
+    CHECK(((header1Word0 >> 16) & 0x000003FF) == 2); // packet id
+    CHECK(header1Word1 == 236);                      // data length
 
     const uint32_t bodyHeaderSizeBytes = bodyHeaderSize * sizeof(uint32_t);
 
@@ -2516,21 +2518,21 @@
     const uint16_t deviceRecordCount     = armnn::numeric_cast<uint16_t>(bodyHeader1Word0 >> 16);
     const uint16_t counterSetRecordCount = armnn::numeric_cast<uint16_t>(bodyHeader1Word2 >> 16);
     const uint16_t categoryRecordCount   = armnn::numeric_cast<uint16_t>(bodyHeader1Word4 >> 16);
-    BOOST_TEST(deviceRecordCount == 1);                      // device_records_count
-    BOOST_TEST(bodyHeader1Word1 == 0 + bodyHeaderSizeBytes);      // device_records_pointer_table_offset
-    BOOST_TEST(counterSetRecordCount == 1);                  // counter_set_count
-    BOOST_TEST(bodyHeader1Word3 == 4 + bodyHeaderSizeBytes);      // counter_set_pointer_table_offset
-    BOOST_TEST(categoryRecordCount == 1);                    // categories_count
-    BOOST_TEST(bodyHeader1Word5 == 8 + bodyHeaderSizeBytes);      // categories_pointer_table_offset
+    CHECK(deviceRecordCount == 1);                      // device_records_count
+    CHECK(bodyHeader1Word1 == 0 + bodyHeaderSizeBytes);      // device_records_pointer_table_offset
+    CHECK(counterSetRecordCount == 1);                  // counter_set_count
+    CHECK(bodyHeader1Word3 == 4 + bodyHeaderSizeBytes);      // counter_set_pointer_table_offset
+    CHECK(categoryRecordCount == 1);                    // categories_count
+    CHECK(bodyHeader1Word5 == 8 + bodyHeaderSizeBytes);      // categories_pointer_table_offset
 
     const uint32_t deviceRecordOffset = ReadUint32(readBuffer1, 32);
-    BOOST_TEST(deviceRecordOffset == 12);
+    CHECK(deviceRecordOffset == 12);
 
     const uint32_t counterSetRecordOffset = ReadUint32(readBuffer1, 36);
-    BOOST_TEST(counterSetRecordOffset == 28);
+    CHECK(counterSetRecordOffset == 28);
 
     const uint32_t categoryRecordOffset = ReadUint32(readBuffer1, 40);
-    BOOST_TEST(categoryRecordOffset == 48);
+    CHECK(categoryRecordOffset == 48);
 
     auto readBuffer2 = mockBuffer2.GetReadableBuffer();
 
@@ -2538,12 +2540,12 @@
     const uint32_t header2Word1 = ReadUint32(readBuffer2, 4);
 
     // Timeline message directory packet
-    BOOST_TEST(((header2Word0 >> 26) & 0x0000003F) == 1); // packet family
-    BOOST_TEST(((header2Word0 >> 16) & 0x000003FF) == 0); // packet id
-    BOOST_TEST(header2Word1 == 443);                      // data length
+    CHECK(((header2Word0 >> 26) & 0x0000003F) == 1); // packet family
+    CHECK(((header2Word0 >> 16) & 0x000003FF) == 0); // packet id
+    CHECK(header2Word1 == 443);                      // data length
 }
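
The assertions in these two tests spell out the counter directory body header: six 32-bit words immediately after the packet header, where words 0, 2 and 4 carry the device, counter-set and category record counts in their upper 16 bits, and words 1, 3 and 5 carry the offsets of the corresponding pointer tables, measured from the start of the payload (hence the "+ bodyHeaderSizeBytes" above, with the pointer tables then read at offsets 32, 36 and 40). A compact decode of that header, for illustration only:

#include <cstdint>

// Decoded counter directory body header, as implied by the assertions above.
// All three pointer-table offsets are relative to the start of the payload,
// i.e. to the first word of this body header.
struct CounterDirectoryBodyHeader
{
    uint16_t deviceRecordCount;
    uint32_t devicePointerTableOffset;
    uint16_t counterSetRecordCount;
    uint32_t counterSetPointerTableOffset;
    uint16_t categoryRecordCount;
    uint32_t categoryPointerTableOffset;
};

// 'words' points at the six body header words, i.e. 8 bytes into the packet.
inline CounterDirectoryBodyHeader DecodeCounterDirectoryBodyHeader(const uint32_t words[6])
{
    return { static_cast<uint16_t>(words[0] >> 16), words[1],
             static_cast<uint16_t>(words[2] >> 16), words[3],
             static_cast<uint16_t>(words[4] >> 16), words[5] };
}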
 
-BOOST_AUTO_TEST_CASE(CheckProfilingServiceGoodConnectionAcknowledgedPacket)
+TEST_CASE("CheckProfilingServiceGoodConnectionAcknowledgedPacket")
 {
     unsigned int streamMetadataPacketsize = GetStreamMetaDataPacketSize();
 
@@ -2557,23 +2559,23 @@
     SwapProfilingConnectionFactoryHelper helper(profilingService);
 
     // Bring the profiling service to the "WaitingForAck" state
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
     profilingService.Update();    // Initialize the counter directory
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
     profilingService.Update();    // Create the profiling connection
 
     // Get the mock profiling connection
     MockProfilingConnection* mockProfilingConnection = helper.GetMockProfilingConnection();
-    BOOST_CHECK(mockProfilingConnection);
+    CHECK(mockProfilingConnection);
 
     // Remove the packets received so far
     mockProfilingConnection->Clear();
 
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::WaitingForAck);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::WaitingForAck);
     profilingService.Update();    // Start the command handler and the send thread
 
     // Wait for the Stream Metadata packet to be sent
-    BOOST_CHECK(helper.WaitForPacketsSent(
+    CHECK(helper.WaitForPacketsSent(
             mockProfilingConnection, PacketType::StreamMetaData, streamMetadataPacketsize) >= 1);
 
     // Write a valid "Connection Acknowledged" packet into the mock profiling connection, to simulate a valid
@@ -2595,17 +2597,17 @@
     mockProfilingConnection->WritePacket(std::move(connectionAcknowledgedPacket));
 
     // Wait for the counter directory packet to ensure the ConnectionAcknowledgedCommandHandler has run.
-    BOOST_CHECK(helper.WaitForPacketsSent(mockProfilingConnection, PacketType::CounterDirectory) == 1);
+    CHECK(helper.WaitForPacketsSent(mockProfilingConnection, PacketType::CounterDirectory) == 1);
 
     // The Connection Acknowledged Command Handler should have updated the profiling state accordingly
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
 
     // Reset the profiling service to stop any running thread
     options.m_EnableProfiling = false;
     profilingService.ResetExternalProfilingOptions(options, true);
 }
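
Each profiling-service test below repeats the same warm-up: assert Uninitialised, call Update() to step through NotConnected and WaitingForAck and start the threads, fetch the mock connection, and then force the Active state. A helper of roughly this shape could factor that out. It is only a sketch: the template parameters avoid committing to the exact service and helper types, the helper is not part of the patch, and the calls themselves (Update, GetCurrentState, GetMockProfilingConnection, ForceTransitionToState) are the ones the tests already use.

// Sketch of a shared warm-up for the profiling service tests in this file.
template <typename ProfilingServiceT, typename HelperT>
MockProfilingConnection* BringProfilingServiceToActive(ProfilingServiceT& profilingService, HelperT& helper)
{
    CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
    profilingService.Update();    // Initialize the counter directory
    CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
    profilingService.Update();    // Create the profiling connection
    CHECK(profilingService.GetCurrentState() == ProfilingState::WaitingForAck);
    profilingService.Update();    // Start the command handler and the send thread

    MockProfilingConnection* mockProfilingConnection = helper.GetMockProfilingConnection();
    CHECK(mockProfilingConnection);

    helper.ForceTransitionToState(ProfilingState::Active);
    CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
    return mockProfilingConnection;
}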
 
-BOOST_AUTO_TEST_CASE(CheckProfilingServiceGoodRequestCounterDirectoryPacket)
+TEST_CASE("CheckProfilingServiceGoodRequestCounterDirectoryPacket")
 {
     // Reset the profiling service to the uninitialized state
     armnn::IRuntime::CreationOptions::ExternalProfilingOptions options;
@@ -2617,20 +2619,20 @@
     SwapProfilingConnectionFactoryHelper helper(profilingService);
 
     // Bring the profiling service to the "Active" state
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
     profilingService.Update();    // Initialize the counter directory
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
     profilingService.Update();    // Create the profiling connection
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::WaitingForAck);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::WaitingForAck);
     profilingService.Update();    // Start the command handler and the send thread
 
     // Get the mock profiling connection
     MockProfilingConnection* mockProfilingConnection = helper.GetMockProfilingConnection();
-    BOOST_CHECK(mockProfilingConnection);
+    CHECK(mockProfilingConnection);
 
     // Force the profiling service to the "Active" state
     helper.ForceTransitionToState(ProfilingState::Active);
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
 
     // Write a valid "Request Counter Directory" packet into the mock profiling connection, to simulate a valid
     // reply from an external profiling service
@@ -2652,18 +2654,18 @@
 
     // Expecting one CounterDirectory Packet of length 652
     // and one TimelineMessageDirectory packet of length 451
-    BOOST_CHECK(helper.WaitForPacketsSent(mockProfilingConnection, PacketType::CounterDirectory, 652) == 1);
-    BOOST_CHECK(helper.WaitForPacketsSent(mockProfilingConnection, PacketType::TimelineMessageDirectory, 451) == 1);
+    CHECK(helper.WaitForPacketsSent(mockProfilingConnection, PacketType::CounterDirectory, 652) == 1);
+    CHECK(helper.WaitForPacketsSent(mockProfilingConnection, PacketType::TimelineMessageDirectory, 451) == 1);
 
     // The Request Counter Directory Command Handler should not have updated the profiling state
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
 
     // Reset the profiling service to stop any running thread
     options.m_EnableProfiling = false;
     profilingService.ResetExternalProfilingOptions(options, true);
 }
 
-BOOST_AUTO_TEST_CASE(CheckProfilingServiceBadPeriodicCounterSelectionPacketInvalidCounterUid)
+TEST_CASE("CheckProfilingServiceBadPeriodicCounterSelectionPacketInvalidCounterUid")
 {
     // Reset the profiling service to the uninitialized state
     armnn::IRuntime::CreationOptions::ExternalProfilingOptions options;
@@ -2675,20 +2677,20 @@
     SwapProfilingConnectionFactoryHelper helper(profilingService);
 
     // Bring the profiling service to the "Active" state
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
     profilingService.Update();    // Initialize the counter directory
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
     profilingService.Update();    // Create the profiling connection
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::WaitingForAck);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::WaitingForAck);
     profilingService.Update();    // Start the command handler and the send thread
 
     // Get the mock profiling connection
     MockProfilingConnection* mockProfilingConnection = helper.GetMockProfilingConnection();
-    BOOST_CHECK(mockProfilingConnection);
+    CHECK(mockProfilingConnection);
 
     // Force the profiling service to the "Active" state
     helper.ForceTransitionToState(ProfilingState::Active);
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
 
     // Remove the packets received so far
     mockProfilingConnection->Clear();
@@ -2710,7 +2712,7 @@
     // Get the first valid counter UID
     const ICounterDirectory& counterDirectory = profilingService.GetCounterDirectory();
     const Counters& counters                  = counterDirectory.GetCounters();
-    BOOST_CHECK(counters.size() > 1);
+    CHECK(counters.size() > 1);
     uint16_t counterUidA = counters.begin()->first;    // First valid counter UID
     uint16_t counterUidB = 9999;                       // Second invalid counter UID
 
@@ -2731,18 +2733,18 @@
 
     // Expecting one Periodic Counter Selection packet of length 14
     // and at least one Periodic Counter Capture packet of length 22
-    BOOST_CHECK(helper.WaitForPacketsSent(mockProfilingConnection, PacketType::PeriodicCounterSelection, 14) == 1);
-    BOOST_CHECK(helper.WaitForPacketsSent(mockProfilingConnection, PacketType::PeriodicCounterCapture, 22) >= 1);
+    CHECK(helper.WaitForPacketsSent(mockProfilingConnection, PacketType::PeriodicCounterSelection, 14) == 1);
+    CHECK(helper.WaitForPacketsSent(mockProfilingConnection, PacketType::PeriodicCounterCapture, 22) >= 1);
 
     // The Periodic Counter Selection Handler should not have updated the profiling state
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
 
     // Reset the profiling service to stop any running thread
     options.m_EnableProfiling = false;
     profilingService.ResetExternalProfilingOptions(options, true);
 }
 
-BOOST_AUTO_TEST_CASE(CheckProfilingServiceGoodPeriodicCounterSelectionPacketNoCounters)
+TEST_CASE("CheckProfilingServiceGoodPeriodicCounterSelectionPacketNoCounters")
 {
     // Reset the profiling service to the uninitialized state
     armnn::IRuntime::CreationOptions::ExternalProfilingOptions options;
@@ -2754,16 +2756,16 @@
     SwapProfilingConnectionFactoryHelper helper(profilingService);
 
     // Bring the profiling service to the "Active" state
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
     profilingService.Update();    // Initialize the counter directory
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
     profilingService.Update();    // Create the profiling connection
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::WaitingForAck);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::WaitingForAck);
     profilingService.Update();    // Start the command handler and the send thread
 
     // Get the mock profiling connection
     MockProfilingConnection* mockProfilingConnection = helper.GetMockProfilingConnection();
-    BOOST_CHECK(mockProfilingConnection);
+    CHECK(mockProfilingConnection);
 
     // Wait for the Stream Metadata packet to be sent
     // (we are not testing the connection acknowledgement here so it will be ignored by this test)
@@ -2771,7 +2773,7 @@
 
     // Force the profiling service to the "Active" state
     helper.ForceTransitionToState(ProfilingState::Active);
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
 
     // Write a "Periodic Counter Selection" packet into the mock profiling connection, to simulate an input from an
     // external profiling service
@@ -2794,20 +2796,20 @@
 
     // Wait for the Periodic Counter Selection packet of length 12 to be sent
     // The expected Periodic Counter Selection packet echoes the one that was sent, hence its size of 12
-    BOOST_CHECK(helper.WaitForPacketsSent(mockProfilingConnection, PacketType::PeriodicCounterSelection, 12) == 1);
+    CHECK(helper.WaitForPacketsSent(mockProfilingConnection, PacketType::PeriodicCounterSelection, 12) == 1);
 
     // The Periodic Counter Selection Handler should not have updated the profiling state
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
 
     // No Periodic Counter packets are expected
-    BOOST_CHECK(helper.WaitForPacketsSent(mockProfilingConnection, PacketType::PeriodicCounterCapture, 0, 0) == 0);
+    CHECK(helper.WaitForPacketsSent(mockProfilingConnection, PacketType::PeriodicCounterCapture, 0, 0) == 0);
 
     // Reset the profiling service to stop any running thread
     options.m_EnableProfiling = false;
     profilingService.ResetExternalProfilingOptions(options, true);
 }
 
-BOOST_AUTO_TEST_CASE(CheckProfilingServiceGoodPeriodicCounterSelectionPacketSingleCounter)
+TEST_CASE("CheckProfilingServiceGoodPeriodicCounterSelectionPacketSingleCounter")
 {
     // Reset the profiling service to the uninitialized state
     armnn::IRuntime::CreationOptions::ExternalProfilingOptions options;
@@ -2819,16 +2821,16 @@
     SwapProfilingConnectionFactoryHelper helper(profilingService);
 
     // Bring the profiling service to the "Active" state
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
     profilingService.Update();    // Initialize the counter directory
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
     profilingService.Update();    // Create the profiling connection
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::WaitingForAck);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::WaitingForAck);
     profilingService.Update();    // Start the command handler and the send thread
 
     // Get the mock profiling connection
     MockProfilingConnection* mockProfilingConnection = helper.GetMockProfilingConnection();
-    BOOST_CHECK(mockProfilingConnection);
+    CHECK(mockProfilingConnection);
 
     // Wait for the Stream Metadata packet to be sent
     // (we are not testing the connection acknowledgement here so it will be ignored by this test)
@@ -2836,7 +2838,7 @@
 
     // Force the profiling service to the "Active" state
     helper.ForceTransitionToState(ProfilingState::Active);
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
 
     // Write a "Periodic Counter Selection" packet into the mock profiling connection, to simulate an input from an
     // external profiling service
@@ -2855,7 +2857,7 @@
     // Get the first valid counter UID
     const ICounterDirectory& counterDirectory = profilingService.GetCounterDirectory();
     const Counters& counters                  = counterDirectory.GetCounters();
-    BOOST_CHECK(!counters.empty());
+    CHECK(!counters.empty());
     uint16_t counterUid = counters.begin()->first;    // Valid counter UID
 
     uint32_t length = 6;
@@ -2873,18 +2875,18 @@
 
     // Expecting one Periodic Counter Selection packet of length 14
     // and at least one Periodic Counter Capture packet of length 22
-    BOOST_CHECK(helper.WaitForPacketsSent(mockProfilingConnection, PacketType::PeriodicCounterSelection, 14) == 1);
-    BOOST_CHECK(helper.WaitForPacketsSent(mockProfilingConnection, PacketType::PeriodicCounterCapture, 22) >= 1);
+    CHECK(helper.WaitForPacketsSent(mockProfilingConnection, PacketType::PeriodicCounterSelection, 14) == 1);
+    CHECK(helper.WaitForPacketsSent(mockProfilingConnection, PacketType::PeriodicCounterCapture, 22) >= 1);
 
     // The Periodic Counter Selection Handler should not have updated the profiling state
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
 
     // Reset the profiling service to stop any running thread
     options.m_EnableProfiling = false;
     profilingService.ResetExternalProfilingOptions(options, true);
 }
 
-BOOST_AUTO_TEST_CASE(CheckProfilingServiceGoodPeriodicCounterSelectionPacketMultipleCounters)
+TEST_CASE("CheckProfilingServiceGoodPeriodicCounterSelectionPacketMultipleCounters")
 {
     // Reset the profiling service to the uninitialized state
     armnn::IRuntime::CreationOptions::ExternalProfilingOptions options;
@@ -2896,16 +2898,16 @@
     SwapProfilingConnectionFactoryHelper helper(profilingService);
 
     // Bring the profiling service to the "Active" state
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
     profilingService.Update();    // Initialize the counter directory
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
     profilingService.Update();    // Create the profiling connection
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::WaitingForAck);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::WaitingForAck);
     profilingService.Update();    // Start the command handler and the send thread
 
     // Get the mock profiling connection
     MockProfilingConnection* mockProfilingConnection = helper.GetMockProfilingConnection();
-    BOOST_CHECK(mockProfilingConnection);
+    CHECK(mockProfilingConnection);
 
     // Wait for the Stream Metadata packet to be sent
     // (we are not testing the connection acknowledgement here so it will be ignored by this test)
@@ -2913,7 +2915,7 @@
 
     // Force the profiling service to the "Active" state
     helper.ForceTransitionToState(ProfilingState::Active);
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
 
     // Write a "Periodic Counter Selection" packet into the mock profiling connection, to simulate an input from an
     // external profiling service
@@ -2932,7 +2934,7 @@
     // Get the first valid counter UID
     const ICounterDirectory& counterDirectory = profilingService.GetCounterDirectory();
     const Counters& counters                  = counterDirectory.GetCounters();
-    BOOST_CHECK(counters.size() > 1);
+    CHECK(counters.size() > 1);
     uint16_t counterUidA = counters.begin()->first;        // First valid counter UID
     uint16_t counterUidB = (++counters.begin())->first;    // Second valid counter UID
 
@@ -2952,18 +2954,18 @@
 
     // Expecting one PeriodicCounterSelection Packet with a length of 16
     // And at least one PeriodicCounterCapture Packet with a length of 28
-    BOOST_CHECK(helper.WaitForPacketsSent(mockProfilingConnection, PacketType::PeriodicCounterSelection, 16) == 1);
-    BOOST_CHECK(helper.WaitForPacketsSent(mockProfilingConnection, PacketType::PeriodicCounterCapture, 28) >= 1);
+    CHECK(helper.WaitForPacketsSent(mockProfilingConnection, PacketType::PeriodicCounterSelection, 16) == 1);
+    CHECK(helper.WaitForPacketsSent(mockProfilingConnection, PacketType::PeriodicCounterCapture, 28) >= 1);
 
     // The Periodic Counter Selection Handler should not have updated the profiling state
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
 
     // Reset the profiling service to stop any running thread
     options.m_EnableProfiling = false;
     profilingService.ResetExternalProfilingOptions(options, true);
 }
 
-BOOST_AUTO_TEST_CASE(CheckProfilingServiceDisconnect)
+TEST_CASE("CheckProfilingServiceDisconnect")
 {
     // Reset the profiling service to the uninitialized state
     armnn::IRuntime::CreationOptions::ExternalProfilingOptions options;
@@ -2975,28 +2977,28 @@
     SwapProfilingConnectionFactoryHelper helper(profilingService);
 
     // Try to disconnect the profiling service while in the "Uninitialised" state
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
     profilingService.Disconnect();
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);    // The state should not change
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);    // The state should not change
 
     // Try to disconnect the profiling service while in the "NotConnected" state
     profilingService.Update();    // Initialize the counter directory
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
     profilingService.Disconnect();
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);    // The state should not change
+    CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);    // The state should not change
 
     // Try to disconnect the profiling service while in the "WaitingForAck" state
     profilingService.Update();    // Create the profiling connection
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::WaitingForAck);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::WaitingForAck);
     profilingService.Disconnect();
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::WaitingForAck);    // The state should not change
+    CHECK(profilingService.GetCurrentState() == ProfilingState::WaitingForAck);    // The state should not change
 
     // Try to disconnect the profiling service while in the "Active" state
     profilingService.Update();    // Start the command handler and the send thread
 
     // Get the mock profiling connection
     MockProfilingConnection* mockProfilingConnection = helper.GetMockProfilingConnection();
-    BOOST_CHECK(mockProfilingConnection);
+    CHECK(mockProfilingConnection);
 
     // Wait for the Stream Metadata packet to be sent
     // (we are not testing the connection acknowledgement here so it will be ignored by this test)
@@ -3004,24 +3006,24 @@
 
     // Force the profiling service to the "Active" state
     helper.ForceTransitionToState(ProfilingState::Active);
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
 
     // Check that the profiling connection is open
-    BOOST_CHECK(mockProfilingConnection->IsOpen());
+    CHECK(mockProfilingConnection->IsOpen());
 
     profilingService.Disconnect();
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);   // The state should have changed
+    CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);   // The state should have changed
 
     // Check that the profiling connection has been reset
     mockProfilingConnection = helper.GetMockProfilingConnection();
-    BOOST_CHECK(mockProfilingConnection == nullptr);
+    CHECK(mockProfilingConnection == nullptr);
 
     // Reset the profiling service to stop any running thread
     options.m_EnableProfiling = false;
     profilingService.ResetExternalProfilingOptions(options, true);
 }
 
-BOOST_AUTO_TEST_CASE(CheckProfilingServiceGoodPerJobCounterSelectionPacket)
+TEST_CASE("CheckProfilingServiceGoodPerJobCounterSelectionPacket")
 {
     // Reset the profiling service to the uninitialized state
     armnn::IRuntime::CreationOptions::ExternalProfilingOptions options;
@@ -3033,16 +3035,16 @@
     SwapProfilingConnectionFactoryHelper helper(profilingService);
 
     // Bring the profiling service to the "Active" state
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
     profilingService.Update();    // Initialize the counter directory
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
     profilingService.Update();    // Create the profiling connection
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::WaitingForAck);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::WaitingForAck);
     profilingService.Update();    // Start the command handler and the send thread
 
     // Get the mock profiling connection
     MockProfilingConnection* mockProfilingConnection = helper.GetMockProfilingConnection();
-    BOOST_CHECK(mockProfilingConnection);
+    CHECK(mockProfilingConnection);
 
     // Wait for the Stream Metadata packet to be sent
     // (we are not testing the connection acknowledgement here so it will be ignored by this test)
@@ -3050,7 +3052,7 @@
 
     // Force the profiling service to the "Active" state
     helper.ForceTransitionToState(ProfilingState::Active);
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
 
     // Write a "Per-Job Counter Selection" packet into the mock profiling connection, to simulate an input from an
     // external profiling service
@@ -3076,47 +3078,47 @@
     std::this_thread::sleep_for(std::chrono::milliseconds(5));
 
     // The Per-Job Counter Selection Command Handler should not have updated the profiling state
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Active);
 
     // The Per-Job Counter Selection packets are dropped silently, so there should be no reply coming
     // from the profiling service
     const auto StreamMetaDataSize = static_cast<unsigned long>(
             helper.WaitForPacketsSent(mockProfilingConnection, PacketType::StreamMetaData, 0, 0));
-    BOOST_CHECK(StreamMetaDataSize == mockProfilingConnection->GetWrittenDataSize());
+    CHECK(StreamMetaDataSize == mockProfilingConnection->GetWrittenDataSize());
 
     // Reset the profiling service to stop any running thread
     options.m_EnableProfiling = false;
     profilingService.ResetExternalProfilingOptions(options, true);
 }
 
-BOOST_AUTO_TEST_CASE(CheckConfigureProfilingServiceOn)
+TEST_CASE("CheckConfigureProfilingServiceOn")
 {
     armnn::IRuntime::CreationOptions::ExternalProfilingOptions options;
     options.m_EnableProfiling          = true;
     armnn::profiling::ProfilingService profilingService;
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
     profilingService.ConfigureProfilingService(options);
     // should get as far as NOT_CONNECTED
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
     // Reset the profiling service to stop any running thread
     options.m_EnableProfiling = false;
     profilingService.ResetExternalProfilingOptions(options, true);
 }
 
-BOOST_AUTO_TEST_CASE(CheckConfigureProfilingServiceOff)
+TEST_CASE("CheckConfigureProfilingServiceOff")
 {
     armnn::IRuntime::CreationOptions::ExternalProfilingOptions options;
     armnn::profiling::ProfilingService profilingService;
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
     profilingService.ConfigureProfilingService(options);
     // should not move from Uninitialised
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
     // Reset the profiling service to stop any running thread
     options.m_EnableProfiling = false;
     profilingService.ResetExternalProfilingOptions(options, true);
 }
 
-BOOST_AUTO_TEST_CASE(CheckProfilingServiceEnabled)
+TEST_CASE("CheckProfilingServiceEnabled")
 {
     // Locally reduce log level to "Warning", as this test needs to parse a warning message from the standard output
     LogLevelSwapper logLevelSwapper(armnn::LogSeverity::Warning);
@@ -3124,9 +3126,9 @@
     options.m_EnableProfiling          = true;
     armnn::profiling::ProfilingService profilingService;
     profilingService.ResetExternalProfilingOptions(options, true);
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
     profilingService.Update();
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
 
     // Redirect the output to a local stream so that we can parse the warning message
     std::stringstream ss;
@@ -3143,25 +3145,25 @@
     if (ss.str().find("Cannot connect to stream socket: Connection refused") == std::string::npos)
     {
         std::cout << ss.str();
-        BOOST_FAIL("Expected string not found.");
+        FAIL("Expected string not found.");
     }
 }
 
-BOOST_AUTO_TEST_CASE(CheckProfilingServiceEnabledRuntime)
+TEST_CASE("CheckProfilingServiceEnabledRuntime")
 {
     // Locally reduce log level to "Warning", as this test needs to parse a warning message from the standard output
     LogLevelSwapper logLevelSwapper(armnn::LogSeverity::Warning);
     armnn::IRuntime::CreationOptions::ExternalProfilingOptions options;
     armnn::profiling::ProfilingService profilingService;
     profilingService.ResetExternalProfilingOptions(options, true);
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
     profilingService.Update();
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
     options.m_EnableProfiling = true;
     profilingService.ResetExternalProfilingOptions(options);
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
     profilingService.Update();
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
 
     // Redirect the output to a local stream so that we can parse the warning message
     std::stringstream ss;
@@ -3178,11 +3180,11 @@
     if (ss.str().find("Cannot connect to stream socket: Connection refused") == std::string::npos)
     {
         std::cout << ss.str();
-        BOOST_FAIL("Expected string not found.");
+        FAIL("Expected string not found.");
     }
 }
 
-BOOST_AUTO_TEST_CASE(CheckProfilingServiceBadConnectionAcknowledgedPacket)
+TEST_CASE("CheckProfilingServiceBadConnectionAcknowledgedPacket")
 {
     // Locally reduce log level to "Warning", as this test needs to parse a warning message from the standard output
     LogLevelSwapper logLevelSwapper(armnn::LogSeverity::Warning);
@@ -3202,16 +3204,16 @@
     SwapProfilingConnectionFactoryHelper helper(profilingService);
 
     // Bring the profiling service to the "WaitingForAck" state
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
     profilingService.Update();    // Initialize the counter directory
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
     profilingService.Update();    // Create the profiling connection
 
     // Get the mock profiling connection
     MockProfilingConnection* mockProfilingConnection = helper.GetMockProfilingConnection();
-    BOOST_CHECK(mockProfilingConnection);
+    CHECK(mockProfilingConnection);
 
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::WaitingForAck);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::WaitingForAck);
 
     // Connection Acknowledged Packet header (word 0, word 1 is always zero):
     // 26:31 [6]  packet_family: Control Packet Family, value 0b000000
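The comment above describes the control-packet header layout that these tests decode: packet_family sits in bits 26:31 and, for control packets, packet_id in bits 16:25 (the same masks the CHECKs apply elsewhere in this file, e.g. ((headerWord0 >> 26) & 0x3F) and ((headerWord0 >> 16) & 0x3FF)). As a minimal illustrative sketch only (the helper name is invented and is not part of this patch), word 0 of such a header could be packed like this:

    // Hypothetical helper, shown for illustration; not present in the ArmNN sources.
    // Packs packet_family into bits 26:31 and packet_id into bits 16:25.
    uint32_t MakeControlHeaderWord0(uint32_t packetFamily, uint32_t packetId)
    {
        return ((packetFamily & 0x0000003Fu) << 26) | ((packetId & 0x000003FFu) << 16);
    }
    // e.g. MakeControlHeaderWord0(0, 37) corresponds to the deliberately invalid
    // PacketId=37 that the warning parsed later in this test refers to.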
@@ -3241,11 +3243,11 @@
     if (ss.str().find("Functor with requested PacketId=37 and Version=4194304 does not exist") == std::string::npos)
     {
         std::cout << ss.str();
-        BOOST_FAIL("Expected string not found.");
+        FAIL("Expected string not found.");
     }
 }
 
-BOOST_AUTO_TEST_CASE(CheckProfilingServiceBadRequestCounterDirectoryPacket)
+TEST_CASE("CheckProfilingServiceBadRequestCounterDirectoryPacket")
 {
     // Locally reduce log level to "Warning", as this test needs to parse a warning message from the standard output
     LogLevelSwapper logLevelSwapper(armnn::LogSeverity::Warning);
@@ -3264,15 +3266,15 @@
     SwapProfilingConnectionFactoryHelper helper(profilingService);
 
     // Bring the profiling service to the "Active" state
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
     helper.ForceTransitionToState(ProfilingState::NotConnected);
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
     profilingService.Update();    // Create the profiling connection
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::WaitingForAck);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::WaitingForAck);
 
     // Get the mock profiling connection
     MockProfilingConnection* mockProfilingConnection = helper.GetMockProfilingConnection();
-    BOOST_CHECK(mockProfilingConnection);
+    CHECK(mockProfilingConnection);
 
     // Write a valid "Request Counter Directory" packet into the mock profiling connection, to simulate a valid
     // reply from an external profiling service
@@ -3305,11 +3307,11 @@
     if (ss.str().find("Functor with requested PacketId=123 and Version=4194304 does not exist") == std::string::npos)
     {
         std::cout << ss.str();
-        BOOST_FAIL("Expected string not found.");
+        FAIL("Expected string not found.");
     }
 }
 
-BOOST_AUTO_TEST_CASE(CheckProfilingServiceBadPeriodicCounterSelectionPacket)
+TEST_CASE("CheckProfilingServiceBadPeriodicCounterSelectionPacket")
 {
     // Locally reduce log level to "Warning", as this test needs to parse a warning message from the standard output
     LogLevelSwapper logLevelSwapper(armnn::LogSeverity::Warning);
@@ -3328,16 +3330,16 @@
     SwapProfilingConnectionFactoryHelper helper(profilingService);
 
     // Bring the profiling service to the "Active" state
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::Uninitialised);
     profilingService.Update();    // Initialize the counter directory
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
     profilingService.Update();    // Create the profiling connection
-    BOOST_CHECK(profilingService.GetCurrentState() == ProfilingState::WaitingForAck);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::WaitingForAck);
     profilingService.Update();    // Start the command handler and the send thread
 
     // Get the mock profiling connection
     MockProfilingConnection* mockProfilingConnection = helper.GetMockProfilingConnection();
-    BOOST_CHECK(mockProfilingConnection);
+    CHECK(mockProfilingConnection);
 
     // Write a "Periodic Counter Selection" packet into the mock profiling connection, to simulate an input from an
     // external profiling service
@@ -3370,15 +3372,15 @@
     if (ss.str().find("Functor with requested PacketId=999 and Version=4194304 does not exist") == std::string::npos)
     {
         std::cout << ss.str();
-        BOOST_FAIL("Expected string not found.");
+        FAIL("Expected string not found.");
     }
 }
 
-BOOST_AUTO_TEST_CASE(CheckCounterIdMap)
+TEST_CASE("CheckCounterIdMap")
 {
     CounterIdMap counterIdMap;
-    BOOST_CHECK_THROW(counterIdMap.GetBackendId(0), armnn::Exception);
-    BOOST_CHECK_THROW(counterIdMap.GetGlobalId(0, armnn::profiling::BACKEND_ID), armnn::Exception);
+    CHECK_THROWS_AS(counterIdMap.GetBackendId(0), armnn::Exception);
+    CHECK_THROWS_AS(counterIdMap.GetGlobalId(0, armnn::profiling::BACKEND_ID), armnn::Exception);
 
     uint16_t globalCounterIds = 0;
 
@@ -3399,22 +3401,22 @@
         ++globalCounterIds;
     }
 
-    BOOST_CHECK(counterIdMap.GetBackendId(0) == (std::pair<uint16_t, armnn::BackendId>(0, cpuRefId)));
-    BOOST_CHECK(counterIdMap.GetBackendId(1) == (std::pair<uint16_t, armnn::BackendId>(1, cpuRefId)));
-    BOOST_CHECK(counterIdMap.GetBackendId(2) == (std::pair<uint16_t, armnn::BackendId>(2, cpuRefId)));
-    BOOST_CHECK(counterIdMap.GetBackendId(3) == (std::pair<uint16_t, armnn::BackendId>(3, cpuRefId)));
-    BOOST_CHECK(counterIdMap.GetBackendId(4) == (std::pair<uint16_t, armnn::BackendId>(0, cpuAccId)));
-    BOOST_CHECK(counterIdMap.GetBackendId(5) == (std::pair<uint16_t, armnn::BackendId>(1, cpuAccId)));
+    CHECK(counterIdMap.GetBackendId(0) == (std::pair<uint16_t, armnn::BackendId>(0, cpuRefId)));
+    CHECK(counterIdMap.GetBackendId(1) == (std::pair<uint16_t, armnn::BackendId>(1, cpuRefId)));
+    CHECK(counterIdMap.GetBackendId(2) == (std::pair<uint16_t, armnn::BackendId>(2, cpuRefId)));
+    CHECK(counterIdMap.GetBackendId(3) == (std::pair<uint16_t, armnn::BackendId>(3, cpuRefId)));
+    CHECK(counterIdMap.GetBackendId(4) == (std::pair<uint16_t, armnn::BackendId>(0, cpuAccId)));
+    CHECK(counterIdMap.GetBackendId(5) == (std::pair<uint16_t, armnn::BackendId>(1, cpuAccId)));
 
-    BOOST_CHECK(counterIdMap.GetGlobalId(0, cpuRefId) == 0);
-    BOOST_CHECK(counterIdMap.GetGlobalId(1, cpuRefId) == 1);
-    BOOST_CHECK(counterIdMap.GetGlobalId(2, cpuRefId) == 2);
-    BOOST_CHECK(counterIdMap.GetGlobalId(3, cpuRefId) == 3);
-    BOOST_CHECK(counterIdMap.GetGlobalId(0, cpuAccId) == 4);
-    BOOST_CHECK(counterIdMap.GetGlobalId(1, cpuAccId) == 5);
+    CHECK(counterIdMap.GetGlobalId(0, cpuRefId) == 0);
+    CHECK(counterIdMap.GetGlobalId(1, cpuRefId) == 1);
+    CHECK(counterIdMap.GetGlobalId(2, cpuRefId) == 2);
+    CHECK(counterIdMap.GetGlobalId(3, cpuRefId) == 3);
+    CHECK(counterIdMap.GetGlobalId(0, cpuAccId) == 4);
+    CHECK(counterIdMap.GetGlobalId(1, cpuAccId) == 5);
 }
 
-BOOST_AUTO_TEST_CASE(CheckRegisterBackendCounters)
+TEST_CASE("CheckRegisterBackendCounters")
 {
     uint16_t globalCounterIds = armnn::profiling::INFERENCES_RUN;
     armnn::BackendId cpuRefId(armnn::Compute::CpuRef);
@@ -3429,22 +3431,22 @@
 
 
 
-    BOOST_CHECK(profilingService.GetCounterDirectory().GetCategories().empty());
+    CHECK(profilingService.GetCounterDirectory().GetCategories().empty());
     registerBackendCounters.RegisterCategory("categoryOne");
     auto categoryOnePtr = profilingService.GetCounterDirectory().GetCategory("categoryOne");
-    BOOST_CHECK(categoryOnePtr);
+    CHECK(categoryOnePtr);
 
-    BOOST_CHECK(profilingService.GetCounterDirectory().GetDevices().empty());
+    CHECK(profilingService.GetCounterDirectory().GetDevices().empty());
     globalCounterIds = registerBackendCounters.RegisterDevice("deviceOne");
     auto deviceOnePtr = profilingService.GetCounterDirectory().GetDevice(globalCounterIds);
-    BOOST_CHECK(deviceOnePtr);
-    BOOST_CHECK(deviceOnePtr->m_Name == "deviceOne");
+    CHECK(deviceOnePtr);
+    CHECK(deviceOnePtr->m_Name == "deviceOne");
 
-    BOOST_CHECK(profilingService.GetCounterDirectory().GetCounterSets().empty());
+    CHECK(profilingService.GetCounterDirectory().GetCounterSets().empty());
     globalCounterIds = registerBackendCounters.RegisterCounterSet("counterSetOne");
     auto counterSetOnePtr = profilingService.GetCounterDirectory().GetCounterSet(globalCounterIds);
-    BOOST_CHECK(counterSetOnePtr);
-    BOOST_CHECK(counterSetOnePtr->m_Name == "counterSetOne");
+    CHECK(counterSetOnePtr);
+    CHECK(counterSetOnePtr->m_Name == "counterSetOne");
 
     uint16_t newGlobalCounterId = registerBackendCounters.RegisterCounter(0,
                                                                           "categoryOne",
@@ -3453,19 +3455,19 @@
                                                                           1.f,
                                                                           "CounterOne",
                                                                           "first test counter");
-    BOOST_CHECK(newGlobalCounterId = armnn::profiling::INFERENCES_RUN + 1);
+    CHECK(newGlobalCounterId == armnn::profiling::INFERENCES_RUN + 1);
     uint16_t mappedGlobalId = profilingService.GetCounterMappings().GetGlobalId(0, cpuRefId);
-    BOOST_CHECK(mappedGlobalId == newGlobalCounterId);
+    CHECK(mappedGlobalId == newGlobalCounterId);
     auto backendMapping = profilingService.GetCounterMappings().GetBackendId(newGlobalCounterId);
-    BOOST_CHECK(backendMapping.first == 0);
-    BOOST_CHECK(backendMapping.second == cpuRefId);
+    CHECK(backendMapping.first == 0);
+    CHECK(backendMapping.second == cpuRefId);
 
     // Reset the profiling service to stop any running thread
     options.m_EnableProfiling = false;
     profilingService.ResetExternalProfilingOptions(options, true);
 }
 
-BOOST_AUTO_TEST_CASE(CheckCounterStatusQuery)
+TEST_CASE("CheckCounterStatusQuery")
 {
     armnn::IRuntime::CreationOptions options;
     options.m_ProfilingOptions.m_EnableProfiling = true;
@@ -3487,10 +3489,10 @@
     RegisterBackendCounters registerBackendCountersCpuRef(initialNumGlobalCounterIds, cpuRefId, profilingService);
 
     // Create 'testCategory' in CounterDirectory (backend agnostic)
-    BOOST_CHECK(profilingService.GetCounterDirectory().GetCategories().empty());
+    CHECK(profilingService.GetCounterDirectory().GetCategories().empty());
     registerBackendCountersCpuRef.RegisterCategory("testCategory");
     auto categoryOnePtr = profilingService.GetCounterDirectory().GetCategory("testCategory");
-    BOOST_CHECK(categoryOnePtr);
+    CHECK(categoryOnePtr);
 
     // Counters:
     // Global | Local | Backend
@@ -3504,21 +3506,21 @@
     // Register the backend counters for CpuRef and validate GetGlobalId and GetBackendId
     uint16_t currentNumGlobalCounterIds = registerBackendCountersCpuRef.RegisterCounter(
             0, "testCategory", 0, 0, 1.f, "CpuRefCounter0", "Zeroth CpuRef Counter");
-    BOOST_CHECK(currentNumGlobalCounterIds == initialNumGlobalCounterIds + 1);
+    CHECK(currentNumGlobalCounterIds == initialNumGlobalCounterIds + 1);
     uint16_t mappedGlobalId = profilingService.GetCounterMappings().GetGlobalId(0, cpuRefId);
-    BOOST_CHECK(mappedGlobalId == currentNumGlobalCounterIds);
+    CHECK(mappedGlobalId == currentNumGlobalCounterIds);
     auto backendMapping = profilingService.GetCounterMappings().GetBackendId(currentNumGlobalCounterIds);
-    BOOST_CHECK(backendMapping.first == 0);
-    BOOST_CHECK(backendMapping.second == cpuRefId);
+    CHECK(backendMapping.first == 0);
+    CHECK(backendMapping.second == cpuRefId);
 
     currentNumGlobalCounterIds = registerBackendCountersCpuRef.RegisterCounter(
             1, "testCategory", 0, 0, 1.f, "CpuRefCounter1", "First CpuRef Counter");
-    BOOST_CHECK(currentNumGlobalCounterIds == initialNumGlobalCounterIds + 2);
+    CHECK(currentNumGlobalCounterIds == initialNumGlobalCounterIds + 2);
     mappedGlobalId = profilingService.GetCounterMappings().GetGlobalId(1, cpuRefId);
-    BOOST_CHECK(mappedGlobalId == currentNumGlobalCounterIds);
+    CHECK(mappedGlobalId == currentNumGlobalCounterIds);
     backendMapping = profilingService.GetCounterMappings().GetBackendId(currentNumGlobalCounterIds);
-    BOOST_CHECK(backendMapping.first == 1);
-    BOOST_CHECK(backendMapping.second == cpuRefId);
+    CHECK(backendMapping.first == 1);
+    CHECK(backendMapping.second == cpuRefId);
 
     // Create RegisterBackendCounters for CpuAcc
     RegisterBackendCounters registerBackendCountersCpuAcc(currentNumGlobalCounterIds, cpuAccId, profilingService);
@@ -3526,12 +3528,12 @@
     // Register the backend counter for CpuAcc and validate GetGlobalId and GetBackendId
     currentNumGlobalCounterIds = registerBackendCountersCpuAcc.RegisterCounter(
             0, "testCategory", 0, 0, 1.f, "CpuAccCounter0", "Zeroth CpuAcc Counter");
-    BOOST_CHECK(currentNumGlobalCounterIds == initialNumGlobalCounterIds + 3);
+    CHECK(currentNumGlobalCounterIds == initialNumGlobalCounterIds + 3);
     mappedGlobalId = profilingService.GetCounterMappings().GetGlobalId(0, cpuAccId);
-    BOOST_CHECK(mappedGlobalId == currentNumGlobalCounterIds);
+    CHECK(mappedGlobalId == currentNumGlobalCounterIds);
     backendMapping = profilingService.GetCounterMappings().GetBackendId(currentNumGlobalCounterIds);
-    BOOST_CHECK(backendMapping.first == 0);
-    BOOST_CHECK(backendMapping.second == cpuAccId);
+    CHECK(backendMapping.first == 0);
+    CHECK(backendMapping.second == cpuAccId);
 
     // Create vectors for active counters
     const std::vector<uint16_t> activeGlobalCounterIds = {5}; // CpuRef(0) activated
@@ -3546,28 +3548,28 @@
     // Get vector of active counters for CpuRef and CpuAcc backends
     std::vector<CounterStatus> cpuRefCounterStatus = backendProfilingCpuRef.GetActiveCounters();
     std::vector<CounterStatus> cpuAccCounterStatus = backendProfilingCpuAcc.GetActiveCounters();
-    BOOST_CHECK_EQUAL(cpuRefCounterStatus.size(), 1);
-    BOOST_CHECK_EQUAL(cpuAccCounterStatus.size(), 0);
+    CHECK_EQ(cpuRefCounterStatus.size(), 1);
+    CHECK_EQ(cpuAccCounterStatus.size(), 0);
 
     // Check active CpuRef counter
-    BOOST_CHECK_EQUAL(cpuRefCounterStatus[0].m_GlobalCounterId, activeGlobalCounterIds[0]);
-    BOOST_CHECK_EQUAL(cpuRefCounterStatus[0].m_BackendCounterId, cpuRefCounters[0]);
-    BOOST_CHECK_EQUAL(cpuRefCounterStatus[0].m_SamplingRateInMicroseconds, capturePeriod);
-    BOOST_CHECK_EQUAL(cpuRefCounterStatus[0].m_Enabled, true);
+    CHECK_EQ(cpuRefCounterStatus[0].m_GlobalCounterId, activeGlobalCounterIds[0]);
+    CHECK_EQ(cpuRefCounterStatus[0].m_BackendCounterId, cpuRefCounters[0]);
+    CHECK_EQ(cpuRefCounterStatus[0].m_SamplingRateInMicroseconds, capturePeriod);
+    CHECK_EQ(cpuRefCounterStatus[0].m_Enabled, true);
 
     // Check inactive CpuRef counter
     CounterStatus inactiveCpuRefCounter = backendProfilingCpuRef.GetCounterStatus(cpuRefCounters[1]);
-    BOOST_CHECK_EQUAL(inactiveCpuRefCounter.m_GlobalCounterId, 6);
-    BOOST_CHECK_EQUAL(inactiveCpuRefCounter.m_BackendCounterId, cpuRefCounters[1]);
-    BOOST_CHECK_EQUAL(inactiveCpuRefCounter.m_SamplingRateInMicroseconds, 0);
-    BOOST_CHECK_EQUAL(inactiveCpuRefCounter.m_Enabled, false);
+    CHECK_EQ(inactiveCpuRefCounter.m_GlobalCounterId, 6);
+    CHECK_EQ(inactiveCpuRefCounter.m_BackendCounterId, cpuRefCounters[1]);
+    CHECK_EQ(inactiveCpuRefCounter.m_SamplingRateInMicroseconds, 0);
+    CHECK_EQ(inactiveCpuRefCounter.m_Enabled, false);
 
     // Check inactive CpuAcc counter
     CounterStatus inactiveCpuAccCounter = backendProfilingCpuAcc.GetCounterStatus(cpuAccCounters[0]);
-    BOOST_CHECK_EQUAL(inactiveCpuAccCounter.m_GlobalCounterId, 7);
-    BOOST_CHECK_EQUAL(inactiveCpuAccCounter.m_BackendCounterId, cpuAccCounters[0]);
-    BOOST_CHECK_EQUAL(inactiveCpuAccCounter.m_SamplingRateInMicroseconds, 0);
-    BOOST_CHECK_EQUAL(inactiveCpuAccCounter.m_Enabled, false);
+    CHECK_EQ(inactiveCpuAccCounter.m_GlobalCounterId, 7);
+    CHECK_EQ(inactiveCpuAccCounter.m_BackendCounterId, cpuAccCounters[0]);
+    CHECK_EQ(inactiveCpuAccCounter.m_SamplingRateInMicroseconds, 0);
+    CHECK_EQ(inactiveCpuAccCounter.m_Enabled, false);
 
     // Set new capture period and new active counters in CaptureData
     profilingService.SetCaptureData(newCapturePeriod, newActiveGlobalCounterIds, {});
@@ -3575,34 +3577,34 @@
     // Get vector of active counters for CpuRef and CpuAcc backends
     cpuRefCounterStatus = backendProfilingCpuRef.GetActiveCounters();
     cpuAccCounterStatus = backendProfilingCpuAcc.GetActiveCounters();
-    BOOST_CHECK_EQUAL(cpuRefCounterStatus.size(), 1);
-    BOOST_CHECK_EQUAL(cpuAccCounterStatus.size(), 1);
+    CHECK_EQ(cpuRefCounterStatus.size(), 1);
+    CHECK_EQ(cpuAccCounterStatus.size(), 1);
 
     // Check active CpuRef counter
-    BOOST_CHECK_EQUAL(cpuRefCounterStatus[0].m_GlobalCounterId, newActiveGlobalCounterIds[0]);
-    BOOST_CHECK_EQUAL(cpuRefCounterStatus[0].m_BackendCounterId, cpuRefCounters[1]);
-    BOOST_CHECK_EQUAL(cpuRefCounterStatus[0].m_SamplingRateInMicroseconds, newCapturePeriod);
-    BOOST_CHECK_EQUAL(cpuRefCounterStatus[0].m_Enabled, true);
+    CHECK_EQ(cpuRefCounterStatus[0].m_GlobalCounterId, newActiveGlobalCounterIds[0]);
+    CHECK_EQ(cpuRefCounterStatus[0].m_BackendCounterId, cpuRefCounters[1]);
+    CHECK_EQ(cpuRefCounterStatus[0].m_SamplingRateInMicroseconds, newCapturePeriod);
+    CHECK_EQ(cpuRefCounterStatus[0].m_Enabled, true);
 
     // Check active CpuAcc counter
-    BOOST_CHECK_EQUAL(cpuAccCounterStatus[0].m_GlobalCounterId, newActiveGlobalCounterIds[1]);
-    BOOST_CHECK_EQUAL(cpuAccCounterStatus[0].m_BackendCounterId, cpuAccCounters[0]);
-    BOOST_CHECK_EQUAL(cpuAccCounterStatus[0].m_SamplingRateInMicroseconds, newCapturePeriod);
-    BOOST_CHECK_EQUAL(cpuAccCounterStatus[0].m_Enabled, true);
+    CHECK_EQ(cpuAccCounterStatus[0].m_GlobalCounterId, newActiveGlobalCounterIds[1]);
+    CHECK_EQ(cpuAccCounterStatus[0].m_BackendCounterId, cpuAccCounters[0]);
+    CHECK_EQ(cpuAccCounterStatus[0].m_SamplingRateInMicroseconds, newCapturePeriod);
+    CHECK_EQ(cpuAccCounterStatus[0].m_Enabled, true);
 
     // Check inactive CpuRef counter
     inactiveCpuRefCounter = backendProfilingCpuRef.GetCounterStatus(cpuRefCounters[0]);
-    BOOST_CHECK_EQUAL(inactiveCpuRefCounter.m_GlobalCounterId, 5);
-    BOOST_CHECK_EQUAL(inactiveCpuRefCounter.m_BackendCounterId, cpuRefCounters[0]);
-    BOOST_CHECK_EQUAL(inactiveCpuRefCounter.m_SamplingRateInMicroseconds, 0);
-    BOOST_CHECK_EQUAL(inactiveCpuRefCounter.m_Enabled, false);
+    CHECK_EQ(inactiveCpuRefCounter.m_GlobalCounterId, 5);
+    CHECK_EQ(inactiveCpuRefCounter.m_BackendCounterId, cpuRefCounters[0]);
+    CHECK_EQ(inactiveCpuRefCounter.m_SamplingRateInMicroseconds, 0);
+    CHECK_EQ(inactiveCpuRefCounter.m_Enabled, false);
 
     // Reset the profiling service to stop any running thread
     options.m_ProfilingOptions.m_EnableProfiling = false;
     profilingService.ResetExternalProfilingOptions(options.m_ProfilingOptions, true);
 }
 
-BOOST_AUTO_TEST_CASE(CheckRegisterCounters)
+TEST_CASE("CheckRegisterCounters")
 {
     armnn::IRuntime::CreationOptions options;
     options.m_ProfilingOptions.m_EnableProfiling = true;
@@ -3634,34 +3636,34 @@
     uint32_t headerWord1 = ReadUint32(readBuffer, 4);
     uint64_t readTimestamp = ReadUint64(readBuffer, 8);
 
-    BOOST_TEST(((headerWord0 >> 26) & 0x0000003F) == 3); // packet family
-    BOOST_TEST(((headerWord0 >> 19) & 0x0000007F) == 0); // packet class
-    BOOST_TEST(((headerWord0 >> 16) & 0x00000007) == 0); // packet type
-    BOOST_TEST(headerWord1 == 20);                       // data length
-    BOOST_TEST(1000998 == readTimestamp);                // capture period
+    CHECK(((headerWord0 >> 26) & 0x0000003F) == 3); // packet family
+    CHECK(((headerWord0 >> 19) & 0x0000007F) == 0); // packet class
+    CHECK(((headerWord0 >> 16) & 0x00000007) == 0); // packet type
+    CHECK(headerWord1 == 20);                       // data length
+    CHECK(1000998 == readTimestamp);                // timestamp
 
     uint32_t offset = 16;
     // Check Counter Index
     uint16_t readIndex = ReadUint16(readBuffer, offset);
-    BOOST_TEST(6 == readIndex);
+    CHECK(6 == readIndex);
 
     // Check Counter Value
     offset += 2;
     uint32_t readValue = ReadUint32(readBuffer, offset);
-    BOOST_TEST(700 == readValue);
+    CHECK(700 == readValue);
 
     // Check Counter Index
     offset += 4;
     readIndex = ReadUint16(readBuffer, offset);
-    BOOST_TEST(8 == readIndex);
+    CHECK(8 == readIndex);
 
     // Check Counter Value
     offset += 2;
     readValue = ReadUint32(readBuffer, offset);
-    BOOST_TEST(93 == readValue);
+    CHECK(93 == readValue);
 }
 
-BOOST_AUTO_TEST_CASE(CheckFileFormat) {
+TEST_CASE("CheckFileFormat") {
     // Locally reduce log level to "Warning", as this test needs to parse a warning message from the standard output
     LogLevelSwapper logLevelSwapper(armnn::LogSeverity::Warning);
 
@@ -3669,7 +3671,7 @@
     armnn::IRuntime::CreationOptions::ExternalProfilingOptions options;
     options.m_EnableProfiling = true;
     // Check that the default value is set to binary
-    BOOST_CHECK(options.m_FileFormat == "binary");
+    CHECK(options.m_FileFormat == "binary");
 
     // Change file format to an unsupported value
     options.m_FileFormat = "json";
@@ -3678,7 +3680,7 @@
     profilingService.ResetExternalProfilingOptions(options, true);
     // Start the command handler and the send thread
     profilingService.Update();
-    BOOST_CHECK(profilingService.GetCurrentState()==ProfilingState::NotConnected);
+    CHECK(profilingService.GetCurrentState() == ProfilingState::NotConnected);
 
     // Redirect the output to a local stream so that we can parse the warning message
     std::stringstream ss;
@@ -3694,8 +3696,8 @@
     if (ss.str().find("Unsupported profiling file format, only binary is supported") == std::string::npos)
     {
         std::cout << ss.str();
-        BOOST_FAIL("Expected string not found.");
+        FAIL("Expected string not found.");
     }
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
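For readers coming from Boost.Test, a minimal self-contained sketch (illustrative only, not taken from this patch) of the shape the converted files now use: TEST_SUITE opens an ordinary braced scope, so a closing brace replaces BOOST_AUTO_TEST_SUITE_END(), and the assertion macros map onto CHECK, CHECK_EQ, CHECK_THROWS_AS, CHECK_NOTHROW and FAIL as in the hunks above. The test names and values here are invented, and the sketch assumes the test executable provides doctest's main elsewhere (for example via DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN in one translation unit).

    #include <doctest/doctest.h>

    #include <stdexcept>

    TEST_SUITE("ExampleSuite")
    {

    TEST_CASE("ExampleCase")
    {
        int value = 41 + 1;                                 // hypothetical value
        CHECK(value == 42);                                 // was BOOST_CHECK / BOOST_TEST
        CHECK_EQ(value, 42);                                // was BOOST_CHECK_EQUAL
        CHECK_NOTHROW([]{}());                              // was BOOST_CHECK_NO_THROW
        CHECK_THROWS_AS(throw std::runtime_error("boom"),   // was BOOST_CHECK_THROW
                        std::runtime_error);
        if (value != 42)
        {
            FAIL("Expected value not found.");              // was BOOST_FAIL
        }
    }

    }   // closing brace ends the suite; there is no BOOST_AUTO_TEST_SUITE_END() equivalent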
diff --git a/src/profiling/test/ProfilingTests.hpp b/src/profiling/test/ProfilingTests.hpp
index f96a1c8..a8ca1b9 100644
--- a/src/profiling/test/ProfilingTests.hpp
+++ b/src/profiling/test/ProfilingTests.hpp
@@ -16,7 +16,7 @@
 #include <common/include/CommandHandlerFunctor.hpp>
 
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <chrono>
 #include <thread>
@@ -213,15 +213,15 @@
         , m_BackupProfilingConnectionFactory(nullptr)
 
     {
-        BOOST_CHECK(m_MockProfilingConnectionFactory);
+        CHECK(m_MockProfilingConnectionFactory);
         SwapProfilingConnectionFactory(m_ProfilingService,
                                        m_MockProfilingConnectionFactory.get(),
                                        m_BackupProfilingConnectionFactory);
-        BOOST_CHECK(m_BackupProfilingConnectionFactory);
+        CHECK(m_BackupProfilingConnectionFactory);
     }
     ~SwapProfilingConnectionFactoryHelper()
     {
-        BOOST_CHECK(m_BackupProfilingConnectionFactory);
+        CHECK(m_BackupProfilingConnectionFactory);
         IProfilingConnectionFactory* temp = nullptr;
         SwapProfilingConnectionFactory(m_ProfilingService,
                                        m_BackupProfilingConnectionFactory,
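A minimal, hypothetical sketch (class and test names invented, not part of this patch) of the idiom SwapProfilingConnectionFactoryHelper relies on above: doctest assertions such as CHECK can be issued from a helper's constructor and destructor, provided the helper is only ever instantiated inside a running TEST_CASE.

    #include <doctest/doctest.h>

    // Assumed stand-in for the swap-and-restore pattern used by the helper above.
    class ScopedSwap
    {
    public:
        ScopedSwap(int& target, int replacement)
            : m_Target(target)
            , m_Backup(target)
        {
            CHECK(m_Backup != replacement);   // runs inside the enclosing TEST_CASE
            m_Target = replacement;
        }
        ~ScopedSwap()
        {
            m_Target = m_Backup;              // restore the original value on scope exit
        }
    private:
        int& m_Target;
        int  m_Backup;
    };

    TEST_CASE("ScopedSwapRestoresValue")
    {
        int value = 1;
        {
            ScopedSwap swap(value, 2);
            CHECK(value == 2);
        }
        CHECK(value == 1);
    }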
diff --git a/src/profiling/test/SendCounterPacketTests.cpp b/src/profiling/test/SendCounterPacketTests.cpp
index 950f8ff..675d083 100644
--- a/src/profiling/test/SendCounterPacketTests.cpp
+++ b/src/profiling/test/SendCounterPacketTests.cpp
@@ -20,7 +20,9 @@
 
 #include <common/include/Constants.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <armnn/utility/Assert.hpp>
+
+#include <doctest/doctest.h>
 
 #include <chrono>
 
@@ -48,7 +50,7 @@
     case ProfilingState::NotConnected:
         return;
     default:
-        BOOST_CHECK_MESSAGE(false, "Invalid profiling state");
+        CHECK_MESSAGE(false, "Invalid profiling state");
     }
 }
 
@@ -68,7 +70,7 @@
     case ProfilingState::WaitingForAck:
         return;
     default:
-        BOOST_CHECK_MESSAGE(false, "Invalid profiling state");
+        CHECK_MESSAGE(false, "Invalid profiling state");
     }
 }
 
@@ -89,17 +91,17 @@
     case ProfilingState::Active:
         return;
     default:
-        BOOST_CHECK_MESSAGE(false, "Invalid profiling state");
+        CHECK_MESSAGE(false, "Invalid profiling state");
     }
 }
 
 } // Anonymous namespace
 
-BOOST_AUTO_TEST_SUITE(SendCounterPacketTests)
-
+TEST_SUITE("SendCounterPacketTests")
+{
 using PacketType = MockProfilingConnection::PacketType;
 
-BOOST_AUTO_TEST_CASE(MockSendCounterPacketTest)
+TEST_CASE("MockSendCounterPacketTest")
 {
     MockBufferManager mockBuffer(512);
     MockSendCounterPacket mockSendCounterPacket(mockBuffer);
@@ -109,7 +111,7 @@
     auto packetBuffer = mockBuffer.GetReadableBuffer();
     const char* buffer = reinterpret_cast<const char*>(packetBuffer->GetReadableData());
 
-    BOOST_TEST(strcmp(buffer, "SendStreamMetaDataPacket") == 0);
+    CHECK(strcmp(buffer, "SendStreamMetaDataPacket") == 0);
 
     mockBuffer.MarkRead(packetBuffer);
 
@@ -119,7 +121,7 @@
     packetBuffer = mockBuffer.GetReadableBuffer();
     buffer = reinterpret_cast<const char*>(packetBuffer->GetReadableData());
 
-    BOOST_TEST(strcmp(buffer, "SendCounterDirectoryPacket") == 0);
+    CHECK(strcmp(buffer, "SendCounterDirectoryPacket") == 0);
 
     mockBuffer.MarkRead(packetBuffer);
 
@@ -131,7 +133,7 @@
     packetBuffer = mockBuffer.GetReadableBuffer();
     buffer = reinterpret_cast<const char*>(packetBuffer->GetReadableData());
 
-    BOOST_TEST(strcmp(buffer, "SendPeriodicCounterCapturePacket") == 0);
+    CHECK(strcmp(buffer, "SendPeriodicCounterCapturePacket") == 0);
 
     mockBuffer.MarkRead(packetBuffer);
 
@@ -142,12 +144,12 @@
     packetBuffer = mockBuffer.GetReadableBuffer();
     buffer = reinterpret_cast<const char*>(packetBuffer->GetReadableData());
 
-    BOOST_TEST(strcmp(buffer, "SendPeriodicCounterSelectionPacket") == 0);
+    CHECK(strcmp(buffer, "SendPeriodicCounterSelectionPacket") == 0);
 
     mockBuffer.MarkRead(packetBuffer);
 }
 
-BOOST_AUTO_TEST_CASE(SendPeriodicCounterSelectionPacketTest)
+TEST_CASE("SendPeriodicCounterSelectionPacketTest")
 {
     // Error no space left in buffer
     MockBufferManager mockBuffer1(10);
@@ -155,7 +157,7 @@
 
     uint32_t capturePeriod = 1000;
     std::vector<uint16_t> selectedCounterIds;
-    BOOST_CHECK_THROW(sendPacket1.SendPeriodicCounterSelectionPacket(capturePeriod, selectedCounterIds),
+    CHECK_THROWS_AS(sendPacket1.SendPeriodicCounterSelectionPacket(capturePeriod, selectedCounterIds),
                       BufferExhaustion);
 
     // Packet without any counters
@@ -169,10 +171,10 @@
     uint32_t headerWord1 = ReadUint32(readBuffer2, 4);
     uint32_t period = ReadUint32(readBuffer2, 8);
 
-    BOOST_TEST(((headerWord0 >> 26) & 0x3F) == 0);  // packet family
-    BOOST_TEST(((headerWord0 >> 16) & 0x3FF) == 4); // packet id
-    BOOST_TEST(headerWord1 == 4);                   // data lenght
-    BOOST_TEST(period == 1000);                     // capture period
+    CHECK(((headerWord0 >> 26) & 0x3F) == 0);  // packet family
+    CHECK(((headerWord0 >> 16) & 0x3FF) == 4); // packet id
+    CHECK(headerWord1 == 4);                   // data length
+    CHECK(period == 1000);                     // capture period
 
     // Full packet message
     MockBufferManager mockBuffer3(512);
@@ -191,10 +193,10 @@
     headerWord1 = ReadUint32(readBuffer3, 4);
     period = ReadUint32(readBuffer3, 8);
 
-    BOOST_TEST(((headerWord0 >> 26) & 0x3F) == 0);  // packet family
-    BOOST_TEST(((headerWord0 >> 16) & 0x3FF) == 4); // packet id
-    BOOST_TEST(headerWord1 == 14);                  // data lenght
-    BOOST_TEST(period == 1000);                     // capture period
+    CHECK(((headerWord0 >> 26) & 0x3F) == 0);  // packet family
+    CHECK(((headerWord0 >> 16) & 0x3FF) == 4); // packet id
+    CHECK(headerWord1 == 14);                  // data length
+    CHECK(period == 1000);                     // capture period
 
     uint16_t counterId = 0;
     uint32_t offset = 12;
@@ -203,12 +205,12 @@
     for(const uint16_t& id : selectedCounterIds)
     {
         counterId = ReadUint16(readBuffer3, offset);
-        BOOST_TEST(counterId == id);
+        CHECK(counterId == id);
         offset += 2;
     }
 }
 
-BOOST_AUTO_TEST_CASE(SendPeriodicCounterCapturePacketTest)
+TEST_CASE("SendPeriodicCounterCapturePacketTest")
 {
     ProfilingStateMachine profilingStateMachine;
 
@@ -220,7 +222,7 @@
     uint64_t time =  static_cast<uint64_t >(captureTimestamp.time_since_epoch().count());
     std::vector<CounterValue> indexValuePairs;
 
-    BOOST_CHECK_THROW(sendPacket1.SendPeriodicCounterCapturePacket(time, indexValuePairs),
+    CHECK_THROWS_AS(sendPacket1.SendPeriodicCounterCapturePacket(time, indexValuePairs),
                       BufferExhaustion);
 
     // Packet without any counters
@@ -234,11 +236,11 @@
     uint32_t headerWord1 = ReadUint32(readBuffer2, 4);
     uint64_t readTimestamp = ReadUint64(readBuffer2, 8);
 
-    BOOST_TEST(((headerWord0 >> 26) & 0x0000003F) == 3); // packet family
-    BOOST_TEST(((headerWord0 >> 19) & 0x0000007F) == 0); // packet class
-    BOOST_TEST(((headerWord0 >> 16) & 0x00000007) == 0); // packet type
-    BOOST_TEST(headerWord1 == 8);                        // data length
-    BOOST_TEST(time == readTimestamp);                   // capture period
+    CHECK(((headerWord0 >> 26) & 0x0000003F) == 3); // packet family
+    CHECK(((headerWord0 >> 19) & 0x0000007F) == 0); // packet class
+    CHECK(((headerWord0 >> 16) & 0x00000007) == 0); // packet type
+    CHECK(headerWord1 == 8);                        // data length
+    CHECK(time == readTimestamp);                   // timestamp
 
     // Full packet message
     MockBufferManager mockBuffer3(512);
@@ -257,11 +259,11 @@
     headerWord1 = ReadUint32(readBuffer3, 4);
     uint64_t readTimestamp2 = ReadUint64(readBuffer3, 8);
 
-    BOOST_TEST(((headerWord0 >> 26) & 0x0000003F) == 3); // packet family
-    BOOST_TEST(((headerWord0 >> 19) & 0x0000007F) == 0); // packet class
-    BOOST_TEST(((headerWord0 >> 16) & 0x00000007) == 0); // packet type
-    BOOST_TEST(headerWord1 == 38);                       // data length
-    BOOST_TEST(time == readTimestamp2);                  // capture period
+    CHECK(((headerWord0 >> 26) & 0x0000003F) == 3); // packet family
+    CHECK(((headerWord0 >> 19) & 0x0000007F) == 0); // packet class
+    CHECK(((headerWord0 >> 16) & 0x00000007) == 0); // packet type
+    CHECK(headerWord1 == 38);                       // data length
+    CHECK(time == readTimestamp2);                  // timestamp
 
     uint16_t counterIndex = 0;
     uint32_t counterValue = 100;
@@ -272,27 +274,27 @@
     {
         // Check Counter Index
         uint16_t readIndex = ReadUint16(readBuffer3, offset);
-        BOOST_TEST(counterIndex == readIndex);
+        CHECK(counterIndex == readIndex);
         counterIndex++;
         offset += 2;
 
         // Check Counter Value
         uint32_t readValue = ReadUint32(readBuffer3, offset);
-        BOOST_TEST(counterValue == readValue);
+        CHECK(counterValue == readValue);
         counterValue += 100;
         offset += 4;
     }
 
 }
 
-BOOST_AUTO_TEST_CASE(SendStreamMetaDataPacketTest)
+TEST_CASE("SendStreamMetaDataPacketTest")
 {
     uint32_t sizeUint32 = armnn::numeric_cast<uint32_t>(sizeof(uint32_t));
 
     // Error no space left in buffer
     MockBufferManager mockBuffer1(10);
     SendCounterPacket sendPacket1(mockBuffer1);
-    BOOST_CHECK_THROW(sendPacket1.SendStreamMetaDataPacket(), armnn::profiling::BufferExhaustion);
+    CHECK_THROWS_AS(sendPacket1.SendStreamMetaDataPacket(), armnn::profiling::BufferExhaustion);
 
     // Full metadata packet
 
@@ -345,8 +347,8 @@
     uint32_t headerWord0 = ReadUint32(readBuffer2, 0);
     uint32_t headerWord1 = ReadUint32(readBuffer2, sizeUint32);
 
-    BOOST_TEST(((headerWord0 >> 26) & 0x3F) == 0); // packet family
-    BOOST_TEST(((headerWord0 >> 16) & 0x3FF) == 0); // packet id
+    CHECK(((headerWord0 >> 26) & 0x3F) == 0); // packet family
+    CHECK(((headerWord0 >> 16) & 0x3FF) == 0); // packet id
 
     uint32_t totalLength = armnn::numeric_cast<uint32_t>(2 * sizeUint32 +
                                                          10 * sizeUint32 + infoSize +
@@ -354,82 +356,82 @@
                                                          processNameSize + sizeUint32 +
                                                          2 * packetEntries * sizeUint32);
 
-    BOOST_TEST(headerWord1 == totalLength - (2 * sizeUint32)); // data length
+    CHECK(headerWord1 == totalLength - (2 * sizeUint32)); // data length
 
     uint32_t offset = sizeUint32 * 2;
-    BOOST_TEST(ReadUint32(readBuffer2, offset) == arm::pipe::PIPE_MAGIC); // pipe_magic
+    CHECK(ReadUint32(readBuffer2, offset) == arm::pipe::PIPE_MAGIC); // pipe_magic
     offset += sizeUint32;
-    BOOST_TEST(ReadUint32(readBuffer2, offset) == arm::pipe::EncodeVersion(1, 0, 0)); // stream_metadata_version
+    CHECK(ReadUint32(readBuffer2, offset) == arm::pipe::EncodeVersion(1, 0, 0)); // stream_metadata_version
     offset += sizeUint32;
-    BOOST_TEST(ReadUint32(readBuffer2, offset) == MAX_METADATA_PACKET_LENGTH); // max_data_len
+    CHECK(ReadUint32(readBuffer2, offset) == MAX_METADATA_PACKET_LENGTH); // max_data_len
     offset += sizeUint32;
     int pid = armnnUtils::Processes::GetCurrentId();
-    BOOST_TEST(ReadUint32(readBuffer2, offset) == armnn::numeric_cast<uint32_t>(pid));
+    CHECK(ReadUint32(readBuffer2, offset) == armnn::numeric_cast<uint32_t>(pid));
     offset += sizeUint32;
     uint32_t poolOffset = 10 * sizeUint32;
-    BOOST_TEST(ReadUint32(readBuffer2, offset) == poolOffset); // offset_info
+    CHECK(ReadUint32(readBuffer2, offset) == poolOffset); // offset_info
     offset += sizeUint32;
     poolOffset += infoSize;
-    BOOST_TEST(ReadUint32(readBuffer2, offset) == poolOffset); // offset_hw_version
+    CHECK(ReadUint32(readBuffer2, offset) == poolOffset); // offset_hw_version
     offset += sizeUint32;
     poolOffset += hardwareVersionSize;
-    BOOST_TEST(ReadUint32(readBuffer2, offset) == poolOffset); // offset_sw_version
+    CHECK(ReadUint32(readBuffer2, offset) == poolOffset); // offset_sw_version
     offset += sizeUint32;
     poolOffset += softwareVersionSize;
-    BOOST_TEST(ReadUint32(readBuffer2, offset) == poolOffset); // offset_process_name
+    CHECK(ReadUint32(readBuffer2, offset) == poolOffset); // offset_process_name
     offset += sizeUint32;
     poolOffset += processNameSize;
-    BOOST_TEST(ReadUint32(readBuffer2, offset) == poolOffset); // offset_packet_version_table
+    CHECK(ReadUint32(readBuffer2, offset) == poolOffset); // offset_packet_version_table
     offset += sizeUint32;
-    BOOST_TEST(ReadUint32(readBuffer2, offset) == 0); // reserved
+    CHECK(ReadUint32(readBuffer2, offset) == 0); // reserved
 
     const unsigned char* readData2 = readBuffer2->GetReadableData();
 
     offset += sizeUint32;
     if (infoSize)
     {
-        BOOST_TEST(strcmp(reinterpret_cast<const char *>(&readData2[offset]), GetSoftwareInfo().c_str()) == 0);
+        CHECK(strcmp(reinterpret_cast<const char *>(&readData2[offset]), GetSoftwareInfo().c_str()) == 0);
         offset += infoSize;
     }
 
     if (hardwareVersionSize)
     {
-        BOOST_TEST(strcmp(reinterpret_cast<const char *>(&readData2[offset]), GetHardwareVersion().c_str()) == 0);
+        CHECK(strcmp(reinterpret_cast<const char *>(&readData2[offset]), GetHardwareVersion().c_str()) == 0);
         offset += hardwareVersionSize;
     }
 
     if (softwareVersionSize)
     {
-        BOOST_TEST(strcmp(reinterpret_cast<const char *>(&readData2[offset]), GetSoftwareVersion().c_str()) == 0);
+        CHECK(strcmp(reinterpret_cast<const char *>(&readData2[offset]), GetSoftwareVersion().c_str()) == 0);
         offset += softwareVersionSize;
     }
 
     if (processNameSize)
     {
-        BOOST_TEST(strcmp(reinterpret_cast<const char *>(&readData2[offset]), GetProcessName().c_str()) == 0);
+        CHECK(strcmp(reinterpret_cast<const char *>(&readData2[offset]), GetProcessName().c_str()) == 0);
         offset += processNameSize;
     }
 
     if (packetEntries)
     {
         uint32_t numberOfEntries = ReadUint32(readBuffer2, offset);
-        BOOST_TEST((numberOfEntries >> 16) == packetEntries);
+        CHECK((numberOfEntries >> 16) == packetEntries);
         offset += sizeUint32;
         for (std::pair<uint32_t, uint32_t>& packetVersion : packetVersions)
         {
             uint32_t readPacketId = ReadUint32(readBuffer2, offset);
-            BOOST_TEST(packetVersion.first == readPacketId);
+            CHECK(packetVersion.first == readPacketId);
             offset += sizeUint32;
             uint32_t readVersion = ReadUint32(readBuffer2, offset);
-            BOOST_TEST(packetVersion.second == readVersion);
+            CHECK(packetVersion.second == readVersion);
             offset += sizeUint32;
         }
     }
 
-    BOOST_TEST(offset == totalLength);
+    CHECK(offset == totalLength);
 }
 
-BOOST_AUTO_TEST_CASE(CreateDeviceRecordTest)
+TEST_CASE("CreateDeviceRecordTest")
 {
     MockBufferManager mockBuffer(0);
     SendCounterPacketTest sendCounterPacketTest(mockBuffer);
@@ -445,23 +447,23 @@
     std::string errorMessage;
     bool result = sendCounterPacketTest.CreateDeviceRecordTest(device, deviceRecord, errorMessage);
 
-    BOOST_CHECK(result);
-    BOOST_CHECK(errorMessage.empty());
-    BOOST_CHECK(deviceRecord.size() == 6); // Size in words: header [2] + device name [4]
+    CHECK(result);
+    CHECK(errorMessage.empty());
+    CHECK(deviceRecord.size() == 6); // Size in words: header [2] + device name [4]
 
     uint16_t deviceRecordWord0[]
     {
         static_cast<uint16_t>(deviceRecord[0] >> 16),
         static_cast<uint16_t>(deviceRecord[0])
     };
-    BOOST_CHECK(deviceRecordWord0[0] == deviceUid); // uid
-    BOOST_CHECK(deviceRecordWord0[1] == deviceCores); // cores
-    BOOST_CHECK(deviceRecord[1] == 8); // name_offset
-    BOOST_CHECK(deviceRecord[2] == deviceName.size() + 1); // The length of the SWTrace string (name)
-    BOOST_CHECK(std::memcmp(deviceRecord.data() + 3, deviceName.data(), deviceName.size()) == 0); // name
+    CHECK(deviceRecordWord0[0] == deviceUid); // uid
+    CHECK(deviceRecordWord0[1] == deviceCores); // cores
+    CHECK(deviceRecord[1] == 8); // name_offset
+    CHECK(deviceRecord[2] == deviceName.size() + 1); // The length of the SWTrace string (name)
+    CHECK(std::memcmp(deviceRecord.data() + 3, deviceName.data(), deviceName.size()) == 0); // name
 }
 
-BOOST_AUTO_TEST_CASE(CreateInvalidDeviceRecordTest)
+TEST_CASE("CreateInvalidDeviceRecordTest")
 {
     MockBufferManager mockBuffer(0);
     SendCounterPacketTest sendCounterPacketTest(mockBuffer);
@@ -477,12 +479,12 @@
     std::string errorMessage;
     bool result = sendCounterPacketTest.CreateDeviceRecordTest(device, deviceRecord, errorMessage);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK(!errorMessage.empty());
-    BOOST_CHECK(deviceRecord.empty());
+    CHECK(!result);
+    CHECK(!errorMessage.empty());
+    CHECK(deviceRecord.empty());
 }
 
-BOOST_AUTO_TEST_CASE(CreateCounterSetRecordTest)
+TEST_CASE("CreateCounterSetRecordTest")
 {
     MockBufferManager mockBuffer(0);
     SendCounterPacketTest sendCounterPacketTest(mockBuffer);
@@ -498,23 +500,23 @@
     std::string errorMessage;
     bool result = sendCounterPacketTest.CreateCounterSetRecordTest(counterSet, counterSetRecord, errorMessage);
 
-    BOOST_CHECK(result);
-    BOOST_CHECK(errorMessage.empty());
-    BOOST_CHECK(counterSetRecord.size() == 8); // Size in words: header [2] + counter set name [6]
+    CHECK(result);
+    CHECK(errorMessage.empty());
+    CHECK(counterSetRecord.size() == 8); // Size in words: header [2] + counter set name [6]
 
     uint16_t counterSetRecordWord0[]
     {
         static_cast<uint16_t>(counterSetRecord[0] >> 16),
         static_cast<uint16_t>(counterSetRecord[0])
     };
-    BOOST_CHECK(counterSetRecordWord0[0] == counterSetUid); // uid
-    BOOST_CHECK(counterSetRecordWord0[1] == counterSetCount); // cores
-    BOOST_CHECK(counterSetRecord[1] == 8); // name_offset
-    BOOST_CHECK(counterSetRecord[2] == counterSetName.size() + 1); // The length of the SWTrace string (name)
-    BOOST_CHECK(std::memcmp(counterSetRecord.data() + 3, counterSetName.data(), counterSetName.size()) == 0); // name
+    CHECK(counterSetRecordWord0[0] == counterSetUid); // uid
+    CHECK(counterSetRecordWord0[1] == counterSetCount); // cores
+    CHECK(counterSetRecord[1] == 8); // name_offset
+    CHECK(counterSetRecord[2] == counterSetName.size() + 1); // The length of the SWTrace string (name)
+    CHECK(std::memcmp(counterSetRecord.data() + 3, counterSetName.data(), counterSetName.size()) == 0); // name
 }
 
-BOOST_AUTO_TEST_CASE(CreateInvalidCounterSetRecordTest)
+TEST_CASE("CreateInvalidCounterSetRecordTest")
 {
     MockBufferManager mockBuffer(0);
     SendCounterPacketTest sendCounterPacketTest(mockBuffer);
@@ -530,12 +532,12 @@
     std::string errorMessage;
     bool result = sendCounterPacketTest.CreateCounterSetRecordTest(counterSet, counterSetRecord, errorMessage);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK(!errorMessage.empty());
-    BOOST_CHECK(counterSetRecord.empty());
+    CHECK(!result);
+    CHECK(!errorMessage.empty());
+    CHECK(counterSetRecord.empty());
 }
 
-BOOST_AUTO_TEST_CASE(CreateEventRecordTest)
+TEST_CASE("CreateEventRecordTest")
 {
     MockBufferManager mockBuffer(0);
     SendCounterPacketTest sendCounterPacketTest(mockBuffer);
@@ -569,9 +571,9 @@
     std::string errorMessage;
     bool result = sendCounterPacketTest.CreateEventRecordTest(counter, eventRecord, errorMessage);
 
-    BOOST_CHECK(result);
-    BOOST_CHECK(errorMessage.empty());
-    BOOST_CHECK(eventRecord.size() == 24); // Size in words: header [8] + counter name [6] + description [7] + units [3]
+    CHECK(result);
+    CHECK(errorMessage.empty());
+    CHECK(eventRecord.size() == 24); // Size in words: header [8] + counter name [6] + description [7] + units [3]
 
     uint16_t eventRecordWord0[]
     {
@@ -594,14 +596,14 @@
         eventRecord[4]
     };
 
-    BOOST_CHECK(eventRecordWord0[0] == maxCounterUid); // max_counter_uid
-    BOOST_CHECK(eventRecordWord0[1] == counterUid); // counter_uid
-    BOOST_CHECK(eventRecordWord1[0] == deviceUid); // device
+    CHECK(eventRecordWord0[0] == maxCounterUid); // max_counter_uid
+    CHECK(eventRecordWord0[1] == counterUid); // counter_uid
+    CHECK(eventRecordWord1[0] == deviceUid); // device
 
-    BOOST_CHECK(eventRecordWord1[1] == counterSetUid); // counter_set
-    BOOST_CHECK(eventRecordWord2[0] == counterClass); // class
-    BOOST_CHECK(eventRecordWord2[1] == counterInterpolation); // interpolation
-    BOOST_CHECK(std::memcmp(eventRecordWord34, &counterMultiplier, sizeof(counterMultiplier)) == 0); // multiplier
+    CHECK(eventRecordWord1[1] == counterSetUid); // counter_set
+    CHECK(eventRecordWord2[0] == counterClass); // class
+    CHECK(eventRecordWord2[1] == counterInterpolation); // interpolation
+    CHECK(std::memcmp(eventRecordWord34, &counterMultiplier, sizeof(counterMultiplier)) == 0); // multiplier
 
     ARMNN_NO_CONVERSION_WARN_BEGIN
     uint32_t eventRecordBlockSize = 8u * sizeof(uint32_t);
@@ -620,49 +622,49 @@
 
     ARMNN_NO_CONVERSION_WARN_END
 
-    BOOST_CHECK(eventRecord[5] == counterNameOffset); // name_offset
-    BOOST_CHECK(eventRecord[6] == counterDescriptionOffset); // description_offset
-    BOOST_CHECK(eventRecord[7] == counterUnitsOffset); // units_offset
+    CHECK(eventRecord[5] == counterNameOffset); // name_offset
+    CHECK(eventRecord[6] == counterDescriptionOffset); // description_offset
+    CHECK(eventRecord[7] == counterUnitsOffset); // units_offset
 
     // Offsets are relative to the start of the eventRecord
     auto eventRecordPool = reinterpret_cast<unsigned char*>(eventRecord.data());
     size_t uint32_t_size = sizeof(uint32_t);
 
     // The length of the SWTrace string (name)
-    BOOST_CHECK(eventRecordPool[counterNameOffset] == counterName.size() + 1);
+    CHECK(eventRecordPool[counterNameOffset] == counterName.size() + 1);
     // The counter name
-    BOOST_CHECK(std::memcmp(eventRecordPool +
+    CHECK(std::memcmp(eventRecordPool +
                             counterNameOffset + // Offset
                             uint32_t_size /* The length of the name */,
                             counterName.data(),
                             counterName.size()) == 0); // name
     // The null-terminator at the end of the name
-    BOOST_CHECK(eventRecordPool[counterNameOffset + uint32_t_size + counterName.size()] == '\0');
+    CHECK(eventRecordPool[counterNameOffset + uint32_t_size + counterName.size()] == '\0');
 
     // The length of the SWTrace string (description)
-    BOOST_CHECK(eventRecordPool[counterDescriptionOffset] == counterDescription.size() + 1);
+    CHECK(eventRecordPool[counterDescriptionOffset] == counterDescription.size() + 1);
     // The counter description
-    BOOST_CHECK(std::memcmp(eventRecordPool +
+    CHECK(std::memcmp(eventRecordPool +
                             counterDescriptionOffset + // Offset
                             uint32_t_size /* The length of the description */,
                             counterDescription.data(),
                             counterDescription.size()) == 0); // description
     // The null-terminator at the end of the description
-    BOOST_CHECK(eventRecordPool[counterDescriptionOffset + uint32_t_size + counterDescription.size()] == '\0');
+    CHECK(eventRecordPool[counterDescriptionOffset + uint32_t_size + counterDescription.size()] == '\0');
 
     // The length of the SWTrace namestring (units)
-    BOOST_CHECK(eventRecordPool[counterUnitsOffset] == counterUnits.size() + 1);
+    CHECK(eventRecordPool[counterUnitsOffset] == counterUnits.size() + 1);
     // The counter units
-    BOOST_CHECK(std::memcmp(eventRecordPool +
+    CHECK(std::memcmp(eventRecordPool +
                             counterUnitsOffset + // Offset
                             uint32_t_size /* The length of the units */,
                             counterUnits.data(),
                             counterUnits.size()) == 0); // units
     // The null-terminator at the end of the units
-    BOOST_CHECK(eventRecordPool[counterUnitsOffset + uint32_t_size + counterUnits.size()] == '\0');
+    CHECK(eventRecordPool[counterUnitsOffset + uint32_t_size + counterUnits.size()] == '\0');
 }
 
-BOOST_AUTO_TEST_CASE(CreateEventRecordNoUnitsTest)
+TEST_CASE("CreateEventRecordNoUnitsTest")
 {
     MockBufferManager mockBuffer(0);
     SendCounterPacketTest sendCounterPacketTest(mockBuffer);
@@ -695,9 +697,9 @@
     std::string errorMessage;
     bool result = sendCounterPacketTest.CreateEventRecordTest(counter, eventRecord, errorMessage);
 
-    BOOST_CHECK(result);
-    BOOST_CHECK(errorMessage.empty());
-    BOOST_CHECK(eventRecord.size() == 21); // Size in words: header [8] + counter name [6] + description [7]
+    CHECK(result);
+    CHECK(errorMessage.empty());
+    CHECK(eventRecord.size() == 21); // Size in words: header [8] + counter name [6] + description [7]
 
     uint16_t eventRecordWord0[]
     {
@@ -719,13 +721,13 @@
         eventRecord[3],
         eventRecord[4]
     };
-    BOOST_CHECK(eventRecordWord0[0] == maxCounterUid); // max_counter_uid
-    BOOST_CHECK(eventRecordWord0[1] == counterUid); // counter_uid
-    BOOST_CHECK(eventRecordWord1[0] == deviceUid); // device
-    BOOST_CHECK(eventRecordWord1[1] == counterSetUid); // counter_set
-    BOOST_CHECK(eventRecordWord2[0] == counterClass); // class
-    BOOST_CHECK(eventRecordWord2[1] == counterInterpolation); // interpolation
-    BOOST_CHECK(std::memcmp(eventRecordWord34, &counterMultiplier, sizeof(counterMultiplier)) == 0); // multiplier
+    CHECK(eventRecordWord0[0] == maxCounterUid); // max_counter_uid
+    CHECK(eventRecordWord0[1] == counterUid); // counter_uid
+    CHECK(eventRecordWord1[0] == deviceUid); // device
+    CHECK(eventRecordWord1[1] == counterSetUid); // counter_set
+    CHECK(eventRecordWord2[0] == counterClass); // class
+    CHECK(eventRecordWord2[1] == counterInterpolation); // interpolation
+    CHECK(std::memcmp(eventRecordWord34, &counterMultiplier, sizeof(counterMultiplier)) == 0); // multiplier
 
     ARMNN_NO_CONVERSION_WARN_BEGIN
     uint32_t eventRecordBlockSize = 8u * sizeof(uint32_t);
@@ -737,38 +739,38 @@
                                         1u; // Rounding to the next word
     ARMNN_NO_CONVERSION_WARN_END
 
-    BOOST_CHECK(eventRecord[5] == counterNameOffset); // name_offset
-    BOOST_CHECK(eventRecord[6] == counterDescriptionOffset); // description_offset
-    BOOST_CHECK(eventRecord[7] == 0); // units_offset
+    CHECK(eventRecord[5] == counterNameOffset); // name_offset
+    CHECK(eventRecord[6] == counterDescriptionOffset); // description_offset
+    CHECK(eventRecord[7] == 0); // units_offset
 
     // Offsets are relative to the start of the eventRecord
     auto eventRecordPool = reinterpret_cast<unsigned char*>(eventRecord.data());
     size_t uint32_t_size = sizeof(uint32_t);
 
     // The length of the SWTrace string (name)
-    BOOST_CHECK(eventRecordPool[counterNameOffset] == counterName.size() + 1);
+    CHECK(eventRecordPool[counterNameOffset] == counterName.size() + 1);
     // The counter name
-    BOOST_CHECK(std::memcmp(eventRecordPool +
+    CHECK(std::memcmp(eventRecordPool +
                             counterNameOffset + // Offset
                             uint32_t_size, // The length of the name
                             counterName.data(),
                             counterName.size()) == 0); // name
     // The null-terminator at the end of the name
-    BOOST_CHECK(eventRecordPool[counterNameOffset + uint32_t_size + counterName.size()] == '\0');
+    CHECK(eventRecordPool[counterNameOffset + uint32_t_size + counterName.size()] == '\0');
 
     // The length of the SWTrace string (description)
-    BOOST_CHECK(eventRecordPool[counterDescriptionOffset] == counterDescription.size() + 1);
+    CHECK(eventRecordPool[counterDescriptionOffset] == counterDescription.size() + 1);
     // The counter description
-    BOOST_CHECK(std::memcmp(eventRecordPool +
+    CHECK(std::memcmp(eventRecordPool +
                             counterDescriptionOffset + // Offset
                             uint32_t_size, // The length of the description
                             counterDescription.data(),
                             counterDescription.size()) == 0); // description
     // The null-terminator at the end of the description
-    BOOST_CHECK(eventRecordPool[counterDescriptionOffset + uint32_t_size + counterDescription.size()] == '\0');
+    CHECK(eventRecordPool[counterDescriptionOffset + uint32_t_size + counterDescription.size()] == '\0');
 }
 
-BOOST_AUTO_TEST_CASE(CreateInvalidEventRecordTest1)
+TEST_CASE("CreateInvalidEventRecordTest1")
 {
     MockBufferManager mockBuffer(0);
     SendCounterPacketTest sendCounterPacketTest(mockBuffer);
@@ -802,12 +804,12 @@
     std::string errorMessage;
     bool result = sendCounterPacketTest.CreateEventRecordTest(counter, eventRecord, errorMessage);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK(!errorMessage.empty());
-    BOOST_CHECK(eventRecord.empty());
+    CHECK(!result);
+    CHECK(!errorMessage.empty());
+    CHECK(eventRecord.empty());
 }
 
-BOOST_AUTO_TEST_CASE(CreateInvalidEventRecordTest2)
+TEST_CASE("CreateInvalidEventRecordTest2")
 {
     MockBufferManager mockBuffer(0);
     SendCounterPacketTest sendCounterPacketTest(mockBuffer);
@@ -841,12 +843,12 @@
     std::string errorMessage;
     bool result = sendCounterPacketTest.CreateEventRecordTest(counter, eventRecord, errorMessage);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK(!errorMessage.empty());
-    BOOST_CHECK(eventRecord.empty());
+    CHECK(!result);
+    CHECK(!errorMessage.empty());
+    CHECK(eventRecord.empty());
 }
 
-BOOST_AUTO_TEST_CASE(CreateInvalidEventRecordTest3)
+TEST_CASE("CreateInvalidEventRecordTest3")
 {
     MockBufferManager mockBuffer(0);
     SendCounterPacketTest sendCounterPacketTest(mockBuffer);
@@ -880,12 +882,12 @@
     std::string errorMessage;
     bool result = sendCounterPacketTest.CreateEventRecordTest(counter, eventRecord, errorMessage);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK(!errorMessage.empty());
-    BOOST_CHECK(eventRecord.empty());
+    CHECK(!result);
+    CHECK(!errorMessage.empty());
+    CHECK(eventRecord.empty());
 }
 
-BOOST_AUTO_TEST_CASE(CreateCategoryRecordTest)
+TEST_CASE("CreateCategoryRecordTest")
 {
     MockBufferManager mockBuffer(0);
     SendCounterPacketTest sendCounterPacketTest(mockBuffer);
@@ -947,9 +949,9 @@
     std::string errorMessage;
     bool result = sendCounterPacketTest.CreateCategoryRecordTest(category, counters, categoryRecord, errorMessage);
 
-    BOOST_CHECK(result);
-    BOOST_CHECK(errorMessage.empty());
-    BOOST_CHECK(categoryRecord.size() == 79); // Size in words: header [3] + event pointer table [3] +
+    CHECK(result);
+    CHECK(errorMessage.empty());
+    CHECK(categoryRecord.size() == 79); // Size in words: header [3] + event pointer table [3] +
                                               //                category name [5] + event records [68 = 22 + 20 + 26]
 
     uint16_t categoryRecordWord1[]
@@ -957,8 +959,8 @@
         static_cast<uint16_t>(categoryRecord[0] >> 16),
         static_cast<uint16_t>(categoryRecord[0])
     };
-    BOOST_CHECK(categoryRecordWord1[0] == categoryEventCount); // event_count
-    BOOST_CHECK(categoryRecordWord1[1] == 0); // reserved
+    CHECK(categoryRecordWord1[0] == categoryEventCount); // event_count
+    CHECK(categoryRecordWord1[1] == 0); // reserved
 
     size_t uint32_t_size = sizeof(uint32_t);
 
@@ -969,8 +971,8 @@
                                   categoryEventCount * uint32_t_size; // The size of the event pointer table
     ARMNN_NO_CONVERSION_WARN_END
 
-    BOOST_CHECK(categoryRecord[1] == eventPointerTableOffset); // event_pointer_table_offset
-    BOOST_CHECK(categoryRecord[2] == categoryNameOffset); // name_offset
+    CHECK(categoryRecord[1] == eventPointerTableOffset); // event_pointer_table_offset
+    CHECK(categoryRecord[2] == categoryNameOffset); // name_offset
     // Offsets are relative to the start of the category record
     auto categoryRecordPool = reinterpret_cast<unsigned char*>(categoryRecord.data());
 
@@ -978,20 +980,20 @@
     uint32_t eventRecord0Offset = categoryRecordPool[eventPointerTableOffset + 0 * uint32_t_size];
     uint32_t eventRecord1Offset = categoryRecordPool[eventPointerTableOffset + 1 * uint32_t_size];
     uint32_t eventRecord2Offset = categoryRecordPool[eventPointerTableOffset + 2 * uint32_t_size];
-    BOOST_CHECK(eventRecord0Offset == 32);
-    BOOST_CHECK(eventRecord1Offset == 120);
-    BOOST_CHECK(eventRecord2Offset == 200);
+    CHECK(eventRecord0Offset == 32);
+    CHECK(eventRecord1Offset == 120);
+    CHECK(eventRecord2Offset == 200);
 
     // The length of the SWTrace namestring (name)
-    BOOST_CHECK(categoryRecordPool[categoryNameOffset] == categoryName.size() + 1);
+    CHECK(categoryRecordPool[categoryNameOffset] == categoryName.size() + 1);
     // The category name
-    BOOST_CHECK(std::memcmp(categoryRecordPool +
+    CHECK(std::memcmp(categoryRecordPool +
                             categoryNameOffset + // Offset
                             uint32_t_size, // The length of the name
                             categoryName.data(),
                             categoryName.size()) == 0); // name
     // The null-terminator at the end of the name
-    BOOST_CHECK(categoryRecordPool[categoryNameOffset + uint32_t_size + categoryName.size()] == '\0');
+    CHECK(categoryRecordPool[categoryNameOffset + uint32_t_size + categoryName.size()] == '\0');
 
     // For brevity, checking only the UIDs, max counter UIDs and names of the counters in the event records,
     // as the event records already have a number of unit tests dedicated to them
@@ -1000,22 +1002,22 @@
     uint16_t eventRecord0Word0[2] = { 0u, 0u };
     std::memcpy(eventRecord0Word0, categoryRecordPool + categoryRecordBlockSize + eventRecord0Offset,
                 sizeof(eventRecord0Word0));
-    BOOST_CHECK(eventRecord0Word0[0] == counter1->m_Uid);
-    BOOST_CHECK(eventRecord0Word0[1] == counter1->m_MaxCounterUid);
+    CHECK(eventRecord0Word0[0] == counter1->m_Uid);
+    CHECK(eventRecord0Word0[1] == counter1->m_MaxCounterUid);
 
     // Counter1 name
     uint32_t counter1NameOffset = 0;
     std::memcpy(&counter1NameOffset, categoryRecordPool  + eventRecord0Offset + 5u * uint32_t_size, uint32_t_size);
-    BOOST_CHECK(counter1NameOffset == 0);
+    CHECK(counter1NameOffset == 0);
     // The length of the SWTrace string (name)
-    BOOST_CHECK(categoryRecordPool[eventRecord0Offset +       // Offset to the event record
+    CHECK(categoryRecordPool[eventRecord0Offset +       // Offset to the event record
                                    categoryRecordBlockSize  + // Offset to the end of the category record block
                                    8u * uint32_t_size +       // Offset to the event record pool
                                    counter1NameOffset         // Offset to the name of the counter
                                   ] == counter1->m_Name.size() + 1); // The length of the name including the
                                                                      // null-terminator
     // The counter1 name
-    BOOST_CHECK(std::memcmp(categoryRecordPool +      // The beginning of the category pool
+    CHECK(std::memcmp(categoryRecordPool +      // The beginning of the category pool
                             categoryRecordBlockSize + // Offset to the end of the category record block
                             eventRecord0Offset +      // Offset to the event record
                             8u * uint32_t_size +      // Offset to the event record pool
@@ -1024,7 +1026,7 @@
                             counter1->m_Name.data(),
                             counter1->m_Name.size()) == 0); // name
     // The null-terminator at the end of the counter1 name
-    BOOST_CHECK(categoryRecordPool[eventRecord0Offset +      // Offset to the event record
+    CHECK(categoryRecordPool[eventRecord0Offset +      // Offset to the event record
                                    categoryRecordBlockSize + // Offset to the end of the category record block
                                    8u * uint32_t_size +      // Offset to the event record pool
                                    counter1NameOffset +      // Offset to the name of the counter
@@ -1039,16 +1041,16 @@
                                      eventRecord1Offset +
                                      5u * uint32_t_size,
                                      uint32_t_size);
-    BOOST_CHECK(counter2NameOffset == 8u * uint32_t_size );
+    CHECK(counter2NameOffset == 8u * uint32_t_size);
     // The length of the SWTrace string (name)
 
-    BOOST_CHECK(categoryRecordPool[eventRecord1Offset + // Offset to the event record
+    CHECK(categoryRecordPool[eventRecord1Offset + // Offset to the event record
                                    categoryRecordBlockSize +
                                    counter2NameOffset   // Offset to the name of the counter
                                   ] == counter2->m_Name.size() + 1); // The length of the name including the
                                                                      // null-terminator
     // The counter2 name
-    BOOST_CHECK(std::memcmp(categoryRecordPool +      // The beginning of the category pool
+    CHECK(std::memcmp(categoryRecordPool +      // The beginning of the category pool
                             categoryRecordBlockSize + // Offset to the end of the category record block
                             eventRecord1Offset +      // Offset to the event record
                             counter2NameOffset +      // Offset to the name of the counter
@@ -1058,7 +1060,7 @@
 
 
     // The null-terminator at the end of the counter2 name
-    BOOST_CHECK(categoryRecordPool[eventRecord1Offset +      // Offset to the event record
+    CHECK(categoryRecordPool[eventRecord1Offset +      // Offset to the event record
                                    categoryRecordBlockSize + // Offset to the end of the category record block
                                    counter2NameOffset +      // Offset to the name of the counter
                                    uint32_t_size +           // The length of the name
@@ -1068,16 +1070,16 @@
     // Counter3 name
     uint32_t counter3NameOffset = 0;
     std::memcpy(&counter3NameOffset, categoryRecordPool + eventRecord2Offset + 5u * uint32_t_size, uint32_t_size);
-    BOOST_CHECK(counter3NameOffset == 0);
+    CHECK(counter3NameOffset == 0);
     // The length of the SWTrace string (name)
-    BOOST_CHECK(categoryRecordPool[eventRecord2Offset + // Offset to the event record
+    CHECK(categoryRecordPool[eventRecord2Offset + // Offset to the event record
                                    categoryRecordBlockSize +
                                    8u * uint32_t_size + // Offset to the event record pool
                                    counter3NameOffset   // Offset to the name of the counter
                                   ] == counter3->m_Name.size() + 1); // The length of the name including the
                                                                      // null-terminator
     // The counter3 name
-    BOOST_CHECK(std::memcmp(categoryRecordPool + // The beginning of the category pool
+    CHECK(std::memcmp(categoryRecordPool + // The beginning of the category pool
                             categoryRecordBlockSize +
                             eventRecord2Offset + // Offset to the event record
                             8u * uint32_t_size + // Offset to the event record pool
@@ -1086,7 +1088,7 @@
                             counter3->m_Name.data(),
                             counter3->m_Name.size()) == 0); // name
     // The null-terminator at the end of the counter3 name
-    BOOST_CHECK(categoryRecordPool[eventRecord2Offset +    // Offset to the event record
+    CHECK(categoryRecordPool[eventRecord2Offset +    // Offset to the event record
                                    categoryRecordBlockSize +
                                    8u * uint32_t_size +    // Offset to the event record pool
                                    counter3NameOffset +    // Offset to the name of the counter
@@ -1095,7 +1097,7 @@
                                    ] == '\0');
 }
 
-BOOST_AUTO_TEST_CASE(CreateInvalidCategoryRecordTest1)
+TEST_CASE("CreateInvalidCategoryRecordTest1")
 {
     MockBufferManager mockBuffer(0);
     SendCounterPacketTest sendCounterPacketTest(mockBuffer);
@@ -1103,7 +1105,7 @@
     // Create a category for testing
     const std::string categoryName = "some invalid category";
     const CategoryPtr category = std::make_unique<Category>(categoryName);
-    BOOST_CHECK(category);
+    CHECK(category);
 
     // Create a category record
     Counters counters;
@@ -1111,12 +1113,12 @@
     std::string errorMessage;
     bool result = sendCounterPacketTest.CreateCategoryRecordTest(category, counters, categoryRecord, errorMessage);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK(!errorMessage.empty());
-    BOOST_CHECK(categoryRecord.empty());
+    CHECK(!result);
+    CHECK(!errorMessage.empty());
+    CHECK(categoryRecord.empty());
 }
 
-BOOST_AUTO_TEST_CASE(CreateInvalidCategoryRecordTest2)
+TEST_CASE("CreateInvalidCategoryRecordTest2")
 {
     MockBufferManager mockBuffer(0);
     SendCounterPacketTest sendCounterPacketTest(mockBuffer);
@@ -1124,7 +1126,7 @@
     // Create a category for testing
     const std::string categoryName = "some_category";
     const CategoryPtr category = std::make_unique<Category>(categoryName);
-    BOOST_CHECK(category);
+    CHECK(category);
     category->m_Counters = { 11u, 23u, 5670u };
 
     // Create a collection of counters
@@ -1143,19 +1145,19 @@
                                                                                 0))));
 
     Counter* counter1 = counters.find(11)->second.get();
-    BOOST_CHECK(counter1);
+    CHECK(counter1);
 
     // Create a category record
     SendCounterPacket::CategoryRecord categoryRecord;
     std::string errorMessage;
     bool result = sendCounterPacketTest.CreateCategoryRecordTest(category, counters, categoryRecord, errorMessage);
 
-    BOOST_CHECK(!result);
-    BOOST_CHECK(!errorMessage.empty());
-    BOOST_CHECK(categoryRecord.empty());
+    CHECK(!result);
+    CHECK(!errorMessage.empty());
+    CHECK(categoryRecord.empty());
 }
 
-BOOST_AUTO_TEST_CASE(SendCounterDirectoryPacketTest1)
+TEST_CASE("SendCounterDirectoryPacketTest1")
 {
     // The counter directory used for testing
     CounterDirectory counterDirectory;
@@ -1163,25 +1165,25 @@
     // Register a device
     const std::string device1Name = "device1";
     const Device* device1 = nullptr;
-    BOOST_CHECK_NO_THROW(device1 = counterDirectory.RegisterDevice(device1Name, 3));
-    BOOST_CHECK(counterDirectory.GetDeviceCount() == 1);
-    BOOST_CHECK(device1);
+    CHECK_NOTHROW(device1 = counterDirectory.RegisterDevice(device1Name, 3));
+    CHECK(counterDirectory.GetDeviceCount() == 1);
+    CHECK(device1);
 
     // Register a device
     const std::string device2Name = "device2";
     const Device* device2 = nullptr;
-    BOOST_CHECK_NO_THROW(device2 = counterDirectory.RegisterDevice(device2Name));
-    BOOST_CHECK(counterDirectory.GetDeviceCount() == 2);
-    BOOST_CHECK(device2);
+    CHECK_NOTHROW(device2 = counterDirectory.RegisterDevice(device2Name));
+    CHECK(counterDirectory.GetDeviceCount() == 2);
+    CHECK(device2);
 
     // Buffer with not enough space
     MockBufferManager mockBuffer(10);
     SendCounterPacket sendCounterPacket(mockBuffer);
-    BOOST_CHECK_THROW(sendCounterPacket.SendCounterDirectoryPacket(counterDirectory),
+    CHECK_THROWS_AS(sendCounterPacket.SendCounterDirectoryPacket(counterDirectory),
                       armnn::profiling::BufferExhaustion);
 }
 
-BOOST_AUTO_TEST_CASE(SendCounterDirectoryPacketTest2)
+TEST_CASE("SendCounterDirectoryPacketTest2")
 {
     // The counter directory used for testing
     CounterDirectory counterDirectory;
@@ -1189,43 +1191,43 @@
     // Register a device
     const std::string device1Name = "device1";
     const Device* device1 = nullptr;
-    BOOST_CHECK_NO_THROW(device1 = counterDirectory.RegisterDevice(device1Name, 3));
-    BOOST_CHECK(counterDirectory.GetDeviceCount() == 1);
-    BOOST_CHECK(device1);
+    CHECK_NOTHROW(device1 = counterDirectory.RegisterDevice(device1Name, 3));
+    CHECK(counterDirectory.GetDeviceCount() == 1);
+    CHECK(device1);
 
     // Register a device
     const std::string device2Name = "device2";
     const Device* device2 = nullptr;
-    BOOST_CHECK_NO_THROW(device2 = counterDirectory.RegisterDevice(device2Name));
-    BOOST_CHECK(counterDirectory.GetDeviceCount() == 2);
-    BOOST_CHECK(device2);
+    CHECK_NOTHROW(device2 = counterDirectory.RegisterDevice(device2Name));
+    CHECK(counterDirectory.GetDeviceCount() == 2);
+    CHECK(device2);
 
     // Register a counter set
     const std::string counterSet1Name = "counterset1";
     const CounterSet* counterSet1 = nullptr;
-    BOOST_CHECK_NO_THROW(counterSet1 = counterDirectory.RegisterCounterSet(counterSet1Name));
-    BOOST_CHECK(counterDirectory.GetCounterSetCount() == 1);
-    BOOST_CHECK(counterSet1);
+    CHECK_NOTHROW(counterSet1 = counterDirectory.RegisterCounterSet(counterSet1Name));
+    CHECK(counterDirectory.GetCounterSetCount() == 1);
+    CHECK(counterSet1);
 
     // Register a category associated to "device1" and "counterset1"
     const std::string category1Name = "category1";
     const Category* category1 = nullptr;
-    BOOST_CHECK_NO_THROW(category1 = counterDirectory.RegisterCategory(category1Name));
-    BOOST_CHECK(counterDirectory.GetCategoryCount() == 1);
-    BOOST_CHECK(category1);
+    CHECK_NOTHROW(category1 = counterDirectory.RegisterCategory(category1Name));
+    CHECK(counterDirectory.GetCategoryCount() == 1);
+    CHECK(category1);
 
     // Register a second category, with no associated device or counter set
     const std::string category2Name = "category2";
     const Category* category2 = nullptr;
-    BOOST_CHECK_NO_THROW(category2 = counterDirectory.RegisterCategory(category2Name));
-    BOOST_CHECK(counterDirectory.GetCategoryCount() == 2);
-    BOOST_CHECK(category2);
+    CHECK_NOTHROW(category2 = counterDirectory.RegisterCategory(category2Name));
+    CHECK(counterDirectory.GetCategoryCount() == 2);
+    CHECK(category2);
 
     uint16_t numberOfCores = 4;
 
     // Register a counter associated to "category1"
     const Counter* counter1 = nullptr;
-    BOOST_CHECK_NO_THROW(counter1 = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
+    CHECK_NOTHROW(counter1 = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
                                                                      0,
                                                                      category1Name,
                                                                      0,
@@ -1235,12 +1237,12 @@
                                                                      "counter1description",
                                                                      std::string("counter1units"),
                                                                      numberOfCores));
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 4);
-    BOOST_CHECK(counter1);
+    CHECK(counterDirectory.GetCounterCount() == 4);
+    CHECK(counter1);
 
     // Register a counter associated to "category1"
     const Counter* counter2 = nullptr;
-    BOOST_CHECK_NO_THROW(counter2 = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
+    CHECK_NOTHROW(counter2 = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
                                                                      4,
                                                                      category1Name,
                                                                      1,
@@ -1252,12 +1254,12 @@
                                                                      armnn::EmptyOptional(),
                                                                      device2->m_Uid,
                                                                      0));
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 5);
-    BOOST_CHECK(counter2);
+    CHECK(counterDirectory.GetCounterCount() == 5);
+    CHECK(counter2);
 
     // Register a counter associated to "category2"
     const Counter* counter3 = nullptr;
-    BOOST_CHECK_NO_THROW(counter3 = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
+    CHECK_NOTHROW(counter3 = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
                                                                      5,
                                                                      category2Name,
                                                                      1,
@@ -1269,13 +1271,13 @@
                                                                      numberOfCores,
                                                                      device2->m_Uid,
                                                                      counterSet1->m_Uid));
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 9);
-    BOOST_CHECK(counter3);
+    CHECK(counterDirectory.GetCounterCount() == 9);
+    CHECK(counter3);
 
     // Buffer with enough space
     MockBufferManager mockBuffer(1024);
     SendCounterPacket sendCounterPacket(mockBuffer);
-    BOOST_CHECK_NO_THROW(sendCounterPacket.SendCounterDirectoryPacket(counterDirectory));
+    CHECK_NOTHROW(sendCounterPacket.SendCounterDirectoryPacket(counterDirectory));
 
     // Get the readable buffer
     auto readBuffer = mockBuffer.GetReadableBuffer();
@@ -1283,9 +1285,9 @@
     // Check the packet header
     const uint32_t packetHeaderWord0 = ReadUint32(readBuffer, 0);
     const uint32_t packetHeaderWord1 = ReadUint32(readBuffer, 4);
-    BOOST_TEST(((packetHeaderWord0 >> 26) & 0x3F) == 0);  // packet_family
-    BOOST_TEST(((packetHeaderWord0 >> 16) & 0x3FF) == 2); // packet_id
-    BOOST_TEST(packetHeaderWord1 == 432);                 // data_length
+    CHECK(((packetHeaderWord0 >> 26) & 0x3F) == 0);  // packet_family
+    CHECK(((packetHeaderWord0 >> 16) & 0x3FF) == 2); // packet_id
+    CHECK(packetHeaderWord1 == 432);                 // data_length
 
     // Check the body header
     const uint32_t bodyHeaderWord0 = ReadUint32(readBuffer,  8);
@@ -1297,28 +1299,28 @@
     const uint16_t deviceRecordCount     = static_cast<uint16_t>(bodyHeaderWord0 >> 16);
     const uint16_t counterSetRecordCount = static_cast<uint16_t>(bodyHeaderWord2 >> 16);
     const uint16_t categoryRecordCount   = static_cast<uint16_t>(bodyHeaderWord4 >> 16);
-    BOOST_TEST(deviceRecordCount == 2);                      // device_records_count
-    BOOST_TEST(bodyHeaderWord1 == bodyHeaderSize * 4);           // device_records_pointer_table_offset
-    BOOST_TEST(counterSetRecordCount == 1);                  // counter_set_count
-    BOOST_TEST(bodyHeaderWord3 == 8 + bodyHeaderSize * 4);       // counter_set_pointer_table_offset
-    BOOST_TEST(categoryRecordCount == 2);                    // categories_count
-    BOOST_TEST(bodyHeaderWord5 == 12 + bodyHeaderSize * 4);      // categories_pointer_table_offset
+    CHECK(deviceRecordCount == 2);                     // device_records_count
+    CHECK(bodyHeaderWord1 == bodyHeaderSize * 4);      // device_records_pointer_table_offset
+    CHECK(counterSetRecordCount == 1);                 // counter_set_count
+    CHECK(bodyHeaderWord3 == 8 + bodyHeaderSize * 4);  // counter_set_pointer_table_offset
+    CHECK(categoryRecordCount == 2);                   // categories_count
+    CHECK(bodyHeaderWord5 == 12 + bodyHeaderSize * 4); // categories_pointer_table_offset
 
     // Check the device records pointer table
     const uint32_t deviceRecordOffset0 = ReadUint32(readBuffer, 32);
     const uint32_t deviceRecordOffset1 = ReadUint32(readBuffer, 36);
-    BOOST_TEST(deviceRecordOffset0 == 20); // Device record offset for "device1"
-    BOOST_TEST(deviceRecordOffset1 == 40); // Device record offset for "device2"
+    CHECK(deviceRecordOffset0 == 20); // Device record offset for "device1"
+    CHECK(deviceRecordOffset1 == 40); // Device record offset for "device2"
 
     // Check the counter set pointer table
     const uint32_t counterSetRecordOffset0 = ReadUint32(readBuffer, 40);
-    BOOST_TEST(counterSetRecordOffset0 == 52); // Counter set record offset for "counterset1"
+    CHECK(counterSetRecordOffset0 == 52); // Counter set record offset for "counterset1"
 
     // Check the category pointer table
     const uint32_t categoryRecordOffset0 = ReadUint32(readBuffer, 44);
     const uint32_t categoryRecordOffset1 = ReadUint32(readBuffer, 48);
-    BOOST_TEST(categoryRecordOffset0 ==  72); // Category record offset for "category1"
-    BOOST_TEST(categoryRecordOffset1 == 176); // Category record offset for "category2"
+    CHECK(categoryRecordOffset0 ==  72); // Category record offset for "category1"
+    CHECK(categoryRecordOffset1 == 176); // Category record offset for "category2"
 
     // Get the device record pool offset
     const uint32_t uint32_t_size = sizeof(uint32_t);
@@ -1373,7 +1375,7 @@
         deviceRecord.name_length = deviceRecordNameLength; // name_length
         unsigned char deviceRecordNameNullTerminator = // name null-terminator
                 ReadUint8(readBuffer, deviceRecordPoolOffset + uint32_t_size + deviceRecordNameLength - 1);
-        BOOST_CHECK(deviceRecordNameNullTerminator == '\0');
+        CHECK(deviceRecordNameNullTerminator == '\0');
         std::vector<unsigned char> deviceRecordNameBuffer(deviceRecord.name_length - 1);
         std::memcpy(deviceRecordNameBuffer.data(),
                     readData + deviceRecordPoolOffset + uint32_t_size, deviceRecordNameBuffer.size());
@@ -1383,14 +1385,14 @@
     }
 
     // Check that the device records are correct
-    BOOST_CHECK(deviceRecords.size() == 2);
+    CHECK(deviceRecords.size() == 2);
     for (const DeviceRecord& deviceRecord : deviceRecords)
     {
         const Device* device = counterDirectory.GetDevice(deviceRecord.uid);
-        BOOST_CHECK(device);
-        BOOST_CHECK(device->m_Uid   == deviceRecord.uid);
-        BOOST_CHECK(device->m_Cores == deviceRecord.cores);
-        BOOST_CHECK(device->m_Name  == deviceRecord.name);
+        CHECK(device);
+        CHECK(device->m_Uid   == deviceRecord.uid);
+        CHECK(device->m_Cores == deviceRecord.cores);
+        CHECK(device->m_Name  == deviceRecord.name);
     }
 
 
@@ -1433,7 +1435,7 @@
         counterSetRecord.name_length = counterSetRecordNameLength; // name_length
         unsigned char counterSetRecordNameNullTerminator = // name null-terminator
                 ReadUint8(readBuffer, counterSetRecordPoolOffset + uint32_t_size + counterSetRecordNameLength - 1);
-        BOOST_CHECK(counterSetRecordNameNullTerminator == '\0');
+        CHECK(counterSetRecordNameNullTerminator == '\0');
         std::vector<unsigned char> counterSetRecordNameBuffer(counterSetRecord.name_length - 1);
         std::memcpy(counterSetRecordNameBuffer.data(),
                     readData + counterSetRecordPoolOffset + uint32_t_size, counterSetRecordNameBuffer.size());
@@ -1443,14 +1445,14 @@
     }
 
     // Check that the counter set records are correct
-    BOOST_CHECK(counterSetRecords.size() == 1);
+    CHECK(counterSetRecords.size() == 1);
     for (const CounterSetRecord& counterSetRecord : counterSetRecords)
     {
         const CounterSet* counterSet = counterDirectory.GetCounterSet(counterSetRecord.uid);
-        BOOST_CHECK(counterSet);
-        BOOST_CHECK(counterSet->m_Uid   == counterSetRecord.uid);
-        BOOST_CHECK(counterSet->m_Count == counterSetRecord.count);
-        BOOST_CHECK(counterSet->m_Name  == counterSetRecord.name);
+        CHECK(counterSet);
+        CHECK(counterSet->m_Uid   == counterSetRecord.uid);
+        CHECK(counterSet->m_Count == counterSetRecord.count);
+        CHECK(counterSet->m_Name  == counterSetRecord.name);
     }
 
     // Event record structure/collection used for testing
@@ -1517,7 +1519,7 @@
                           categoryRecord.name_offset +
                           uint32_t_size +
                           categoryRecordNameLength - 1); // name null-terminator
-        BOOST_CHECK(categoryRecordNameNullTerminator == '\0');
+        CHECK(categoryRecordNameNullTerminator == '\0');
         std::vector<unsigned char> categoryRecordNameBuffer(categoryRecord.name_length - 1);
         std::memcpy(categoryRecordNameBuffer.data(),
                     readData +
@@ -1570,7 +1572,7 @@
                               eventRecord.name_offset +
                               uint32_t_size +
                               eventRecordNameLength - 1); // name null-terminator
-            BOOST_CHECK(eventRecordNameNullTerminator == '\0');
+            CHECK(eventRecordNameNullTerminator == '\0');
             std::vector<unsigned char> eventRecordNameBuffer(eventRecord.name_length - 1);
             std::memcpy(eventRecordNameBuffer.data(),
                         readData +
@@ -1589,7 +1591,7 @@
                               eventRecord.description_offset +
                               uint32_t_size +
                               eventRecordDescriptionLength - 1); // description null-terminator
-            BOOST_CHECK(eventRecordDescriptionNullTerminator == '\0');
+            CHECK(eventRecordDescriptionNullTerminator == '\0');
             std::vector<unsigned char> eventRecordDescriptionBuffer(eventRecord.description_length - 1);
             std::memcpy(eventRecordDescriptionBuffer.data(),
                         readData +
@@ -1611,7 +1613,7 @@
                                   eventRecord.units_offset +
                                   uint32_t_size +
                                   eventRecordUnitsLength - 1); // units null-terminator
-                BOOST_CHECK(eventRecordUnitsNullTerminator == '\0');
+                CHECK(eventRecordUnitsNullTerminator == '\0');
                 std::vector<unsigned char> eventRecordUnitsBuffer(eventRecord.units_length - 1);
                 std::memcpy(eventRecordUnitsBuffer.data(),
                             readData +
@@ -1629,34 +1631,34 @@
     }
 
     // Check that the category records are correct
-    BOOST_CHECK(categoryRecords.size() == 2);
+    CHECK(categoryRecords.size() == 2);
     for (const CategoryRecord& categoryRecord : categoryRecords)
     {
         const Category* category = counterDirectory.GetCategory(categoryRecord.name);
-        BOOST_CHECK(category);
-        BOOST_CHECK(category->m_Name == categoryRecord.name);
-        BOOST_CHECK(category->m_Counters.size() == categoryRecord.event_count + static_cast<size_t>(numberOfCores) -1);
-        BOOST_CHECK(category->m_Counters.size() == categoryRecord.event_count + static_cast<size_t>(numberOfCores) -1);
+        CHECK(category);
+        CHECK(category->m_Name == categoryRecord.name);
+        CHECK(category->m_Counters.size() == categoryRecord.event_count + static_cast<size_t>(numberOfCores) -1);
+        CHECK(category->m_Counters.size() == categoryRecord.event_count + static_cast<size_t>(numberOfCores) -1);
 
         // Check that the event records are correct
         for (const EventRecord& eventRecord : categoryRecord.event_records)
         {
             const Counter* counter = counterDirectory.GetCounter(eventRecord.counter_uid);
-            BOOST_CHECK(counter);
-            BOOST_CHECK(counter->m_MaxCounterUid == eventRecord.max_counter_uid);
-            BOOST_CHECK(counter->m_DeviceUid == eventRecord.device);
-            BOOST_CHECK(counter->m_CounterSetUid == eventRecord.counter_set);
-            BOOST_CHECK(counter->m_Class == eventRecord.counter_class);
-            BOOST_CHECK(counter->m_Interpolation == eventRecord.interpolation);
-            BOOST_CHECK(counter->m_Multiplier == eventRecord.multiplier);
-            BOOST_CHECK(counter->m_Name == eventRecord.name);
-            BOOST_CHECK(counter->m_Description == eventRecord.description);
-            BOOST_CHECK(counter->m_Units == eventRecord.units);
+            CHECK(counter);
+            CHECK(counter->m_MaxCounterUid == eventRecord.max_counter_uid);
+            CHECK(counter->m_DeviceUid == eventRecord.device);
+            CHECK(counter->m_CounterSetUid == eventRecord.counter_set);
+            CHECK(counter->m_Class == eventRecord.counter_class);
+            CHECK(counter->m_Interpolation == eventRecord.interpolation);
+            CHECK(counter->m_Multiplier == eventRecord.multiplier);
+            CHECK(counter->m_Name == eventRecord.name);
+            CHECK(counter->m_Description == eventRecord.description);
+            CHECK(counter->m_Units == eventRecord.units);
         }
     }
 }
 
-BOOST_AUTO_TEST_CASE(SendCounterDirectoryPacketTest3)
+TEST_CASE("SendCounterDirectoryPacketTest3")
 {
     // Using a mock counter directory that allows to register invalid objects
     MockCounterDirectory counterDirectory;
@@ -1664,17 +1666,17 @@
     // Register an invalid device
     const std::string deviceName = "inv@lid dev!c€";
     const Device* device = nullptr;
-    BOOST_CHECK_NO_THROW(device = counterDirectory.RegisterDevice(deviceName, 3));
-    BOOST_CHECK(counterDirectory.GetDeviceCount() == 1);
-    BOOST_CHECK(device);
+    CHECK_NOTHROW(device = counterDirectory.RegisterDevice(deviceName, 3));
+    CHECK(counterDirectory.GetDeviceCount() == 1);
+    CHECK(device);
 
     // Buffer with enough space
     MockBufferManager mockBuffer(1024);
     SendCounterPacket sendCounterPacket(mockBuffer);
-    BOOST_CHECK_THROW(sendCounterPacket.SendCounterDirectoryPacket(counterDirectory), armnn::RuntimeException);
+    CHECK_THROWS_AS(sendCounterPacket.SendCounterDirectoryPacket(counterDirectory), armnn::RuntimeException);
 }
 
-BOOST_AUTO_TEST_CASE(SendCounterDirectoryPacketTest4)
+TEST_CASE("SendCounterDirectoryPacketTest4")
 {
     // Using a mock counter directory that allows to register invalid objects
     MockCounterDirectory counterDirectory;
@@ -1682,17 +1684,17 @@
     // Register an invalid counter set
     const std::string counterSetName = "inv@lid count€rs€t";
     const CounterSet* counterSet = nullptr;
-    BOOST_CHECK_NO_THROW(counterSet = counterDirectory.RegisterCounterSet(counterSetName));
-    BOOST_CHECK(counterDirectory.GetCounterSetCount() == 1);
-    BOOST_CHECK(counterSet);
+    CHECK_NOTHROW(counterSet = counterDirectory.RegisterCounterSet(counterSetName));
+    CHECK(counterDirectory.GetCounterSetCount() == 1);
+    CHECK(counterSet);
 
     // Buffer with enough space
     MockBufferManager mockBuffer(1024);
     SendCounterPacket sendCounterPacket(mockBuffer);
-    BOOST_CHECK_THROW(sendCounterPacket.SendCounterDirectoryPacket(counterDirectory), armnn::RuntimeException);
+    CHECK_THROWS_AS(sendCounterPacket.SendCounterDirectoryPacket(counterDirectory), armnn::RuntimeException);
 }
 
-BOOST_AUTO_TEST_CASE(SendCounterDirectoryPacketTest5)
+TEST_CASE("SendCounterDirectoryPacketTest5")
 {
     // Using a mock counter directory that allows to register invalid objects
     MockCounterDirectory counterDirectory;
@@ -1700,17 +1702,17 @@
     // Register an invalid category
     const std::string categoryName = "c@t€gory";
     const Category* category = nullptr;
-    BOOST_CHECK_NO_THROW(category = counterDirectory.RegisterCategory(categoryName));
-    BOOST_CHECK(counterDirectory.GetCategoryCount() == 1);
-    BOOST_CHECK(category);
+    CHECK_NOTHROW(category = counterDirectory.RegisterCategory(categoryName));
+    CHECK(counterDirectory.GetCategoryCount() == 1);
+    CHECK(category);
 
     // Buffer with enough space
     MockBufferManager mockBuffer(1024);
     SendCounterPacket sendCounterPacket(mockBuffer);
-    BOOST_CHECK_THROW(sendCounterPacket.SendCounterDirectoryPacket(counterDirectory), armnn::RuntimeException);
+    CHECK_THROWS_AS(sendCounterPacket.SendCounterDirectoryPacket(counterDirectory), armnn::RuntimeException);
 }
 
-BOOST_AUTO_TEST_CASE(SendCounterDirectoryPacketTest6)
+TEST_CASE("SendCounterDirectoryPacketTest6")
 {
     // Using a mock counter directory that allows to register invalid objects
     MockCounterDirectory counterDirectory;
@@ -1718,31 +1720,31 @@
     // Register an invalid device
     const std::string deviceName = "inv@lid dev!c€";
     const Device* device = nullptr;
-    BOOST_CHECK_NO_THROW(device = counterDirectory.RegisterDevice(deviceName, 3));
-    BOOST_CHECK(counterDirectory.GetDeviceCount() == 1);
-    BOOST_CHECK(device);
+    CHECK_NOTHROW(device = counterDirectory.RegisterDevice(deviceName, 3));
+    CHECK(counterDirectory.GetDeviceCount() == 1);
+    CHECK(device);
 
     // Register an invalid counter set
     const std::string counterSetName = "inv@lid count€rs€t";
     const CounterSet* counterSet = nullptr;
-    BOOST_CHECK_NO_THROW(counterSet = counterDirectory.RegisterCounterSet(counterSetName));
-    BOOST_CHECK(counterDirectory.GetCounterSetCount() == 1);
-    BOOST_CHECK(counterSet);
+    CHECK_NOTHROW(counterSet = counterDirectory.RegisterCounterSet(counterSetName));
+    CHECK(counterDirectory.GetCounterSetCount() == 1);
+    CHECK(counterSet);
 
     // Register an invalid category associated to an invalid device and an invalid counter set
     const std::string categoryName = "c@t€gory";
     const Category* category = nullptr;
-    BOOST_CHECK_NO_THROW(category = counterDirectory.RegisterCategory(categoryName));
-    BOOST_CHECK(counterDirectory.GetCategoryCount() == 1);
-    BOOST_CHECK(category);
+    CHECK_NOTHROW(category = counterDirectory.RegisterCategory(categoryName));
+    CHECK(counterDirectory.GetCategoryCount() == 1);
+    CHECK(category);
 
     // Buffer with enough space
     MockBufferManager mockBuffer(1024);
     SendCounterPacket sendCounterPacket(mockBuffer);
-    BOOST_CHECK_THROW(sendCounterPacket.SendCounterDirectoryPacket(counterDirectory), armnn::RuntimeException);
+    CHECK_THROWS_AS(sendCounterPacket.SendCounterDirectoryPacket(counterDirectory), armnn::RuntimeException);
 }
 
-BOOST_AUTO_TEST_CASE(SendCounterDirectoryPacketTest7)
+TEST_CASE("SendCounterDirectoryPacketTest7")
 {
     // Using a mock counter directory that allows to register invalid objects
     MockCounterDirectory counterDirectory;
@@ -1750,27 +1752,27 @@
     // Register a valid device
     const std::string deviceName = "valid device";
     const Device* device = nullptr;
-    BOOST_CHECK_NO_THROW(device = counterDirectory.RegisterDevice(deviceName, 3));
-    BOOST_CHECK(counterDirectory.GetDeviceCount() == 1);
-    BOOST_CHECK(device);
+    CHECK_NOTHROW(device = counterDirectory.RegisterDevice(deviceName, 3));
+    CHECK(counterDirectory.GetDeviceCount() == 1);
+    CHECK(device);
 
     // Register a valid counter set
     const std::string counterSetName = "valid counterset";
     const CounterSet* counterSet = nullptr;
-    BOOST_CHECK_NO_THROW(counterSet = counterDirectory.RegisterCounterSet(counterSetName));
-    BOOST_CHECK(counterDirectory.GetCounterSetCount() == 1);
-    BOOST_CHECK(counterSet);
+    CHECK_NOTHROW(counterSet = counterDirectory.RegisterCounterSet(counterSetName));
+    CHECK(counterDirectory.GetCounterSetCount() == 1);
+    CHECK(counterSet);
 
     // Register a valid category associated to a valid device and a valid counter set
     const std::string categoryName = "category";
     const Category* category = nullptr;
-    BOOST_CHECK_NO_THROW(category = counterDirectory.RegisterCategory(categoryName));
-    BOOST_CHECK(counterDirectory.GetCategoryCount() == 1);
-    BOOST_CHECK(category);
+    CHECK_NOTHROW(category = counterDirectory.RegisterCategory(categoryName));
+    CHECK(counterDirectory.GetCategoryCount() == 1);
+    CHECK(category);
 
     // Register an invalid counter associated to a valid category
     const Counter* counter = nullptr;
-    BOOST_CHECK_NO_THROW(counter = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
+    CHECK_NOTHROW(counter = counterDirectory.RegisterCounter(armnn::profiling::BACKEND_ID,
                                                                     0,
                                                                     categoryName,
                                                                     0,
@@ -1782,16 +1784,16 @@
                                                                     5,
                                                                     device->m_Uid,
                                                                     counterSet->m_Uid));
-    BOOST_CHECK(counterDirectory.GetCounterCount() == 5);
-    BOOST_CHECK(counter);
+    CHECK(counterDirectory.GetCounterCount() == 5);
+    CHECK(counter);
 
     // Buffer with enough space
     MockBufferManager mockBuffer(1024);
     SendCounterPacket sendCounterPacket(mockBuffer);
-    BOOST_CHECK_THROW(sendCounterPacket.SendCounterDirectoryPacket(counterDirectory), armnn::RuntimeException);
+    CHECK_THROWS_AS(sendCounterPacket.SendCounterDirectoryPacket(counterDirectory), armnn::RuntimeException);
 }
 
-BOOST_AUTO_TEST_CASE(SendThreadTest0)
+TEST_CASE("SendThreadTest0")
 {
     ProfilingStateMachine profilingStateMachine;
     SetActiveProfilingState(profilingStateMachine);
@@ -1804,18 +1806,18 @@
     // Try to start the send thread many times, it must only start once
 
     sendThread.Start(mockProfilingConnection);
-    BOOST_CHECK(sendThread.IsRunning());
+    CHECK(sendThread.IsRunning());
     sendThread.Start(mockProfilingConnection);
     sendThread.Start(mockProfilingConnection);
     sendThread.Start(mockProfilingConnection);
     sendThread.Start(mockProfilingConnection);
-    BOOST_CHECK(sendThread.IsRunning());
+    CHECK(sendThread.IsRunning());
 
     sendThread.Stop();
-    BOOST_CHECK(!sendThread.IsRunning());
+    CHECK(!sendThread.IsRunning());
 }
 
-BOOST_AUTO_TEST_CASE(SendThreadTest1)
+TEST_CASE("SendThreadTest1")
 {
     ProfilingStateMachine profilingStateMachine;
     SetActiveProfilingState(profilingStateMachine);
@@ -1918,12 +1920,12 @@
 
     sendThread.Stop();
 
-    BOOST_CHECK(mockStreamCounterBuffer.GetCommittedSize() == totalWrittenSize);
-    BOOST_CHECK(mockStreamCounterBuffer.GetReadableSize()  == totalWrittenSize);
-    BOOST_CHECK(mockStreamCounterBuffer.GetReadSize()      == totalWrittenSize);
+    CHECK(mockStreamCounterBuffer.GetCommittedSize() == totalWrittenSize);
+    CHECK(mockStreamCounterBuffer.GetReadableSize()  == totalWrittenSize);
+    CHECK(mockStreamCounterBuffer.GetReadSize()      == totalWrittenSize);
 }
 
-BOOST_AUTO_TEST_CASE(SendThreadTest2)
+TEST_CASE("SendThreadTest2")
 {
     ProfilingStateMachine profilingStateMachine;
     SetActiveProfilingState(profilingStateMachine);
@@ -2036,12 +2038,12 @@
     // read all what's remaining in the buffer
     sendThread.Stop();
 
-    BOOST_CHECK(mockStreamCounterBuffer.GetCommittedSize() == totalWrittenSize);
-    BOOST_CHECK(mockStreamCounterBuffer.GetReadableSize()  == totalWrittenSize);
-    BOOST_CHECK(mockStreamCounterBuffer.GetReadSize()      == totalWrittenSize);
+    CHECK(mockStreamCounterBuffer.GetCommittedSize() == totalWrittenSize);
+    CHECK(mockStreamCounterBuffer.GetReadableSize()  == totalWrittenSize);
+    CHECK(mockStreamCounterBuffer.GetReadSize()      == totalWrittenSize);
 }
 
-BOOST_AUTO_TEST_CASE(SendThreadTest3)
+TEST_CASE("SendThreadTest3")
 {
     ProfilingStateMachine profilingStateMachine;
     SetActiveProfilingState(profilingStateMachine);
@@ -2137,14 +2139,14 @@
     // thread is not guaranteed to flush the buffer)
     sendThread.Stop();
 
-    BOOST_CHECK(mockStreamCounterBuffer.GetCommittedSize() == totalWrittenSize);
-    BOOST_CHECK(mockStreamCounterBuffer.GetReadableSize()  <= totalWrittenSize);
-    BOOST_CHECK(mockStreamCounterBuffer.GetReadSize()      <= totalWrittenSize);
-    BOOST_CHECK(mockStreamCounterBuffer.GetReadSize()      <= mockStreamCounterBuffer.GetReadableSize());
-    BOOST_CHECK(mockStreamCounterBuffer.GetReadSize()      <= mockStreamCounterBuffer.GetCommittedSize());
+    CHECK(mockStreamCounterBuffer.GetCommittedSize() == totalWrittenSize);
+    CHECK(mockStreamCounterBuffer.GetReadableSize()  <= totalWrittenSize);
+    CHECK(mockStreamCounterBuffer.GetReadSize()      <= totalWrittenSize);
+    CHECK(mockStreamCounterBuffer.GetReadSize()      <= mockStreamCounterBuffer.GetReadableSize());
+    CHECK(mockStreamCounterBuffer.GetReadSize()      <= mockStreamCounterBuffer.GetCommittedSize());
 }
 
-BOOST_AUTO_TEST_CASE(SendCounterPacketTestWithSendThread)
+TEST_CASE("SendCounterPacketTestWithSendThread")
 {
     ProfilingStateMachine profilingStateMachine;
     SetWaitingForAckProfilingState(profilingStateMachine);
@@ -2160,7 +2162,7 @@
     sendThread.Stop();
 
     // check for packet in ProfilingConnection
-    BOOST_CHECK(mockProfilingConnection.CheckForPacket({PacketType::StreamMetaData, streamMetadataPacketsize}) == 1);
+    CHECK(mockProfilingConnection.CheckForPacket({PacketType::StreamMetaData, streamMetadataPacketsize}) == 1);
 
     SetActiveProfilingState(profilingStateMachine);
     sendThread.Start(mockProfilingConnection);
@@ -2172,7 +2174,7 @@
     sendThread.Stop();
     unsigned int counterDirectoryPacketSize = 32;
     // check for packet in ProfilingConnection
-    BOOST_CHECK(mockProfilingConnection.CheckForPacket(
+    CHECK(mockProfilingConnection.CheckForPacket(
         {PacketType::CounterDirectory, counterDirectoryPacketSize}) == 1);
 
     sendThread.Start(mockProfilingConnection);
@@ -2187,11 +2189,11 @@
     sendThread.Stop();
 
     unsigned int periodicCounterCapturePacketSize = 28;
-    BOOST_CHECK(mockProfilingConnection.CheckForPacket(
+    CHECK(mockProfilingConnection.CheckForPacket(
         {PacketType::PeriodicCounterCapture, periodicCounterCapturePacketSize}) == 1);
 }
 
-BOOST_AUTO_TEST_CASE(SendThreadBufferTest)
+TEST_CASE("SendThreadBufferTest")
 {
     ProfilingStateMachine profilingStateMachine;
     SetActiveProfilingState(profilingStateMachine);
@@ -2208,10 +2210,10 @@
     // Read data from the buffer
     // Buffer should become readable after commit by SendStreamMetaDataPacket
     auto packetBuffer = bufferManager.GetReadableBuffer();
-    BOOST_TEST(packetBuffer.get());
+    CHECK(packetBuffer.get());
 
     unsigned int streamMetadataPacketsize = GetStreamMetaDataPacketSize();
-    BOOST_TEST(packetBuffer->GetSize() == streamMetadataPacketsize);
+    CHECK(packetBuffer->GetSize() == streamMetadataPacketsize);
 
     // Recommit to be read by sendCounterPacket
     bufferManager.Commit(packetBuffer, streamMetadataPacketsize);
@@ -2231,26 +2233,26 @@
 
     // The buffer is read by the send thread so it should not be in the readable buffer.
     auto readBuffer = bufferManager.GetReadableBuffer();
-    BOOST_TEST(!readBuffer);
+    CHECK(!readBuffer);
 
     // Successfully reserved the buffer with requested size
     unsigned int reservedSize = 0;
     auto reservedBuffer = bufferManager.Reserve(512, reservedSize);
-    BOOST_TEST(reservedSize == 512);
-    BOOST_TEST(reservedBuffer.get());
+    CHECK(reservedSize == 512);
+    CHECK(reservedBuffer.get());
 
     const auto writtenDataSize = mockProfilingConnection.GetWrittenDataSize();
     const auto metaDataPacketCount =
             mockProfilingConnection.CheckForPacket({PacketType::StreamMetaData, streamMetadataPacketsize});
 
-    BOOST_TEST(metaDataPacketCount >= 1);
-    BOOST_TEST(mockProfilingConnection.CheckForPacket({PacketType::CounterDirectory, 32}) == 1);
-    BOOST_TEST(mockProfilingConnection.CheckForPacket({PacketType::PeriodicCounterCapture, 28}) == 1);
+    CHECK(metaDataPacketCount >= 1);
+    CHECK(mockProfilingConnection.CheckForPacket({PacketType::CounterDirectory, 32}) == 1);
+    CHECK(mockProfilingConnection.CheckForPacket({PacketType::PeriodicCounterCapture, 28}) == 1);
     // Check that we only received the packets we expected
-    BOOST_TEST(metaDataPacketCount + 2 == writtenDataSize);
+    CHECK(metaDataPacketCount + 2 == writtenDataSize);
 }
 
-BOOST_AUTO_TEST_CASE(SendThreadSendStreamMetadataPacket1)
+TEST_CASE("SendThreadSendStreamMetadataPacket1")
 {
     ProfilingStateMachine profilingStateMachine;
 
@@ -2261,10 +2263,10 @@
     sendThread.Start(mockProfilingConnection);
 
     // The profiling state is set to "Uninitialized", so the send thread should throw an exception
-    BOOST_CHECK_THROW(sendThread.Stop(), armnn::RuntimeException);
+    CHECK_THROWS_AS(sendThread.Stop(), armnn::RuntimeException);
 }
 
-BOOST_AUTO_TEST_CASE(SendThreadSendStreamMetadataPacket2)
+TEST_CASE("SendThreadSendStreamMetadataPacket2")
 {
     ProfilingStateMachine profilingStateMachine;
     SetNotConnectedProfilingState(profilingStateMachine);
@@ -2276,10 +2278,10 @@
     sendThread.Start(mockProfilingConnection);
 
     // The profiling state is set to "NotConnected", so the send thread should throw an exception
-    BOOST_CHECK_THROW(sendThread.Stop(), armnn::RuntimeException);
+    CHECK_THROWS_AS(sendThread.Stop(), armnn::RuntimeException);
 }
 
-BOOST_AUTO_TEST_CASE(SendThreadSendStreamMetadataPacket3)
+TEST_CASE("SendThreadSendStreamMetadataPacket3")
 {
     ProfilingStateMachine profilingStateMachine;
     SetWaitingForAckProfilingState(profilingStateMachine);
@@ -2294,17 +2296,17 @@
 
     // The profiling state is set to "WaitingForAck", so the send thread should send a Stream Metadata packet
     // Wait for sendThread to join
-    BOOST_CHECK_NO_THROW(sendThread.Stop());
+    CHECK_NOTHROW(sendThread.Stop());
 
     // Check that the buffer contains at least one Stream Metadata packet and no other packets
     const auto writtenDataSize = mockProfilingConnection.GetWrittenDataSize();
 
-    BOOST_TEST(writtenDataSize >= 1u);
-    BOOST_TEST(mockProfilingConnection.CheckForPacket(
+    CHECK(writtenDataSize >= 1u);
+    CHECK(mockProfilingConnection.CheckForPacket(
                   {PacketType::StreamMetaData, streamMetadataPacketsize}) == writtenDataSize);
 }
 
-BOOST_AUTO_TEST_CASE(SendThreadSendStreamMetadataPacket4)
+TEST_CASE("SendThreadSendStreamMetadataPacket4")
 {
     ProfilingStateMachine profilingStateMachine;
     SetWaitingForAckProfilingState(profilingStateMachine);
@@ -2323,10 +2325,10 @@
 
     sendThread.Start(mockProfilingConnection);
     // Check that the profiling state is still "WaitingForAck"
-    BOOST_TEST((profilingStateMachine.GetCurrentState() == ProfilingState::WaitingForAck));
+    CHECK((profilingStateMachine.GetCurrentState() == ProfilingState::WaitingForAck));
 
     // Check that the buffer contains at least one Stream Metadata packet
-    BOOST_TEST(mockProfilingConnection.CheckForPacket({PacketType::StreamMetaData, streamMetadataPacketsize}) >= 1);
+    CHECK(mockProfilingConnection.CheckForPacket({PacketType::StreamMetaData, streamMetadataPacketsize}) >= 1);
 
     mockProfilingConnection.Clear();
 
@@ -2337,17 +2339,17 @@
     sendThread.SetReadyToRead();
 
     // Wait for sendThread to join
-    BOOST_CHECK_NO_THROW(sendThread.Stop());
+    CHECK_NOTHROW(sendThread.Stop());
 
     // Check that the profiling state is still "WaitingForAck"
-    BOOST_TEST((profilingStateMachine.GetCurrentState() == ProfilingState::WaitingForAck));
+    CHECK((profilingStateMachine.GetCurrentState() == ProfilingState::WaitingForAck));
 
     // Check that the buffer contains at least one Stream Metadata packet and no other packets
     const auto writtenDataSize = mockProfilingConnection.GetWrittenDataSize();
 
-    BOOST_TEST(writtenDataSize >= 1u);
-    BOOST_TEST(mockProfilingConnection.CheckForPacket(
+    CHECK(writtenDataSize >= 1u);
+    CHECK(mockProfilingConnection.CheckForPacket(
                   {PacketType::StreamMetaData, streamMetadataPacketsize}) == writtenDataSize);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
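For reference, the Boost.Test-to-doctest macro mapping applied throughout these files is sketched below as a minimal standalone example; the Widget type and the "WidgetTests" suite are hypothetical, used only to illustrate the pattern, and are not part of ArmNN or of this patch.

// Minimal sketch of the Boost.Test -> doctest mapping used in this conversion.
// Widget is a made-up type for illustration only.
#define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN
#include <doctest/doctest.h>

#include <stdexcept>

struct Widget
{
    bool IsOpen() const { return true; }
    void Close()        { throw std::runtime_error("already closed"); }
};

// BOOST_AUTO_TEST_SUITE(Name) ... BOOST_AUTO_TEST_SUITE_END()  ->  TEST_SUITE("Name") { ... }
TEST_SUITE("WidgetTests")
{
// BOOST_AUTO_TEST_CASE(Name)  ->  TEST_CASE("Name")
TEST_CASE("WidgetTest")
{
    Widget widget;
    CHECK(widget.IsOpen());                              // was BOOST_TEST / BOOST_CHECK
    CHECK_NOTHROW(widget.IsOpen());                      // was BOOST_CHECK_NO_THROW
    CHECK_THROWS_AS(widget.Close(), std::runtime_error); // was BOOST_CHECK_THROW
}
}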
diff --git a/src/profiling/test/SendTimelinePacketTests.cpp b/src/profiling/test/SendTimelinePacketTests.cpp
index 244f23d..dd856d8 100644
--- a/src/profiling/test/SendTimelinePacketTests.cpp
+++ b/src/profiling/test/SendTimelinePacketTests.cpp
@@ -15,16 +15,16 @@
 
 #include <common/include/SwTrace.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 #include <functional>
 #include <Runtime.hpp>
 
 using namespace armnn::profiling;
 
-BOOST_AUTO_TEST_SUITE(SendTimelinePacketTests)
-
-BOOST_AUTO_TEST_CASE(SendTimelineMessageDirectoryPackageTest)
+TEST_SUITE("SendTimelinePacketTests")
+{
+TEST_CASE("SendTimelineMessageDirectoryPackageTest")
 {
     MockBufferManager mockBuffer(512);
     TimelinePacketWriterFactory timelinePacketWriterFactory(mockBuffer);
@@ -47,111 +47,111 @@
     uint32_t packetType   = (packetHeaderWord0 >> 16) & 0x00000007;
     uint32_t streamId     = (packetHeaderWord0 >>  0) & 0x00000007;
 
-    BOOST_CHECK(packetFamily == 1);
-    BOOST_CHECK(packetClass  == 0);
-    BOOST_CHECK(packetType   == 0);
-    BOOST_CHECK(streamId     == 0);
+    CHECK(packetFamily == 1);
+    CHECK(packetClass  == 0);
+    CHECK(packetType   == 0);
+    CHECK(streamId     == 0);
 
     offset += uint32_t_size;
     uint32_t packetHeaderWord1 = ReadUint32(packetBuffer, offset);
     uint32_t sequenceNumbered = (packetHeaderWord1 >> 24) & 0x00000001;
     uint32_t dataLength       = (packetHeaderWord1 >>  0) & 0x00FFFFFF;
-    BOOST_CHECK(sequenceNumbered ==  0);
-    BOOST_CHECK(dataLength       == 443);
+    CHECK(sequenceNumbered ==  0);
+    CHECK(dataLength       == 443);
 
     offset += uint32_t_size;
     uint8_t readStreamVersion = ReadUint8(packetBuffer, offset);
-    BOOST_CHECK(readStreamVersion == 4);
+    CHECK(readStreamVersion == 4);
     offset += uint8_t_size;
     uint8_t readPointerBytes = ReadUint8(packetBuffer, offset);
-    BOOST_CHECK(readPointerBytes == uint64_t_size);
+    CHECK(readPointerBytes == uint64_t_size);
     offset += uint8_t_size;
     uint8_t readThreadIdBytes = ReadUint8(packetBuffer, offset);
-    BOOST_CHECK(readThreadIdBytes == ThreadIdSize);
+    CHECK(readThreadIdBytes == ThreadIdSize);
 
     offset += uint8_t_size;
     uint32_t DeclCount = ReadUint32(packetBuffer, offset);
-    BOOST_CHECK(DeclCount == 5);
+    CHECK(DeclCount == 5);
 
     offset += uint32_t_size;
     arm::pipe::SwTraceMessage swTraceMessage = arm::pipe::ReadSwTraceMessage(packetBuffer->GetReadableData(),
                                                                              offset,
                                                                              packetBuffer->GetSize());
 
-    BOOST_CHECK(swTraceMessage.m_Id == 0);
-    BOOST_CHECK(swTraceMessage.m_Name == "declareLabel");
-    BOOST_CHECK(swTraceMessage.m_UiName == "declare label");
-    BOOST_CHECK(swTraceMessage.m_ArgTypes.size() == 2);
-    BOOST_CHECK(swTraceMessage.m_ArgTypes[0] == 'p');
-    BOOST_CHECK(swTraceMessage.m_ArgTypes[1] == 's');
-    BOOST_CHECK(swTraceMessage.m_ArgNames.size() == 2);
-    BOOST_CHECK(swTraceMessage.m_ArgNames[0] == "guid");
-    BOOST_CHECK(swTraceMessage.m_ArgNames[1] == "value");
+    CHECK(swTraceMessage.m_Id == 0);
+    CHECK(swTraceMessage.m_Name == "declareLabel");
+    CHECK(swTraceMessage.m_UiName == "declare label");
+    CHECK(swTraceMessage.m_ArgTypes.size() == 2);
+    CHECK(swTraceMessage.m_ArgTypes[0] == 'p');
+    CHECK(swTraceMessage.m_ArgTypes[1] == 's');
+    CHECK(swTraceMessage.m_ArgNames.size() == 2);
+    CHECK(swTraceMessage.m_ArgNames[0] == "guid");
+    CHECK(swTraceMessage.m_ArgNames[1] == "value");
 
     swTraceMessage = arm::pipe::ReadSwTraceMessage(packetBuffer->GetReadableData(),
                                                    offset,
                                                    packetBuffer->GetSize());
 
-    BOOST_CHECK(swTraceMessage.m_Id == 1);
-    BOOST_CHECK(swTraceMessage.m_Name == "declareEntity");
-    BOOST_CHECK(swTraceMessage.m_UiName == "declare entity");
-    BOOST_CHECK(swTraceMessage.m_ArgTypes.size() == 1);
-    BOOST_CHECK(swTraceMessage.m_ArgTypes[0] == 'p');
-    BOOST_CHECK(swTraceMessage.m_ArgNames.size() == 1);
-    BOOST_CHECK(swTraceMessage.m_ArgNames[0] == "guid");
+    CHECK(swTraceMessage.m_Id == 1);
+    CHECK(swTraceMessage.m_Name == "declareEntity");
+    CHECK(swTraceMessage.m_UiName == "declare entity");
+    CHECK(swTraceMessage.m_ArgTypes.size() == 1);
+    CHECK(swTraceMessage.m_ArgTypes[0] == 'p');
+    CHECK(swTraceMessage.m_ArgNames.size() == 1);
+    CHECK(swTraceMessage.m_ArgNames[0] == "guid");
 
     swTraceMessage = arm::pipe::ReadSwTraceMessage(packetBuffer->GetReadableData(),
                                                    offset,
                                                    packetBuffer->GetSize());
 
-    BOOST_CHECK(swTraceMessage.m_Id == 2);
-    BOOST_CHECK(swTraceMessage.m_Name == "declareEventClass");
-    BOOST_CHECK(swTraceMessage.m_UiName == "declare event class");
-    BOOST_CHECK(swTraceMessage.m_ArgTypes.size() == 2);
-    BOOST_CHECK(swTraceMessage.m_ArgTypes[0] == 'p');
-    BOOST_CHECK(swTraceMessage.m_ArgTypes[1] == 'p');
-    BOOST_CHECK(swTraceMessage.m_ArgNames.size() == 2);
-    BOOST_CHECK(swTraceMessage.m_ArgNames[0] == "guid");
-    BOOST_CHECK(swTraceMessage.m_ArgNames[1] == "nameGuid");
+    CHECK(swTraceMessage.m_Id == 2);
+    CHECK(swTraceMessage.m_Name == "declareEventClass");
+    CHECK(swTraceMessage.m_UiName == "declare event class");
+    CHECK(swTraceMessage.m_ArgTypes.size() == 2);
+    CHECK(swTraceMessage.m_ArgTypes[0] == 'p');
+    CHECK(swTraceMessage.m_ArgTypes[1] == 'p');
+    CHECK(swTraceMessage.m_ArgNames.size() == 2);
+    CHECK(swTraceMessage.m_ArgNames[0] == "guid");
+    CHECK(swTraceMessage.m_ArgNames[1] == "nameGuid");
 
     swTraceMessage = arm::pipe::ReadSwTraceMessage(packetBuffer->GetReadableData(),
                                                    offset,
                                                    packetBuffer->GetSize());
 
-    BOOST_CHECK(swTraceMessage.m_Id == 3);
-    BOOST_CHECK(swTraceMessage.m_Name == "declareRelationship");
-    BOOST_CHECK(swTraceMessage.m_UiName == "declare relationship");
-    BOOST_CHECK(swTraceMessage.m_ArgTypes.size() == 5);
-    BOOST_CHECK(swTraceMessage.m_ArgTypes[0] == 'I');
-    BOOST_CHECK(swTraceMessage.m_ArgTypes[1] == 'p');
-    BOOST_CHECK(swTraceMessage.m_ArgTypes[2] == 'p');
-    BOOST_CHECK(swTraceMessage.m_ArgTypes[3] == 'p');
-    BOOST_CHECK(swTraceMessage.m_ArgTypes[4] == 'p');
-    BOOST_CHECK(swTraceMessage.m_ArgNames.size() == 5);
-    BOOST_CHECK(swTraceMessage.m_ArgNames[0] == "relationshipType");
-    BOOST_CHECK(swTraceMessage.m_ArgNames[1] == "relationshipGuid");
-    BOOST_CHECK(swTraceMessage.m_ArgNames[2] == "headGuid");
-    BOOST_CHECK(swTraceMessage.m_ArgNames[3] == "tailGuid");
-    BOOST_CHECK(swTraceMessage.m_ArgNames[4] == "attributeGuid");
+    CHECK(swTraceMessage.m_Id == 3);
+    CHECK(swTraceMessage.m_Name == "declareRelationship");
+    CHECK(swTraceMessage.m_UiName == "declare relationship");
+    CHECK(swTraceMessage.m_ArgTypes.size() == 5);
+    CHECK(swTraceMessage.m_ArgTypes[0] == 'I');
+    CHECK(swTraceMessage.m_ArgTypes[1] == 'p');
+    CHECK(swTraceMessage.m_ArgTypes[2] == 'p');
+    CHECK(swTraceMessage.m_ArgTypes[3] == 'p');
+    CHECK(swTraceMessage.m_ArgTypes[4] == 'p');
+    CHECK(swTraceMessage.m_ArgNames.size() == 5);
+    CHECK(swTraceMessage.m_ArgNames[0] == "relationshipType");
+    CHECK(swTraceMessage.m_ArgNames[1] == "relationshipGuid");
+    CHECK(swTraceMessage.m_ArgNames[2] == "headGuid");
+    CHECK(swTraceMessage.m_ArgNames[3] == "tailGuid");
+    CHECK(swTraceMessage.m_ArgNames[4] == "attributeGuid");
 
     swTraceMessage = arm::pipe::ReadSwTraceMessage(packetBuffer->GetReadableData(),
                                                    offset,
                                                    packetBuffer->GetSize());
 
-    BOOST_CHECK(swTraceMessage.m_Id == 4);
-    BOOST_CHECK(swTraceMessage.m_Name == "declareEvent");
-    BOOST_CHECK(swTraceMessage.m_UiName == "declare event");
-    BOOST_CHECK(swTraceMessage.m_ArgTypes.size() == 3);
-    BOOST_CHECK(swTraceMessage.m_ArgTypes[0] == '@');
-    BOOST_CHECK(swTraceMessage.m_ArgTypes[1] == 't');
-    BOOST_CHECK(swTraceMessage.m_ArgTypes[2] == 'p');
-    BOOST_CHECK(swTraceMessage.m_ArgNames.size() == 3);
-    BOOST_CHECK(swTraceMessage.m_ArgNames[0] == "timestamp");
-    BOOST_CHECK(swTraceMessage.m_ArgNames[1] == "threadId");
-    BOOST_CHECK(swTraceMessage.m_ArgNames[2] == "eventGuid");
+    CHECK(swTraceMessage.m_Id == 4);
+    CHECK(swTraceMessage.m_Name == "declareEvent");
+    CHECK(swTraceMessage.m_UiName == "declare event");
+    CHECK(swTraceMessage.m_ArgTypes.size() == 3);
+    CHECK(swTraceMessage.m_ArgTypes[0] == '@');
+    CHECK(swTraceMessage.m_ArgTypes[1] == 't');
+    CHECK(swTraceMessage.m_ArgTypes[2] == 'p');
+    CHECK(swTraceMessage.m_ArgNames.size() == 3);
+    CHECK(swTraceMessage.m_ArgNames[0] == "timestamp");
+    CHECK(swTraceMessage.m_ArgNames[1] == "threadId");
+    CHECK(swTraceMessage.m_ArgNames[2] == "eventGuid");
 }
 
-BOOST_AUTO_TEST_CASE(SendTimelineEntityWithEventClassPacketTest)
+TEST_CASE("SendTimelineEntityWithEventClassPacketTest")
 {
     MockBufferManager bufferManager(40);
     TimelinePacketWriterFactory timelinePacketWriterFactory(bufferManager);
@@ -184,10 +184,10 @@
     uint32_t entityBinaryPacketType         = (entityBinaryPacketHeaderWord0 >> 16) & 0x00000007;
     uint32_t entityBinaryPacketStreamId     = (entityBinaryPacketHeaderWord0 >>  0) & 0x00000007;
 
-    BOOST_CHECK(entityBinaryPacketFamily       == 1);
-    BOOST_CHECK(entityBinaryPacketClass        == 0);
-    BOOST_CHECK(entityBinaryPacketType         == 1);
-    BOOST_CHECK(entityBinaryPacketStreamId     == 0);
+    CHECK(entityBinaryPacketFamily       == 1);
+    CHECK(entityBinaryPacketClass        == 0);
+    CHECK(entityBinaryPacketType         == 1);
+    CHECK(entityBinaryPacketStreamId     == 0);
 
     offset += uint32_t_size;
 
@@ -196,40 +196,40 @@
     uint32_t entityBinaryPacketSequenceNumbered = (entityBinaryPacketHeaderWord1 >> 24) & 0x00000001;
     uint32_t entityBinaryPacketDataLength       = (entityBinaryPacketHeaderWord1 >>  0) & 0x00FFFFFF;
 
-    BOOST_CHECK(entityBinaryPacketSequenceNumbered == 0);
-    BOOST_CHECK(entityBinaryPacketDataLength       == 32);
+    CHECK(entityBinaryPacketSequenceNumbered == 0);
+    CHECK(entityBinaryPacketDataLength       == 32);
 
     // Check the decl_id
     offset += uint32_t_size;
     uint32_t entitytDecId = ReadUint32(packetBuffer, offset);
 
-    BOOST_CHECK(entitytDecId == uint32_t(1));
+    CHECK(entitytDecId == uint32_t(1));
 
     // Check the profiling GUID
     offset += uint32_t_size;
     uint64_t readProfilingGuid = ReadUint64(packetBuffer, offset);
 
-    BOOST_CHECK(readProfilingGuid == entityBinaryPacketProfilingGuid);
+    CHECK(readProfilingGuid == entityBinaryPacketProfilingGuid);
 
     // Reading TimelineEventClassBinaryPacket
     offset += uint64_t_size;
 
     uint32_t eventClassDeclId = ReadUint32(packetBuffer, offset);
-    BOOST_CHECK(eventClassDeclId == uint32_t(2));
+    CHECK(eventClassDeclId == uint32_t(2));
 
     // Check the profiling GUID
     offset += uint32_t_size;
     readProfilingGuid = ReadUint64(packetBuffer, offset);
-    BOOST_CHECK(readProfilingGuid == eventClassBinaryPacketProfilingGuid);
+    CHECK(readProfilingGuid == eventClassBinaryPacketProfilingGuid);
 
     offset += uint64_t_size;
     uint64_t readEventClassNameGuid = ReadUint64(packetBuffer, offset);
-    BOOST_CHECK(readEventClassNameGuid == eventClassBinaryPacketNameGuid);
+    CHECK(readEventClassNameGuid == eventClassBinaryPacketNameGuid);
 
     bufferManager.MarkRead(packetBuffer);
 }
 
-BOOST_AUTO_TEST_CASE(SendEventClassAfterTimelineEntityPacketTest)
+TEST_CASE("SendEventClassAfterTimelineEntityPacketTest")
 {
     unsigned int uint32_t_size = sizeof(uint32_t);
     unsigned int uint64_t_size = sizeof(uint64_t);
@@ -258,29 +258,29 @@
     uint32_t entityBinaryPacketType   = (entityBinaryPacketHeaderWord0 >> 16) & 0x00000007;
     uint32_t entityBinaryPacketStreamId     = (entityBinaryPacketHeaderWord0 >>  0) & 0x00000007;
 
-    BOOST_CHECK(entityBinaryPacketFamily == 1);
-    BOOST_CHECK(entityBinaryPacketClass  == 0);
-    BOOST_CHECK(entityBinaryPacketType   == 1);
-    BOOST_CHECK(entityBinaryPacketStreamId     == 0);
+    CHECK(entityBinaryPacketFamily == 1);
+    CHECK(entityBinaryPacketClass  == 0);
+    CHECK(entityBinaryPacketType   == 1);
+    CHECK(entityBinaryPacketStreamId     == 0);
 
     offset += uint32_t_size;
     uint32_t entityBinaryPacketHeaderWord1 = ReadUint32(packetBuffer, offset);
     uint32_t entityBinaryPacketSequenceNumbered = (entityBinaryPacketHeaderWord1 >> 24) & 0x00000001;
     uint32_t entityBinaryPacketDataLength       = (entityBinaryPacketHeaderWord1 >>  0) & 0x00FFFFFF;
-    BOOST_CHECK(entityBinaryPacketSequenceNumbered == 0);
-    BOOST_CHECK(entityBinaryPacketDataLength       == 12);
+    CHECK(entityBinaryPacketSequenceNumbered == 0);
+    CHECK(entityBinaryPacketDataLength       == 12);
 
     // Check the decl_id
     offset += uint32_t_size;
     uint32_t entitytDecId = ReadUint32(packetBuffer, offset);
 
-    BOOST_CHECK(entitytDecId == uint32_t(1));
+    CHECK(entitytDecId == uint32_t(1));
 
     // Check the profiling GUID
     offset += uint32_t_size;
     uint64_t readProfilingGuid = ReadUint64(packetBuffer, offset);
 
-    BOOST_CHECK(readProfilingGuid == entityBinaryPacketProfilingGuid);
+    CHECK(readProfilingGuid == entityBinaryPacketProfilingGuid);
 
     bufferManager.MarkRead(packetBuffer);
 
@@ -306,30 +306,30 @@
     uint32_t eventClassBinaryPacketType   = (eventClassBinaryPacketHeaderWord0 >> 16) & 0x00000007;
     uint32_t eventClassBinaryPacketStreamId     = (eventClassBinaryPacketHeaderWord0 >>  0) & 0x00000007;
 
-    BOOST_CHECK(eventClassBinaryPacketFamily == 1);
-    BOOST_CHECK(eventClassBinaryPacketClass  == 0);
-    BOOST_CHECK(eventClassBinaryPacketType   == 1);
-    BOOST_CHECK(eventClassBinaryPacketStreamId     == 0);
+    CHECK(eventClassBinaryPacketFamily == 1);
+    CHECK(eventClassBinaryPacketClass  == 0);
+    CHECK(eventClassBinaryPacketType   == 1);
+    CHECK(eventClassBinaryPacketStreamId     == 0);
 
     offset += uint32_t_size;
     uint32_t eventClassBinaryPacketHeaderWord1 = ReadUint32(packetBuffer, offset);
     uint32_t eventClassBinaryPacketSequenceNumbered = (eventClassBinaryPacketHeaderWord1 >> 24) & 0x00000001;
     uint32_t eventClassBinaryPacketDataLength       = (eventClassBinaryPacketHeaderWord1 >>  0) & 0x00FFFFFF;
-    BOOST_CHECK(eventClassBinaryPacketSequenceNumbered == 0);
-    BOOST_CHECK(eventClassBinaryPacketDataLength       == 20);
+    CHECK(eventClassBinaryPacketSequenceNumbered == 0);
+    CHECK(eventClassBinaryPacketDataLength       == 20);
 
     offset += uint32_t_size;
     uint32_t eventClassDeclId = ReadUint32(packetBuffer, offset);
-    BOOST_CHECK(eventClassDeclId == uint32_t(2));
+    CHECK(eventClassDeclId == uint32_t(2));
 
     // Check the profiling GUID
     offset += uint32_t_size;
     readProfilingGuid = ReadUint64(packetBuffer, offset);
-    BOOST_CHECK(readProfilingGuid == eventClassBinaryPacketProfilingGuid);
+    CHECK(readProfilingGuid == eventClassBinaryPacketProfilingGuid);
 
     offset += uint64_t_size;
     uint64_t readEventClassNameGuid = ReadUint64(packetBuffer, offset);
-    BOOST_CHECK(readEventClassNameGuid == eventClassBinaryPacketNameGuid);
+    CHECK(readEventClassNameGuid == eventClassBinaryPacketNameGuid);
 
     bufferManager.MarkRead(packetBuffer);
 
@@ -355,51 +355,51 @@
     uint32_t eventBinaryPacketType   = (eventBinaryPacketHeaderWord0 >> 16) & 0x00000007;
     uint32_t eventBinaryPacketStreamId     = (eventBinaryPacketHeaderWord0 >>  0) & 0x00000007;
 
-    BOOST_CHECK(eventBinaryPacketFamily == 1);
-    BOOST_CHECK(eventBinaryPacketClass  == 0);
-    BOOST_CHECK(eventBinaryPacketType   == 1);
-    BOOST_CHECK(eventBinaryPacketStreamId     == 0);
+    CHECK(eventBinaryPacketFamily == 1);
+    CHECK(eventBinaryPacketClass  == 0);
+    CHECK(eventBinaryPacketType   == 1);
+    CHECK(eventBinaryPacketStreamId     == 0);
 
     offset += uint32_t_size;
     uint32_t eventBinaryPacketHeaderWord1 = ReadUint32(packetBuffer, offset);
     uint32_t eventBinaryPacketSequenceNumbered = (eventBinaryPacketHeaderWord1 >> 24) & 0x00000001;
     uint32_t eventBinaryPacketDataLength       = (eventBinaryPacketHeaderWord1 >>  0) & 0x00FFFFFF;
-    BOOST_CHECK(eventBinaryPacketSequenceNumbered == 0);
-    BOOST_CHECK(eventBinaryPacketDataLength == 20 + ThreadIdSize);
+    CHECK(eventBinaryPacketSequenceNumbered == 0);
+    CHECK(eventBinaryPacketDataLength == 20 + ThreadIdSize);
 
     // Check the decl_id
     offset += uint32_t_size;
     uint32_t eventDeclId = ReadUint32(packetBuffer, offset);
-    BOOST_CHECK(eventDeclId == 4);
+    CHECK(eventDeclId == 4);
 
     // Check the timestamp
     offset += uint32_t_size;
     uint64_t eventTimestamp = ReadUint64(packetBuffer, offset);
-    BOOST_CHECK(eventTimestamp == timestamp);
+    CHECK(eventTimestamp == timestamp);
 
     // Check the thread id
     offset += uint64_t_size;
     std::vector<uint8_t> readThreadId(ThreadIdSize, 0);
     ReadBytes(packetBuffer, offset, ThreadIdSize, readThreadId.data());
-    BOOST_CHECK(readThreadId == threadId);
+    CHECK(readThreadId == threadId);
 
     // Check the profiling GUID
     offset += ThreadIdSize;
     readProfilingGuid = ReadUint64(packetBuffer, offset);
-    BOOST_CHECK(readProfilingGuid == eventProfilingGuid);
+    CHECK(readProfilingGuid == eventProfilingGuid);
 }
 
-BOOST_AUTO_TEST_CASE(SendTimelinePacketTests2)
+TEST_CASE("SendTimelinePacketTests2")
 {
     MockBufferManager bufferManager(40);
     TimelinePacketWriterFactory timelinePacketWriterFactory(bufferManager);
     std::unique_ptr<ISendTimelinePacket> sendTimelinePacket = timelinePacketWriterFactory.GetSendTimelinePacket();
 
-    BOOST_CHECK_THROW(sendTimelinePacket->SendTimelineMessageDirectoryPackage(),
+    CHECK_THROWS_AS(sendTimelinePacket->SendTimelineMessageDirectoryPackage(),
                       armnn::RuntimeException);
 }
 
-BOOST_AUTO_TEST_CASE(SendTimelinePacketTests3)
+TEST_CASE("SendTimelinePacketTests3")
 {
     MockBufferManager bufferManager(512);
     TimelinePacketWriterFactory timelinePacketWriterFactory(bufferManager);
@@ -418,12 +418,12 @@
     // Send TimelineEventClassBinaryPacket
     const uint64_t eventClassBinaryPacketProfilingGuid = 789123u;
     const uint64_t eventClassBinaryPacketNameGuid = 8845u;
-    BOOST_CHECK_THROW(sendTimelinePacket->SendTimelineEventClassBinaryPacket(
+    CHECK_THROWS_AS(sendTimelinePacket->SendTimelineEventClassBinaryPacket(
                       eventClassBinaryPacketProfilingGuid, eventClassBinaryPacketNameGuid),
                       armnn::profiling::BufferExhaustion);
 }
 
-BOOST_AUTO_TEST_CASE(GetGuidsFromProfilingService)
+TEST_CASE("GetGuidsFromProfilingService")
 {
     armnn::IRuntime::CreationOptions options;
     options.m_ProfilingOptions.m_EnableProfiling = true;
@@ -435,16 +435,16 @@
     std::hash<std::string> hasher;
     uint64_t hash = static_cast<uint64_t>(hasher("dummy"));
     ProfilingStaticGuid expectedStaticValue(hash | MIN_STATIC_GUID);
-    BOOST_CHECK(staticGuid == expectedStaticValue);
+    CHECK(staticGuid == expectedStaticValue);
     ProfilingDynamicGuid dynamicGuid = profilingService.GetNextGuid();
     uint64_t dynamicGuidValue = static_cast<uint64_t>(dynamicGuid);
     ++dynamicGuidValue;
     ProfilingDynamicGuid expectedDynamicValue(dynamicGuidValue);
     dynamicGuid = profilingService.GetNextGuid();
-    BOOST_CHECK(dynamicGuid == expectedDynamicValue);
+    CHECK(dynamicGuid == expectedDynamicValue);
 }
 
-BOOST_AUTO_TEST_CASE(GetTimelinePackerWriterFromProfilingService)
+TEST_CASE("GetTimelinePackerWriterFromProfilingService")
 {
     armnn::IRuntime::CreationOptions::ExternalProfilingOptions options;
     options.m_EnableProfiling = true;
@@ -452,36 +452,36 @@
     profilingService.ResetExternalProfilingOptions(options, true);
 
     std::unique_ptr<ISendTimelinePacket> writer = profilingService.GetSendTimelinePacket();
-    BOOST_CHECK(writer != nullptr);
+    CHECK(writer != nullptr);
 }
 
-BOOST_AUTO_TEST_CASE(CheckStaticGuidsAndEvents)
+TEST_CASE("CheckStaticGuidsAndEvents")
 {
-    BOOST_CHECK("name" == LabelsAndEventClasses::NAME_LABEL);
-    BOOST_CHECK("type" == LabelsAndEventClasses::TYPE_LABEL);
-    BOOST_CHECK("index" == LabelsAndEventClasses::INDEX_LABEL);
+    CHECK("name" == LabelsAndEventClasses::NAME_LABEL);
+    CHECK("type" == LabelsAndEventClasses::TYPE_LABEL);
+    CHECK("index" == LabelsAndEventClasses::INDEX_LABEL);
 
     std::hash<std::string> hasher;
 
     uint64_t hash = static_cast<uint64_t>(hasher(LabelsAndEventClasses::NAME_LABEL));
     ProfilingStaticGuid expectedNameGuid(hash | MIN_STATIC_GUID);
-    BOOST_CHECK(LabelsAndEventClasses::NAME_GUID == expectedNameGuid);
+    CHECK(LabelsAndEventClasses::NAME_GUID == expectedNameGuid);
 
     hash = static_cast<uint64_t>(hasher(LabelsAndEventClasses::TYPE_LABEL));
     ProfilingStaticGuid expectedTypeGuid(hash | MIN_STATIC_GUID);
-    BOOST_CHECK(LabelsAndEventClasses::TYPE_GUID == expectedTypeGuid);
+    CHECK(LabelsAndEventClasses::TYPE_GUID == expectedTypeGuid);
 
     hash = static_cast<uint64_t>(hasher(LabelsAndEventClasses::INDEX_LABEL));
     ProfilingStaticGuid expectedIndexGuid(hash | MIN_STATIC_GUID);
-    BOOST_CHECK(LabelsAndEventClasses::INDEX_GUID == expectedIndexGuid);
+    CHECK(LabelsAndEventClasses::INDEX_GUID == expectedIndexGuid);
 
     hash = static_cast<uint64_t>(hasher("ARMNN_PROFILING_SOL"));
     ProfilingStaticGuid expectedSol(hash | MIN_STATIC_GUID);
-    BOOST_CHECK(LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS == expectedSol);
+    CHECK(LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS == expectedSol);
 
     hash = static_cast<uint64_t>(hasher("ARMNN_PROFILING_EOL"));
     ProfilingStaticGuid expectedEol(hash | MIN_STATIC_GUID);
-    BOOST_CHECK(LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS == expectedEol);
+    CHECK(LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS == expectedEol);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
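The timeline packet tests above repeatedly decode the first packet header word with the same shift-and-mask pattern; the sketch below isolates that decoding, with the shifts and masks copied from the checks and an assumed sample header word (chosen so that family == 1 and the other fields are 0). It is illustrative only and not part of this patch.

// Sketch of the packet header word 0 decoding exercised by the tests above.
// The sample header word is an assumed value, not taken from a real packet.
#include <cassert>
#include <cstdint>

int main()
{
    const uint32_t packetHeaderWord0 = 1u << 26; // family = 1, class/type/streamId = 0

    const uint32_t packetFamily = (packetHeaderWord0 >> 26) & 0x0000003F;
    const uint32_t packetClass  = (packetHeaderWord0 >> 19) & 0x0000007F;
    const uint32_t packetType   = (packetHeaderWord0 >> 16) & 0x00000007;
    const uint32_t streamId     = (packetHeaderWord0 >>  0) & 0x00000007;

    assert(packetFamily == 1);
    assert(packetClass  == 0);
    assert(packetType   == 0);
    assert(streamId     == 0);
    return 0;
}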
diff --git a/src/profiling/test/TimelinePacketTests.cpp b/src/profiling/test/TimelinePacketTests.cpp
index 811918a..37a6f45 100644
--- a/src/profiling/test/TimelinePacketTests.cpp
+++ b/src/profiling/test/TimelinePacketTests.cpp
@@ -10,13 +10,13 @@
 
 #include <common/include/SwTrace.hpp>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 using namespace armnn::profiling;
 
-BOOST_AUTO_TEST_SUITE(TimelinePacketTests)
-
-BOOST_AUTO_TEST_CASE(TimelineLabelPacketTestNoBuffer)
+TEST_SUITE("TimelinePacketTests")
+{
+TEST_CASE("TimelineLabelPacketTestNoBuffer")
 {
     const uint64_t profilingGuid = 123456u;
     const std::string label = "some label";
@@ -26,11 +26,11 @@
                                                                  nullptr,
                                                                  512u,
                                                                  numberOfBytesWritten);
-    BOOST_CHECK(result == TimelinePacketStatus::BufferExhaustion);
-    BOOST_CHECK(numberOfBytesWritten == 0);
+    CHECK(result == TimelinePacketStatus::BufferExhaustion);
+    CHECK(numberOfBytesWritten == 0);
 }
 
-BOOST_AUTO_TEST_CASE(TimelineLabelPacketTestBufferExhaustionZeroValue)
+TEST_CASE("TimelineLabelPacketTestBufferExhaustionZeroValue")
 {
     std::vector<unsigned char> buffer(512, 0);
 
@@ -42,11 +42,11 @@
                                                                  buffer.data(),
                                                                  0,
                                                                  numberOfBytesWritten);
-    BOOST_CHECK(result == TimelinePacketStatus::BufferExhaustion);
-    BOOST_CHECK(numberOfBytesWritten == 0);
+    CHECK(result == TimelinePacketStatus::BufferExhaustion);
+    CHECK(numberOfBytesWritten == 0);
 }
 
-BOOST_AUTO_TEST_CASE(TimelineLabelPacketTestBufferExhaustionFixedValue)
+TEST_CASE("TimelineLabelPacketTestBufferExhaustionFixedValue")
 {
     std::vector<unsigned char> buffer(10, 0);
 
@@ -58,11 +58,11 @@
                                                                  buffer.data(),
                                                                  armnn::numeric_cast<unsigned int>(buffer.size()),
                                                                  numberOfBytesWritten);
-    BOOST_CHECK(result == TimelinePacketStatus::BufferExhaustion);
-    BOOST_CHECK(numberOfBytesWritten == 0);
+    CHECK(result == TimelinePacketStatus::BufferExhaustion);
+    CHECK(numberOfBytesWritten == 0);
 }
 
-BOOST_AUTO_TEST_CASE(TimelineLabelPacketTestInvalidLabel)
+TEST_CASE("TimelineLabelPacketTestInvalidLabel")
 {
     std::vector<unsigned char> buffer(512, 0);
 
@@ -74,11 +74,11 @@
                                                                  buffer.data(),
                                                                  armnn::numeric_cast<unsigned int>(buffer.size()),
                                                                  numberOfBytesWritten);
-    BOOST_CHECK(result == TimelinePacketStatus::Error);
-    BOOST_CHECK(numberOfBytesWritten == 0);
+    CHECK(result == TimelinePacketStatus::Error);
+    CHECK(numberOfBytesWritten == 0);
 }
 
-BOOST_AUTO_TEST_CASE(TimelineLabelPacketTestSingleConstructionOfData)
+TEST_CASE("TimelineLabelPacketTestSingleConstructionOfData")
 {
     std::vector<unsigned char> buffer(512, 0);
 
@@ -90,8 +90,8 @@
                                                                  buffer.data(),
                                                                  armnn::numeric_cast<unsigned int>(buffer.size()),
                                                                  numberOfBytesWritten);
-    BOOST_CHECK(result == TimelinePacketStatus::Ok);
-    BOOST_CHECK(numberOfBytesWritten == 28);
+    CHECK(result == TimelinePacketStatus::Ok);
+    CHECK(numberOfBytesWritten == 28);
 
     unsigned int uint32_t_size = sizeof(uint32_t);
     unsigned int uint64_t_size = sizeof(uint64_t);
@@ -99,28 +99,28 @@
     // Check the packet header
     unsigned int offset = 0;
     uint32_t decl_Id = ReadUint32(buffer.data(), offset);
-    BOOST_CHECK(decl_Id == uint32_t(0));
+    CHECK(decl_Id == uint32_t(0));
 
     // Check the profiling GUID
     offset += uint32_t_size;
     uint64_t readProfilingGuid = ReadUint64(buffer.data(), offset);
-    BOOST_CHECK(readProfilingGuid == profilingGuid);
+    CHECK(readProfilingGuid == profilingGuid);
 
     // Check the SWTrace label
     offset += uint64_t_size;
     uint32_t swTraceLabelLength = ReadUint32(buffer.data(), offset);
-    BOOST_CHECK(swTraceLabelLength == 11); // Label length including the null-terminator
+    CHECK(swTraceLabelLength == 11); // Label length including the null-terminator
 
     offset += uint32_t_size;
-    BOOST_CHECK(std::memcmp(buffer.data() + offset,        // Offset to the label in the buffer
+    CHECK(std::memcmp(buffer.data() + offset,              // Offset to the label in the buffer
                             label.data(),                  // The original label
                             swTraceLabelLength - 1) == 0); // The length of the label
 
     offset += swTraceLabelLength * uint32_t_size;
-    BOOST_CHECK(buffer[offset] == '\0'); // The null-terminator at the end of the SWTrace label
+    CHECK(buffer[offset] == '\0'); // The null-terminator at the end of the SWTrace label
 }
 
-BOOST_AUTO_TEST_CASE(TimelineRelationshipPacketNullBufferTest)
+TEST_CASE("TimelineRelationshipPacketNullBufferTest")
 {
     ProfilingRelationshipType relationshipType = ProfilingRelationshipType::DataLink;
     const uint64_t relationshipGuid = 123456u;
@@ -136,11 +136,11 @@
                                                                   nullptr,
                                                                   512u,
                                                                   numberOfBytesWritten);
-    BOOST_CHECK(result == TimelinePacketStatus::BufferExhaustion);
-    BOOST_CHECK(numberOfBytesWritten == 0);
+    CHECK(result == TimelinePacketStatus::BufferExhaustion);
+    CHECK(numberOfBytesWritten == 0);
 }
 
-BOOST_AUTO_TEST_CASE(TimelineRelationshipPacketZeroBufferSizeTest)
+TEST_CASE("TimelineRelationshipPacketZeroBufferSizeTest")
 {
     std::vector<unsigned char> buffer(512, 0);
 
@@ -158,11 +158,11 @@
                                                                   buffer.data(),
                                                                   0,
                                                                   numberOfBytesWritten);
-    BOOST_CHECK(result == TimelinePacketStatus::BufferExhaustion);
-    BOOST_CHECK(numberOfBytesWritten == 0);
+    CHECK(result == TimelinePacketStatus::BufferExhaustion);
+    CHECK(numberOfBytesWritten == 0);
 }
 
-BOOST_AUTO_TEST_CASE(TimelineRelationshipPacketSmallBufferSizeTest)
+TEST_CASE("TimelineRelationshipPacketSmallBufferSizeTest")
 {
     std::vector<unsigned char> buffer(10, 0);
 
@@ -181,11 +181,11 @@
                                                              buffer.data(),
                                                              armnn::numeric_cast<unsigned int>(buffer.size()),
                                                              numberOfBytesWritten);
-    BOOST_CHECK(result == TimelinePacketStatus::BufferExhaustion);
-    BOOST_CHECK(numberOfBytesWritten == 0);
+    CHECK(result == TimelinePacketStatus::BufferExhaustion);
+    CHECK(numberOfBytesWritten == 0);
 }
 
-BOOST_AUTO_TEST_CASE(TimelineRelationshipPacketInvalidRelationTest)
+TEST_CASE("TimelineRelationshipPacketInvalidRelationTest")
 {
     std::vector<unsigned char> buffer(512, 0);
     ProfilingRelationshipType relationshipType = static_cast<ProfilingRelationshipType>(5);
@@ -196,7 +196,7 @@
 
     unsigned int numberOfBytesWritten = 789u;
 
-    BOOST_CHECK_THROW(WriteTimelineRelationshipBinary(relationshipType,
+    CHECK_THROWS_AS(WriteTimelineRelationshipBinary(relationshipType,
                                                       relationshipGuid,
                                                       headGuid,
                                                       tailGuid,
@@ -206,10 +206,10 @@
                                                       numberOfBytesWritten),
                       armnn::InvalidArgumentException);
 
-    BOOST_CHECK(numberOfBytesWritten == 0);
+    CHECK(numberOfBytesWritten == 0);
 }
 
-BOOST_AUTO_TEST_CASE(TimelineRelationshipPacketTestDataConstruction)
+TEST_CASE("TimelineRelationshipPacketTestDataConstruction")
 {
     std::vector<unsigned char> buffer(512, 0);
 
@@ -228,8 +228,8 @@
                                                              buffer.data(),
                                                              armnn::numeric_cast<unsigned int>(buffer.size()),
                                                              numberOfBytesWritten);
-    BOOST_CHECK(result == TimelinePacketStatus::Ok);
-    BOOST_CHECK(numberOfBytesWritten == 40);
+    CHECK(result == TimelinePacketStatus::Ok);
+    CHECK(numberOfBytesWritten == 40);
 
     unsigned int uint32_t_size = sizeof(uint32_t);
     unsigned int uint64_t_size = sizeof(uint64_t);
@@ -238,35 +238,35 @@
     unsigned int offset = 0;
     // Check the decl_id
     uint32_t readDeclId = ReadUint32(buffer.data(), offset);
-    BOOST_CHECK(readDeclId == 3);
+    CHECK(readDeclId == 3);
 
     // Check the relationship type
     offset += uint32_t_size;
     uint32_t readRelationshipType = ReadUint32(buffer.data(), offset);
-    BOOST_CHECK(readRelationshipType == 0);
+    CHECK(readRelationshipType == 0);
 
     // Check the relationship GUID
     offset += uint32_t_size;
     uint64_t readRelationshipGuid = ReadUint64(buffer.data(), offset);
-    BOOST_CHECK(readRelationshipGuid == relationshipGuid);
+    CHECK(readRelationshipGuid == relationshipGuid);
 
     // Check the head GUID
     offset += uint64_t_size;
     uint64_t readHeadGuid = ReadUint64(buffer.data(), offset);
-    BOOST_CHECK(readHeadGuid == headGuid);
+    CHECK(readHeadGuid == headGuid);
 
     // Check the tail GUID
     offset += uint64_t_size;
     uint64_t readTailGuid = ReadUint64(buffer.data(), offset);
-    BOOST_CHECK(readTailGuid == tailGuid);
+    CHECK(readTailGuid == tailGuid);
 
     // Check the attribute GUID
     offset += uint64_t_size;
     uint64_t readAttributeGuid = ReadUint64(buffer.data(), offset);
-    BOOST_CHECK(readAttributeGuid == attributeGuid);
+    CHECK(readAttributeGuid == attributeGuid);
 }
 
-BOOST_AUTO_TEST_CASE(TimelineRelationshipPacketExecutionLinkTestDataConstruction)
+TEST_CASE("TimelineRelationshipPacketExecutionLinkTestDataConstruction")
 {
     std::vector<unsigned char> buffer(512, 0);
 
@@ -285,44 +285,44 @@
                                                              buffer.data(),
                                                              armnn::numeric_cast<unsigned int>(buffer.size()),
                                                              numberOfBytesWritten);
-    BOOST_CHECK(result == TimelinePacketStatus::Ok);
-    BOOST_CHECK(numberOfBytesWritten == 40);
+    CHECK(result == TimelinePacketStatus::Ok);
+    CHECK(numberOfBytesWritten == 40);
 
     unsigned int uint32_t_size = sizeof(uint32_t);
     unsigned int uint64_t_size = sizeof(uint64_t);
 
     unsigned int offset = 0;
     uint32_t readDeclId = ReadUint32(buffer.data(), offset);
-    BOOST_CHECK(readDeclId == 3);
+    CHECK(readDeclId == 3);
 
     // Check the relationship type
     offset += uint32_t_size;
     uint32_t readRelationshipType = ReadUint32(buffer.data(), offset);
-    BOOST_CHECK(readRelationshipType == 1);
+    CHECK(readRelationshipType == 1);
 
     // Check the relationship GUID
     offset += uint32_t_size;
     uint64_t readRelationshipGuid = ReadUint64(buffer.data(), offset);
-    BOOST_CHECK(readRelationshipGuid == relationshipGuid);
+    CHECK(readRelationshipGuid == relationshipGuid);
 
     // Check the head GUID
     offset += uint64_t_size;
     uint64_t readHeadGuid = ReadUint64(buffer.data(), offset);
-    BOOST_CHECK(readHeadGuid == headGuid);
+    CHECK(readHeadGuid == headGuid);
 
     // Check the tail GUID
     offset += uint64_t_size;
     uint64_t readTailGuid = ReadUint64(buffer.data(), offset);
-    BOOST_CHECK(readTailGuid == tailGuid);
+    CHECK(readTailGuid == tailGuid);
 
     // Check the attribute GUID
     offset += uint64_t_size;
     uint64_t readAttributeGuid = ReadUint64(buffer.data(), offset);
-    BOOST_CHECK(readAttributeGuid == attributeGuid);
+    CHECK(readAttributeGuid == attributeGuid);
 }
 
 
-BOOST_AUTO_TEST_CASE(TimelineRelationshipPacketDataLinkTestDataConstruction)
+TEST_CASE("TimelineRelationshipPacketDataLinkTestDataConstruction")
 {
     std::vector<unsigned char> buffer(512, 0);
 
@@ -341,44 +341,44 @@
                                                              buffer.data(),
                                                              armnn::numeric_cast<unsigned int>(buffer.size()),
                                                              numberOfBytesWritten);
-    BOOST_CHECK(result == TimelinePacketStatus::Ok);
-    BOOST_CHECK(numberOfBytesWritten == 40);
+    CHECK(result == TimelinePacketStatus::Ok);
+    CHECK(numberOfBytesWritten == 40);
 
     unsigned int uint32_t_size = sizeof(uint32_t);
     unsigned int uint64_t_size = sizeof(uint64_t);
 
     unsigned int offset = 0;
     uint32_t readDeclId = ReadUint32(buffer.data(), offset);
-    BOOST_CHECK(readDeclId == 3);
+    CHECK(readDeclId == 3);
 
     // Check the relationship type
     offset += uint32_t_size;
     uint32_t readRelationshipType = ReadUint32(buffer.data(), offset);
-    BOOST_CHECK(readRelationshipType == 2);
+    CHECK(readRelationshipType == 2);
 
     // Check the relationship GUID
     offset += uint32_t_size;
     uint64_t readRelationshipGuid = ReadUint64(buffer.data(), offset);
-    BOOST_CHECK(readRelationshipGuid == relationshipGuid);
+    CHECK(readRelationshipGuid == relationshipGuid);
 
     // Check the head GUID
     offset += uint64_t_size;
     uint64_t readHeadGuid = ReadUint64(buffer.data(), offset);
-    BOOST_CHECK(readHeadGuid == headGuid);
+    CHECK(readHeadGuid == headGuid);
 
     // Check the tail GUID
     offset += uint64_t_size;
     uint64_t readTailGuid = ReadUint64(buffer.data(), offset);
-    BOOST_CHECK(readTailGuid == tailGuid);
+    CHECK(readTailGuid == tailGuid);
 
     // Check the attribute GUID
     offset += uint64_t_size;
     uint64_t readAttributeGuid = ReadUint64(buffer.data(), offset);
-    BOOST_CHECK(readAttributeGuid == attributeGuid);
+    CHECK(readAttributeGuid == attributeGuid);
 }
 
 
-BOOST_AUTO_TEST_CASE(TimelineRelationshipPacketLabelLinkTestDataConstruction)
+TEST_CASE("TimelineRelationshipPacketLabelLinkTestDataConstruction")
 {
     std::vector<unsigned char> buffer(512, 0);
 
@@ -397,8 +397,8 @@
                                                              buffer.data(),
                                                              armnn::numeric_cast<unsigned int>(buffer.size()),
                                                              numberOfBytesWritten);
-    BOOST_CHECK(result == TimelinePacketStatus::Ok);
-    BOOST_CHECK(numberOfBytesWritten == 40);
+    CHECK(result == TimelinePacketStatus::Ok);
+    CHECK(numberOfBytesWritten == 40);
 
     unsigned int uint32_t_size = sizeof(uint32_t);
     unsigned int uint64_t_size = sizeof(uint64_t);
@@ -406,45 +406,45 @@
     // Check the packet header
     unsigned int offset = 0;
     uint32_t readDeclId = ReadUint32(buffer.data(), offset);
-    BOOST_CHECK(readDeclId == 3);
+    CHECK(readDeclId == 3);
 
     // Check the relationship type
     offset += uint32_t_size;
     uint32_t readRelationshipType = ReadUint32(buffer.data(), offset);
-    BOOST_CHECK(readRelationshipType == 3);
+    CHECK(readRelationshipType == 3);
 
     // Check the relationship GUID
     offset += uint32_t_size;
     uint64_t readRelationshipGuid = ReadUint64(buffer.data(), offset);
-    BOOST_CHECK(readRelationshipGuid == relationshipGuid);
+    CHECK(readRelationshipGuid == relationshipGuid);
 
     // Check the head GUID
     offset += uint64_t_size;
     uint64_t readHeadGuid = ReadUint64(buffer.data(), offset);
-    BOOST_CHECK(readHeadGuid == headGuid);
+    CHECK(readHeadGuid == headGuid);
 
     // Check the tail GUID
     offset += uint64_t_size;
     uint64_t readTailGuid = ReadUint64(buffer.data(), offset);
-    BOOST_CHECK(readTailGuid == tailGuid);
+    CHECK(readTailGuid == tailGuid);
 
     // Check the attribute GUID
     offset += uint64_t_size;
     uint64_t readAttributeGuid = ReadUint64(buffer.data(), offset);
-    BOOST_CHECK(readAttributeGuid == attributeGuid);
+    CHECK(readAttributeGuid == attributeGuid);
 }
 
-BOOST_AUTO_TEST_CASE(TimelineMessageDirectoryPacketTestNoBuffer)
+TEST_CASE("TimelineMessageDirectoryPacketTestNoBuffer")
 {
     unsigned int numberOfBytesWritten = 789u;
     TimelinePacketStatus result = WriteTimelineMessageDirectoryPackage(nullptr,
                                                                        512u,
                                                                        numberOfBytesWritten);
-    BOOST_CHECK(result == TimelinePacketStatus::BufferExhaustion);
-    BOOST_CHECK(numberOfBytesWritten == 0);
+    CHECK(result == TimelinePacketStatus::BufferExhaustion);
+    CHECK(numberOfBytesWritten == 0);
 }
 
-BOOST_AUTO_TEST_CASE(TimelineMessageDirectoryPacketTestBufferExhausted)
+TEST_CASE("TimelineMessageDirectoryPacketTestBufferExhausted")
 {
     std::vector<unsigned char> buffer(512, 0);
 
@@ -452,20 +452,20 @@
     TimelinePacketStatus result = WriteTimelineMessageDirectoryPackage(buffer.data(),
                                                                        0,
                                                                        numberOfBytesWritten);
-    BOOST_CHECK(result == TimelinePacketStatus::BufferExhaustion);
-    BOOST_CHECK(numberOfBytesWritten == 0);
+    CHECK(result == TimelinePacketStatus::BufferExhaustion);
+    CHECK(numberOfBytesWritten == 0);
 }
 
-BOOST_AUTO_TEST_CASE(TimelineMessageDirectoryPacketTestFullConstruction)
+TEST_CASE("TimelineMessageDirectoryPacketTestFullConstruction")
 {
     std::vector<unsigned char> buffer(512, 0);
     unsigned int numberOfBytesWritten = 789u;
     TimelinePacketStatus result = WriteTimelineMessageDirectoryPackage(buffer.data(),
                                                                        armnn::numeric_cast<unsigned int>(buffer.size()),
                                                                        numberOfBytesWritten);
-    BOOST_CHECK(result == TimelinePacketStatus::Ok);
+    CHECK(result == TimelinePacketStatus::Ok);
 
-    BOOST_CHECK(numberOfBytesWritten == 451);
+    CHECK(numberOfBytesWritten == 451);
 
     unsigned int uint8_t_size  = sizeof(uint8_t);
     unsigned int uint32_t_size = sizeof(uint32_t);
@@ -478,38 +478,38 @@
     uint32_t packetClass  = (packetHeaderWord0 >> 19) & 0x0000007F;
     uint32_t packetType   = (packetHeaderWord0 >> 16) & 0x00000007;
     uint32_t streamId     = (packetHeaderWord0 >>  0) & 0x00000007;
-    BOOST_CHECK(packetFamily == 1);
-    BOOST_CHECK(packetClass  == 0);
-    BOOST_CHECK(packetType   == 0);
-    BOOST_CHECK(streamId     == 0);
+    CHECK(packetFamily == 1);
+    CHECK(packetClass  == 0);
+    CHECK(packetType   == 0);
+    CHECK(streamId     == 0);
 
     offset += uint32_t_size;
     uint32_t packetHeaderWord1 = ReadUint32(buffer.data(), offset);
     uint32_t sequenceNumbered = (packetHeaderWord1 >> 24) & 0x00000001;
     uint32_t dataLength       = (packetHeaderWord1 >>  0) & 0x00FFFFFF;
-    BOOST_CHECK(sequenceNumbered ==  0);
-    BOOST_CHECK(dataLength       == 443);
+    CHECK(sequenceNumbered ==  0);
+    CHECK(dataLength       == 443);
 
     // Check the stream header
     offset += uint32_t_size;
     uint8_t readStreamVersion = ReadUint8(buffer.data(), offset);
-    BOOST_CHECK(readStreamVersion == 4);
+    CHECK(readStreamVersion == 4);
     offset += uint8_t_size;
     uint8_t readPointerBytes = ReadUint8(buffer.data(), offset);
-    BOOST_CHECK(readPointerBytes == uint64_t_size);
+    CHECK(readPointerBytes == uint64_t_size);
     offset += uint8_t_size;
     uint8_t readThreadIdBytes = ReadUint8(buffer.data(), offset);
-    BOOST_CHECK(readThreadIdBytes == ThreadIdSize);
+    CHECK(readThreadIdBytes == ThreadIdSize);
 
     // Check the number of declarations
     offset += uint8_t_size;
     uint32_t declCount = ReadUint32(buffer.data(), offset);
-    BOOST_CHECK(declCount == 5);
+    CHECK(declCount == 5);
 
     // Check the decl_id
     offset += uint32_t_size;
     uint32_t readDeclId = ReadUint32(buffer.data(), offset);
-    BOOST_CHECK(readDeclId == 0);
+    CHECK(readDeclId == 0);
 
     // SWTrace "namestring" format
     // length of the string (first 4 bytes) + string + null terminator
@@ -517,11 +517,11 @@
     // Check the decl_name
     offset += uint32_t_size;
     uint32_t swTraceDeclNameLength = ReadUint32(buffer.data(), offset);
-    BOOST_CHECK(swTraceDeclNameLength == 13); // decl_name length including the null-terminator
+    CHECK(swTraceDeclNameLength == 13); // decl_name length including the null-terminator
 
     std::string label = "declareLabel";
     offset += uint32_t_size;
-    BOOST_CHECK(std::memcmp(buffer.data() + offset,           // Offset to the label in the buffer
+    CHECK(std::memcmp(buffer.data() + offset,                 // Offset to the label in the buffer
                             label.data(),                     // The original label
                             swTraceDeclNameLength - 1) == 0); // The length of the label
 
@@ -530,11 +530,11 @@
     arm::pipe::StringToSwTraceString<arm::pipe::SwTraceCharPolicy>(label, swTraceString);
     offset += (armnn::numeric_cast<unsigned int>(swTraceString.size()) - 1) * uint32_t_size;
     uint32_t swTraceUINameLength = ReadUint32(buffer.data(), offset);
-    BOOST_CHECK(swTraceUINameLength == 14); // ui_name length including the null-terminator
+    CHECK(swTraceUINameLength == 14); // ui_name length including the null-terminator
 
     label = "declare label";
     offset += uint32_t_size;
-    BOOST_CHECK(std::memcmp(buffer.data() + offset,           // Offset to the label in the buffer
+    CHECK(std::memcmp(buffer.data() + offset,                 // Offset to the label in the buffer
                             label.data(),                     // The original label
                             swTraceUINameLength - 1) == 0);   // The length of the label
 
@@ -542,11 +542,11 @@
     arm::pipe::StringToSwTraceString<arm::pipe::SwTraceCharPolicy>(label, swTraceString);
     offset += (armnn::numeric_cast<unsigned int>(swTraceString.size()) - 1) * uint32_t_size;
     uint32_t swTraceArgTypesLength = ReadUint32(buffer.data(), offset);
-    BOOST_CHECK(swTraceArgTypesLength == 3); // arg_types length including the null-terminator
+    CHECK(swTraceArgTypesLength == 3); // arg_types length including the null-terminator
 
     label = "ps";
     offset += uint32_t_size;
-    BOOST_CHECK(std::memcmp(buffer.data() + offset,           // Offset to the label in the buffer
+    CHECK(std::memcmp(buffer.data() + offset,                 // Offset to the label in the buffer
                             label.data(),                     // The original label
                             swTraceArgTypesLength - 1) == 0); // The length of the label
 
@@ -554,11 +554,11 @@
     arm::pipe::StringToSwTraceString<arm::pipe::SwTraceCharPolicy>(label, swTraceString);
     offset += (armnn::numeric_cast<unsigned int>(swTraceString.size()) - 1) * uint32_t_size;
     uint32_t swTraceArgNamesLength = ReadUint32(buffer.data(), offset);
-    BOOST_CHECK(swTraceArgNamesLength == 11); // arg_names length including the null-terminator
+    CHECK(swTraceArgNamesLength == 11); // arg_names length including the null-terminator
 
     label = "guid,value";
     offset += uint32_t_size;
-    BOOST_CHECK(std::memcmp(buffer.data() + offset,        // Offset to the label in the buffer
+    CHECK(std::memcmp(buffer.data() + offset,                 // Offset to the label in the buffer
                             label.data(),                     // The original label
                             swTraceArgNamesLength - 1) == 0); // The length of the label
 
@@ -566,21 +566,21 @@
     arm::pipe::StringToSwTraceString<arm::pipe::SwTraceCharPolicy>(label, swTraceString);
     offset += (armnn::numeric_cast<unsigned int>(swTraceString.size()) - 1) * uint32_t_size;
     readDeclId = ReadUint32(buffer.data(), offset);
-    BOOST_CHECK(readDeclId == 1);
+    CHECK(readDeclId == 1);
 
     // Check second decl_name
     offset += uint32_t_size;
     swTraceDeclNameLength = ReadUint32(buffer.data(), offset);
-    BOOST_CHECK(swTraceDeclNameLength == 14); // decl_name length including the null-terminator
+    CHECK(swTraceDeclNameLength == 14); // decl_name length including the null-terminator
 
     label = "declareEntity";
     offset += uint32_t_size;
-    BOOST_CHECK(std::memcmp(buffer.data() + offset,           // Offset to the label in the buffer
+    CHECK(std::memcmp(buffer.data() + offset,                 // Offset to the label in the buffer
                             label.data(),                     // The original label
                             swTraceDeclNameLength - 1) == 0); // The length of the label
 }
 
-BOOST_AUTO_TEST_CASE(TimelineEntityPacketTestNoBuffer)
+TEST_CASE("TimelineEntityPacketTestNoBuffer")
 {
     const uint64_t profilingGuid = 123456u;
     unsigned int numberOfBytesWritten = 789u;
@@ -588,11 +588,11 @@
                                                             nullptr,
                                                             512u,
                                                             numberOfBytesWritten);
-    BOOST_CHECK(result == TimelinePacketStatus::BufferExhaustion);
-    BOOST_CHECK(numberOfBytesWritten == 0);
+    CHECK(result == TimelinePacketStatus::BufferExhaustion);
+    CHECK(numberOfBytesWritten == 0);
 }
 
-BOOST_AUTO_TEST_CASE(TimelineEntityPacketTestBufferExhaustedWithZeroBufferSize)
+TEST_CASE("TimelineEntityPacketTestBufferExhaustedWithZeroBufferSize")
 {
     std::vector<unsigned char> buffer(512, 0);
 
@@ -602,11 +602,11 @@
                                                             buffer.data(),
                                                             0,
                                                             numberOfBytesWritten);
-    BOOST_CHECK(result == TimelinePacketStatus::BufferExhaustion);
-    BOOST_CHECK(numberOfBytesWritten == 0);
+    CHECK(result == TimelinePacketStatus::BufferExhaustion);
+    CHECK(numberOfBytesWritten == 0);
 }
 
-BOOST_AUTO_TEST_CASE(TimelineEntityPacketTestBufferExhaustedWithFixedBufferSize)
+TEST_CASE("TimelineEntityPacketTestBufferExhaustedWithFixedBufferSize")
 {
     std::vector<unsigned char> buffer(10, 0);
 
@@ -616,11 +616,11 @@
                                                             buffer.data(),
                                                             armnn::numeric_cast<unsigned int>(buffer.size()),
                                                             numberOfBytesWritten);
-    BOOST_CHECK(result == TimelinePacketStatus::BufferExhaustion);
-    BOOST_CHECK(numberOfBytesWritten == 0);
+    CHECK(result == TimelinePacketStatus::BufferExhaustion);
+    CHECK(numberOfBytesWritten == 0);
 }
 
-BOOST_AUTO_TEST_CASE(TimelineEntityPacketTestFullConstructionOfData)
+TEST_CASE("TimelineEntityPacketTestFullConstructionOfData")
 {
     std::vector<unsigned char> buffer(512, 0);
 
@@ -630,23 +630,23 @@
                                                             buffer.data(),
                                                             armnn::numeric_cast<unsigned int>(buffer.size()),
                                                             numberOfBytesWritten);
-    BOOST_CHECK(result == TimelinePacketStatus::Ok);
-    BOOST_CHECK(numberOfBytesWritten == 12);
+    CHECK(result == TimelinePacketStatus::Ok);
+    CHECK(numberOfBytesWritten == 12);
 
     unsigned int uint32_t_size = sizeof(uint32_t);
 
     unsigned int offset = 0;
     // Check decl_Id
     uint32_t decl_Id = ReadUint32(buffer.data(), offset);
-    BOOST_CHECK(decl_Id == uint32_t(1));
+    CHECK(decl_Id == uint32_t(1));
 
     // Check the profiling GUID
     offset += uint32_t_size;
     uint64_t readProfilingGuid = ReadUint64(buffer.data(), offset);
-    BOOST_CHECK(readProfilingGuid == profilingGuid);
+    CHECK(readProfilingGuid == profilingGuid);
 }
 
-BOOST_AUTO_TEST_CASE(TimelineEventClassTestNoBuffer)
+TEST_CASE("TimelineEventClassTestNoBuffer")
 {
     const uint64_t profilingGuid = 123456u;
     const uint64_t profilingNameGuid = 3345u;
@@ -656,11 +656,11 @@
                                                                 nullptr,
                                                                 512u,
                                                                 numberOfBytesWritten);
-    BOOST_CHECK(result == TimelinePacketStatus::BufferExhaustion);
-    BOOST_CHECK(numberOfBytesWritten == 0);
+    CHECK(result == TimelinePacketStatus::BufferExhaustion);
+    CHECK(numberOfBytesWritten == 0);
 }
 
-BOOST_AUTO_TEST_CASE(TimelineEventClassTestBufferExhaustionZeroValue)
+TEST_CASE("TimelineEventClassTestBufferExhaustionZeroValue")
 {
     std::vector<unsigned char> buffer(512, 0);
 
@@ -672,11 +672,11 @@
                                                                 buffer.data(),
                                                                 0,
                                                                 numberOfBytesWritten);
-    BOOST_CHECK(result == TimelinePacketStatus::BufferExhaustion);
-    BOOST_CHECK(numberOfBytesWritten == 0);
+    CHECK(result == TimelinePacketStatus::BufferExhaustion);
+    CHECK(numberOfBytesWritten == 0);
 }
 
-BOOST_AUTO_TEST_CASE(TimelineEventClassTestBufferExhaustionFixedValue)
+TEST_CASE("TimelineEventClassTestBufferExhaustionFixedValue")
 {
     std::vector<unsigned char> buffer(10, 0);
 
@@ -688,11 +688,11 @@
                                                                 buffer.data(),
                                                                 armnn::numeric_cast<unsigned int>(buffer.size()),
                                                                 numberOfBytesWritten);
-    BOOST_CHECK(result == TimelinePacketStatus::BufferExhaustion);
-    BOOST_CHECK(numberOfBytesWritten == 0);
+    CHECK(result == TimelinePacketStatus::BufferExhaustion);
+    CHECK(numberOfBytesWritten == 0);
 }
 
-BOOST_AUTO_TEST_CASE(TimelineEventClassTestFullConstructionOfData)
+TEST_CASE("TimelineEventClassTestFullConstructionOfData")
 {
     std::vector<unsigned char> buffer(512, 0);
 
@@ -704,8 +704,8 @@
                                                                 buffer.data(),
                                                                 armnn::numeric_cast<unsigned int>(buffer.size()),
                                                                 numberOfBytesWritten);
-    BOOST_CHECK(result == TimelinePacketStatus::Ok);
-    BOOST_CHECK(numberOfBytesWritten == 20);
+    CHECK(result == TimelinePacketStatus::Ok);
+    CHECK(numberOfBytesWritten == 20);
 
     unsigned int uint32_t_size = sizeof(uint32_t);
     unsigned int uint64_t_size = sizeof(uint64_t);
@@ -713,19 +713,19 @@
     unsigned int offset = 0;
     // Check the decl_id
     uint32_t declId = ReadUint32(buffer.data(), offset);
-    BOOST_CHECK(declId == uint32_t(2));
+    CHECK(declId == uint32_t(2));
 
     // Check the profiling GUID
     offset += uint32_t_size;
     uint64_t readProfilingGuid = ReadUint64(buffer.data(), offset);
-    BOOST_CHECK(readProfilingGuid == profilingGuid);
+    CHECK(readProfilingGuid == profilingGuid);
 
     offset += uint64_t_size;
     uint64_t readProfilingNameGuid = ReadUint64(buffer.data(), offset);
-    BOOST_CHECK(readProfilingNameGuid == profilingNameGuid);
+    CHECK(readProfilingNameGuid == profilingNameGuid);
 }
 
-BOOST_AUTO_TEST_CASE(TimelineEventPacketTestNoBuffer)
+TEST_CASE("TimelineEventPacketTestNoBuffer")
 {
     const uint64_t timestamp = 456789u;
     const int threadId = armnnUtils::Threads::GetCurrentThreadId();
@@ -737,11 +737,11 @@
                                                            nullptr,
                                                            512u,
                                                            numberOfBytesWritten);
-    BOOST_CHECK(result == TimelinePacketStatus::BufferExhaustion);
-    BOOST_CHECK(numberOfBytesWritten == 0);
+    CHECK(result == TimelinePacketStatus::BufferExhaustion);
+    CHECK(numberOfBytesWritten == 0);
 }
 
-BOOST_AUTO_TEST_CASE(TimelineEventPacketTestBufferExhaustionZeroValue)
+TEST_CASE("TimelineEventPacketTestBufferExhaustionZeroValue")
 {
     std::vector<unsigned char> buffer(512, 0);
 
@@ -755,11 +755,11 @@
                                                            buffer.data(),
                                                            0,
                                                            numberOfBytesWritten);
-    BOOST_CHECK(result == TimelinePacketStatus::BufferExhaustion);
-    BOOST_CHECK(numberOfBytesWritten == 0);
+    CHECK(result == TimelinePacketStatus::BufferExhaustion);
+    CHECK(numberOfBytesWritten == 0);
 }
 
-BOOST_AUTO_TEST_CASE(TimelineEventPacketTestBufferExhaustionFixedValue)
+TEST_CASE("TimelineEventPacketTestBufferExhaustionFixedValue")
 {
     std::vector<unsigned char> buffer(10, 0);
 
@@ -773,11 +773,11 @@
                                                            buffer.data(),
                                                            armnn::numeric_cast<unsigned int>(buffer.size()),
                                                            numberOfBytesWritten);
-    BOOST_CHECK(result == TimelinePacketStatus::BufferExhaustion);
-    BOOST_CHECK(numberOfBytesWritten == 0);
+    CHECK(result == TimelinePacketStatus::BufferExhaustion);
+    CHECK(numberOfBytesWritten == 0);
 }
 
-BOOST_AUTO_TEST_CASE(TimelineEventPacketTestFullConstructionOfData)
+TEST_CASE("TimelineEventPacketTestFullConstructionOfData")
 {
     std::vector<unsigned char> buffer(512, 0);
 
@@ -791,32 +791,32 @@
                                                            buffer.data(),
                                                            armnn::numeric_cast<unsigned int>(buffer.size()),
                                                            numberOfBytesWritten);
-    BOOST_CHECK(result == TimelinePacketStatus::Ok);
+    CHECK(result == TimelinePacketStatus::Ok);
 
     unsigned int uint32_t_size = sizeof(uint32_t);
     unsigned int uint64_t_size = sizeof(uint64_t);
-    BOOST_CHECK(numberOfBytesWritten == 20 + ThreadIdSize);
+    CHECK(numberOfBytesWritten == 20 + ThreadIdSize);
 
     unsigned int offset = 0;
     // Check the decl_id
     uint32_t readDeclId = ReadUint32(buffer.data(), offset);
-    BOOST_CHECK(readDeclId == 4);
+    CHECK(readDeclId == 4);
 
     // Check the timestamp
     offset += uint32_t_size;
     uint64_t readTimestamp = ReadUint64(buffer.data(), offset);
-    BOOST_CHECK(readTimestamp == timestamp);
+    CHECK(readTimestamp == timestamp);
 
     // Check the thread id
     offset += uint64_t_size;
     std::vector<uint8_t> readThreadId(ThreadIdSize, 0);
     ReadBytes(buffer.data(), offset, ThreadIdSize, readThreadId.data());
-    BOOST_CHECK(readThreadId == threadId);
+    CHECK(readThreadId == threadId);
 
     // Check the profiling GUID
     offset += ThreadIdSize;
     uint64_t readProfilingGuid = ReadUint64(buffer.data(), offset);
-    BOOST_CHECK(readProfilingGuid == profilingGuid);
+    CHECK(readProfilingGuid == profilingGuid);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
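
Note on the shape of the migration shown in the hunks above: doctest has no suite-end macro, so each BOOST_AUTO_TEST_SUITE(...) / BOOST_AUTO_TEST_SUITE_END() pair becomes a braced TEST_SUITE("...") { ... } block whose closing brace replaces the END macro, and the case macro takes a string name. A minimal standalone sketch of that shape (not taken from the ArmNN sources; the suite/case names and values are illustrative only):

    // Sketch of the suite conversion pattern used throughout this patch.
    #define DOCTEST_CONFIG_IMPLEMENT_WITH_MAIN   // let doctest generate main() for this standalone sketch
    #include <doctest/doctest.h>

    TEST_SUITE("ExampleSuite")
    {

    TEST_CASE("ExampleCase")
    {
        unsigned int numberOfBytesWritten = 20u;
        CHECK(numberOfBytesWritten == 20u);      // BOOST_CHECK / BOOST_TEST -> CHECK
    }

    } // this closing brace is what replaces BOOST_AUTO_TEST_SUITE_END()
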
diff --git a/src/profiling/test/TimelineUtilityMethodsTests.cpp b/src/profiling/test/TimelineUtilityMethodsTests.cpp
index cbe3b79..1e733df 100644
--- a/src/profiling/test/TimelineUtilityMethodsTests.cpp
+++ b/src/profiling/test/TimelineUtilityMethodsTests.cpp
@@ -13,14 +13,14 @@
 
 #include <memory>
 
-#include <boost/test/unit_test.hpp>
+#include <doctest/doctest.h>
 
 using namespace armnn;
 using namespace armnn::profiling;
 
-BOOST_AUTO_TEST_SUITE(TimelineUtilityMethodsTests)
-
-BOOST_AUTO_TEST_CASE(CreateTypedLabelTest)
+TEST_SUITE("TimelineUtilityMethodsTests")
+{
+TEST_CASE("CreateTypedLabelTest")
 {
     MockBufferManager mockBufferManager(1024);
     ProfilingService  profilingService;
@@ -35,18 +35,18 @@
     const std::string entityName = "some entity";
     ProfilingStaticGuid labelTypeGuid(456);
 
-    BOOST_CHECK_NO_THROW(timelineUtilityMethods.MarkEntityWithLabel(entityGuid, entityName, labelTypeGuid));
+    CHECK_NOTHROW(timelineUtilityMethods.MarkEntityWithLabel(entityGuid, entityName, labelTypeGuid));
 
     // Commit all packets at once
     timelineUtilityMethods.Commit();
 
     // Get the readable buffer
     auto readableBuffer = mockBufferManager.GetReadableBuffer();
-    BOOST_CHECK(readableBuffer != nullptr);
+    CHECK(readableBuffer != nullptr);
     unsigned int size = readableBuffer->GetSize();
-    BOOST_CHECK(size == 76);
+    CHECK(size == 76);
     const unsigned char* readableData = readableBuffer->GetReadableData();
-    BOOST_CHECK(readableData != nullptr);
+    CHECK(readableData != nullptr);
 
     // Utils
     unsigned int offset = 0;
@@ -70,21 +70,21 @@
     mockBufferManager.MarkRead(readableBuffer);
 }
 
-BOOST_AUTO_TEST_CASE(SendWellKnownLabelsAndEventClassesTest)
+TEST_CASE("SendWellKnownLabelsAndEventClassesTest")
 {
     MockBufferManager mockBufferManager(1024);
     ProfilingService  profilingService;
     SendTimelinePacket sendTimelinePacket(mockBufferManager);
 
-    BOOST_CHECK_NO_THROW(TimelineUtilityMethods::SendWellKnownLabelsAndEventClasses(sendTimelinePacket));
+    CHECK_NOTHROW(TimelineUtilityMethods::SendWellKnownLabelsAndEventClasses(sendTimelinePacket));
 
     // Get the readable buffer
     auto readableBuffer = mockBufferManager.GetReadableBuffer();
-    BOOST_CHECK(readableBuffer != nullptr);
+    CHECK(readableBuffer != nullptr);
     unsigned int size = readableBuffer->GetSize();
-    BOOST_TEST(size == 460);
+    CHECK(size == 460);
     const unsigned char* readableData = readableBuffer->GetReadableData();
-    BOOST_CHECK(readableData != nullptr);
+    CHECK(readableData != nullptr);
 
     // Utils
     unsigned int offset = 0;
@@ -197,7 +197,7 @@
     mockBufferManager.MarkRead(readableBuffer);
 }
 
-BOOST_AUTO_TEST_CASE(CreateNamedTypedChildEntityTest)
+TEST_CASE("CreateNamedTypedChildEntityTest")
 {
     MockBufferManager mockBufferManager(1024);
     ProfilingService  profilingService;
@@ -212,30 +212,30 @@
     // Generate first guid to ensure that the named typed entity guid is not 0 on local single test.
     profilingService.NextGuid();
 
-    BOOST_CHECK_THROW(timelineUtilityMethods.CreateNamedTypedChildEntity(parentEntityGuid, "", entityType),
+    CHECK_THROWS_AS(timelineUtilityMethods.CreateNamedTypedChildEntity(parentEntityGuid, "", entityType),
                       InvalidArgumentException);
-    BOOST_CHECK_THROW(timelineUtilityMethods.CreateNamedTypedChildEntity(parentEntityGuid, entityName, ""),
+    CHECK_THROWS_AS(timelineUtilityMethods.CreateNamedTypedChildEntity(parentEntityGuid, entityName, ""),
                       InvalidArgumentException);
-    BOOST_CHECK_THROW(timelineUtilityMethods.CreateNamedTypedChildEntity(
+    CHECK_THROWS_AS(timelineUtilityMethods.CreateNamedTypedChildEntity(
         childEntityGuid, parentEntityGuid, "", entityType), InvalidArgumentException);
-    BOOST_CHECK_THROW(timelineUtilityMethods.CreateNamedTypedChildEntity(
+    CHECK_THROWS_AS(timelineUtilityMethods.CreateNamedTypedChildEntity(
         childEntityGuid, parentEntityGuid, entityName, ""), InvalidArgumentException);
 
-    BOOST_CHECK_NO_THROW(childEntityGuid = timelineUtilityMethods.CreateNamedTypedChildEntity(parentEntityGuid,
+    CHECK_NOTHROW(childEntityGuid = timelineUtilityMethods.CreateNamedTypedChildEntity(parentEntityGuid,
                                                                                               entityName,
                                                                                               entityType));
-    BOOST_CHECK(childEntityGuid != ProfilingGuid(0));
+    CHECK(childEntityGuid != ProfilingGuid(0));
 
     // Commit all packets at once
     timelineUtilityMethods.Commit();
 
     // Get the readable buffer
     auto readableBuffer = mockBufferManager.GetReadableBuffer();
-    BOOST_CHECK(readableBuffer != nullptr);
+    CHECK(readableBuffer != nullptr);
     unsigned int size = readableBuffer->GetSize();
-    BOOST_CHECK(size == 196);
+    CHECK(size == 196);
     const unsigned char* readableData = readableBuffer->GetReadableData();
-    BOOST_CHECK(readableData != nullptr);
+    CHECK(readableData != nullptr);
 
     // Utils
     unsigned int offset = 0;
@@ -284,7 +284,7 @@
     mockBufferManager.MarkRead(readableBuffer);
 }
 
-BOOST_AUTO_TEST_CASE(DeclareLabelTest)
+TEST_CASE("DeclareLabelTest")
 {
     MockBufferManager mockBufferManager(1024);
     ProfilingService  profilingService;
@@ -295,25 +295,25 @@
     profilingService.NextGuid();
 
     // Try declaring an invalid (empty) label
-    BOOST_CHECK_THROW(timelineUtilityMethods.DeclareLabel(""), InvalidArgumentException);
+    CHECK_THROWS_AS(timelineUtilityMethods.DeclareLabel(""), InvalidArgumentException);
 
     // Try declaring an invalid (wrong SWTrace format) label
-    BOOST_CHECK_THROW(timelineUtilityMethods.DeclareLabel("inv@lid lab€l"), RuntimeException);
+    CHECK_THROWS_AS(timelineUtilityMethods.DeclareLabel("inv@lid lab€l"), RuntimeException);
 
     // Declare a valid label
     const std::string labelName = "valid label";
     ProfilingGuid labelGuid = 0;
-    BOOST_CHECK_NO_THROW(labelGuid = timelineUtilityMethods.DeclareLabel(labelName));
-    BOOST_CHECK(labelGuid != ProfilingGuid(0));
+    CHECK_NOTHROW(labelGuid = timelineUtilityMethods.DeclareLabel(labelName));
+    CHECK(labelGuid != ProfilingGuid(0));
 
     // Try adding the same label as before
     ProfilingGuid newLabelGuid = 0;
-    BOOST_CHECK_NO_THROW(newLabelGuid = timelineUtilityMethods.DeclareLabel(labelName));
-    BOOST_CHECK(newLabelGuid != ProfilingGuid(0));
-    BOOST_CHECK(newLabelGuid == labelGuid);
+    CHECK_NOTHROW(newLabelGuid = timelineUtilityMethods.DeclareLabel(labelName));
+    CHECK(newLabelGuid != ProfilingGuid(0));
+    CHECK(newLabelGuid == labelGuid);
 }
 
-BOOST_AUTO_TEST_CASE(CreateNameTypeEntityInvalidTest)
+TEST_CASE("CreateNameTypeEntityInvalidTest")
 {
     MockBufferManager mockBufferManager(1024);
     ProfilingService  profilingService;
@@ -321,24 +321,24 @@
     TimelineUtilityMethods timelineUtilityMethods(sendTimelinePacket);
 
     // Invalid name
-    BOOST_CHECK_THROW(timelineUtilityMethods.CreateNamedTypedEntity("", "Type"), InvalidArgumentException);
+    CHECK_THROWS_AS(timelineUtilityMethods.CreateNamedTypedEntity("", "Type"), InvalidArgumentException);
 
     // Invalid type
-    BOOST_CHECK_THROW(timelineUtilityMethods.CreateNamedTypedEntity("Name", ""), InvalidArgumentException);
+    CHECK_THROWS_AS(timelineUtilityMethods.CreateNamedTypedEntity("Name", ""), InvalidArgumentException);
 
     ProfilingDynamicGuid guid = profilingService.NextGuid();
 
     // CreatedNamedTypedEntity with Guid - Invalid name
-    BOOST_CHECK_THROW(timelineUtilityMethods.CreateNamedTypedEntity(guid, "", "Type"),
+    CHECK_THROWS_AS(timelineUtilityMethods.CreateNamedTypedEntity(guid, "", "Type"),
                       InvalidArgumentException);
 
     // CreatedNamedTypedEntity with Guid - Invalid type
-    BOOST_CHECK_THROW(timelineUtilityMethods.CreateNamedTypedEntity(guid, "Name", ""),
+    CHECK_THROWS_AS(timelineUtilityMethods.CreateNamedTypedEntity(guid, "Name", ""),
                       InvalidArgumentException);
 
 }
 
-BOOST_AUTO_TEST_CASE(CreateNameTypeEntityTest)
+TEST_CASE("CreateNameTypeEntityTest")
 {
     MockBufferManager mockBufferManager(1024);
     ProfilingService  profilingService;
@@ -352,18 +352,18 @@
     profilingService.NextGuid();
 
     ProfilingDynamicGuid guid = timelineUtilityMethods.CreateNamedTypedEntity(entityName, entityType);
-    BOOST_CHECK(guid != ProfilingGuid(0));
+    CHECK(guid != ProfilingGuid(0));
 
     // Commit all packets at once
     timelineUtilityMethods.Commit();
 
     // Get the readable buffer
     auto readableBuffer = mockBufferManager.GetReadableBuffer();
-    BOOST_CHECK(readableBuffer != nullptr);
+    CHECK(readableBuffer != nullptr);
     unsigned int size = readableBuffer->GetSize();
-    BOOST_CHECK(size == 148);
+    CHECK(size == 148);
     const unsigned char* readableData = readableBuffer->GetReadableData();
-    BOOST_CHECK(readableData != nullptr);
+    CHECK(readableData != nullptr);
 
     // Utils
     unsigned int offset = 0;
@@ -405,7 +405,7 @@
     mockBufferManager.MarkRead(readableBuffer);
 }
 
-BOOST_AUTO_TEST_CASE(RecordEventTest)
+TEST_CASE("RecordEventTest")
 {
     MockBufferManager mockBufferManager(1024);
     ProfilingService  profilingService;
@@ -417,21 +417,21 @@
     ProfilingGuid entityGuid(123);
     ProfilingStaticGuid eventClassGuid(456);
     ProfilingDynamicGuid eventGuid(0);
-    BOOST_CHECK_NO_THROW(eventGuid = timelineUtilityMethods.RecordEvent(entityGuid, eventClassGuid));
-    BOOST_CHECK(eventGuid != ProfilingGuid(0));
+    CHECK_NOTHROW(eventGuid = timelineUtilityMethods.RecordEvent(entityGuid, eventClassGuid));
+    CHECK(eventGuid != ProfilingGuid(0));
 
     // Commit all packets at once
     timelineUtilityMethods.Commit();
 
     // Get the readable buffer
     auto readableBuffer = mockBufferManager.GetReadableBuffer();
-    BOOST_CHECK(readableBuffer != nullptr);
+    CHECK(readableBuffer != nullptr);
     unsigned int size = readableBuffer->GetSize();
 
-    BOOST_CHECK(size == 68 + ThreadIdSize);
+    CHECK(size == 68 + ThreadIdSize);
 
     const unsigned char* readableData = readableBuffer->GetReadableData();
-    BOOST_CHECK(readableData != nullptr);
+    CHECK(readableData != nullptr);
 
     // Utils
     unsigned int offset = 0;
@@ -455,4 +455,4 @@
     mockBufferManager.MarkRead(readableBuffer);
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
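
The TimelineUtilityMethods hunks above also carry the exception-assertion mapping: BOOST_CHECK_THROW(expr, Exception) becomes CHECK_THROWS_AS(expr, Exception) with the same argument order, and BOOST_CHECK_NO_THROW(expr) becomes CHECK_NOTHROW(expr); the shorter macro names are also why some unchanged continuation lines no longer line up with the opening parenthesis. A small sketch of both forms, with std::invalid_argument standing in for armnn::InvalidArgumentException and DeclareLabelOrThrow invented for illustration (it assumes doctest's main() is supplied by another translation unit):

    #include <doctest/doctest.h>

    #include <stdexcept>
    #include <string>

    namespace
    {
    // Made-up helper: throws on an empty label, mirroring the DeclareLabel checks above.
    std::string DeclareLabelOrThrow(const std::string& labelName)
    {
        if (labelName.empty())
        {
            throw std::invalid_argument("label must not be empty");
        }
        return labelName;
    }
    } // anonymous namespace

    TEST_CASE("ThrowAssertionSketch")
    {
        // BOOST_CHECK_THROW(expr, Exception) -> CHECK_THROWS_AS(expr, Exception)
        CHECK_THROWS_AS(DeclareLabelOrThrow(""), std::invalid_argument);

        // BOOST_CHECK_NO_THROW(expr) -> CHECK_NOTHROW(expr)
        std::string label;
        CHECK_NOTHROW(label = DeclareLabelOrThrow("valid label"));
        CHECK(label == "valid label");
    }
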
diff --git a/src/timelineDecoder/tests/JSONTimelineDecoderTests.cpp b/src/timelineDecoder/tests/JSONTimelineDecoderTests.cpp
index dfe5fd0..65dbdcd 100644
--- a/src/timelineDecoder/tests/JSONTimelineDecoderTests.cpp
+++ b/src/timelineDecoder/tests/JSONTimelineDecoderTests.cpp
@@ -8,13 +8,12 @@
 #include <server/include/timelineDecoder/TimelineDecoder.hpp>
 #include <Filesystem.hpp>
 
-#include <boost/test/test_tools.hpp>
-#include <boost/test/unit_test_suite.hpp>
+#include <doctest/doctest.h>
 
 #include <fstream>
 
-BOOST_AUTO_TEST_SUITE(JSONTimelineDecoderTests)
-
+TEST_SUITE("JSONTimelineDecoderTests")
+{
 using namespace armnn;
 using namespace timelinedecoder;
 
@@ -683,103 +682,103 @@
     timelineDecoder.CreateRelationship(relationship90);
 }
 
-BOOST_AUTO_TEST_CASE(JSONTimelineDecoderTestStructure)
+TEST_CASE("JSONTimelineDecoderTestStructure")
 {
     JSONTimelineDecoder timelineDecoder;
     RunSimpleModelThroughDecoder(timelineDecoder);
 
     JSONTimelineDecoder::Model model = timelineDecoder.GetModel();
-    BOOST_CHECK(model.jsonEntities.size() == 20);
+    CHECK(model.jsonEntities.size() == 20);
     JSONTimelineDecoder::JSONEntity rootEntity = model.jsonEntities.at(6);
-    BOOST_CHECK(rootEntity.childEntities.size() == 4);
+    CHECK(rootEntity.childEntities.size() == 4);
 
     // Testing input layer model
     JSONTimelineDecoder::JSONEntity entity0 = model.jsonEntities.at(rootEntity.childEntities[0]);
-    BOOST_CHECK(entity0.GetName() == "input");
-    BOOST_CHECK(entity0.GetType() == "layer");
+    CHECK(entity0.GetName() == "input");
+    CHECK(entity0.GetType() == "layer");
 
-    BOOST_CHECK(entity0.childEntities.size() == 1);
+    CHECK(entity0.childEntities.size() == 1);
     JSONTimelineDecoder::JSONEntity input_workload_entity = model.jsonEntities.at(entity0.childEntities[0]);
-    BOOST_CHECK(input_workload_entity.childEntities.size() == 1);
-    BOOST_CHECK(input_workload_entity.GetType() == "workload");
-    BOOST_CHECK(input_workload_entity.extendedData.at("backendId") == "CpuRef");
+    CHECK(input_workload_entity.childEntities.size() == 1);
+    CHECK(input_workload_entity.GetType() == "workload");
+    CHECK(input_workload_entity.extendedData.at("backendId") == "CpuRef");
 
     JSONTimelineDecoder::JSONEntity input_workload_execution_entity = model.jsonEntities
             .at(input_workload_entity.childEntities[0]);
-    BOOST_CHECK(input_workload_execution_entity.childEntities.size() == 2);
-    BOOST_CHECK(input_workload_execution_entity.GetType() == "workload_execution");
+    CHECK(input_workload_execution_entity.childEntities.size() == 2);
+    CHECK(input_workload_execution_entity.GetType() == "workload_execution");
 
     JSONTimelineDecoder::JSONEntity input_workload_execution_event0 = model.jsonEntities
             .at(input_workload_execution_entity.childEntities[0]);
-    BOOST_CHECK(input_workload_execution_event0.GetType() == "Event");
-    BOOST_CHECK(input_workload_execution_event0.childEntities.size() == 0);
-    BOOST_CHECK(model.events.at(input_workload_execution_event0.GetGuid()).m_ThreadId > uint64_t(0));
-    BOOST_CHECK(model.events.at(input_workload_execution_event0.GetGuid()).m_TimeStamp > uint64_t(0));
+    CHECK(input_workload_execution_event0.GetType() == "Event");
+    CHECK(input_workload_execution_event0.childEntities.size() == 0);
+    CHECK(model.events.at(input_workload_execution_event0.GetGuid()).m_ThreadId > uint64_t(0));
+    CHECK(model.events.at(input_workload_execution_event0.GetGuid()).m_TimeStamp > uint64_t(0));
 
     JSONTimelineDecoder::JSONEntity input_workload_execution_event1 = model.jsonEntities
             .at(input_workload_execution_entity.childEntities[1]);
-    BOOST_CHECK(input_workload_execution_event0.GetType() == "Event");
-    BOOST_CHECK(input_workload_execution_event1.childEntities.size() == 0);
-    BOOST_CHECK(model.events.at(input_workload_execution_event1.GetGuid()).m_ThreadId > uint64_t(0));
-    BOOST_CHECK(model.events.at(input_workload_execution_event1.GetGuid()).m_TimeStamp > uint64_t(0));
+    CHECK(input_workload_execution_event0.GetType() == "Event");
+    CHECK(input_workload_execution_event1.childEntities.size() == 0);
+    CHECK(model.events.at(input_workload_execution_event1.GetGuid()).m_ThreadId > uint64_t(0));
+    CHECK(model.events.at(input_workload_execution_event1.GetGuid()).m_TimeStamp > uint64_t(0));
 
     // Testing normalization layer model
     JSONTimelineDecoder::JSONEntity entity1 = model.jsonEntities.at(rootEntity.childEntities[1]);
-    BOOST_CHECK(entity1.GetName() == "normalization");
-    BOOST_CHECK(entity1.GetType() == "layer");
+    CHECK(entity1.GetName() == "normalization");
+    CHECK(entity1.GetType() == "layer");
 
     JSONTimelineDecoder::JSONEntity normalization_workload_entity = model.jsonEntities
             .at(entity1.childEntities[0]);
-    BOOST_CHECK(normalization_workload_entity.GetType() == "workload");
-    BOOST_CHECK(normalization_workload_entity.extendedData.at("backendId") == "CpuRef");
+    CHECK(normalization_workload_entity.GetType() == "workload");
+    CHECK(normalization_workload_entity.extendedData.at("backendId") == "CpuRef");
 
     JSONTimelineDecoder::JSONEntity normalization_workload_execution_entity = model.jsonEntities
             .at(normalization_workload_entity.childEntities[0]);
-    BOOST_CHECK(normalization_workload_execution_entity.GetType() == "workload_execution");
+    CHECK(normalization_workload_execution_entity.GetType() == "workload_execution");
 
     JSONTimelineDecoder::JSONEntity normalization_workload_execution_event0 = model.jsonEntities
             .at(normalization_workload_execution_entity.childEntities[0]);
-    BOOST_CHECK(normalization_workload_execution_event0.GetType() == "Event");
-    BOOST_CHECK(model.events.at(normalization_workload_execution_event0.GetGuid()).m_ThreadId > uint64_t(0));
-    BOOST_CHECK(model.events.at(normalization_workload_execution_event0.GetGuid()).m_TimeStamp > uint64_t(0));
+    CHECK(normalization_workload_execution_event0.GetType() == "Event");
+    CHECK(model.events.at(normalization_workload_execution_event0.GetGuid()).m_ThreadId > uint64_t(0));
+    CHECK(model.events.at(normalization_workload_execution_event0.GetGuid()).m_TimeStamp > uint64_t(0));
 
     JSONTimelineDecoder::JSONEntity normalization_workload_execution_event1 = model.jsonEntities
             .at(normalization_workload_execution_entity.childEntities[1]);
-    BOOST_CHECK(normalization_workload_execution_event1.GetType() == "Event");
-    BOOST_CHECK(model.events.at(normalization_workload_execution_event1.GetGuid()).m_ThreadId > uint64_t(0));
-    BOOST_CHECK(model.events.at(normalization_workload_execution_event1.GetGuid()).m_TimeStamp > uint64_t(0));
+    CHECK(normalization_workload_execution_event1.GetType() == "Event");
+    CHECK(model.events.at(normalization_workload_execution_event1.GetGuid()).m_ThreadId > uint64_t(0));
+    CHECK(model.events.at(normalization_workload_execution_event1.GetGuid()).m_TimeStamp > uint64_t(0));
 
     // Testing output layer model
     JSONTimelineDecoder::JSONEntity entity2 = model.jsonEntities.at(rootEntity.childEntities[2]);
-    BOOST_CHECK(entity2.GetName() == "output");
-    BOOST_CHECK(entity2.GetType() == "layer");
+    CHECK(entity2.GetName() == "output");
+    CHECK(entity2.GetType() == "layer");
 
     JSONTimelineDecoder::JSONEntity output_workload_entity = model.jsonEntities.at(entity2.childEntities[0]);
-    BOOST_CHECK(output_workload_entity.GetType() == "workload");
-    BOOST_CHECK(output_workload_entity.extendedData.at("backendId") == "CpuRef");
+    CHECK(output_workload_entity.GetType() == "workload");
+    CHECK(output_workload_entity.extendedData.at("backendId") == "CpuRef");
 
     JSONTimelineDecoder::JSONEntity output_workload_execution_entity = model.jsonEntities
             .at(output_workload_entity.childEntities[0]);
-    BOOST_CHECK(output_workload_execution_entity.GetType() == "workload_execution");
+    CHECK(output_workload_execution_entity.GetType() == "workload_execution");
 
     JSONTimelineDecoder::JSONEntity output_workload_execution_event0 = model.jsonEntities
             .at(output_workload_execution_entity.childEntities[0]);
-    BOOST_CHECK(output_workload_execution_event0.GetType() == "Event");
-    BOOST_CHECK(model.events.at(output_workload_execution_event0.GetGuid()).m_ThreadId > uint64_t(0));
-    BOOST_CHECK(model.events.at(output_workload_execution_event0.GetGuid()).m_TimeStamp > uint64_t(0));
+    CHECK(output_workload_execution_event0.GetType() == "Event");
+    CHECK(model.events.at(output_workload_execution_event0.GetGuid()).m_ThreadId > uint64_t(0));
+    CHECK(model.events.at(output_workload_execution_event0.GetGuid()).m_TimeStamp > uint64_t(0));
 
     JSONTimelineDecoder::JSONEntity output_workload_execution_event1 = model.jsonEntities
             .at(output_workload_execution_entity.childEntities[1]);
-    BOOST_CHECK(output_workload_execution_event1.GetType() == "Event");
-    BOOST_CHECK(model.events.at(output_workload_execution_event1.GetGuid()).m_ThreadId > uint64_t(0));
-    BOOST_CHECK(model.events.at(output_workload_execution_event1.GetGuid()).m_TimeStamp > uint64_t(0));
+    CHECK(output_workload_execution_event1.GetType() == "Event");
+    CHECK(model.events.at(output_workload_execution_event1.GetGuid()).m_ThreadId > uint64_t(0));
+    CHECK(model.events.at(output_workload_execution_event1.GetGuid()).m_TimeStamp > uint64_t(0));
 
     JSONTimelineDecoder::JSONEntity entity48 =  model.jsonEntities.at(rootEntity.childEntities[3]);
-    BOOST_CHECK(entity48.GetName() == "");
-    BOOST_CHECK(entity48.GetType() == "inference");
+    CHECK(entity48.GetName() == "");
+    CHECK(entity48.GetType() == "inference");
 }
 
-BOOST_AUTO_TEST_CASE(JSONTimelineDecoderTestJSON)
+TEST_CASE("JSONTimelineDecoderTestJSON")
 {
     JSONTimelineDecoder timelineDecoder;
     RunSimpleModelThroughDecoder(timelineDecoder);
@@ -788,12 +787,12 @@
     JSONTimelineDecoder::JSONEntity rootEntity = model.jsonEntities.at(6);
 
     std::string jsonString = timelineDecoder.GetJSONString(rootEntity);
-    BOOST_CHECK(jsonString != "");
-    BOOST_CHECK(jsonString.find("input_0: {")!=std::string::npos);
-    BOOST_CHECK(jsonString.find("type: Measurement,\n"
+    CHECK(jsonString != "");
+    CHECK(jsonString.find("input_0: {")!=std::string::npos);
+    CHECK(jsonString.find("type: Measurement,\n"
                                    "\t\t\tbackendId :CpuRef,")!=std::string::npos);
-    BOOST_CHECK(jsonString.find("normalization_2: {")!=std::string::npos);
-    BOOST_CHECK(jsonString.find("output_4: {")!=std::string::npos);
+    CHECK(jsonString.find("normalization_2: {")!=std::string::npos);
+    CHECK(jsonString.find("output_4: {")!=std::string::npos);
 
     // Create a temporary file to write Json output to
     fs::path tempFile = armnnUtils::Filesystem::NamedTempFile("JSONTimelineDecoderTestJSON.json");
@@ -814,14 +813,14 @@
     inFile.close();
     std::string outfileJson = strStream.str();
 
-    BOOST_CHECK(outfileJson != "");
-    BOOST_CHECK(outfileJson.find("input_0: {")!=std::string::npos);
-    BOOST_CHECK(outfileJson.find("type: Measurement,\n"
+    CHECK(outfileJson != "");
+    CHECK(outfileJson.find("input_0: {")!=std::string::npos);
+    CHECK(outfileJson.find("type: Measurement,\n"
                                 "\t\t\tbackendId :CpuRef,")!=std::string::npos);
-    BOOST_CHECK(outfileJson.find("normalization_2: {")!=std::string::npos);
-    BOOST_CHECK(outfileJson.find("output_4: {")!=std::string::npos);
+    CHECK(outfileJson.find("normalization_2: {")!=std::string::npos);
+    CHECK(outfileJson.find("output_4: {")!=std::string::npos);
 
     // Remove temporary file
     fs::remove(tempFile);
 }
-BOOST_AUTO_TEST_SUITE_END()
\ No newline at end of file
+}
\ No newline at end of file
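
For plain value checks the patch collapses both BOOST_CHECK(expr) and BOOST_TEST(expr) into CHECK(expr); doctest decomposes binary comparisons, so a failing CHECK(size == 460) reports the actual value of size without a separate message. A short sketch of the value assertions plus CAPTURE for extra context on failure (buffer size invented; assumes doctest's main() is provided elsewhere):

    #include <doctest/doctest.h>

    #include <vector>

    TEST_CASE("ValueAssertionSketch")
    {
        std::vector<unsigned char> buffer(512, 0);
        unsigned int readBufferSize = static_cast<unsigned int>(buffer.size());

        CAPTURE(readBufferSize);            // stringified and logged only if an assertion below fails

        CHECK(buffer.data() != nullptr);    // BOOST_CHECK(ptr)   -> CHECK(ptr)
        CHECK(readBufferSize == 512u);      // BOOST_TEST(x == y) -> CHECK(x == y); failure prints both operands
    }
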
diff --git a/tests/profiling/gatordmock/tests/GatordMockTests.cpp b/tests/profiling/gatordmock/tests/GatordMockTests.cpp
index b88ad45..66e468c 100644
--- a/tests/profiling/gatordmock/tests/GatordMockTests.cpp
+++ b/tests/profiling/gatordmock/tests/GatordMockTests.cpp
@@ -11,6 +11,7 @@
 #include <ProfilingService.hpp>
 #include <TimelinePacketWriterFactory.hpp>
 
+#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/NumericCast.hpp>
 
 #include <server/include/timelineDecoder/TimelineDirectoryCaptureCommandHandler.hpp>
@@ -20,16 +21,15 @@
 
 #include <MockBackend.hpp>
 
-#include <boost/test/test_tools.hpp>
-#include <boost/test/unit_test_suite.hpp>
+#include <doctest/doctest.h>
 
-BOOST_AUTO_TEST_SUITE(GatordMockTests)
-
+TEST_SUITE("GatordMockTests")
+{
 using namespace armnn;
 using namespace std::this_thread;
 using namespace std::chrono_literals;
 
-BOOST_AUTO_TEST_CASE(CounterCaptureHandlingTest)
+TEST_CASE("CounterCaptureHandlingTest")
 {
     arm::pipe::PacketVersionResolver packetVersionResolver;
 
@@ -113,7 +113,7 @@
     {
         if (timeSlept >= timeout)
         {
-            BOOST_FAIL("Timeout: " + errorMsg);
+            FAIL("Timeout: " + errorMsg);
         }
         std::this_thread::sleep_for(std::chrono::milliseconds(sleepTime));
         timeSlept += sleepTime;
@@ -143,13 +143,13 @@
     std::unique_ptr<profiling::IPacketBuffer> packetBuffer = bufferManager.GetReadableBuffer();
 
     uint8_t readStreamVersion = ReadUint8(packetBuffer, offset);
-    BOOST_CHECK(readStreamVersion == 4);
+    CHECK(readStreamVersion == 4);
     offset += uint8_t_size;
     uint8_t readPointerBytes = ReadUint8(packetBuffer, offset);
-    BOOST_CHECK(readPointerBytes == uint64_t_size);
+    CHECK(readPointerBytes == uint64_t_size);
     offset += uint8_t_size;
     uint8_t readThreadIdBytes = ReadUint8(packetBuffer, offset);
-    BOOST_CHECK(readThreadIdBytes == threadId_size);
+    CHECK(readThreadIdBytes == threadId_size);
     offset += uint8_t_size;
 
     uint32_t declarationSize = profiling::ReadUint32(packetBuffer, offset);
@@ -166,20 +166,20 @@
         arm::pipe::SwTraceMessage& bufferMessage = swTraceBufferMessages[index];
         arm::pipe::SwTraceMessage& handlerMessage = commandHandler.m_SwTraceMessages[index];
 
-        BOOST_CHECK(bufferMessage.m_Name == handlerMessage.m_Name);
-        BOOST_CHECK(bufferMessage.m_UiName == handlerMessage.m_UiName);
-        BOOST_CHECK(bufferMessage.m_Id == handlerMessage.m_Id);
+        CHECK(bufferMessage.m_Name == handlerMessage.m_Name);
+        CHECK(bufferMessage.m_UiName == handlerMessage.m_UiName);
+        CHECK(bufferMessage.m_Id == handlerMessage.m_Id);
 
-        BOOST_CHECK(bufferMessage.m_ArgTypes.size() == handlerMessage.m_ArgTypes.size());
+        CHECK(bufferMessage.m_ArgTypes.size() == handlerMessage.m_ArgTypes.size());
         for(uint32_t i = 0; i < bufferMessage.m_ArgTypes.size(); ++i)
         {
-            BOOST_CHECK(bufferMessage.m_ArgTypes[i] == handlerMessage.m_ArgTypes[i]);
+            CHECK(bufferMessage.m_ArgTypes[i] == handlerMessage.m_ArgTypes[i]);
         }
 
-        BOOST_CHECK(bufferMessage.m_ArgNames.size() == handlerMessage.m_ArgNames.size());
+        CHECK(bufferMessage.m_ArgNames.size() == handlerMessage.m_ArgNames.size());
         for(uint32_t i = 0; i < bufferMessage.m_ArgNames.size(); ++i)
         {
-            BOOST_CHECK(bufferMessage.m_ArgNames[i] == handlerMessage.m_ArgNames[i]);
+            CHECK(bufferMessage.m_ArgNames[i] == handlerMessage.m_ArgNames[i]);
         }
     }
 }
@@ -187,56 +187,56 @@
 void CheckTimelinePackets(arm::pipe::TimelineDecoder& timelineDecoder)
 {
     unsigned int i = 0; // Use a postfix increment to avoid changing indexes each time the packet gets updated.
-    BOOST_CHECK(timelineDecoder.GetModel().m_Labels[i].m_Guid == profiling::LabelsAndEventClasses::NAME_GUID);
-    BOOST_CHECK(timelineDecoder.GetModel().m_Labels[i++].m_Name == profiling::LabelsAndEventClasses::NAME_LABEL);
+    CHECK(timelineDecoder.GetModel().m_Labels[i].m_Guid == profiling::LabelsAndEventClasses::NAME_GUID);
+    CHECK(timelineDecoder.GetModel().m_Labels[i++].m_Name == profiling::LabelsAndEventClasses::NAME_LABEL);
 
-    BOOST_CHECK(timelineDecoder.GetModel().m_Labels[i].m_Guid == profiling::LabelsAndEventClasses::TYPE_GUID);
-    BOOST_CHECK(timelineDecoder.GetModel().m_Labels[i++].m_Name == profiling::LabelsAndEventClasses::TYPE_LABEL);
+    CHECK(timelineDecoder.GetModel().m_Labels[i].m_Guid == profiling::LabelsAndEventClasses::TYPE_GUID);
+    CHECK(timelineDecoder.GetModel().m_Labels[i++].m_Name == profiling::LabelsAndEventClasses::TYPE_LABEL);
 
-    BOOST_CHECK(timelineDecoder.GetModel().m_Labels[i].m_Guid == profiling::LabelsAndEventClasses::INDEX_GUID);
-    BOOST_CHECK(timelineDecoder.GetModel().m_Labels[i++].m_Name == profiling::LabelsAndEventClasses::INDEX_LABEL);
+    CHECK(timelineDecoder.GetModel().m_Labels[i].m_Guid == profiling::LabelsAndEventClasses::INDEX_GUID);
+    CHECK(timelineDecoder.GetModel().m_Labels[i++].m_Name == profiling::LabelsAndEventClasses::INDEX_LABEL);
 
-    BOOST_CHECK(timelineDecoder.GetModel().m_Labels[i].m_Guid == profiling::LabelsAndEventClasses::BACKENDID_GUID);
-    BOOST_CHECK(timelineDecoder.GetModel().m_Labels[i++].m_Name == profiling::LabelsAndEventClasses::BACKENDID_LABEL);
+    CHECK(timelineDecoder.GetModel().m_Labels[i].m_Guid == profiling::LabelsAndEventClasses::BACKENDID_GUID);
+    CHECK(timelineDecoder.GetModel().m_Labels[i++].m_Name == profiling::LabelsAndEventClasses::BACKENDID_LABEL);
 
-    BOOST_CHECK(timelineDecoder.GetModel().m_Labels[i].m_Guid == profiling::LabelsAndEventClasses::CHILD_GUID);
-    BOOST_CHECK(timelineDecoder.GetModel().m_Labels[i++].m_Name == profiling::LabelsAndEventClasses::CHILD_LABEL);
+    CHECK(timelineDecoder.GetModel().m_Labels[i].m_Guid == profiling::LabelsAndEventClasses::CHILD_GUID);
+    CHECK(timelineDecoder.GetModel().m_Labels[i++].m_Name == profiling::LabelsAndEventClasses::CHILD_LABEL);
 
-    BOOST_CHECK(timelineDecoder.GetModel().m_Labels[i].m_Guid == profiling::LabelsAndEventClasses::EXECUTION_OF_GUID);
-    BOOST_CHECK(timelineDecoder.GetModel().m_Labels[i++].m_Name ==
+    CHECK(timelineDecoder.GetModel().m_Labels[i].m_Guid == profiling::LabelsAndEventClasses::EXECUTION_OF_GUID);
+    CHECK(timelineDecoder.GetModel().m_Labels[i++].m_Name ==
                 profiling::LabelsAndEventClasses::EXECUTION_OF_LABEL);
 
-    BOOST_CHECK(timelineDecoder.GetModel().m_Labels[i].m_Guid == profiling::LabelsAndEventClasses::PROCESS_ID_GUID);
-    BOOST_CHECK(timelineDecoder.GetModel().m_Labels[i++].m_Name ==
+    CHECK(timelineDecoder.GetModel().m_Labels[i].m_Guid == profiling::LabelsAndEventClasses::PROCESS_ID_GUID);
+    CHECK(timelineDecoder.GetModel().m_Labels[i++].m_Name ==
                 profiling::LabelsAndEventClasses::PROCESS_ID_LABEL);
 
-    BOOST_CHECK(timelineDecoder.GetModel().m_Labels[i].m_Guid == profiling::LabelsAndEventClasses::LAYER_GUID);
-    BOOST_CHECK(timelineDecoder.GetModel().m_Labels[i++].m_Name == profiling::LabelsAndEventClasses::LAYER);
+    CHECK(timelineDecoder.GetModel().m_Labels[i].m_Guid == profiling::LabelsAndEventClasses::LAYER_GUID);
+    CHECK(timelineDecoder.GetModel().m_Labels[i++].m_Name == profiling::LabelsAndEventClasses::LAYER);
 
-    BOOST_CHECK(timelineDecoder.GetModel().m_Labels[i].m_Guid == profiling::LabelsAndEventClasses::WORKLOAD_GUID);
-    BOOST_CHECK(timelineDecoder.GetModel().m_Labels[i++].m_Name == profiling::LabelsAndEventClasses::WORKLOAD);
+    CHECK(timelineDecoder.GetModel().m_Labels[i].m_Guid == profiling::LabelsAndEventClasses::WORKLOAD_GUID);
+    CHECK(timelineDecoder.GetModel().m_Labels[i++].m_Name == profiling::LabelsAndEventClasses::WORKLOAD);
 
-    BOOST_CHECK(timelineDecoder.GetModel().m_Labels[i].m_Guid == profiling::LabelsAndEventClasses::NETWORK_GUID);
-    BOOST_CHECK(timelineDecoder.GetModel().m_Labels[i++].m_Name == profiling::LabelsAndEventClasses::NETWORK);
+    CHECK(timelineDecoder.GetModel().m_Labels[i].m_Guid == profiling::LabelsAndEventClasses::NETWORK_GUID);
+    CHECK(timelineDecoder.GetModel().m_Labels[i++].m_Name == profiling::LabelsAndEventClasses::NETWORK);
 
-    BOOST_CHECK(timelineDecoder.GetModel().m_Labels[i].m_Guid == profiling::LabelsAndEventClasses::CONNECTION_GUID);
-    BOOST_CHECK(timelineDecoder.GetModel().m_Labels[i++].m_Name == profiling::LabelsAndEventClasses::CONNECTION);
+    CHECK(timelineDecoder.GetModel().m_Labels[i].m_Guid == profiling::LabelsAndEventClasses::CONNECTION_GUID);
+    CHECK(timelineDecoder.GetModel().m_Labels[i++].m_Name == profiling::LabelsAndEventClasses::CONNECTION);
 
-    BOOST_CHECK(timelineDecoder.GetModel().m_Labels[i].m_Guid == profiling::LabelsAndEventClasses::INFERENCE_GUID);
-    BOOST_CHECK(timelineDecoder.GetModel().m_Labels[i++].m_Name == profiling::LabelsAndEventClasses::INFERENCE);
+    CHECK(timelineDecoder.GetModel().m_Labels[i].m_Guid == profiling::LabelsAndEventClasses::INFERENCE_GUID);
+    CHECK(timelineDecoder.GetModel().m_Labels[i++].m_Name == profiling::LabelsAndEventClasses::INFERENCE);
 
-    BOOST_CHECK(timelineDecoder.GetModel().m_Labels[i].m_Guid ==
+    CHECK(timelineDecoder.GetModel().m_Labels[i].m_Guid ==
                 profiling::LabelsAndEventClasses::WORKLOAD_EXECUTION_GUID);
-    BOOST_CHECK(timelineDecoder.GetModel().m_Labels[i++].m_Name ==
+    CHECK(timelineDecoder.GetModel().m_Labels[i++].m_Name ==
                 profiling::LabelsAndEventClasses::WORKLOAD_EXECUTION);
 
-    BOOST_CHECK(timelineDecoder.GetModel().m_EventClasses[0].m_Guid ==
+    CHECK(timelineDecoder.GetModel().m_EventClasses[0].m_Guid ==
                 profiling::LabelsAndEventClasses::ARMNN_PROFILING_SOL_EVENT_CLASS);
-    BOOST_CHECK(timelineDecoder.GetModel().m_EventClasses[1].m_Guid ==
+    CHECK(timelineDecoder.GetModel().m_EventClasses[1].m_Guid ==
                 profiling::LabelsAndEventClasses::ARMNN_PROFILING_EOL_EVENT_CLASS);
 }
 
-BOOST_AUTO_TEST_CASE(GatorDMockEndToEnd)
+TEST_CASE("GatorDMockEndToEnd")
 {
     // The purpose of this test is to setup both sides of the profiling service and get to the point of receiving
     // performance data.
@@ -244,7 +244,7 @@
     // Setup the mock service to bind to the UDS.
     std::string udsNamespace = "gatord_namespace";
 
-    BOOST_CHECK_NO_THROW(arm::pipe::ConnectionHandler connectionHandler(udsNamespace, false));
+    CHECK_NOTHROW(arm::pipe::ConnectionHandler connectionHandler(udsNamespace, false));
 
     arm::pipe::ConnectionHandler connectionHandler(udsNamespace, false);
 
@@ -257,9 +257,9 @@
     profilingService.ResetExternalProfilingOptions(options, true);
 
     // Bring the profiling service to the "WaitingForAck" state
-    BOOST_CHECK(profilingService.GetCurrentState() == profiling::ProfilingState::Uninitialised);
+    CHECK(profilingService.GetCurrentState() == profiling::ProfilingState::Uninitialised);
     profilingService.Update();
-    BOOST_CHECK(profilingService.GetCurrentState() == profiling::ProfilingState::NotConnected);
+    CHECK(profilingService.GetCurrentState() == profiling::ProfilingState::NotConnected);
     profilingService.Update();
 
     // Connect the profiling service
@@ -280,7 +280,7 @@
     // Read the stream metadata on the mock side.
     if (!mockService.WaitForStreamMetaData())
     {
-        BOOST_FAIL("Failed to receive StreamMetaData");
+        FAIL("Failed to receive StreamMetaData");
     }
     // Send Ack from GatorD
     mockService.SendConnectionAck();
@@ -306,10 +306,10 @@
     const profiling::ICounterDirectory& receivedCounterDirectory = directoryCaptureCommandHandler.GetCounterDirectory();
 
     // Compare the basics of the counter directory from the service and the one we received over the wire.
-    BOOST_CHECK(serviceCounterDirectory.GetDeviceCount() == receivedCounterDirectory.GetDeviceCount());
-    BOOST_CHECK(serviceCounterDirectory.GetCounterSetCount() == receivedCounterDirectory.GetCounterSetCount());
-    BOOST_CHECK(serviceCounterDirectory.GetCategoryCount() == receivedCounterDirectory.GetCategoryCount());
-    BOOST_CHECK(serviceCounterDirectory.GetCounterCount() == receivedCounterDirectory.GetCounterCount());
+    CHECK(serviceCounterDirectory.GetDeviceCount() == receivedCounterDirectory.GetDeviceCount());
+    CHECK(serviceCounterDirectory.GetCounterSetCount() == receivedCounterDirectory.GetCounterSetCount());
+    CHECK(serviceCounterDirectory.GetCategoryCount() == receivedCounterDirectory.GetCategoryCount());
+    CHECK(serviceCounterDirectory.GetCounterCount() == receivedCounterDirectory.GetCounterCount());
 
     receivedCounterDirectory.GetDeviceCount();
     serviceCounterDirectory.GetDeviceCount();
@@ -319,9 +319,9 @@
     {
         // Find the same device in the received counter directory.
         auto foundDevice = receivedCounterDirectory.GetDevices().find(device.second->m_Uid);
-        BOOST_CHECK(foundDevice != receivedCounterDirectory.GetDevices().end());
-        BOOST_CHECK(device.second->m_Name.compare((*foundDevice).second->m_Name) == 0);
-        BOOST_CHECK(device.second->m_Cores == (*foundDevice).second->m_Cores);
+        CHECK(foundDevice != receivedCounterDirectory.GetDevices().end());
+        CHECK(device.second->m_Name.compare((*foundDevice).second->m_Name) == 0);
+        CHECK(device.second->m_Cores == (*foundDevice).second->m_Cores);
     }
 
     const profiling::CounterSets& serviceCounterSets = serviceCounterDirectory.GetCounterSets();
@@ -329,9 +329,9 @@
     {
         // Find the same counter set in the received counter directory.
         auto foundCounterSet = receivedCounterDirectory.GetCounterSets().find(counterSet.second->m_Uid);
-        BOOST_CHECK(foundCounterSet != receivedCounterDirectory.GetCounterSets().end());
-        BOOST_CHECK(counterSet.second->m_Name.compare((*foundCounterSet).second->m_Name) == 0);
-        BOOST_CHECK(counterSet.second->m_Count == (*foundCounterSet).second->m_Count);
+        CHECK(foundCounterSet != receivedCounterDirectory.GetCounterSets().end());
+        CHECK(counterSet.second->m_Name.compare((*foundCounterSet).second->m_Name) == 0);
+        CHECK(counterSet.second->m_Count == (*foundCounterSet).second->m_Count);
     }
 
     const profiling::Categories& serviceCategories = serviceCounterDirectory.GetCategories();
@@ -356,7 +356,7 @@
                         return false;
                     };
                 // Then let vector == do the work.
-                BOOST_CHECK(std::equal(category->m_Counters.begin(), category->m_Counters.end(),
+                CHECK(std::equal(category->m_Counters.begin(), category->m_Counters.end(),
                                        receivedCategory->m_Counters.begin(), comparator));
                 break;
             }
@@ -371,14 +371,14 @@
         // Note we can't check m_MaxCounterUid here as it will likely differ between the two counter directories.
         uint16_t translated = directoryCaptureCommandHandler.TranslateUIDCopyToOriginal(receivedCounter.first);
         const profiling::Counter* serviceCounter = serviceCounterDirectory.GetCounter(translated);
-        BOOST_CHECK(serviceCounter->m_DeviceUid == receivedCounter.second->m_DeviceUid);
-        BOOST_CHECK(serviceCounter->m_Name.compare(receivedCounter.second->m_Name) == 0);
-        BOOST_CHECK(serviceCounter->m_CounterSetUid == receivedCounter.second->m_CounterSetUid);
-        BOOST_CHECK(serviceCounter->m_Multiplier == receivedCounter.second->m_Multiplier);
-        BOOST_CHECK(serviceCounter->m_Interpolation == receivedCounter.second->m_Interpolation);
-        BOOST_CHECK(serviceCounter->m_Class == receivedCounter.second->m_Class);
-        BOOST_CHECK(serviceCounter->m_Units.compare(receivedCounter.second->m_Units) == 0);
-        BOOST_CHECK(serviceCounter->m_Description.compare(receivedCounter.second->m_Description) == 0);
+        CHECK(serviceCounter->m_DeviceUid == receivedCounter.second->m_DeviceUid);
+        CHECK(serviceCounter->m_Name.compare(receivedCounter.second->m_Name) == 0);
+        CHECK(serviceCounter->m_CounterSetUid == receivedCounter.second->m_CounterSetUid);
+        CHECK(serviceCounter->m_Multiplier == receivedCounter.second->m_Multiplier);
+        CHECK(serviceCounter->m_Interpolation == receivedCounter.second->m_Interpolation);
+        CHECK(serviceCounter->m_Class == receivedCounter.second->m_Class);
+        CHECK(serviceCounter->m_Units.compare(receivedCounter.second->m_Units) == 0);
+        CHECK(serviceCounter->m_Description.compare(receivedCounter.second->m_Description) == 0);
     }
 
     mockService.WaitForReceivingThread();
@@ -388,7 +388,7 @@
     // PeriodicCounterCapture data received. These are yet to be integrated.
 }
 
-BOOST_AUTO_TEST_CASE(GatorDMockTimeLineActivation)
+TEST_CASE("GatorDMockTimeLineActivation")
 {
     // This test requires the CpuRef backend to be enabled
     if(!BackendRegistryInstance().IsBackendRegistered("CpuRef"))
@@ -412,7 +412,7 @@
     // Read the stream metadata on the mock side.
     if (!mockService.WaitForStreamMetaData())
     {
-        BOOST_FAIL("Failed to receive StreamMetaData");
+        FAIL("Failed to receive StreamMetaData");
     }
 
     armnn::MockBackendProfilingService mockProfilingService = armnn::MockBackendProfilingService::Instance();
@@ -454,11 +454,11 @@
             "MockGatord did not receive mock backend test entity");
 
     // Packets we expect from SendWellKnownLabelsAndEventClassesTest
-    BOOST_CHECK(timelineDecoder.GetModel().m_Entities.size() == 1);
-    BOOST_CHECK(timelineDecoder.GetModel().m_EventClasses.size()  == 2);
-    BOOST_CHECK(timelineDecoder.GetModel().m_Labels.size()  == 15);
-    BOOST_CHECK(timelineDecoder.GetModel().m_Relationships.size()  == 0);
-    BOOST_CHECK(timelineDecoder.GetModel().m_Events.size()  == 0);
+    CHECK(timelineDecoder.GetModel().m_Entities.size() == 1);
+    CHECK(timelineDecoder.GetModel().m_EventClasses.size()  == 2);
+    CHECK(timelineDecoder.GetModel().m_Labels.size()  == 15);
+    CHECK(timelineDecoder.GetModel().m_Relationships.size()  == 0);
+    CHECK(timelineDecoder.GetModel().m_Events.size()  == 0);
 
     mockService.SendDeactivateTimelinePacket();
 
@@ -485,14 +485,14 @@
             "MockGatord did not receive well known timeline labels");
 
     // Packets we expect from SendWellKnownLabelsAndEventClassesTest * 2 + network above (input, norm, backend, output)
-    BOOST_CHECK(timelineDecoder.GetModel().m_Entities.size() == 6);
-    BOOST_CHECK(timelineDecoder.GetModel().m_EventClasses.size()  == 4);
-    BOOST_CHECK(timelineDecoder.GetModel().m_Labels.size()  == 34);
-    BOOST_CHECK(timelineDecoder.GetModel().m_Relationships.size()  == 15);
-    BOOST_CHECK(timelineDecoder.GetModel().m_Events.size()  == 0);
+    CHECK(timelineDecoder.GetModel().m_Entities.size() == 6);
+    CHECK(timelineDecoder.GetModel().m_EventClasses.size()  == 4);
+    CHECK(timelineDecoder.GetModel().m_Labels.size()  == 34);
+    CHECK(timelineDecoder.GetModel().m_Relationships.size()  == 15);
+    CHECK(timelineDecoder.GetModel().m_Events.size()  == 0);
 
     mockService.WaitForReceivingThread();
     GetProfilingService(&runtime).Disconnect();
 }
 
-BOOST_AUTO_TEST_SUITE_END()
+}
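
Finally, BOOST_FAIL(msg) maps to doctest's FAIL(msg): both mark the current test case as failed and leave it immediately, which is why the timeout guards above keep the same early-exit shape, and FAIL streams its argument, so the std::string concatenation still works. A sketch of that guard pattern with an invented WaitForCondition helper and made-up polling parameters (again assuming doctest's main() comes from another translation unit):

    #include <doctest/doctest.h>

    #include <chrono>
    #include <functional>
    #include <string>
    #include <thread>

    namespace
    {
    // Made-up helper: polls a condition and fails the surrounding test case on timeout.
    void WaitForCondition(const std::function<bool()>& condition, const std::string& errorMsg)
    {
        std::chrono::milliseconds timeout(1000);
        std::chrono::milliseconds sleepTime(50);
        std::chrono::milliseconds timeSlept(0);

        while (!condition())
        {
            if (timeSlept >= timeout)
            {
                // Like BOOST_FAIL, doctest's FAIL fails the test case and exits it immediately.
                FAIL("Timeout: " + errorMsg);
            }
            std::this_thread::sleep_for(sleepTime);
            timeSlept += sleepTime;
        }
    }
    } // anonymous namespace

    TEST_CASE("TimeoutGuardSketch")
    {
        bool flag = true;
        WaitForCondition([&]() { return flag; }, "flag was never set");
        CHECK(flag);
    }
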