IVGCVSW-7854 Remove/rewrite asserts in the backends unit tests.

* Replace calls to ARMNN_ASSERT with doctest CHECK and calls to
  ARMNN_ASSERT_MSG with doctest CHECK_MESSAGE.
* Remove includes of armnn/utility/Assert.hpp that are no longer used.
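
  For illustration (both lines are taken verbatim from the diff below),
  a bare assert such as

      ARMNN_ASSERT(workload != nullptr);

  becomes

      CHECK(workload != nullptr);

  and the message-carrying form

      ARMNN_ASSERT_MSG(inputData != nullptr, "inputData must not be null");

  becomes

      CHECK_MESSAGE(inputData != nullptr, "inputData must not be null");

  Rationale: ARMNN_ASSERT is a thin wrapper over the standard assert
  macro, so it aborts the test binary on the first failure and compiles
  away entirely when NDEBUG is defined, whereas the doctest macros
  record the failure and let the remaining checks and test cases run in
  every build configuration. Compound conditions using && or || gain an
  extra set of parentheses because doctest's expression decomposition
  rejects more than one top-level operator.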

Signed-off-by: Colm Donelan <colm.donelan@arm.com>
Change-Id: I8904d169b2099d57a344e319b2f14cf5d8392ae8
diff --git a/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp b/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
index 9ba9057..84bf34d 100644
--- a/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
+++ b/src/backends/backendsCommon/test/StridedSliceAsyncEndToEndTest.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021,2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021,2023-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -127,7 +127,7 @@
                            float tolerance = 0.000001f,
                            size_t numThreads = 1)
 {
-    ARMNN_ASSERT(numThreads >= 1);
+    CHECK(numThreads >= 1);
     const unsigned int numberOfInferences = numThreads == 1 ? 1 : 1000;
 
     // Create Runtime in which test will run
diff --git a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
index b562a8a..21b3951 100644
--- a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2023-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -1350,10 +1350,10 @@
 
     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateWorkload(armnn::LayerType::Activation,
                                                                                 data, info);
-    ARMNN_ASSERT(workload != nullptr);
+    CHECK(workload != nullptr);
     std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateWorkload(armnn::LayerType::Activation,
                                                                                       refData, refInfo);
-    ARMNN_ASSERT(workloadRef != nullptr);
+    CHECK(workloadRef != nullptr);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
index c5da072..c5366ba 100644
--- a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019, 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -43,13 +43,13 @@
     int outQuantOffset)
 {
     IgnoreUnused(memoryManager);
-    ARMNN_ASSERT(shape0.GetNumDimensions() == NumDims);
+    CHECK(shape0.GetNumDimensions() == NumDims);
     armnn::TensorInfo inputTensorInfo0(shape0, ArmnnInType, quantScale0, quantOffset0);
 
-    ARMNN_ASSERT(shape1.GetNumDimensions() == NumDims);
+    CHECK(shape1.GetNumDimensions() == NumDims);
     armnn::TensorInfo inputTensorInfo1(shape1, ArmnnInType, quantScale1, quantOffset1);
 
-    ARMNN_ASSERT(outShape.GetNumDimensions() == NumDims);
+    CHECK(outShape.GetNumDimensions() == NumDims);
     armnn::TensorInfo outputTensorInfo(outShape, armnn::DataType::Boolean, outQuantScale, outQuantOffset);
 
     std::vector<uint8_t> actualOutput(outputTensorInfo.GetNumElements());
diff --git a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
index a7a2364..d3d9c88 100644
--- a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -61,7 +61,7 @@
         }
         else
         {
-            ARMNN_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
+            CHECK_MESSAGE(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
                 "Input shapes must have the same number of dimensions");
         }
     }
@@ -92,7 +92,7 @@
     unsigned int & concatDim,
     std::pair<PermutationVector, PermutationVector> & permutations)
 {
-    ARMNN_ASSERT_MSG(numDimensions <= 3,
+    CHECK_MESSAGE(numDimensions <= 3,
        "Only dimensions 1,2 and 3 are supported by this helper");
     unsigned int expandedBy = 3 - numDimensions;
     unsigned int expandedConcatAxis = concatDim + expandedBy;
@@ -113,7 +113,7 @@
     }
     else
     {
-        ARMNN_ASSERT(expandedConcatAxis == 0);
+        CHECK(expandedConcatAxis == 0);
         concatDim = 0;
     }
 }
@@ -128,7 +128,7 @@
     std::vector<T>& outputData)
 {
     IgnoreUnused(memoryManager);
-    ARMNN_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
+    CHECK_MESSAGE(inputData != nullptr, "inputData must not be null");
     if (inputData == nullptr)
     {
         // Nullptr is an error in the test. By returning without doing the concatenation
@@ -182,7 +182,7 @@
     TensorInfo & outputTensorInfo)
 {
     IgnoreUnused(memoryManager);
-    ARMNN_ASSERT_MSG(inputTensorInfos.size() > 1,
+    CHECK_MESSAGE(inputTensorInfos.size() > 1,
         "Expecting more than one tensor to be concatenated here");
 
     unsigned int numDims = 0;
@@ -203,12 +203,12 @@
 
             // Store the reverse permutation.
             permuteVector = permutations.second;
-            ARMNN_ASSERT_MSG(!permuteVector.IsEqual(identity),
+            CHECK_MESSAGE(!permuteVector.IsEqual(identity),
                 "Test logic error, we don't need permutation, so we shouldn't arrive here");
         }
         else
         {
-            ARMNN_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
+            CHECK_MESSAGE(numDims == tensorInfo.GetShape().GetNumDimensions(),
                 "All inputs must have the same number of dimensions");
         }
 
@@ -249,7 +249,7 @@
     std::unique_ptr<ITensorHandle> && inputDataHandle,
     T * data)
 {
-    ARMNN_ASSERT_MSG(data != nullptr, "data must not be null");
+    CHECK_MESSAGE(data != nullptr, "data must not be null");
     if (data == nullptr)
     {
         // Nullptr is an error in the test. By returning without doing the permutation
@@ -286,7 +286,7 @@
     unsigned int concatDim,
     bool useSubtensor)
 {
-    ARMNN_ASSERT_MSG(output != nullptr, "output must not be null");
+    CHECK_MESSAGE(output != nullptr, "output must not be null");
     if (output == nullptr)
     {
         // Nullptr is an error in the test. By returning without doing the permutation
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
index 69a04df..6fcb4d0 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2022, 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -162,9 +162,9 @@
 void ApplyBias(std::vector<T>& v, float vScale, int32_t vOffset,
     const std::vector<B>& bias, float bScale, int32_t bOffset, uint32_t w, uint32_t h)
 {
-    ARMNN_ASSERT_MSG((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>()),
+    CHECK_MESSAGE(((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>())),
                      "Invalid type and parameter combination.");
-    ARMNN_ASSERT_MSG((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>()),
+    CHECK_MESSAGE(((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>())),
                      "Invalid type and parameter combination.");
 
     // Note we need to dequantize and re-quantize the image value and the bias.
@@ -176,7 +176,7 @@
             for (uint32_t x = 0; x < w; ++x)
             {
                 uint32_t offset = (i * h + y) * w + x;
-                ARMNN_ASSERT(offset < v.size());
+                CHECK(offset < v.size());
                 T& outRef = v[offset];
                 float dOutput = SelectiveDequantize(outRef, vScale, vOffset);
                 outRef = SelectiveQuantize<T>(dOutput + dBias, vScale, vOffset);
@@ -233,11 +233,11 @@
     bool biasEnabled = bias.size() > 0;
 
     // This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
-    ARMNN_ASSERT(inputNum == 1);
-    ARMNN_ASSERT(outputNum == 1);
+    CHECK(inputNum == 1);
+    CHECK(outputNum == 1);
 
     // If a bias is used, its size must equal the number of output channels.
-    ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
+    CHECK((!biasEnabled || (bias.size() == outputChannels)));
 
     // Note these tensors will use two (identical) batches.
     armnn::TensorInfo inputTensorInfo =
@@ -1719,7 +1719,7 @@
 
     // If a bias is used, its size must equal the number of output channels.
     bool biasEnabled = bias.size() > 0;
-    ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
+    CHECK((!biasEnabled || (bias.size() == outputChannels)));
 
     // Creates the tensors.
     armnn::TensorInfo inputTensorInfo =
@@ -2277,11 +2277,11 @@
     bool biasEnabled = bias.size() > 0;
 
     // This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
-    ARMNN_ASSERT(inputNum == 1);
-    ARMNN_ASSERT(outputNum == 1);
+    CHECK(inputNum == 1);
+    CHECK(outputNum == 1);
 
     // If a bias is used, its size must equal the number of output channels.
-    ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
+    CHECK((!biasEnabled || (bias.size() == outputChannels)));
 
 
     // Note these tensors will use two (identical) batches.
diff --git a/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp
index d62ffed..55e6dd0 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv3dTestImpl.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -57,9 +57,9 @@
                      float vScale, int32_t vOffset,
                      float bScale, int32_t bOffset)
 {
-    ARMNN_ASSERT_MSG((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>()),
+    CHECK_MESSAGE(((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>())),
                      "Invalid type and parameter combination.");
-    ARMNN_ASSERT_MSG((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>()),
+    CHECK_MESSAGE(((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>())),
                      "Invalid type and parameter combination.");
 
     for (uint32_t i = 0; i < bias.size(); ++i)
@@ -196,7 +196,7 @@
     bool biasEnabled = bias.size() > 0;
 
     // If a bias is used, its size must equal the number of output channels.
-    ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
+    CHECK((!biasEnabled || (bias.size() == outputChannels)));
 
     // Creates the tensors.
     armnn::TensorInfo inputTensorInfo({inputNum, inputDepth, inputHeight, inputWidth, inputChannels}, ArmnnType);
diff --git a/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp
index 2bd9372..691780a 100644
--- a/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LogicalTestImpl.cpp
@@ -1,11 +1,10 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020, 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include "LogicalTestImpl.hpp"
 
-#include <armnn/utility/Assert.hpp>
 #include <ResolveType.hpp>
 
 #include <armnn/backends/Workload.hpp>
@@ -29,10 +28,10 @@
     std::vector<uint8_t> expectedOutput,
     const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
-    ARMNN_ASSERT(inputShape.GetNumDimensions() == NumDims);
+    CHECK(inputShape.GetNumDimensions() == NumDims);
     armnn::TensorInfo inputTensorInfo(inputShape, armnn::DataType::Boolean);
 
-    ARMNN_ASSERT(outputShape.GetNumDimensions() == NumDims);
+    CHECK(outputShape.GetNumDimensions() == NumDims);
     armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Boolean);
 
     std::vector<uint8_t> actualOutput(outputTensorInfo.GetNumElements());
@@ -80,13 +79,13 @@
     std::vector<uint8_t> expectedOutput,
     const armnn::ITensorHandleFactory& tensorHandleFactory)
 {
-    ARMNN_ASSERT(inputShape0.GetNumDimensions() == NumDims);
+    CHECK(inputShape0.GetNumDimensions() == NumDims);
     armnn::TensorInfo inputTensorInfo0(inputShape0, armnn::DataType::Boolean);
 
-    ARMNN_ASSERT(inputShape1.GetNumDimensions() == NumDims);
+    CHECK(inputShape1.GetNumDimensions() == NumDims);
     armnn::TensorInfo inputTensorInfo1(inputShape1, armnn::DataType::Boolean);
 
-    ARMNN_ASSERT(outputShape.GetNumDimensions() == NumDims);
+    CHECK(outputShape.GetNumDimensions() == NumDims);
     armnn::TensorInfo outputTensorInfo(outputShape, armnn::DataType::Boolean);
 
     std::vector<uint8_t> actualOutput(outputTensorInfo.GetNumElements());
diff --git a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
index 63375f0..3f4453c 100644
--- a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -104,7 +104,7 @@
     outputHandle->Allocate();
     CopyDataToITensorHandle(inputHandle.get(), input.data());
 
-    ARMNN_ASSERT(workload);
+    CHECK(workload);
 
     ExecuteWorkload(*workload, memoryManager);
 
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index 51ea0dc..09418c2 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -1,13 +1,11 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include "ClContextControlFixture.hpp"
 #include "ClWorkloadFactoryHelper.hpp"
 
-#include <armnn/utility/Assert.hpp>
-#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/utility/PolymorphicDowncast.hpp>
 #include <armnn/backends/MemCopyWorkload.hpp>
 #include <armnnTestUtils/TensorCopyUtils.hpp>
@@ -331,11 +329,10 @@
                                                                                            DataLayout::NCHW,
                                                                                            modelOptions);
 
-    ARMNN_ASSERT(workload != nullptr);
+    CHECK(workload != nullptr);
     auto conv2dWorkload = PolymorphicDowncast<ClConvolution2dWorkload*>(workload.get());
-    IgnoreUnused(conv2dWorkload);
-    ARMNN_ASSERT(conv2dWorkload != nullptr);
-    ARMNN_ASSERT(conv2dWorkload->GetConvolutionMethod() == arm_compute::ConvolutionMethod::WINOGRAD);
+    CHECK(conv2dWorkload != nullptr);
+    CHECK(conv2dWorkload->GetConvolutionMethod() == arm_compute::ConvolutionMethod::WINOGRAD);
 }
 
 TEST_CASE_FIXTURE(ClContextControlFixture, "ClReplaceInputOutputConvolution2dWorkload")
@@ -480,7 +477,7 @@
                                                               workloadInfo,
                                                               clMemoryManager->GetIntraLayerManager(),
                                                               clCompileContext);
-    ARMNN_ASSERT(workload != nullptr);
+    CHECK(workload != nullptr);
     // Check built programs are not empty in context
     CHECK(!clCompileContext.get_built_programs().empty());
 }
diff --git a/src/backends/cl/test/ClDefaultAllocatorTests.cpp b/src/backends/cl/test/ClDefaultAllocatorTests.cpp
index 411a480..24b8a09 100644
--- a/src/backends/cl/test/ClDefaultAllocatorTests.cpp
+++ b/src/backends/cl/test/ClDefaultAllocatorTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -97,7 +97,7 @@
     // Initialize Mock Backend
     MockBackendInitialiser initialiser;
     auto factoryFun = BackendRegistryInstance().GetFactory(MockBackend().GetIdStatic());
-    ARMNN_ASSERT(factoryFun != nullptr);
+    CHECK(factoryFun != nullptr);
     auto backend = factoryFun();
     auto defaultAllocator = backend->GetDefaultAllocator();
 
diff --git a/src/backends/cl/test/ClImportTensorHandleFactoryTests.cpp b/src/backends/cl/test/ClImportTensorHandleFactoryTests.cpp
index fee40fd..46be3a1 100644
--- a/src/backends/cl/test/ClImportTensorHandleFactoryTests.cpp
+++ b/src/backends/cl/test/ClImportTensorHandleFactoryTests.cpp
@@ -1,10 +1,8 @@
 //
-// Copyright © 2021 Arm Ltd. All rights reserved.
+// Copyright © 2021, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
-#include <armnn/utility/Assert.hpp>
-
 #include <cl/ClImportTensorHandleFactory.hpp>
 
 #include <doctest/doctest.h>
@@ -35,21 +33,21 @@
     // Start with the TensorInfo factory method. Create an import tensor handle and verify the data is
     // passed through correctly.
     auto tensorHandle = factory.CreateTensorHandle(tensorInfo);
-    ARMNN_ASSERT(tensorHandle);
-    ARMNN_ASSERT(tensorHandle->GetImportFlags() == static_cast<MemorySourceFlags>(MemorySource::Malloc));
-    ARMNN_ASSERT(tensorHandle->GetShape() == tensorShape);
+    CHECK(tensorHandle);
+    CHECK(tensorHandle->GetImportFlags() == static_cast<MemorySourceFlags>(MemorySource::Malloc));
+    CHECK(tensorHandle->GetShape() == tensorShape);
 
     // Same method but explicitly specifying isManaged = false.
     tensorHandle = factory.CreateTensorHandle(tensorInfo, false);
     CHECK(tensorHandle);
-    ARMNN_ASSERT(tensorHandle->GetImportFlags() == static_cast<MemorySourceFlags>(MemorySource::Malloc));
-    ARMNN_ASSERT(tensorHandle->GetShape() == tensorShape);
+    CHECK(tensorHandle->GetImportFlags() == static_cast<MemorySourceFlags>(MemorySource::Malloc));
+    CHECK(tensorHandle->GetShape() == tensorShape);
 
     // Now try TensorInfo and DataLayout factory method.
     tensorHandle = factory.CreateTensorHandle(tensorInfo, DataLayout::NHWC);
     CHECK(tensorHandle);
-    ARMNN_ASSERT(tensorHandle->GetImportFlags() == static_cast<MemorySourceFlags>(MemorySource::Malloc));
-    ARMNN_ASSERT(tensorHandle->GetShape() == tensorShape);
+    CHECK(tensorHandle->GetImportFlags() == static_cast<MemorySourceFlags>(MemorySource::Malloc));
+    CHECK(tensorHandle->GetShape() == tensorShape);
 }
 
 TEST_CASE("CreateSubtensorOfImportTensor")
@@ -67,8 +65,8 @@
     uint32_t origin[4] = { 1, 1, 0, 0 };
     auto subTensor     = factory.CreateSubTensorHandle(*tensorHandle, subTensorShape, origin);
     CHECK(subTensor);
-    ARMNN_ASSERT(subTensor->GetShape() == subTensorShape);
-    ARMNN_ASSERT(subTensor->GetParent() == tensorHandle.get());
+    CHECK(subTensor->GetShape() == subTensorShape);
+    CHECK(subTensor->GetParent() == tensorHandle.get());
 }
 
 TEST_CASE("CreateSubtensorNonZeroXYIsInvalid")
@@ -87,7 +85,7 @@
     uint32_t origin[4] = { 0, 0, 1, 1 };
     auto subTensor     = factory.CreateSubTensorHandle(*tensorHandle, subTensorShape, origin);
     // We expect a nullptr.
-    ARMNN_ASSERT(subTensor == nullptr);
+    CHECK(subTensor == nullptr);
 }
 
 TEST_CASE("CreateSubtensorXYMustMatchParent")
@@ -105,7 +103,7 @@
     uint32_t origin[4] = { 1, 1, 0, 0 };
     auto subTensor     = factory.CreateSubTensorHandle(*tensorHandle, subTensorShape, origin);
     // We expect a nullptr.
-    ARMNN_ASSERT(subTensor == nullptr);
+    CHECK(subTensor == nullptr);
 }
 
 TEST_CASE("CreateSubtensorMustBeSmallerThanParent")
@@ -122,7 +120,7 @@
     uint32_t origin[4] = { 1, 1, 0, 0 };
     // This should result in a nullptr.
     auto subTensor = factory.CreateSubTensorHandle(*tensorHandle, subTensorShape, origin);
-    ARMNN_ASSERT(subTensor == nullptr);
+    CHECK(subTensor == nullptr);
 }
 
 }
diff --git a/src/backends/cl/test/ClImportTensorHandleTests.cpp b/src/backends/cl/test/ClImportTensorHandleTests.cpp
index 39619e6..259c091 100644
--- a/src/backends/cl/test/ClImportTensorHandleTests.cpp
+++ b/src/backends/cl/test/ClImportTensorHandleTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -308,7 +308,7 @@
     size_t totalBytes = numElements * sizeof(float);
 
     IConnectableLayer* const inputLayer = network->AddInputLayer(0, "input");
-    ARMNN_ASSERT(inputLayer);
+    CHECK(inputLayer);
 
     armnn::ConstTensor weights(kernelInfo, kernel);
 
@@ -324,7 +324,7 @@
     armnn::IConnectableLayer* const convLayer = network->AddConvolution2dLayer(convDesc2d, "conv");
     armnn::IConnectableLayer* weightsLayer = network->AddConstantLayer(weights);
 
-    ARMNN_ASSERT(convLayer);
+    CHECK(convLayer);
 
     weightsLayer->GetOutputSlot(0).SetTensorInfo(weights.GetInfo());
     weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1u));
@@ -460,10 +460,10 @@
     size_t totalBytesOutput = numElements * sizeof(float);
 
     IConnectableLayer* const inputLayer = network.AddInputLayer(0, "input");
-    ARMNN_ASSERT(inputLayer);
+    CHECK(inputLayer);
 
     armnn::IConnectableLayer* const convLayer = network.AddConvertFp16ToFp32Layer("convert");
-    ARMNN_ASSERT(convLayer);
+    CHECK(convLayer);
 
     inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
     inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
@@ -608,10 +608,10 @@
     size_t totalBytesOutput = numElements * sizeof(Half);
 
     IConnectableLayer* const inputLayer = network.AddInputLayer(0, "input");
-    ARMNN_ASSERT(inputLayer);
+    CHECK(inputLayer);
 
     armnn::IConnectableLayer* const convLayer = network.AddConvertFp32ToFp16Layer("convert");
-    ARMNN_ASSERT(convLayer);
+    CHECK(convLayer);
 
     inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
     inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
@@ -747,10 +747,10 @@
     size_t totalBytesOutput = numElements * sizeof(Half);
 
     IConnectableLayer* const inputLayer = network.AddInputLayer(0, "input");
-    ARMNN_ASSERT(inputLayer);
+    CHECK(inputLayer);
 
     armnn::IConnectableLayer* const convLayer = network.AddConvertFp32ToFp16Layer("convert");
-    ARMNN_ASSERT(convLayer);
+    CHECK(convLayer);
 
     inputLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(0));
     inputLayer->GetOutputSlot(0).SetTensorInfo(inputInfo);
@@ -884,7 +884,7 @@
     size_t totalBytes = numElements * sizeof(float);
 
     IConnectableLayer* const inputLayer = network->AddInputLayer(0, "input");
-    ARMNN_ASSERT(inputLayer);
+    CHECK(inputLayer);
 
     armnn::ConstTensor weights(kernelInfo, kernel);
 
@@ -897,7 +897,7 @@
     convDesc2d.m_PadBottom = 1;
     convDesc2d.m_DataLayout = DataLayout::NHWC;
     armnn::IConnectableLayer* const convLayer = network->AddConvolution2dLayer(convDesc2d, "conv");
-    ARMNN_ASSERT(convLayer);
+    CHECK(convLayer);
 
     armnn::IConnectableLayer* weightsLayer = network->AddConstantLayer(weights);
 
@@ -1109,7 +1109,7 @@
     size_t totalBytes = numElements * sizeof(float);
 
     IConnectableLayer* const inputLayer = network->AddInputLayer(0, "input");
-    ARMNN_ASSERT(inputLayer);
+    CHECK(inputLayer);
 
     armnn::ConstTensor weights(kernelInfo, kernel);
 
@@ -1123,7 +1123,7 @@
     convDesc2d.m_DataLayout = DataLayout::NHWC;
 
     armnn::IConnectableLayer* const convLayer = network->AddConvolution2dLayer(convDesc2d, "conv");
-    ARMNN_ASSERT(convLayer);
+    CHECK(convLayer);
 
     armnn::IConnectableLayer* weightsLayer = network->AddConstantLayer(weights);
 
diff --git a/src/backends/reference/test/RefTensorHandleTests.cpp b/src/backends/reference/test/RefTensorHandleTests.cpp
index 883df6f..7925228 100644
--- a/src/backends/reference/test/RefTensorHandleTests.cpp
+++ b/src/backends/reference/test/RefTensorHandleTests.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2022, 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -190,7 +190,7 @@
     RefTensorHandleFactory handleFactory(memoryManager);
 
     // RefTensorHandleFactory does not support InPlaceComputation
-    ARMNN_ASSERT(!(handleFactory.SupportsInPlaceComputation()));
+    CHECK(!(handleFactory.SupportsInPlaceComputation()));
 }
 
 TEST_CASE("TestManagedConstTensorHandle")