IVGCVSW-5964 Removing some remaining boost utility usages from tests.

* Adding a basic PredicateResult class to replace
  boost::test_tools::predicate_result
* Replacing all uses of boost::test_tools::predicate_result with
  the new armnn::PredicateResult class
* Replacing use of boost::test_tools::output_test_stream with
  std::ostringstream in ProfilerTests.cpp

Signed-off-by: Colm Donelan <Colm.Donelan@arm.com>
Change-Id: I75cdbbff98d984e26e4a50c125386b2988516fad
diff --git a/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp b/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp
index b14e148..0a30907 100644
--- a/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp
+++ b/src/backends/aclCommon/test/CreateWorkloadClNeon.hpp
@@ -5,7 +5,7 @@
 #pragma once
 
 #include <test/CreateWorkload.hpp>
-
+#include <test/PredicateResult.hpp>
 #include <armnn/utility/PolymorphicDowncast.hpp>
 #include <backendsCommon/MemCopyWorkload.hpp>
 #include <reference/RefWorkloadFactory.hpp>
@@ -27,8 +27,8 @@
 using namespace std;
 
 template<typename IComputeTensorHandle>
-boost::test_tools::predicate_result CompareTensorHandleShape(IComputeTensorHandle*               tensorHandle,
-                                                             std::initializer_list<unsigned int> expectedDimensions)
+PredicateResult CompareTensorHandleShape(IComputeTensorHandle* tensorHandle,
+                                         std::initializer_list<unsigned int> expectedDimensions)
 {
     arm_compute::ITensorInfo* info = tensorHandle->GetTensor().info();
 
@@ -36,8 +36,8 @@
     auto numExpectedDims = expectedDimensions.size();
     if (infoNumDims != numExpectedDims)
     {
-        boost::test_tools::predicate_result res(false);
-        res.message() << "Different number of dimensions [" << info->num_dimensions()
+        PredicateResult res(false);
+        res.Message() << "Different number of dimensions [" << info->num_dimensions()
                       << "!=" << expectedDimensions.size() << "]";
         return res;
     }
@@ -48,8 +48,8 @@
     {
         if (info->dimension(i) != expectedDimension)
         {
-            boost::test_tools::predicate_result res(false);
-            res.message() << "For dimension " << i <<
+            PredicateResult res(false);
+            res.Message() << "For dimension " << i <<
                              " expected size " << expectedDimension <<
                              " got " << info->dimension(i);
             return res;
@@ -58,7 +58,7 @@
         i--;
     }
 
-    return true;
+    return PredicateResult(true);
 }
 
 template<typename IComputeTensorHandle>
@@ -97,7 +97,8 @@
     auto inputHandle1  = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor1.m_Inputs[0]);
     auto outputHandle1 = PolymorphicDowncast<IComputeTensorHandle*>(queueDescriptor1.m_Outputs[0]);
     BOOST_TEST((inputHandle1->GetTensorInfo() == TensorInfo({2, 3}, DataType::Float32)));
-    BOOST_TEST(CompareTensorHandleShape<IComputeTensorHandle>(outputHandle1, {2, 3}));
+    auto result = CompareTensorHandleShape<IComputeTensorHandle>(outputHandle1, {2, 3});
+    BOOST_TEST(result.m_Result, result.m_Message.str());
 
 
     MemCopyQueueDescriptor queueDescriptor2 = workload2->GetData();
@@ -105,7 +106,8 @@
     BOOST_TEST(queueDescriptor2.m_Outputs.size() == 1);
     auto inputHandle2  = PolymorphicDowncast<IComputeTensorHandle*>(queueDescriptor2.m_Inputs[0]);
     auto outputHandle2 = PolymorphicDowncast<RefTensorHandle*>(queueDescriptor2.m_Outputs[0]);
-    BOOST_TEST(CompareTensorHandleShape<IComputeTensorHandle>(inputHandle2, {2, 3}));
+    result = CompareTensorHandleShape<IComputeTensorHandle>(inputHandle2, {2, 3});
+    BOOST_TEST(result.m_Result, result.m_Message.str());
     BOOST_TEST((outputHandle2->GetTensorInfo() == TensorInfo({2, 3}, DataType::Float32)));
 }
 
diff --git a/src/backends/aclCommon/test/MemCopyTests.cpp b/src/backends/aclCommon/test/MemCopyTests.cpp
index 3e26364..ffba193 100644
--- a/src/backends/aclCommon/test/MemCopyTests.cpp
+++ b/src/backends/aclCommon/test/MemCopyTests.cpp
@@ -48,28 +48,32 @@
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::NeonWorkloadFactory, armnn::ClWorkloadFactory, armnn::DataType::Float32>(false);
-    BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+    auto predResult = CompareTensors(result.output, result.outputExpected);
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
 BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndNeon)
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::ClWorkloadFactory, armnn::NeonWorkloadFactory, armnn::DataType::Float32>(false);
-    BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+    auto predResult = CompareTensors(result.output, result.outputExpected);
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
 BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndGpuWithSubtensors)
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::NeonWorkloadFactory, armnn::ClWorkloadFactory, armnn::DataType::Float32>(true);
-    BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+    auto predResult = CompareTensors(result.output, result.outputExpected);
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
 BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndNeonWithSubtensors)
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::ClWorkloadFactory, armnn::NeonWorkloadFactory, armnn::DataType::Float32>(true);
-    BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+    auto predResult = CompareTensors(result.output, result.outputExpected);
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp b/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
index f680827..c663655 100644
--- a/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
+++ b/src/backends/backendsCommon/test/layerTests/DetectionPostProcessTestImpl.hpp
@@ -225,10 +225,14 @@
     CopyDataFromITensorHandle(detectionScoresResult.output.origin(), outputScoresHandle.get());
     CopyDataFromITensorHandle(numDetectionsResult.output.origin(), numDetectionHandle.get());
 
-    BOOST_TEST(CompareTensors(detectionBoxesResult.output, detectionBoxesResult.outputExpected));
-    BOOST_TEST(CompareTensors(detectionClassesResult.output, detectionClassesResult.outputExpected));
-    BOOST_TEST(CompareTensors(detectionScoresResult.output, detectionScoresResult.outputExpected));
-    BOOST_TEST(CompareTensors(numDetectionsResult.output, numDetectionsResult.outputExpected));
+    auto result = CompareTensors(detectionBoxesResult.output, detectionBoxesResult.outputExpected);
+    BOOST_TEST(result.m_Result, result.m_Message.str());
+    result = CompareTensors(detectionClassesResult.output, detectionClassesResult.outputExpected);
+    BOOST_TEST(result.m_Result, result.m_Message.str());
+    result = CompareTensors(detectionScoresResult.output, detectionScoresResult.outputExpected);
+    BOOST_TEST(result.m_Result, result.m_Message.str());
+    result = CompareTensors(numDetectionsResult.output, numDetectionsResult.outputExpected);
+    BOOST_TEST(result.m_Result, result.m_Message.str());
 }
 
 template<armnn::DataType QuantizedType, typename RawType = armnn::ResolveType<QuantizedType>>
diff --git a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
index 7a9652a..1c63542 100644
--- a/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/LstmTestImpl.cpp
@@ -45,7 +45,8 @@
     VectorBatchVectorAdd(*vecDecoder, vSize, *batchVecDecoder, nBatch, *batchVecEncoder);
 
     // check shape and compare values
-    BOOST_TEST(CompareTensors(batchVec, expectedOutput));
+    auto result = CompareTensors(batchVec, expectedOutput);
+    BOOST_TEST(result.m_Result, result.m_Message.str());
 
     // check if iterator is back at start position
     batchVecEncoder->Set(1.0f);
@@ -70,7 +71,8 @@
     ZeroVector(*outputEncoder, vSize);
 
     // check shape and compare values
-    BOOST_TEST(CompareTensors(input, expectedOutput));
+    auto result = CompareTensors(input, expectedOutput);
+    BOOST_TEST(result.m_Result, result.m_Message.str());
 
     // check if iterator is back at start position
     outputEncoder->Set(1.0f);
@@ -96,7 +98,8 @@
     MeanStddevNormalization(*inputDecoder, *outputEncoder, vSize, nBatch, 1e-8f);
 
     // check shape and compare values
-    BOOST_TEST(CompareTensors(input, expectedOutput));
+    auto result = CompareTensors(input, expectedOutput);
+    BOOST_TEST(result.m_Result, result.m_Message.str());
 
     // check if iterator is back at start position
     outputEncoder->Set(1.0f);
@@ -123,7 +126,8 @@
     VectorBatchVectorCwiseProduct(*vecDecoder, vSize, *batchVecDecoder, nBatch, *batchVecEncoder);
 
     // check shape and compare values
-    BOOST_TEST(CompareTensors(batchVec, expectedOutput));
+    auto result = CompareTensors(batchVec, expectedOutput);
+    BOOST_TEST(result.m_Result, result.m_Message.str());
 
     // check if iterator is back at start position
     batchVecEncoder->Set(1.0f);
diff --git a/src/backends/cl/test/ClCreateWorkloadTests.cpp b/src/backends/cl/test/ClCreateWorkloadTests.cpp
index 47e2f4e..7602cbb 100644
--- a/src/backends/cl/test/ClCreateWorkloadTests.cpp
+++ b/src/backends/cl/test/ClCreateWorkloadTests.cpp
@@ -21,8 +21,8 @@
 #include <cl/workloads/ClWorkloads.hpp>
 #include <cl/workloads/ClWorkloadUtils.hpp>
 
-boost::test_tools::predicate_result CompareIClTensorHandleShape(IClTensorHandle*                    tensorHandle,
-                                                                std::initializer_list<unsigned int> expectedDimensions)
+armnn::PredicateResult CompareIClTensorHandleShape(IClTensorHandle* tensorHandle,
+                                                   std::initializer_list<unsigned int> expectedDimensions)
 {
     return CompareTensorHandleShape<IClTensorHandle>(tensorHandle, expectedDimensions);
 }
@@ -43,8 +43,11 @@
     auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
-    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {1, 1}));
-    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1, 1}));
+    auto predResult = CompareIClTensorHandleShape(inputHandle, {1, 1});
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+
+    predResult = CompareIClTensorHandleShape(outputHandle, {1, 1});
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
 BOOST_AUTO_TEST_CASE(CreateActivationFloatWorkload)
@@ -74,9 +77,12 @@
     auto inputHandle1 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto inputHandle2 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[1]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST(CompareIClTensorHandleShape(inputHandle1, {2, 3}));
-    BOOST_TEST(CompareIClTensorHandleShape(inputHandle2, {2, 3}));
-    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3}));
+    auto predResult = CompareIClTensorHandleShape(inputHandle1, {2, 3});
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    predResult = CompareIClTensorHandleShape(inputHandle2, {2, 3});
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    predResult = CompareIClTensorHandleShape(outputHandle, {2, 3});
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
 BOOST_AUTO_TEST_CASE(CreateAdditionFloatWorkload)
@@ -167,8 +173,11 @@
     auto inputHandle  = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
-    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {2, 3}));
-    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 3}));
+    auto predResult = CompareIClTensorHandleShape(inputHandle, {2, 3});
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+
+    predResult = CompareIClTensorHandleShape(outputHandle, {2, 3});
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
 BOOST_AUTO_TEST_CASE(CreateRsqrtFloat32WorkloadTest)
@@ -192,15 +201,20 @@
     auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
-     switch (dataLayout)
+    armnn::PredicateResult predResult(true);
+    switch (dataLayout)
     {
         case DataLayout::NHWC:
-            BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 }));
-            BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 4, 4, 3 }));
+            predResult = CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 });
+            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            predResult = CompareIClTensorHandleShape(outputHandle, { 2, 4, 4, 3 });
+            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
             break;
         default: // NCHW
-            BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 }));
-            BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 3, 4, 4 }));
+            predResult = CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 });
+            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            predResult = CompareIClTensorHandleShape(outputHandle, { 2, 3, 4, 4 });
+            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
     }
 }
 
@@ -239,9 +253,10 @@
     ConvertFp16ToFp32QueueDescriptor queueDescriptor = workload->GetData();
     auto inputHandle  = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
-
-    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {1, 3, 2, 3}));
-    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1, 3, 2, 3}));
+    auto predResult = CompareIClTensorHandleShape(inputHandle, {1, 3, 2, 3});
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    predResult = CompareIClTensorHandleShape(outputHandle, {1, 3, 2, 3});
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
     BOOST_TEST((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
     BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32));
 }
@@ -258,8 +273,10 @@
     auto inputHandle  = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
-    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {1, 3, 2, 3}));
-    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1, 3, 2, 3}));
+    auto predResult = CompareIClTensorHandleShape(inputHandle, {1, 3, 2, 3});
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    predResult = CompareIClTensorHandleShape(outputHandle, {1, 3, 2, 3});
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
     BOOST_TEST((inputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F32));
     BOOST_TEST((outputHandle->GetTensor().info()->data_type() == arm_compute::DataType::F16));
 }
@@ -470,8 +487,10 @@
     Convolution2dQueueDescriptor queueDescriptor = workload->GetData();
     auto inputHandle  = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {2, 3, 6, 6}));
-    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {2, 2, 6, 6}));
+    auto predResult = CompareIClTensorHandleShape(inputHandle, {2, 3, 6, 6});
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    predResult = CompareIClTensorHandleShape(outputHandle, {2, 2, 6, 6});
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
 BOOST_AUTO_TEST_CASE(CreateDirectConvolution2dFloatWorkload)
@@ -503,8 +522,10 @@
     FullyConnectedQueueDescriptor queueDescriptor = workload->GetData();
     auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {3, 1, 4, 5}));
-    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {3, 7}));
+    auto predResult = CompareIClTensorHandleShape(inputHandle, {3, 1, 4, 5});
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    predResult = CompareIClTensorHandleShape(outputHandle, {3, 7});
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
 
@@ -660,8 +681,10 @@
     auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
-    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1}));
-    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {1, 4}));
+    auto predResult = CompareIClTensorHandleShape(inputHandle, {4, 1});
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    predResult = CompareIClTensorHandleShape(outputHandle, {1, 4});
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
 BOOST_AUTO_TEST_CASE(CreateReshapeFloatWorkload)
@@ -705,8 +728,10 @@
         tensorInfo.SetQuantizationScale(1.f / 256);
     }
 
-    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1}));
-    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {4, 1}));
+    auto predResult = CompareIClTensorHandleShape(inputHandle, {4, 1});
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    predResult = CompareIClTensorHandleShape(outputHandle, {4, 1});
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
 
@@ -742,16 +767,20 @@
     // Checks that outputs are as we expect them (see definition of CreateSplitterWorkloadTest).
     SplitterQueueDescriptor queueDescriptor = workload->GetData();
     auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
-    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {5, 7, 7}));
+    auto predResult = CompareIClTensorHandleShape(inputHandle, {5, 7, 7});
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 
     auto outputHandle1 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[1]);
-    BOOST_TEST(CompareIClTensorHandleShape(outputHandle1, {2, 7, 7}));
+    predResult = CompareIClTensorHandleShape(outputHandle1, {2, 7, 7});
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 
     auto outputHandle2 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[2]);
-    BOOST_TEST(CompareIClTensorHandleShape(outputHandle2, {2, 7, 7}));
+    predResult = CompareIClTensorHandleShape(outputHandle2, {2, 7, 7});
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 
     auto outputHandle0 = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST(CompareIClTensorHandleShape(outputHandle0, {1, 7, 7}));
+    predResult = CompareIClTensorHandleShape(outputHandle0, {1, 7, 7});
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
 BOOST_AUTO_TEST_CASE(CreateSplitterFloatWorkload)
@@ -931,8 +960,10 @@
     auto inputHandle  = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
-    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {4, 1}));
-    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, {4, 1}));
+    auto predResult = CompareIClTensorHandleShape(inputHandle, {4, 1});
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    predResult = CompareIClTensorHandleShape(outputHandle, {4, 1});
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
 BOOST_AUTO_TEST_CASE(CreateLogSoftmaxFloat32WorkloadTest)
@@ -952,8 +983,10 @@
     LstmQueueDescriptor queueDescriptor = workload->GetData();
     auto inputHandle  = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[1]);
-    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 2 }));
-    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 4 }));
+    auto predResult = CompareIClTensorHandleShape(inputHandle, {2, 2});
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    predResult = CompareIClTensorHandleShape(outputHandle, {2, 4});
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
 BOOST_AUTO_TEST_CASE(CreateLSTMWorkloadFloatWorkload)
@@ -975,16 +1008,20 @@
     auto inputHandle  = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
+    armnn::PredicateResult predResult(true);
     switch (dataLayout)
     {
         case DataLayout::NHWC:
-            BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 }));
-            BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 2, 2, 3 }));
+            predResult = CompareIClTensorHandleShape(inputHandle, { 2, 4, 4, 3 });
+            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            predResult = CompareIClTensorHandleShape(outputHandle, { 2, 2, 2, 3 });
+            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
             break;
-        case DataLayout::NCHW:
-        default:
-            BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 }));
-            BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 2, 3, 2, 2 }));
+        default: // DataLayout::NCHW
+            predResult = CompareIClTensorHandleShape(inputHandle, { 2, 3, 4, 4 });
+            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            predResult = CompareIClTensorHandleShape(outputHandle, { 2, 3, 2, 2 });
+            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
     }
 }
 
@@ -1033,8 +1070,10 @@
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
     // The first dimension (batch size) in both input and output is singular thus it has been reduced by ACL.
-    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, {  1, 3, 7, 4 }));
-    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 1, 4 }));
+    auto predResult = CompareIClTensorHandleShape(inputHandle, {  1, 3, 7, 4 });
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    predResult = CompareIClTensorHandleShape(outputHandle, { 1, 4 });
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
 BOOST_AUTO_TEST_CASE(CreateMeanFloat32Workload)
@@ -1067,9 +1106,12 @@
     auto inputHandle1  = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[1]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
-    BOOST_TEST(CompareIClTensorHandleShape(inputHandle0, { 2, 3, 2, 5 }));
-    BOOST_TEST(CompareIClTensorHandleShape(inputHandle1, { 2, 3, 2, 5 }));
-    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
+    auto predResult = CompareIClTensorHandleShape(inputHandle0, { 2, 3, 2, 5 });
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    predResult = CompareIClTensorHandleShape(inputHandle1, { 2, 3, 2, 5 });
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    predResult = CompareIClTensorHandleShape(outputHandle, outputShape);
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
 BOOST_AUTO_TEST_CASE(CreateConcatDim0Float32Workload)
@@ -1115,8 +1157,10 @@
     auto inputHandle  = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
-    BOOST_TEST(CompareIClTensorHandleShape(inputHandle, { 1, 2, 2, 1 }));
-    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, { 1, 1, 1, 4 }));
+    auto predResult = CompareIClTensorHandleShape(inputHandle, { 1, 2, 2, 1 });
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+    predResult = CompareIClTensorHandleShape(outputHandle, { 1, 1, 1, 4 });
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
 BOOST_AUTO_TEST_CASE(CreateSpaceToDepthFloat32Workload)
@@ -1161,10 +1205,12 @@
     for (unsigned int i = 0; i < numInputs; ++i)
     {
         auto inputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Inputs[i]);
-        BOOST_TEST(CompareIClTensorHandleShape(inputHandle, inputShape));
+        auto predResult1 = CompareIClTensorHandleShape(inputHandle, inputShape);
+        BOOST_TEST(predResult1.m_Result, predResult1.m_Message.str());
     }
     auto outputHandle = PolymorphicDowncast<IClTensorHandle*>(queueDescriptor.m_Outputs[0]);
-    BOOST_TEST(CompareIClTensorHandleShape(outputHandle, outputShape));
+    auto predResult2 = CompareIClTensorHandleShape(outputHandle, outputShape);
+    BOOST_TEST(predResult2.m_Result, predResult2.m_Message.str());
 }
 
 BOOST_AUTO_TEST_CASE(CreateStackFloat32Workload)
diff --git a/src/backends/cl/test/ClMemCopyTests.cpp b/src/backends/cl/test/ClMemCopyTests.cpp
index 3cd9af7..c26f7bd 100644
--- a/src/backends/cl/test/ClMemCopyTests.cpp
+++ b/src/backends/cl/test/ClMemCopyTests.cpp
@@ -19,28 +19,32 @@
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::RefWorkloadFactory, armnn::ClWorkloadFactory, armnn::DataType::Float32>(false);
-    BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+    auto predResult = CompareTensors(result.output, result.outputExpected);
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
 BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndCpu)
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::ClWorkloadFactory, armnn::RefWorkloadFactory, armnn::DataType::Float32>(false);
-    BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+    auto predResult = CompareTensors(result.output, result.outputExpected);
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
 BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndGpuWithSubtensors)
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::RefWorkloadFactory, armnn::ClWorkloadFactory, armnn::DataType::Float32>(true);
-    BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+    auto predResult = CompareTensors(result.output, result.outputExpected);
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
 BOOST_AUTO_TEST_CASE(CopyBetweenGpuAndCpuWithSubtensors)
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::ClWorkloadFactory, armnn::RefWorkloadFactory, armnn::DataType::Float32>(true);
-    BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+    auto predResult = CompareTensors(result.output, result.outputExpected);
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
 BOOST_AUTO_TEST_SUITE_END()
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index c994bfe..a8c0c8a 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -23,8 +23,8 @@
 namespace
 {
 
-boost::test_tools::predicate_result CompareIAclTensorHandleShape(IAclTensorHandle*                    tensorHandle,
-                                                                std::initializer_list<unsigned int> expectedDimensions)
+armnn::PredicateResult CompareIAclTensorHandleShape(IAclTensorHandle* tensorHandle,
+                                                    std::initializer_list<unsigned int> expectedDimensions)
 {
     return CompareTensorHandleShape<IAclTensorHandle>(tensorHandle, expectedDimensions);
 }
@@ -564,16 +564,20 @@
     auto inputHandle  = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Inputs[0]);
     auto outputHandle = PolymorphicDowncast<IAclTensorHandle*>(queueDescriptor.m_Outputs[0]);
 
+    armnn::PredicateResult predResult(true);
     switch (dataLayout)
     {
         case DataLayout::NHWC:
-            BOOST_TEST(CompareIAclTensorHandleShape(inputHandle, { 2, 4, 4, 3 }));
-            BOOST_TEST(CompareIAclTensorHandleShape(outputHandle, { 2, 2, 2, 3 }));
+            predResult = CompareIAclTensorHandleShape(inputHandle, { 2, 4, 4, 3 });
+            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            predResult = CompareIAclTensorHandleShape(outputHandle, { 2, 2, 2, 3 });
+            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
             break;
-        case DataLayout::NCHW:
-        default:
-            BOOST_TEST(CompareIAclTensorHandleShape(inputHandle, { 2, 3, 4, 4 }));
-            BOOST_TEST(CompareIAclTensorHandleShape(outputHandle, { 2, 3, 2, 2 }));
+        default: // DataLayout::NCHW
+            predResult = CompareIAclTensorHandleShape(inputHandle, { 2, 3, 4, 4 });
+            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
+            predResult = CompareIAclTensorHandleShape(outputHandle, { 2, 3, 2, 2 });
+            BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
     }
 }
 
diff --git a/src/backends/neon/test/NeonMemCopyTests.cpp b/src/backends/neon/test/NeonMemCopyTests.cpp
index dbe1f8d..6a3d05d 100644
--- a/src/backends/neon/test/NeonMemCopyTests.cpp
+++ b/src/backends/neon/test/NeonMemCopyTests.cpp
@@ -20,28 +20,32 @@
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::RefWorkloadFactory, armnn::NeonWorkloadFactory, armnn::DataType::Float32>(false);
-    BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+    auto predResult = CompareTensors(result.output, result.outputExpected);
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
 BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndCpu)
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::NeonWorkloadFactory, armnn::RefWorkloadFactory, armnn::DataType::Float32>(false);
-    BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+    auto predResult = CompareTensors(result.output, result.outputExpected);
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
 BOOST_AUTO_TEST_CASE(CopyBetweenCpuAndNeonWithSubtensors)
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::RefWorkloadFactory, armnn::NeonWorkloadFactory, armnn::DataType::Float32>(true);
-    BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+    auto predResult = CompareTensors(result.output, result.outputExpected);
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
 BOOST_AUTO_TEST_CASE(CopyBetweenNeonAndCpuWithSubtensors)
 {
     LayerTestResult<float, 4> result =
         MemCopyTest<armnn::NeonWorkloadFactory, armnn::RefWorkloadFactory, armnn::DataType::Float32>(true);
-    BOOST_TEST(CompareTensors(result.output, result.outputExpected));
+    auto predResult = CompareTensors(result.output, result.outputExpected);
+    BOOST_TEST(predResult.m_Result, predResult.m_Message.str());
 }
 
 BOOST_AUTO_TEST_SUITE_END()