IVGCVSW-4485 Remove Boost assert

 * Change BOOST_ASSERT to ARMNN_ASSERT
 * Change the include from <boost/assert.hpp> to <armnn/utility/Assert.hpp>
 * Fix ARMNN_ASSERT_MSG issue with multiple conditions (see the sketch below)
 * Change BOOST_ASSERT to BOOST_TEST where appropriate
 * Remove unused include statements
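
A minimal sketch of what the replacement macros are assumed to look like
(illustrative only; the real definitions live in <armnn/utility/Assert.hpp>
and may differ). The Check<A, B> call is hypothetical and only shows why a
condition containing a top-level comma needs extra parentheses at the call
site:

    // Illustrative sketch, not the actual ArmNN definitions.
    #include <cassert>

    #define ARMNN_ASSERT(COND)          assert(COND)
    #define ARMNN_ASSERT_MSG(COND, MSG) assert((COND) && MSG)

    // A comma inside the condition (e.g. a template argument list) would be
    // split into separate macro arguments by the preprocessor, so the whole
    // condition is parenthesised at the call site:
    //     ARMNN_ASSERT_MSG((Check<A, B>(x)), "Check failed");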

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I5d0fa3a37b7c1c921216de68f0073aa34702c9ff
diff --git a/src/backends/backendsCommon/CpuTensorHandle.cpp b/src/backends/backendsCommon/CpuTensorHandle.cpp
index 65e6c47..7bcf59f 100644
--- a/src/backends/backendsCommon/CpuTensorHandle.cpp
+++ b/src/backends/backendsCommon/CpuTensorHandle.cpp
@@ -118,8 +118,8 @@
 
 void ScopedCpuTensorHandle::CopyFrom(const void* srcMemory, unsigned int numBytes)
 {
-    BOOST_ASSERT(GetTensor<void>() == nullptr);
-    BOOST_ASSERT(GetTensorInfo().GetNumBytes() == numBytes);
+    ARMNN_ASSERT(GetTensor<void>() == nullptr);
+    ARMNN_ASSERT(GetTensorInfo().GetNumBytes() == numBytes);
 
     if (srcMemory)
     {
diff --git a/src/backends/backendsCommon/CpuTensorHandle.hpp b/src/backends/backendsCommon/CpuTensorHandle.hpp
index e6e59fc..78efb08 100644
--- a/src/backends/backendsCommon/CpuTensorHandle.hpp
+++ b/src/backends/backendsCommon/CpuTensorHandle.hpp
@@ -14,7 +14,7 @@
 
 #include <algorithm>
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 namespace armnn
 {
@@ -30,7 +30,7 @@
     template <typename T>
     const T* GetConstTensor() const
     {
-        BOOST_ASSERT(CompatibleTypes<T>(GetTensorInfo().GetDataType()));
+        ARMNN_ASSERT(CompatibleTypes<T>(GetTensorInfo().GetDataType()));
         return reinterpret_cast<const T*>(m_Memory);
     }
 
@@ -59,8 +59,8 @@
 
 private:
     // Only used for testing
-    void CopyOutTo(void *) const override { BOOST_ASSERT_MSG(false, "Unimplemented"); }
-    void CopyInFrom(const void*) override { BOOST_ASSERT_MSG(false, "Unimplemented"); }
+    void CopyOutTo(void *) const override { ARMNN_ASSERT_MSG(false, "Unimplemented"); }
+    void CopyInFrom(const void*) override { ARMNN_ASSERT_MSG(false, "Unimplemented"); }
 
     ConstCpuTensorHandle(const ConstCpuTensorHandle& other) = delete;
     ConstCpuTensorHandle& operator=(const ConstCpuTensorHandle& other) = delete;
@@ -79,7 +79,7 @@
     template <typename T>
     T* GetTensor() const
     {
-        BOOST_ASSERT(CompatibleTypes<T>(GetTensorInfo().GetDataType()));
+        ARMNN_ASSERT(CompatibleTypes<T>(GetTensorInfo().GetDataType()));
         return reinterpret_cast<T*>(m_MutableMemory);
     }
 
diff --git a/src/backends/backendsCommon/LayerSupportRules.hpp b/src/backends/backendsCommon/LayerSupportRules.hpp
index 03bec53..ddecc82 100644
--- a/src/backends/backendsCommon/LayerSupportRules.hpp
+++ b/src/backends/backendsCommon/LayerSupportRules.hpp
@@ -5,7 +5,7 @@
 
 #pragma once
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 #include <algorithm>
 
 namespace armnn
@@ -30,7 +30,7 @@
         case armnn::DataType::QAsymmS8:
             return armnn::DataType::Signed32;
         default:
-            BOOST_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
+            ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
     }
     return armnn::EmptyOptional();
 }
diff --git a/src/backends/backendsCommon/MakeWorkloadHelper.hpp b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
index 8abc8a6..5601822 100644
--- a/src/backends/backendsCommon/MakeWorkloadHelper.hpp
+++ b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
@@ -70,7 +70,7 @@
         case DataType::QSymmS16:
             return nullptr;
         default:
-            BOOST_ASSERT_MSG(false, "Unknown DataType.");
+            ARMNN_ASSERT_MSG(false, "Unknown DataType.");
             return nullptr;
     }
 }
diff --git a/src/backends/backendsCommon/Workload.hpp b/src/backends/backendsCommon/Workload.hpp
index 984443b..244b5f1 100644
--- a/src/backends/backendsCommon/Workload.hpp
+++ b/src/backends/backendsCommon/Workload.hpp
@@ -65,9 +65,9 @@
 
             if (std::find(dataTypes.begin(), dataTypes.end(), expectedInputType) == dataTypes.end())
             {
-                BOOST_ASSERT_MSG(false, "Trying to create workload with incorrect type");
+                ARMNN_ASSERT_MSG(false, "Trying to create workload with incorrect type");
             }
-            BOOST_ASSERT_MSG(std::all_of(std::next(info.m_InputTensorInfos.begin()),
+            ARMNN_ASSERT_MSG(std::all_of(std::next(info.m_InputTensorInfos.begin()),
                                          info.m_InputTensorInfos.end(),
                                          [&](auto it){
                                              return it.GetDataType() == expectedInputType;
@@ -84,14 +84,14 @@
             {
                 if (expectedOutputType != expectedInputType)
                 {
-                    BOOST_ASSERT_MSG(false, "Trying to create workload with incorrect type");
+                    ARMNN_ASSERT_MSG(false, "Trying to create workload with incorrect type");
                 }
             }
             else if (std::find(dataTypes.begin(), dataTypes.end(), expectedOutputType) == dataTypes.end())
             {
-                BOOST_ASSERT_MSG(false, "Trying to create workload with incorrect type");
+                ARMNN_ASSERT_MSG(false, "Trying to create workload with incorrect type");
             }
-            BOOST_ASSERT_MSG(std::all_of(std::next(info.m_OutputTensorInfos.begin()),
+            ARMNN_ASSERT_MSG(std::all_of(std::next(info.m_OutputTensorInfos.begin()),
                                          info.m_OutputTensorInfos.end(),
                                          [&](auto it){
                                              return it.GetDataType() == expectedOutputType;
@@ -109,14 +109,14 @@
     MultiTypedWorkload(const QueueDescriptor& descriptor, const WorkloadInfo& info)
         : BaseWorkload<QueueDescriptor>(descriptor, info)
     {
-        BOOST_ASSERT_MSG(std::all_of(info.m_InputTensorInfos.begin(),
+        ARMNN_ASSERT_MSG(std::all_of(info.m_InputTensorInfos.begin(),
                                      info.m_InputTensorInfos.end(),
                                      [&](auto it){
                                          return it.GetDataType() == InputDataType;
                                      }),
                          "Trying to create workload with incorrect type");
 
-        BOOST_ASSERT_MSG(std::all_of(info.m_OutputTensorInfos.begin(),
+        ARMNN_ASSERT_MSG(std::all_of(info.m_OutputTensorInfos.begin(),
                                      info.m_OutputTensorInfos.end(),
                                      [&](auto it){
                                          return it.GetDataType() == OutputDataType;
@@ -136,11 +136,11 @@
     {
         if (!info.m_InputTensorInfos.empty())
         {
-            BOOST_ASSERT_MSG(info.m_InputTensorInfos.front().GetDataType() == DataType,
+            ARMNN_ASSERT_MSG(info.m_InputTensorInfos.front().GetDataType() == DataType,
                                  "Trying to create workload with incorrect type");
         }
 
-        BOOST_ASSERT_MSG(std::all_of(info.m_OutputTensorInfos.begin(),
+        ARMNN_ASSERT_MSG(std::all_of(info.m_OutputTensorInfos.begin(),
                                      info.m_OutputTensorInfos.end(),
                                      [&](auto it){
                                          return it.GetDataType() == DataType;
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index f968ad7..1f4a849 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -40,7 +40,7 @@
         case DataType::QSymmS16:
             return DataType::Signed32;
         default:
-            BOOST_ASSERT_MSG(false, "Invalid input data type");
+            ARMNN_ASSERT_MSG(false, "Invalid input data type");
             return DataType::Float32;
     }
 }
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 5628c36..a7e8576 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -194,7 +194,7 @@
             const TensorInfo input  = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                        dataType);
             const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
-            BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
+            ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
 
             const Convolution2dDescriptor& descriptor  = cLayer->GetParameters();
 
@@ -244,7 +244,7 @@
             const TensorInfo& input = OverrideDataType(layer.GetInputSlot(0).GetConnection()->GetTensorInfo(),
                                                        dataType);
             const TensorInfo& output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
-            BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
+            ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
 
             const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();
 
@@ -335,7 +335,7 @@
             auto cLayer = boost::polymorphic_downcast<const FullyConnectedLayer*>(&layer);
             const TensorInfo& input = layer.GetInputSlot(0).GetConnection()->GetTensorInfo();
             const TensorInfo& output = layer.GetOutputSlot(0).GetTensorInfo();
-            BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
+            ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
 
             TensorInfo biasInfo;
             const TensorInfo * biasInfoPtr = nullptr;
@@ -347,7 +347,7 @@
             const FullyConnectedDescriptor& descriptor = cLayer->GetParameters();
             if (descriptor.m_BiasEnabled)
             {
-                BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
+                ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
                 biasInfo = OverrideDataType(cLayer->m_Bias->GetTensorInfo(), GetBiasTypeFromWeightsType(dataType));
                 biasInfoPtr = &biasInfo;
             }
@@ -381,7 +381,7 @@
                     }
                     default:
                     {
-                        BOOST_ASSERT_MSG(false, "Unexpected bias type");
+                        ARMNN_ASSERT_MSG(false, "Unexpected bias type");
                     }
                 }
             }
@@ -1156,12 +1156,12 @@
             Optional<TensorInfo> biases;
             if (descriptor.m_BiasEnabled)
             {
-                BOOST_ASSERT(cLayer->m_Bias.get() != nullptr);
+                ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
                 biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
                                           GetBiasTypeFromWeightsType(dataType));
             }
 
-            BOOST_ASSERT(cLayer->m_Weight.get() != nullptr);
+            ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
             const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);
 
             result = layerSupportObject->IsTransposeConvolution2dSupported(input,
@@ -1175,7 +1175,7 @@
         }
         default:
         {
-            BOOST_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
+            ARMNN_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
             reason.value() = "Unrecognised layer type";
             result = false;
             break;
diff --git a/src/backends/backendsCommon/WorkloadUtils.cpp b/src/backends/backendsCommon/WorkloadUtils.cpp
index 3b3959b..bd5e81e 100644
--- a/src/backends/backendsCommon/WorkloadUtils.cpp
+++ b/src/backends/backendsCommon/WorkloadUtils.cpp
@@ -13,8 +13,8 @@
 armnn::ConstTensor PermuteTensor(const ConstCpuTensorHandle* tensor,
                                  const PermutationVector& permutationVector, void* permuteBuffer)
 {
-    BOOST_ASSERT_MSG(tensor, "Invalid input tensor");
-    BOOST_ASSERT_MSG(permuteBuffer, "Invalid permute buffer");
+    ARMNN_ASSERT_MSG(tensor, "Invalid input tensor");
+    ARMNN_ASSERT_MSG(permuteBuffer, "Invalid permute buffer");
 
     TensorInfo tensorInfo = tensor->GetTensorInfo();
 
@@ -133,8 +133,8 @@
                                                      DataLayout dataLayout,
                                                      void* permuteBuffer)
 {
-    BOOST_ASSERT_MSG(weightTensor, "Invalid input tensor");
-    BOOST_ASSERT_MSG(permuteBuffer, "Invalid permute buffer");
+    ARMNN_ASSERT_MSG(weightTensor, "Invalid input tensor");
+    ARMNN_ASSERT_MSG(permuteBuffer, "Invalid permute buffer");
 
     auto multiplier    = weightTensor->GetTensorInfo().GetShape()[0];
     auto inputChannels = weightTensor->GetTensorInfo().GetShape()[1];
diff --git a/src/backends/backendsCommon/WorkloadUtils.hpp b/src/backends/backendsCommon/WorkloadUtils.hpp
index 66056db..a4da924 100644
--- a/src/backends/backendsCommon/WorkloadUtils.hpp
+++ b/src/backends/backendsCommon/WorkloadUtils.hpp
@@ -168,8 +168,8 @@
                 auto dstPtrChannel = dstData;
                 for (unsigned int w = 0; w < copyWidth; ++w)
                 {
-                    BOOST_ASSERT(srcData >= srcDataStart && srcData + copyLength <= srcDataStart + srcSize);
-                    BOOST_ASSERT(dstData >= dstDataStart && dstData + copyLength <= dstDataStart + dstSize);
+                    ARMNN_ASSERT(srcData >= srcDataStart && srcData + copyLength <= srcDataStart + srcSize);
+                    ARMNN_ASSERT(dstData >= dstDataStart && dstData + copyLength <= dstDataStart + dstSize);
                     copy(dstData, srcData, copyLength);
                     dstData += dstWidthStride;
                     srcData += srcWidthStride;
diff --git a/src/backends/backendsCommon/test/MockBackend.cpp b/src/backends/backendsCommon/test/MockBackend.cpp
index 116bf77..abdaa81 100644
--- a/src/backends/backendsCommon/test/MockBackend.cpp
+++ b/src/backends/backendsCommon/test/MockBackend.cpp
@@ -23,7 +23,7 @@
 
 bool IsLayerSupported(const armnn::Layer* layer)
 {
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
 
     armnn::LayerType layerType = layer->GetType();
     switch (layerType)
@@ -47,7 +47,7 @@
 
 bool IsLayerOptimizable(const armnn::Layer* layer)
 {
-    BOOST_ASSERT(layer != nullptr);
+    ARMNN_ASSERT(layer != nullptr);
 
     // A Layer is not optimizable if its name contains "unoptimizable"
     const std::string layerName(layer->GetName());
@@ -191,7 +191,7 @@
                       supportedSubgraphs.end(),
                       [&optimizationViews](const SubgraphView::SubgraphViewPtr& supportedSubgraph)
         {
-            BOOST_ASSERT(supportedSubgraph != nullptr);
+            ARMNN_ASSERT(supportedSubgraph != nullptr);
 
             PreCompiledLayer* preCompiledLayer =
                 optimizationViews.GetGraph().AddLayer<PreCompiledLayer>(
@@ -228,7 +228,7 @@
                       unsupportedSubgraphs.end(),
                       [&optimizationViews](const SubgraphView::SubgraphViewPtr& unsupportedSubgraph)
         {
-            BOOST_ASSERT(unsupportedSubgraph != nullptr);
+            ARMNN_ASSERT(unsupportedSubgraph != nullptr);
 
             optimizationViews.AddFailedSubgraph(SubgraphView(*unsupportedSubgraph));
         });
@@ -256,7 +256,7 @@
                       untouchedSubgraphs.end(),
                       [&optimizationViews](const SubgraphView::SubgraphViewPtr& untouchedSubgraph)
         {
-            BOOST_ASSERT(untouchedSubgraph != nullptr);
+            ARMNN_ASSERT(untouchedSubgraph != nullptr);
 
             optimizationViews.AddUntouchedSubgraph(SubgraphView(*untouchedSubgraph));
         });
diff --git a/src/backends/backendsCommon/test/WorkloadTestUtils.hpp b/src/backends/backendsCommon/test/WorkloadTestUtils.hpp
index df001b7..9f38e47 100644
--- a/src/backends/backendsCommon/test/WorkloadTestUtils.hpp
+++ b/src/backends/backendsCommon/test/WorkloadTestUtils.hpp
@@ -106,7 +106,7 @@
         case armnn::DataType::QSymmS16:
             return armnn::DataType::Signed32;
         default:
-            BOOST_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
+            ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
     }
     return armnn::EmptyOptional();
 }
diff --git a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
index 319434e..a82048c 100644
--- a/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ActivationTestImpl.cpp
@@ -1212,9 +1212,9 @@
     SetWorkloadOutput(refData, refInfo, 0, outputTensorInfo, outputHandleRef.get());
 
     std::unique_ptr<armnn::IWorkload> workload = workloadFactory.CreateActivation(data, info);
-    BOOST_ASSERT(workload != nullptr);
+    ARMNN_ASSERT(workload != nullptr);
     std::unique_ptr<armnn::IWorkload> workloadRef = refWorkloadFactory.CreateActivation(refData, refInfo);
-    BOOST_ASSERT(workloadRef != nullptr);
+    ARMNN_ASSERT(workloadRef != nullptr);
 
     inputHandle->Allocate();
     outputHandle->Allocate();
diff --git a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
index 2156b0e..a6b703b 100644
--- a/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ComparisonTestImpl.cpp
@@ -5,7 +5,7 @@
 
 #include "ComparisonTestImpl.hpp"
 
-
+#include <armnn/utility/Assert.hpp>
 #include <Half.hpp>
 #include <QuantizeHelper.hpp>
 #include <ResolveType.hpp>
@@ -18,8 +18,6 @@
 
 #include <test/TensorHelpers.hpp>
 
-#include <boost/assert.hpp>
-
 namespace
 {
 
@@ -44,13 +42,13 @@
     int outQuantOffset)
 {
     IgnoreUnused(memoryManager);
-    BOOST_ASSERT(shape0.GetNumDimensions() == NumDims);
+    ARMNN_ASSERT(shape0.GetNumDimensions() == NumDims);
     armnn::TensorInfo inputTensorInfo0(shape0, ArmnnInType, quantScale0, quantOffset0);
 
-    BOOST_ASSERT(shape1.GetNumDimensions() == NumDims);
+    ARMNN_ASSERT(shape1.GetNumDimensions() == NumDims);
     armnn::TensorInfo inputTensorInfo1(shape1, ArmnnInType, quantScale1, quantOffset1);
 
-    BOOST_ASSERT(outShape.GetNumDimensions() == NumDims);
+    ARMNN_ASSERT(outShape.GetNumDimensions() == NumDims);
     armnn::TensorInfo outputTensorInfo(outShape, armnn::DataType::Boolean, outQuantScale, outQuantOffset);
 
     auto input0 = MakeTensor<InType, NumDims>(inputTensorInfo0, values0);
diff --git a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
index 1e40b42..9e08e30 100644
--- a/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConcatTestImpl.cpp
@@ -61,7 +61,7 @@
         }
         else
         {
-            BOOST_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
+            ARMNN_ASSERT_MSG(nDimensions == tensorInfo.GetShape().GetNumDimensions(),
                 "Input shapes must have the same number of dimensions");
         }
     }
@@ -92,7 +92,7 @@
     unsigned int & concatDim,
     std::pair<PermutationVector, PermutationVector> & permutations)
 {
-    BOOST_ASSERT_MSG(numDimensions <= 3,
+    ARMNN_ASSERT_MSG(numDimensions <= 3,
        "Only dimensions 1,2 and 3 are supported by this helper");
     unsigned int expandedBy = 3 - numDimensions;
     unsigned int expandedConcatAxis = concatDim + expandedBy;
@@ -113,7 +113,7 @@
     }
     else
     {
-        BOOST_ASSERT(expandedConcatAxis == 0);
+        ARMNN_ASSERT(expandedConcatAxis == 0);
         concatDim = 0;
     }
 }
@@ -127,7 +127,7 @@
     std::vector<T>& outputData)
 {
     IgnoreUnused(memoryManager);
-    BOOST_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
+    ARMNN_ASSERT_MSG(inputData != nullptr, "inputData must not be null");
     if (inputData == nullptr)
     {
         // Nullptr is an error in the test. By returning without doing the concatenation
@@ -179,7 +179,7 @@
     TensorInfo & outputTensorInfo)
 {
     IgnoreUnused(memoryManager);
-    BOOST_ASSERT_MSG(inputTensorInfos.size() > 1,
+    ARMNN_ASSERT_MSG(inputTensorInfos.size() > 1,
         "Expecting more than one tensor to be concatenated here");
 
     unsigned int numDims = 0;
@@ -200,12 +200,12 @@
 
             // Store the reverese permutation.
             permuteVector = permutations.second;
-            BOOST_ASSERT_MSG(!permuteVector.IsEqual(identity),
+            ARMNN_ASSERT_MSG(!permuteVector.IsEqual(identity),
                 "Test logic error, we don't need permutation, so we shouldn't arrive here");
         }
         else
         {
-            BOOST_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
+            ARMNN_ASSERT_MSG(numDims == tensorInfo.GetShape().GetNumDimensions(),
                 "All inputs must have the same number of dimensions");
         }
 
@@ -244,7 +244,7 @@
     std::unique_ptr<ITensorHandle> && inputDataHandle,
     T * data)
 {
-    BOOST_ASSERT_MSG(data != nullptr, "data must not be null");
+    ARMNN_ASSERT_MSG(data != nullptr, "data must not be null");
     if (data == nullptr)
     {
         // Nullptr is an error in the test. By returning without doing the permutation
@@ -279,7 +279,7 @@
     unsigned int concatDim,
     bool useSubtensor)
 {
-    BOOST_ASSERT_MSG(output != nullptr, "output must not be null");
+    ARMNN_ASSERT_MSG(output != nullptr, "output must not be null");
     if (output == nullptr)
     {
         // Nullptr is an error in the test. By returning without doing the permutation
diff --git a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
index 50ad667..c66027e 100644
--- a/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/Conv2dTestImpl.cpp
@@ -169,9 +169,9 @@
 void ApplyBias(std::vector<T>& v, float vScale, int32_t vOffset,
     const std::vector<B>& bias, float bScale, int32_t bOffset, uint32_t w, uint32_t h)
 {
-    BOOST_ASSERT_MSG((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>()),
+    ARMNN_ASSERT_MSG((armnn::IsQuantizedType<T>() && vScale != 0.0f) || (!armnn::IsQuantizedType<T>()),
                      "Invalid type and parameter combination.");
-    BOOST_ASSERT_MSG((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>()),
+    ARMNN_ASSERT_MSG((armnn::IsQuantizedType<B>() && bScale != 0.0f) || (!armnn::IsQuantizedType<B>()),
                      "Invalid type and parameter combination.");
 
     // Note we need to dequantize and re-quantize the image value and the bias.
@@ -183,7 +183,7 @@
             for (uint32_t x = 0; x < w; ++x)
             {
                 uint32_t offset = (i * h + y) * w + x;
-                BOOST_ASSERT(offset < v.size());
+                ARMNN_ASSERT(offset < v.size());
                 T& outRef = v[offset];
                 float dOutput = SelectiveDequantize(outRef, vScale, vOffset);
                 outRef = SelectiveQuantize<T>(dOutput + dBias, vScale, vOffset);
@@ -236,11 +236,11 @@
     bool biasEnabled = bias.size() > 0;
 
     // This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
-    BOOST_ASSERT(inputNum == 1);
-    BOOST_ASSERT(outputNum == 1);
+    ARMNN_ASSERT(inputNum == 1);
+    ARMNN_ASSERT(outputNum == 1);
 
     // If a bias is used, its size must equal the number of output channels.
-    BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels);
+    ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
 
 
     // Note these tensors will use two (identical) batches.
@@ -1627,7 +1627,7 @@
 
     // If a bias is used, its size must equal the number of output channels.
     bool biasEnabled = bias.size() > 0;
-    BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels);
+    ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
 
     // Creates the tensors.
     armnn::TensorInfo inputTensorInfo =
@@ -2135,11 +2135,11 @@
     bool biasEnabled = bias.size() > 0;
 
     // This function currently assumes 1 batch of input/output (and duplicates this into 2 batches).
-    BOOST_ASSERT(inputNum == 1);
-    BOOST_ASSERT(outputNum == 1);
+    ARMNN_ASSERT(inputNum == 1);
+    ARMNN_ASSERT(outputNum == 1);
 
     // If a bias is used, its size must equal the number of output channels.
-    BOOST_ASSERT(!biasEnabled || bias.size() == outputChannels);
+    ARMNN_ASSERT(!biasEnabled || bias.size() == outputChannels);
 
 
     // Note these tensors will use two (identical) batches.
diff --git a/src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp b/src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp
index c277d2d..c64fc88 100644
--- a/src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp
+++ b/src/backends/backendsCommon/test/layerTests/LayerTestResult.hpp
@@ -6,6 +6,7 @@
 #pragma once
 
 #include <armnn/Tensor.hpp>
+#include <armnn/utility/Assert.hpp>
 
 #include <boost/multi_array.hpp>
 
@@ -14,7 +15,7 @@
 template <std::size_t n>
 boost::array<unsigned int, n> GetTensorShapeAsArray(const armnn::TensorInfo& tensorInfo)
 {
-    BOOST_ASSERT_MSG(n == tensorInfo.GetNumDimensions(),
+    ARMNN_ASSERT_MSG(n == tensorInfo.GetNumDimensions(),
         "Attempting to construct a shape array of mismatching size");
 
     boost::array<unsigned int, n> shape;
diff --git a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
index 772ae2c..953b543 100644
--- a/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/SoftmaxTestImpl.cpp
@@ -104,7 +104,7 @@
     outputHandle->Allocate();
     CopyDataToITensorHandle(inputHandle.get(), input.origin());
 
-    BOOST_ASSERT(workload);
+    ARMNN_ASSERT(workload);
 
     ExecuteWorkload(*workload, memoryManager);