IVGCVSW-4485 Remove Boost assert

 * Replace BOOST_ASSERT and BOOST_ASSERT_MSG with ARMNN_ASSERT and ARMNN_ASSERT_MSG
 * Replace boost/assert.hpp includes with armnn/utility/Assert.hpp
 * Fix ARMNN_ASSERT_MSG issue with multiple conditions (see the note below)
 * Change BOOST_ASSERT to BOOST_TEST where appropriate
 * Remove unused include statements
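
Note on the ARMNN_ASSERT_MSG fix: the "multiple conditions" bullet most likely refers to the
usual pitfall of single-argument assert macros, where a compound condition such as a || b is
not parenthesised inside the expansion and ends up binding against the message string. The
actual macro lives in armnn/utility/Assert.hpp and is not part of this diff, so the sketch
below uses stand-in macro names purely to illustrate the failure mode:

    #include <cassert>

    // Stand-in macros for illustration only; the real ARMNN_ASSERT_MSG is
    // defined in armnn/utility/Assert.hpp and may differ in detail.
    #define DEMO_ASSERT_MSG_UNSAFE(COND, MSG) assert(COND && MSG)   // condition unparenthesised
    #define DEMO_ASSERT_MSG_SAFE(COND, MSG)   assert((COND) && MSG) // condition parenthesised

    int main()
    {
        const unsigned int numDims = 2;
        const bool hasAxis = true;

        // The unsafe form expands to: assert(numDims > 0 || (hasAxis && "..."))
        // because && binds tighter than ||, so it would pass whenever hasAxis
        // is true, even with numDims == 0.
        // The safe form expands to:   assert((numDims > 0 || hasAxis) && "...")
        // and tests the whole condition as written.
        DEMO_ASSERT_MSG_SAFE(numDims > 0 || hasAxis, "Expected at least one dimension");
        return 0;
    }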

Signed-off-by: Narumol Prangnawarat <narumol.prangnawarat@arm.com>
Change-Id: I5d0fa3a37b7c1c921216de68f0073aa34702c9ff
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index f43e8b6..be20644 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -5,14 +5,13 @@
 
 #pragma once
 
-#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/TypesUtils.hpp>
+#include <armnn/utility/Assert.hpp>
+#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnnUtils/FloatingPointConverter.hpp>
 
 #include <ResolveType.hpp>
 
-#include <boost/assert.hpp>
-
 namespace armnn
 {
 
@@ -78,28 +77,28 @@
 
     TypedIterator& operator++() override
     {
-        BOOST_ASSERT(m_Iterator);
+        ARMNN_ASSERT(m_Iterator);
         ++m_Iterator;
         return *this;
     }
 
     TypedIterator& operator+=(const unsigned int increment) override
     {
-        BOOST_ASSERT(m_Iterator);
+        ARMNN_ASSERT(m_Iterator);
         m_Iterator += increment;
         return *this;
     }
 
     TypedIterator& operator-=(const unsigned int increment) override
     {
-        BOOST_ASSERT(m_Iterator);
+        ARMNN_ASSERT(m_Iterator);
         m_Iterator -= increment;
         return *this;
     }
 
     TypedIterator& operator[](const unsigned int index) override
     {
-        BOOST_ASSERT(m_Iterator);
+        ARMNN_ASSERT(m_Iterator);
         m_Iterator = m_Start + index;
         return *this;
     }
@@ -107,7 +106,7 @@
     TypedIterator& SetIndex(unsigned int index, unsigned int axisIndex = 0) override
     {
         IgnoreUnused(axisIndex);
-        BOOST_ASSERT(m_Iterator);
+        ARMNN_ASSERT(m_Iterator);
         m_Iterator = m_Start + index;
         return *this;
     }
@@ -504,7 +503,7 @@
     // This should be called to set index for per-axis Encoder/Decoder
     PerAxisIterator& SetIndex(unsigned int index, unsigned int axisIndex) override
     {
-         BOOST_ASSERT(m_Iterator);
+         ARMNN_ASSERT(m_Iterator);
          m_Iterator = m_Start + index;
          m_AxisIndex = axisIndex;
          return *this;
@@ -519,7 +518,7 @@
 
     PerAxisIterator& operator++() override
     {
-        BOOST_ASSERT(m_Iterator);
+        ARMNN_ASSERT(m_Iterator);
         ++m_Iterator;
         m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
         return *this;
@@ -527,7 +526,7 @@
 
     PerAxisIterator& operator+=(const unsigned int increment) override
     {
-        BOOST_ASSERT(m_Iterator);
+        ARMNN_ASSERT(m_Iterator);
         m_Iterator += increment;
         m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
         return *this;
@@ -535,7 +534,7 @@
 
     PerAxisIterator& operator-=(const unsigned int decrement) override
     {
-        BOOST_ASSERT(m_Iterator);
+        ARMNN_ASSERT(m_Iterator);
         m_Iterator -= decrement;
         m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
         return *this;
@@ -543,7 +542,7 @@
 
     PerAxisIterator& operator[](const unsigned int index) override
     {
-        BOOST_ASSERT(m_Iterator);
+        ARMNN_ASSERT(m_Iterator);
         m_Iterator = m_Start + index;
         m_AxisIndex = static_cast<unsigned int>(*m_Iterator) % m_AxisFactor;
         return *this;
diff --git a/src/backends/reference/workloads/BatchToSpaceNd.cpp b/src/backends/reference/workloads/BatchToSpaceNd.cpp
index 7efdb9b..bf7de1b 100644
--- a/src/backends/reference/workloads/BatchToSpaceNd.cpp
+++ b/src/backends/reference/workloads/BatchToSpaceNd.cpp
@@ -9,7 +9,7 @@
 
 #include <armnn/Types.hpp>
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 using namespace armnnUtils;
 
@@ -42,11 +42,11 @@
 {
     TensorShape inputShape = inputTensorInfo.GetShape();
 
-    BOOST_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Expected Input with 4 Dimensions");
+    ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Expected Input with 4 Dimensions");
 
     TensorShape outputShape = outputTensorInfo.GetShape();
 
-    BOOST_ASSERT_MSG(outputShape.GetNumDimensions() == 4, "Expected Output with 4 Dimensions");
+    ARMNN_ASSERT_MSG(outputShape.GetNumDimensions() == 4, "Expected Output with 4 Dimensions");
 
     const unsigned int inputBatchSize = inputShape[0];
     const unsigned int channels = inputShape[dataLayout.GetChannelsIndex()];
@@ -55,12 +55,12 @@
     const unsigned int outputHeight = outputShape[dataLayout.GetHeightIndex()];
     const unsigned int outputWidth = outputShape[dataLayout.GetWidthIndex()];
 
-    BOOST_ASSERT_MSG(blockShape.size() > 0, "BlockShape must contain 1 or more entries");
+    ARMNN_ASSERT_MSG(blockShape.size() > 0, "BlockShape must contain 1 or more entries");
 
     const unsigned int blockShapeHeight = blockShape[0];
     const unsigned int blockShapeWidth = blockShape[1];
 
-    BOOST_ASSERT_MSG(cropsData.size() > 0, "Crops must contain 1 or more entries");
+    ARMNN_ASSERT_MSG(cropsData.size() > 0, "Crops must contain 1 or more entries");
 
     const unsigned int cropsTop = cropsData[0].first;
     const unsigned int cropsLeft = cropsData[1].first;
diff --git a/src/backends/reference/workloads/Concatenate.cpp b/src/backends/reference/workloads/Concatenate.cpp
index bb55424..a85e34e 100644
--- a/src/backends/reference/workloads/Concatenate.cpp
+++ b/src/backends/reference/workloads/Concatenate.cpp
@@ -38,7 +38,7 @@
 
             //Split view extents are defined by the size of (the corresponding) input tensor.
             const TensorInfo& inputInfo = GetTensorInfo(data.m_Inputs[viewIdx]);
-            BOOST_ASSERT(inputInfo.GetNumDimensions() == outputInfo0.GetNumDimensions());
+            ARMNN_ASSERT(inputInfo.GetNumDimensions() == outputInfo0.GetNumDimensions());
 
             // Check all dimensions to see if this element is inside the given input view.
             bool insideView = true;
diff --git a/src/backends/reference/workloads/ConvImpl.cpp b/src/backends/reference/workloads/ConvImpl.cpp
index 0c13e3b..9d2f410 100644
--- a/src/backends/reference/workloads/ConvImpl.cpp
+++ b/src/backends/reference/workloads/ConvImpl.cpp
@@ -5,7 +5,7 @@
 
 #include "ConvImpl.hpp"
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 #include <cmath>
 #include <limits>
@@ -15,7 +15,7 @@
 
 QuantizedMultiplierSmallerThanOne::QuantizedMultiplierSmallerThanOne(float multiplier)
 {
-    BOOST_ASSERT(multiplier >= 0.0f && multiplier < 1.0f);
+    ARMNN_ASSERT(multiplier >= 0.0f && multiplier < 1.0f);
     if (multiplier == 0.0f)
     {
         m_Multiplier = 0;
@@ -26,14 +26,14 @@
         const double q = std::frexp(multiplier, &m_RightShift);
         m_RightShift = -m_RightShift;
         int64_t qFixed = static_cast<int64_t>(std::round(q * (1ll << 31)));
-        BOOST_ASSERT(qFixed <= (1ll << 31));
+        ARMNN_ASSERT(qFixed <= (1ll << 31));
         if (qFixed == (1ll << 31))
         {
             qFixed /= 2;
             --m_RightShift;
         }
-        BOOST_ASSERT(m_RightShift >= 0);
-        BOOST_ASSERT(qFixed <= std::numeric_limits<int32_t>::max());
+        ARMNN_ASSERT(m_RightShift >= 0);
+        ARMNN_ASSERT(qFixed <= std::numeric_limits<int32_t>::max());
         m_Multiplier = static_cast<int32_t>(qFixed);
     }
 }
@@ -61,7 +61,7 @@
 
 int32_t QuantizedMultiplierSmallerThanOne::RoundingDivideByPOT(int32_t x, int exponent)
 {
-    BOOST_ASSERT(exponent >= 0 && exponent <= 31);
+    ARMNN_ASSERT(exponent >= 0 && exponent <= 31);
     int32_t mask = (1 << exponent) - 1;
     int32_t remainder = x & mask;
     int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
diff --git a/src/backends/reference/workloads/ConvImpl.hpp b/src/backends/reference/workloads/ConvImpl.hpp
index 562fd3e..f5aa8f3 100644
--- a/src/backends/reference/workloads/ConvImpl.hpp
+++ b/src/backends/reference/workloads/ConvImpl.hpp
@@ -15,7 +15,6 @@
 
 #include <armnnUtils/DataLayoutIndexed.hpp>
 
-#include <boost/assert.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 
 #include <cmath>
diff --git a/src/backends/reference/workloads/Decoders.hpp b/src/backends/reference/workloads/Decoders.hpp
index 3434ccb..deb3b1f 100644
--- a/src/backends/reference/workloads/Decoders.hpp
+++ b/src/backends/reference/workloads/Decoders.hpp
@@ -10,7 +10,7 @@
 #include <armnnUtils/FloatingPointConverter.hpp>
 #include <armnnUtils/TensorUtils.hpp>
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 namespace armnn
 {
@@ -142,7 +142,7 @@
         }
         default:
         {
-            BOOST_ASSERT_MSG(false, "Unsupported Data Type!");
+            ARMNN_ASSERT_MSG(false, "Unsupported Data Type!");
             break;
         }
     }
diff --git a/src/backends/reference/workloads/DepthToSpace.cpp b/src/backends/reference/workloads/DepthToSpace.cpp
index 91ca160..f5e9ec5 100644
--- a/src/backends/reference/workloads/DepthToSpace.cpp
+++ b/src/backends/reference/workloads/DepthToSpace.cpp
@@ -8,7 +8,7 @@
 #include <armnnUtils/DataLayoutIndexed.hpp>
 #include <armnnUtils/Permute.hpp>
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 using namespace armnnUtils;
 
@@ -22,7 +22,7 @@
                   unsigned int dataTypeSize)
 {
     const unsigned int blockSize = descriptor.m_BlockSize;
-    BOOST_ASSERT(blockSize != 0u);
+    ARMNN_ASSERT(blockSize != 0u);
 
     const TensorShape& inputShape = inputInfo.GetShape();
     const unsigned int batches = inputShape[0];
diff --git a/src/backends/reference/workloads/Dequantize.cpp b/src/backends/reference/workloads/Dequantize.cpp
index 63c0405..fdc8e30 100644
--- a/src/backends/reference/workloads/Dequantize.cpp
+++ b/src/backends/reference/workloads/Dequantize.cpp
@@ -16,7 +16,7 @@
                 const TensorInfo& outputInfo)
 {
     IgnoreUnused(outputInfo);
-    BOOST_ASSERT(inputInfo.GetNumElements() == outputInfo.GetNumElements());
+    ARMNN_ASSERT(inputInfo.GetNumElements() == outputInfo.GetNumElements());
     for (unsigned int i = 0; i < inputInfo.GetNumElements(); i++)
     {
         // inputDecoder.Get() dequantizes the data element from whatever
diff --git a/src/backends/reference/workloads/DetectionPostProcess.cpp b/src/backends/reference/workloads/DetectionPostProcess.cpp
index 57cf01e..61a504e 100644
--- a/src/backends/reference/workloads/DetectionPostProcess.cpp
+++ b/src/backends/reference/workloads/DetectionPostProcess.cpp
@@ -5,8 +5,8 @@
 
 #include "DetectionPostProcess.hpp"
 
+#include <armnn/utility/Assert.hpp>
 
-#include <boost/assert.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 
 #include <algorithm>
@@ -213,8 +213,8 @@
         // xmax
         boxCorners[indexW] = xCentre + halfW;
 
-        BOOST_ASSERT(boxCorners[indexY] < boxCorners[indexH]);
-        BOOST_ASSERT(boxCorners[indexX] < boxCorners[indexW]);
+        ARMNN_ASSERT(boxCorners[indexY] < boxCorners[indexH]);
+        ARMNN_ASSERT(boxCorners[indexX] < boxCorners[indexW]);
     }
 
     unsigned int numClassesWithBg = desc.m_NumClasses + 1;
diff --git a/src/backends/reference/workloads/Encoders.hpp b/src/backends/reference/workloads/Encoders.hpp
index e93987d..c0524a7 100644
--- a/src/backends/reference/workloads/Encoders.hpp
+++ b/src/backends/reference/workloads/Encoders.hpp
@@ -9,7 +9,7 @@
 
 #include <armnnUtils/TensorUtils.hpp>
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 namespace armnn
 {
@@ -89,7 +89,7 @@
         }
         default:
         {
-            BOOST_ASSERT_MSG(false, "Unsupported target Data Type!");
+            ARMNN_ASSERT_MSG(false, "Unsupported target Data Type!");
             break;
         }
     }
@@ -107,7 +107,7 @@
         }
         default:
         {
-            BOOST_ASSERT_MSG(false, "Cannot encode from boolean. Not supported target Data Type!");
+            ARMNN_ASSERT_MSG(false, "Cannot encode from boolean. Not supported target Data Type!");
             break;
         }
     }
diff --git a/src/backends/reference/workloads/FullyConnected.cpp b/src/backends/reference/workloads/FullyConnected.cpp
index 02d9b06..5a87520 100644
--- a/src/backends/reference/workloads/FullyConnected.cpp
+++ b/src/backends/reference/workloads/FullyConnected.cpp
@@ -7,8 +7,6 @@
 
 #include "RefWorkloadUtils.hpp"
 
-#include <boost/assert.hpp>
-
 namespace armnn
 {
 
diff --git a/src/backends/reference/workloads/Gather.cpp b/src/backends/reference/workloads/Gather.cpp
index 4cf3a14..c23edcd 100644
--- a/src/backends/reference/workloads/Gather.cpp
+++ b/src/backends/reference/workloads/Gather.cpp
@@ -36,7 +36,7 @@
     {
         unsigned int indx = boost::numeric_cast<unsigned int>(indices[i]);
 
-        BOOST_ASSERT(indices[i] >= 0 && indx < paramsShape[0]);
+        ARMNN_ASSERT(indices[i] >= 0 && indx < paramsShape[0]);
 
         unsigned int startOffset = indx * paramsProduct;
         unsigned int endOffset = startOffset + paramsProduct;
@@ -51,7 +51,7 @@
         }
     }
 
-    BOOST_ASSERT(outIndex == outputInfo.GetNumElements());
+    ARMNN_ASSERT(outIndex == outputInfo.GetNumElements());
 }
 
 } //namespace armnn
diff --git a/src/backends/reference/workloads/LogSoftmax.cpp b/src/backends/reference/workloads/LogSoftmax.cpp
index 103d62a..1998f50 100644
--- a/src/backends/reference/workloads/LogSoftmax.cpp
+++ b/src/backends/reference/workloads/LogSoftmax.cpp
@@ -6,11 +6,11 @@
 #include "LogSoftmax.hpp"
 
 #include <armnnUtils/TensorUtils.hpp>
+#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
 #include <cmath>
 
-#include <boost/assert.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 
 namespace
@@ -35,7 +35,7 @@
     const unsigned int numDimensions = inputInfo.GetNumDimensions();
 
     bool axisIsValid = ValidateAxis(descriptor.m_Axis, numDimensions);
-    BOOST_ASSERT_MSG(axisIsValid,
+    ARMNN_ASSERT_MSG(axisIsValid,
         "Axis index is not in range [-numDimensions, numDimensions).");
     IgnoreUnused(axisIsValid);
 
diff --git a/src/backends/reference/workloads/Mean.cpp b/src/backends/reference/workloads/Mean.cpp
index f2c0a4f..72080ef 100644
--- a/src/backends/reference/workloads/Mean.cpp
+++ b/src/backends/reference/workloads/Mean.cpp
@@ -128,7 +128,7 @@
     for (unsigned int idx = 0; idx < numResolvedAxis; ++idx)
     {
         unsigned int current = inputDims[resolvedAxis[idx]];
-        BOOST_ASSERT(boost::numeric_cast<float>(current) <
+        ARMNN_ASSERT(boost::numeric_cast<float>(current) <
               (std::numeric_limits<float>::max() / boost::numeric_cast<float>(numElementsInAxis)));
         numElementsInAxis *= current;
     }
diff --git a/src/backends/reference/workloads/RefConstantWorkload.cpp b/src/backends/reference/workloads/RefConstantWorkload.cpp
index 3506198..d3e65e6 100644
--- a/src/backends/reference/workloads/RefConstantWorkload.cpp
+++ b/src/backends/reference/workloads/RefConstantWorkload.cpp
@@ -9,7 +9,7 @@
 
 #include <armnn/Types.hpp>
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 #include <cstring>
 
@@ -24,10 +24,10 @@
 {
     const ConstantQueueDescriptor& data = this->m_Data;
 
-    BOOST_ASSERT(data.m_LayerOutput != nullptr);
+    ARMNN_ASSERT(data.m_LayerOutput != nullptr);
 
     const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[0]);
-    BOOST_ASSERT(data.m_LayerOutput->GetTensorInfo().GetNumBytes() == outputInfo.GetNumBytes());
+    ARMNN_ASSERT(data.m_LayerOutput->GetTensorInfo().GetNumBytes() == outputInfo.GetNumBytes());
 
     memcpy(GetOutputTensorData<void>(0, data), data.m_LayerOutput->GetConstTensor<void>(),
         outputInfo.GetNumBytes());
diff --git a/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp b/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
index ac82db9..f8c3548 100644
--- a/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
+++ b/src/backends/reference/workloads/RefFullyConnectedWorkload.cpp
@@ -32,7 +32,7 @@
 void RefFullyConnectedWorkload::PostAllocationConfigure()
 {
     const TensorInfo& inputInfo = GetTensorInfo(m_Data.m_Inputs[0]);
-    BOOST_ASSERT(inputInfo.GetNumDimensions() > 1);
+    ARMNN_ASSERT(inputInfo.GetNumDimensions() > 1);
     m_InputShape = inputInfo.GetShape();
     m_InputDecoder = MakeDecoder<float>(inputInfo);
 
diff --git a/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp b/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
index a987e79..a2ace13 100644
--- a/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
+++ b/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
@@ -12,7 +12,7 @@
 
 #include <Profiling.hpp>
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 namespace armnn
 {
@@ -27,8 +27,8 @@
     std::unique_ptr<Decoder<float>> decoder = MakeDecoder<float>(inputInfo, m_Data.m_Inputs[0]->Map());
     std::unique_ptr<Encoder<float>> encoder = MakeEncoder<float>(outputInfo, m_Data.m_Outputs[0]->Map());
 
-    BOOST_ASSERT(decoder != nullptr);
-    BOOST_ASSERT(encoder != nullptr);
+    ARMNN_ASSERT(decoder != nullptr);
+    ARMNN_ASSERT(encoder != nullptr);
 
     LogSoftmax(*decoder, *encoder, inputInfo, m_Data.m_Parameters);
 }
diff --git a/src/backends/reference/workloads/RefStackWorkload.cpp b/src/backends/reference/workloads/RefStackWorkload.cpp
index be36f40..fc85950 100644
--- a/src/backends/reference/workloads/RefStackWorkload.cpp
+++ b/src/backends/reference/workloads/RefStackWorkload.cpp
@@ -26,7 +26,7 @@
     if (!m_Data.m_Parameters.m_Axis)
     {
         float* output = GetOutputTensorData<float>(0, m_Data);
-        BOOST_ASSERT(output != nullptr);
+        ARMNN_ASSERT(output != nullptr);
 
         unsigned int numInputs = m_Data.m_Parameters.m_NumInputs;
         unsigned int inputLength = GetTensorInfo(m_Data.m_Inputs[0]).GetNumElements();
diff --git a/src/backends/reference/workloads/RefStridedSliceWorkload.cpp b/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
index bfd3c28..e994a09 100644
--- a/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
+++ b/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
@@ -27,7 +27,7 @@
     DataType inputDataType  = inputInfo.GetDataType();
     DataType outputDataType = outputInfo.GetDataType();
 
-    BOOST_ASSERT(inputDataType == outputDataType);
+    ARMNN_ASSERT(inputDataType == outputDataType);
     IgnoreUnused(outputDataType);
 
     StridedSlice(inputInfo,
diff --git a/src/backends/reference/workloads/Slice.cpp b/src/backends/reference/workloads/Slice.cpp
index 0223cdc..e972524 100644
--- a/src/backends/reference/workloads/Slice.cpp
+++ b/src/backends/reference/workloads/Slice.cpp
@@ -5,9 +5,9 @@
 
 #include "Slice.hpp"
 
+#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 
-#include <boost/assert.hpp>
 #include <boost/numeric/conversion/cast.hpp>
 
 namespace armnn
@@ -22,11 +22,11 @@
     const TensorShape& inputShape = inputInfo.GetShape();
     const unsigned int numDims    = inputShape.GetNumDimensions();
 
-    BOOST_ASSERT(descriptor.m_Begin.size() == numDims);
-    BOOST_ASSERT(descriptor.m_Size.size()  == numDims);
+    ARMNN_ASSERT(descriptor.m_Begin.size() == numDims);
+    ARMNN_ASSERT(descriptor.m_Size.size()  == numDims);
 
     constexpr unsigned int maxNumDims = 4;
-    BOOST_ASSERT(numDims <= maxNumDims);
+    ARMNN_ASSERT(numDims <= maxNumDims);
 
     std::vector<unsigned int> paddedInput(4);
     std::vector<unsigned int> paddedBegin(4);
@@ -65,10 +65,10 @@
     unsigned int size2  = paddedSize[2];
     unsigned int size3  = paddedSize[3];
 
-    BOOST_ASSERT(begin0 + size0 <= dim0);
-    BOOST_ASSERT(begin1 + size1 <= dim1);
-    BOOST_ASSERT(begin2 + size2 <= dim2);
-    BOOST_ASSERT(begin3 + size3 <= dim3);
+    ARMNN_ASSERT(begin0 + size0 <= dim0);
+    ARMNN_ASSERT(begin1 + size1 <= dim1);
+    ARMNN_ASSERT(begin2 + size2 <= dim2);
+    ARMNN_ASSERT(begin3 + size3 <= dim3);
 
     const unsigned char* input = reinterpret_cast<const unsigned char*>(inputData);
     unsigned char* output      = reinterpret_cast<unsigned char*>(outputData);
diff --git a/src/backends/reference/workloads/Softmax.cpp b/src/backends/reference/workloads/Softmax.cpp
index 5036389..32eca84 100644
--- a/src/backends/reference/workloads/Softmax.cpp
+++ b/src/backends/reference/workloads/Softmax.cpp
@@ -16,9 +16,9 @@
 /// Computes the softmax function on some inputs, into outputs, with a shape given by tensorInfo.
 void Softmax(Decoder<float>& in, Encoder<float>& out, const TensorInfo& inputTensorInfo, float beta, int axis)
 {
-    BOOST_ASSERT_MSG(axis < static_cast<int>(inputTensorInfo.GetNumDimensions()),
+    ARMNN_ASSERT_MSG(axis < static_cast<int>(inputTensorInfo.GetNumDimensions()),
                      "Required axis index greater than number of dimensions.");
-    BOOST_ASSERT_MSG(axis >= -static_cast<int>(inputTensorInfo.GetNumDimensions()),
+    ARMNN_ASSERT_MSG(axis >= -static_cast<int>(inputTensorInfo.GetNumDimensions()),
                      "Required axis index lower than negative of the number of dimensions");
 
     unsigned int uAxis = axis < 0  ?
diff --git a/src/backends/reference/workloads/Splitter.cpp b/src/backends/reference/workloads/Splitter.cpp
index 3bddfb0..09edc5e 100644
--- a/src/backends/reference/workloads/Splitter.cpp
+++ b/src/backends/reference/workloads/Splitter.cpp
@@ -6,8 +6,7 @@
 #include "RefWorkloadUtils.hpp"
 #include <backendsCommon/WorkloadData.hpp>
 #include <armnn/Tensor.hpp>
-
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 #include "Splitter.hpp"
 
 #include <cmath>
@@ -47,7 +46,7 @@
 
             //Split view extents are defined by the size of (the corresponding) input tensor.
             const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[viewIdx]);
-            BOOST_ASSERT(outputInfo.GetNumDimensions() == inputInfo.GetNumDimensions());
+            ARMNN_ASSERT(outputInfo.GetNumDimensions() == inputInfo.GetNumDimensions());
 
             // Check all dimensions to see if this element is inside the given input view.
             bool insideView = true;
diff --git a/src/backends/reference/workloads/Splitter.hpp b/src/backends/reference/workloads/Splitter.hpp
index 271c6fd..26309b0 100644
--- a/src/backends/reference/workloads/Splitter.hpp
+++ b/src/backends/reference/workloads/Splitter.hpp
@@ -8,7 +8,7 @@
 #include "RefWorkloadUtils.hpp"
 #include <backendsCommon/WorkloadData.hpp>
 #include <armnn/Tensor.hpp>
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 namespace armnn
 {
@@ -38,7 +38,7 @@
 
             //Split view extents are defined by the size of (the corresponding) input tensor.
             const TensorInfo& outputInfo = GetTensorInfo(data.m_Outputs[viewIdx]);
-            BOOST_ASSERT(outputInfo.GetNumDimensions() == inputInfo0.GetNumDimensions());
+            ARMNN_ASSERT(outputInfo.GetNumDimensions() == inputInfo0.GetNumDimensions());
 
             // Check all dimensions to see if this element is inside the given input view.
             bool insideView = true;
@@ -67,10 +67,10 @@
 
                 //We are within the view, to copy input data to the output corresponding to this view.
                 DataType* outputData = GetOutputTensorData<DataType>(viewIdx, data);
-                BOOST_ASSERT(outputData);
+                ARMNN_ASSERT(outputData);
 
                 const DataType* inputData = GetInputTensorData<DataType>(0, data);
-                BOOST_ASSERT(inputData);
+                ARMNN_ASSERT(inputData);
 
                 outputData[outIndex] = inputData[index];
             }
diff --git a/src/backends/reference/workloads/StridedSlice.cpp b/src/backends/reference/workloads/StridedSlice.cpp
index 62f06dc..b00b049 100644
--- a/src/backends/reference/workloads/StridedSlice.cpp
+++ b/src/backends/reference/workloads/StridedSlice.cpp
@@ -7,7 +7,8 @@
 
 #include <ResolveType.hpp>
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
+
 #include <boost/numeric/conversion/cast.hpp>
 
 #include <cstring>
@@ -20,12 +21,12 @@
 
 void PadParams(StridedSliceDescriptor& p, unsigned int dimCount)
 {
-    BOOST_ASSERT_MSG(dimCount <= 4, "Expected input with at most 4 dimensions");
+    ARMNN_ASSERT_MSG(dimCount <= 4, "Expected input with at most 4 dimensions");
 
     const unsigned int beginIndicesCount =
         boost::numeric_cast<unsigned int>(p.m_Begin.size());
 
-    BOOST_ASSERT(dimCount >= beginIndicesCount);
+    ARMNN_ASSERT(dimCount >= beginIndicesCount);
     const unsigned int padCount = dimCount - beginIndicesCount;
 
     p.m_Begin.resize(dimCount);
diff --git a/src/backends/reference/workloads/TensorBufferArrayView.hpp b/src/backends/reference/workloads/TensorBufferArrayView.hpp
index e03c42f..5d66fd5 100644
--- a/src/backends/reference/workloads/TensorBufferArrayView.hpp
+++ b/src/backends/reference/workloads/TensorBufferArrayView.hpp
@@ -9,7 +9,7 @@
 
 #include <armnnUtils/DataLayoutIndexed.hpp>
 
-#include <boost/assert.hpp>
+#include <armnn/utility/Assert.hpp>
 
 namespace armnn
 {
@@ -25,7 +25,7 @@
         , m_Data(data)
         , m_DataLayout(dataLayout)
     {
-        BOOST_ASSERT(m_Shape.GetNumDimensions() == 4);
+        ARMNN_ASSERT(m_Shape.GetNumDimensions() == 4);
     }
 
     DataType& Get(unsigned int b, unsigned int c, unsigned int h, unsigned int w) const