IVGCVSW-7854 Remove/rewrite asserts in the backends.

* Identify usages of ARMNN_ASSERT that should be proper exceptions and
  convert them (see the sketch below).
* Change ARMNN_ASSERT to CHECK in doctest test cases.
* Verify that any remaining assertions are reasonable.

Signed-off-by: Colm Donelan <colm.donelan@arm.com>
Change-Id: Ifd1f2a5a4bb60135e8654305035ec70e09c4dc2d
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 40d243e..f97d03a 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -958,7 +958,6 @@
                                   "Reference concatenation: output type not supported");
     for (const TensorInfo* input : inputs)
     {
-        ARMNN_ASSERT(input != nullptr);
         supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
             "Reference concatenation: input type not supported");
 
@@ -2629,7 +2628,6 @@
                                   "Reference stack: output type not supported");
     for (const TensorInfo* input : inputs)
     {
-        ARMNN_ASSERT(input != nullptr);
         supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
             "Reference stack: input type not supported");
 
diff --git a/src/backends/reference/RefMemoryManager.cpp b/src/backends/reference/RefMemoryManager.cpp
index 76054e4..80f3531 100644
--- a/src/backends/reference/RefMemoryManager.cpp
+++ b/src/backends/reference/RefMemoryManager.cpp
@@ -1,10 +1,10 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "RefMemoryManager.hpp"
 
-#include <armnn/utility/Assert.hpp>
+#include <armnn/Exceptions.hpp>
 
 #include <algorithm>
 
@@ -35,7 +35,7 @@
 
 void RefMemoryManager::Allocate(RefMemoryManager::Pool* pool)
 {
-    ARMNN_ASSERT(pool);
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(pool, "Null pool passed to RefMemoryManager::Allocate.");
     m_FreePools.push_back(pool);
 }
 
@@ -75,25 +75,29 @@
 
 void* RefMemoryManager::Pool::GetPointer()
 {
-    ARMNN_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::GetPointer() called when memory not acquired");
+    ARMNN_THROW_MSG_IF_FALSE(m_Pointer, RuntimeException,
+                             "RefMemoryManager::Pool::GetPointer() called when memory not acquired");
     return m_Pointer;
 }
 
 void RefMemoryManager::Pool::Reserve(unsigned int numBytes)
 {
-    ARMNN_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Reserve() cannot be called after memory acquired");
+    ARMNN_THROW_MSG_IF_FALSE(!m_Pointer, RuntimeException,
+                             "RefMemoryManager::Pool::Reserve() cannot be called after memory acquired");
     m_Size = std::max(m_Size, numBytes);
 }
 
 void RefMemoryManager::Pool::Acquire()
 {
-    ARMNN_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Acquire() called when memory already acquired");
+    ARMNN_THROW_MSG_IF_FALSE(!m_Pointer, RuntimeException,
+                             "RefMemoryManager::Pool::Acquire() called when memory already acquired");
     m_Pointer = ::operator new(size_t(m_Size));
 }
 
 void RefMemoryManager::Pool::Release()
 {
-    ARMNN_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::Release() called when memory not acquired");
+    ARMNN_THROW_MSG_IF_FALSE(m_Pointer, RuntimeException,
+                             "RefMemoryManager::Pool::Release() called when memory not acquired");
     ::operator delete(m_Pointer);
     m_Pointer = nullptr;
 }
diff --git a/src/backends/reference/RefTensorHandle.cpp b/src/backends/reference/RefTensorHandle.cpp
index 07f497c..1158a14 100644
--- a/src/backends/reference/RefTensorHandle.cpp
+++ b/src/backends/reference/RefTensorHandle.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019-2023 Arm Ltd. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -44,8 +44,8 @@
 
 void RefTensorHandle::Manage()
 {
-    ARMNN_ASSERT_MSG(!m_Pool, "RefTensorHandle::Manage() called twice");
-    ARMNN_ASSERT_MSG(!m_UnmanagedMemory, "RefTensorHandle::Manage() called after Allocate()");
+    ARMNN_THROW_MSG_IF_FALSE(!m_Pool, RuntimeException, "RefTensorHandle::Manage() called twice");
+    ARMNN_THROW_MSG_IF_FALSE(!m_UnmanagedMemory, RuntimeException, "RefTensorHandle::Manage() called after Allocate()");
 
     if (m_MemoryManager)
     {
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index 694c229..5c5fff3 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -1,12 +1,11 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #pragma once
 
 #include <armnn/TypesUtils.hpp>
-#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/NumericCast.hpp>
 #include <armnnUtils/FloatingPointConverter.hpp>
 #include <armnnUtils/TensorUtils.hpp>
@@ -78,28 +77,28 @@
 
     TypedIterator& operator++() override
     {
-        ARMNN_ASSERT(m_Iterator);
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_Iterator, "TypedIterator: m_Iterator is null!");
         ++m_Iterator;
         return *this;
     }
 
     TypedIterator& operator+=(const unsigned int increment) override
     {
-        ARMNN_ASSERT(m_Iterator);
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_Iterator, "TypedIterator: m_Iterator is null!");
         m_Iterator += increment;
         return *this;
     }
 
     TypedIterator& operator-=(const unsigned int increment) override
     {
-        ARMNN_ASSERT(m_Iterator);
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_Iterator, "TypedIterator: m_Iterator is null!");
         m_Iterator -= increment;
         return *this;
     }
 
     TypedIterator& operator[](const unsigned int index) override
     {
-        ARMNN_ASSERT(m_Iterator);
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_Iterator, "TypedIterator: m_Iterator is null!");
         m_Iterator = m_Start + index;
         return *this;
     }
@@ -763,7 +762,7 @@
 
     inline PerAxisIterator& SetIndexOnMem(const unsigned int index)
     {
-        ARMNN_ASSERT(m_Iterator);
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_Iterator, "PerAxisIterator: m_Iterator is null!");
         m_Iterator = m_Start + index;
         if (index < m_AxisFactor)
         {
diff --git a/src/backends/reference/workloads/BatchMatMulImpl.cpp b/src/backends/reference/workloads/BatchMatMulImpl.cpp
index c592b3b..8e169cb 100644
--- a/src/backends/reference/workloads/BatchMatMulImpl.cpp
+++ b/src/backends/reference/workloads/BatchMatMulImpl.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022, 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -145,7 +145,6 @@
     const auto& dataLayout = (type == DataSlot::InputX) ? params.m_DataLayoutX : params.m_DataLayoutY;
     const auto axesToAdjoint = BatchMatMulDescriptor::GetAxesToMul(dataLayout,inputInfo.GetShape());
 
-    ARMNN_ASSERT(inputInfo.GetShape()[axesToAdjoint.first] == inputInfo.GetShape()[axesToAdjoint.second]);
     // We grab a copy of the tensor data to prevent overwriting
     std::vector<float> inputDataClone = (type == DataSlot::InputX) ? inputXData : inputYData;
 
diff --git a/src/backends/reference/workloads/Concatenate.cpp b/src/backends/reference/workloads/Concatenate.cpp
index a0e0abf..fece43c 100644
--- a/src/backends/reference/workloads/Concatenate.cpp
+++ b/src/backends/reference/workloads/Concatenate.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -40,7 +40,9 @@
 
             //Split view extents are defined by the size of (the corresponding) input tensor.
             const TensorInfo& inputInfo = GetTensorInfo(inputs[viewIdx]);
-            ARMNN_ASSERT(inputInfo.GetNumDimensions() == outputInfo0.GetNumDimensions());
+            ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
+                inputInfo.GetNumDimensions() == outputInfo0.GetNumDimensions(),
+                "The number of output dimensions does not match the number of input dimensions.");
 
             // Check all dimensions to see if this element is inside the given input view.
             bool insideView = true;
diff --git a/src/backends/reference/workloads/ConvImpl.cpp b/src/backends/reference/workloads/ConvImpl.cpp
index 320690e..098c931 100644
--- a/src/backends/reference/workloads/ConvImpl.cpp
+++ b/src/backends/reference/workloads/ConvImpl.cpp
@@ -1,12 +1,10 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include "ConvImpl.hpp"
 
-#include <armnn/utility/Assert.hpp>
-
 #include <cmath>
 #include <limits>
 
@@ -15,7 +13,8 @@
 
 QuantizedMultiplierSmallerThanOne::QuantizedMultiplierSmallerThanOne(float multiplier)
 {
-    ARMNN_ASSERT(multiplier >= 0.0f && multiplier < 1.0f);
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(multiplier >= 0.0f && multiplier < 1.0f,
+                                        "QuantizedMultiplierSmallerThanOne: multiplier must be in the range [0.0f, 1.0f).");
     if (multiplier == 0.0f)
     {
         m_Multiplier = 0;
@@ -26,14 +25,11 @@
         const double q = std::frexp(multiplier, &m_RightShift);
         m_RightShift = -m_RightShift;
         int64_t qFixed = static_cast<int64_t>(::round(q * (1ll << 31)));
-        ARMNN_ASSERT(qFixed <= (1ll << 31));
         if (qFixed == (1ll << 31))
         {
             qFixed /= 2;
             --m_RightShift;
         }
-        ARMNN_ASSERT(m_RightShift >= 0);
-        ARMNN_ASSERT(qFixed <= std::numeric_limits<int32_t>::max());
         m_Multiplier = static_cast<int32_t>(qFixed);
     }
 }
@@ -61,7 +57,8 @@
 
 int32_t QuantizedMultiplierSmallerThanOne::RoundingDivideByPOT(int32_t x, int exponent)
 {
-    ARMNN_ASSERT(exponent >= 0 && exponent <= 31);
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(exponent >= 0 && exponent <= 31,
+                                        "RoundingDivideByPOT: exponent must be between 0 and 31.");
     int32_t mask = (1 << exponent) - 1;
     int32_t remainder = x & mask;
     int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
diff --git a/src/backends/reference/workloads/DepthToSpace.cpp b/src/backends/reference/workloads/DepthToSpace.cpp
index f5e9ec5..60098d1 100644
--- a/src/backends/reference/workloads/DepthToSpace.cpp
+++ b/src/backends/reference/workloads/DepthToSpace.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -8,8 +8,6 @@
 #include <armnnUtils/DataLayoutIndexed.hpp>
 #include <armnnUtils/Permute.hpp>
 
-#include <armnn/utility/Assert.hpp>
-
 using namespace armnnUtils;
 
 namespace armnn
@@ -22,7 +20,6 @@
                   unsigned int dataTypeSize)
 {
     const unsigned int blockSize = descriptor.m_BlockSize;
-    ARMNN_ASSERT(blockSize != 0u);
 
     const TensorShape& inputShape = inputInfo.GetShape();
     const unsigned int batches = inputShape[0];
diff --git a/src/backends/reference/workloads/Dequantize.cpp b/src/backends/reference/workloads/Dequantize.cpp
index fdc8e30..3955458 100644
--- a/src/backends/reference/workloads/Dequantize.cpp
+++ b/src/backends/reference/workloads/Dequantize.cpp
@@ -1,12 +1,10 @@
 //
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include "Dequantize.hpp"
 
-#include <armnn/utility/IgnoreUnused.hpp>
-
 namespace armnn
 {
 
@@ -15,8 +13,9 @@
                 const TensorInfo& inputInfo,
                 const TensorInfo& outputInfo)
 {
-    IgnoreUnused(outputInfo);
-    ARMNN_ASSERT(inputInfo.GetNumElements() == outputInfo.GetNumElements());
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
+        inputInfo.GetNumElements() == outputInfo.GetNumElements(),
+        "Dequantize: The number of elements in the input and output tensors must be the same.");
     for (unsigned int i = 0; i < inputInfo.GetNumElements(); i++)
     {
         // inputDecoder.Get() dequantizes the data element from whatever
diff --git a/src/backends/reference/workloads/DetectionPostProcess.cpp b/src/backends/reference/workloads/DetectionPostProcess.cpp
index c5ab327..361f886 100644
--- a/src/backends/reference/workloads/DetectionPostProcess.cpp
+++ b/src/backends/reference/workloads/DetectionPostProcess.cpp
@@ -1,12 +1,10 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include "DetectionPostProcess.hpp"
 
-#include <armnn/utility/Assert.hpp>
-#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/utility/NumericCast.hpp>
 
 #include <algorithm>
@@ -140,11 +138,11 @@
 
 void DetectionPostProcess(const TensorInfo& boxEncodingsInfo,
                           const TensorInfo& scoresInfo,
-                          const TensorInfo& anchorsInfo,
+                          const TensorInfo&,
                           const TensorInfo& detectionBoxesInfo,
-                          const TensorInfo& detectionClassesInfo,
-                          const TensorInfo& detectionScoresInfo,
-                          const TensorInfo& numDetectionsInfo,
+                          const TensorInfo&,
+                          const TensorInfo&,
+                          const TensorInfo&,
                           const DetectionPostProcessDescriptor& desc,
                           Decoder<float>& boxEncodings,
                           Decoder<float>& scores,
@@ -154,7 +152,6 @@
                           float* detectionScores,
                           float* numDetections)
 {
-    IgnoreUnused(anchorsInfo, detectionClassesInfo, detectionScoresInfo, numDetectionsInfo);
 
     // Transform center-size format which is (ycenter, xcenter, height, width) to box-corner format,
     // which represents the lower left corner and the upper right corner (ymin, xmin, ymax, xmax)
@@ -212,9 +209,6 @@
         boxCorners[indexH] = yCentre + halfH;
         // xmax
         boxCorners[indexW] = xCentre + halfW;
-
-        ARMNN_ASSERT(boxCorners[indexY] < boxCorners[indexH]);
-        ARMNN_ASSERT(boxCorners[indexX] < boxCorners[indexW]);
     }
 
     unsigned int numClassesWithBg = desc.m_NumClasses + 1;
diff --git a/src/backends/reference/workloads/FullyConnected.cpp b/src/backends/reference/workloads/FullyConnected.cpp
index 47968f4..19c01b8 100644
--- a/src/backends/reference/workloads/FullyConnected.cpp
+++ b/src/backends/reference/workloads/FullyConnected.cpp
@@ -1,12 +1,10 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include "FullyConnected.hpp"
 
-#include <armnn/utility/Assert.hpp>
-
 #include "RefWorkloadUtils.hpp"
 
 namespace armnn
@@ -31,7 +29,6 @@
 
     const TensorShape biasShape{outputSize};
 
-    ARMNN_ASSERT(!biasEnabled || pBiasDecoder != nullptr);
     const std::vector<float> decodedBiases = biasEnabled ? pBiasDecoder->DecodeTensor(biasShape) : std::vector<float>();
 
 
diff --git a/src/backends/reference/workloads/LogSoftmax.cpp b/src/backends/reference/workloads/LogSoftmax.cpp
index 2b63849..0926894 100644
--- a/src/backends/reference/workloads/LogSoftmax.cpp
+++ b/src/backends/reference/workloads/LogSoftmax.cpp
@@ -1,13 +1,11 @@
 //
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include "LogSoftmax.hpp"
 
 #include <armnnUtils/TensorUtils.hpp>
-#include <armnn/utility/Assert.hpp>
-#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/utility/NumericCast.hpp>
 
 #include <cmath>
@@ -33,10 +31,8 @@
 {
     const unsigned int numDimensions = inputInfo.GetNumDimensions();
 
-    bool axisIsValid = ValidateAxis(descriptor.m_Axis, numDimensions);
-    ARMNN_ASSERT_MSG(axisIsValid,
-        "Axis index is not in range [-numDimensions, numDimensions).");
-    IgnoreUnused(axisIsValid);
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(ValidateAxis(descriptor.m_Axis, numDimensions),
+                                        "Axis index is not in range [-numDimensions, numDimensions).");
 
     unsigned int uAxis = descriptor.m_Axis < 0  ?
         numDimensions - armnn::numeric_cast<unsigned int>(std::abs(descriptor.m_Axis)) :
diff --git a/src/backends/reference/workloads/MirrorPad.cpp b/src/backends/reference/workloads/MirrorPad.cpp
index 7388fed..de3b74b 100644
--- a/src/backends/reference/workloads/MirrorPad.cpp
+++ b/src/backends/reference/workloads/MirrorPad.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -18,8 +18,8 @@
 {
     unsigned int numOfElements = shape.GetNumElements();
 
-    ARMNN_ASSERT_MSG(index <= numOfElements, "Index has to be in [0, num_elements]");
-    ARMNN_ASSERT_MSG(numOfElements != 0, "Cannot create coordinate from empty shape");
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(index <= numOfElements, "Index has to be in [0, num_elements]");
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(numOfElements != 0, "Cannot create coordinate from empty shape");
 
     std::vector<unsigned int> coord(shape.GetNumDimensions());
     for(unsigned int i = 0; i < shape.GetNumDimensions(); ++i)
@@ -36,8 +36,8 @@
 // E.g. [0, 0, 2] returns 2.
 inline unsigned int CoordToIndex(const armnn::TensorShape& shape, const std::vector<unsigned int>& coord)
 {
-    ARMNN_ASSERT_MSG(shape.GetNumDimensions() != 0, "Cannot get index from empty shape");
-    ARMNN_ASSERT_MSG(coord.size() != 0, "Cannot get index of empty coordinate");
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(shape.GetNumDimensions() != 0, "Cannot get index from empty shape");
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(coord.size() != 0, "Cannot get index of empty coordinate");
 
     unsigned int index    = 0;
     unsigned int dimSize  = 1;
diff --git a/src/backends/reference/workloads/Reduce.cpp b/src/backends/reference/workloads/Reduce.cpp
index 8b28a61..6ea333b 100644
--- a/src/backends/reference/workloads/Reduce.cpp
+++ b/src/backends/reference/workloads/Reduce.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -153,8 +153,6 @@
     for (unsigned int idx = 0; idx < numResolvedAxis; ++idx)
     {
         unsigned int current = inputDims[resolvedAxis[idx]];
-        ARMNN_ASSERT(armnn::numeric_cast<float>(current) <
-                     (std::numeric_limits<float>::max() / armnn::numeric_cast<float>(numElementsInAxis)));
         numElementsInAxis *= current;
     }
 
diff --git a/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp b/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
index e45d24a..47c537c 100644
--- a/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
+++ b/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
@@ -12,8 +12,6 @@
 
 #include <Profiling.hpp>
 
-#include <armnn/utility/Assert.hpp>
-
 namespace armnn
 {
 
@@ -38,9 +36,6 @@
     std::unique_ptr<Decoder<float>> decoder = MakeDecoder<float>(inputInfo, inputs[0]->Map());
     std::unique_ptr<Encoder<float>> encoder = MakeEncoder<float>(outputInfo, outputs[0]->Map());
 
-    ARMNN_ASSERT(decoder != nullptr);
-    ARMNN_ASSERT(encoder != nullptr);
-
     LogSoftmax(*decoder, *encoder, inputInfo, m_Data.m_Parameters);
 }
 
diff --git a/src/backends/reference/workloads/RefStridedSliceWorkload.cpp b/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
index c4a4f7f..1dc95a2 100644
--- a/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
+++ b/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -31,13 +31,8 @@
     ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefStridedSliceWorkload_Execute");
 
     const TensorInfo& inputInfo  = GetTensorInfo(inputs[0]);
-    const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
 
     DataType inputDataType  = inputInfo.GetDataType();
-    DataType outputDataType = outputInfo.GetDataType();
-
-    ARMNN_ASSERT(inputDataType == outputDataType);
-    IgnoreUnused(outputDataType);
 
     StridedSlice(inputInfo,
                  m_Data.m_Parameters,
diff --git a/src/backends/reference/workloads/Resize.cpp b/src/backends/reference/workloads/Resize.cpp
index e80a205..7bed6c6 100644
--- a/src/backends/reference/workloads/Resize.cpp
+++ b/src/backends/reference/workloads/Resize.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -72,7 +72,8 @@
             bool halfPixelCenters)
 {
     // alignCorners and halfPixelCenters cannot both be true
-    ARMNN_ASSERT(!(alignCorners && halfPixelCenters));
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(!(alignCorners && halfPixelCenters),
+                                        "Resize: alignCorners and halfPixelCenters cannot both be true");
 
     // We follow the definition of TensorFlow and AndroidNN: the top-left corner of a texel in the output
     // image is projected into the input image to figure out the interpolants and weights. Note that this
diff --git a/src/backends/reference/workloads/Softmax.cpp b/src/backends/reference/workloads/Softmax.cpp
index 00d496d..d792361 100644
--- a/src/backends/reference/workloads/Softmax.cpp
+++ b/src/backends/reference/workloads/Softmax.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -16,10 +16,10 @@
 /// Computes the softmax function on some inputs, into outputs, with a shape given by tensorInfo.
 void Softmax(Decoder<float>& in, Encoder<float>& out, const TensorInfo& inputTensorInfo, float beta, int axis)
 {
-    ARMNN_ASSERT_MSG(axis < static_cast<int>(inputTensorInfo.GetNumDimensions()),
-                     "Required axis index greater than number of dimensions.");
-    ARMNN_ASSERT_MSG(axis >= -static_cast<int>(inputTensorInfo.GetNumDimensions()),
-                     "Required axis index lower than negative of the number of dimensions");
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(axis < static_cast<int>(inputTensorInfo.GetNumDimensions()),
+                                        "Softmax: axis index must be less than the number of dimensions.");
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(axis >= -static_cast<int>(inputTensorInfo.GetNumDimensions()),
+                                        "Softmax: axis index must not be less than the negated number of dimensions.");
 
     unsigned int uAxis = axis < 0  ?
                          inputTensorInfo.GetNumDimensions() - static_cast<unsigned int>(abs(axis))
diff --git a/src/backends/reference/workloads/Splitter.cpp b/src/backends/reference/workloads/Splitter.cpp
index 695ae8a..963e3aa 100644
--- a/src/backends/reference/workloads/Splitter.cpp
+++ b/src/backends/reference/workloads/Splitter.cpp
@@ -1,12 +1,11 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include "RefWorkloadUtils.hpp"
 #include <armnn/backends/WorkloadData.hpp>
 #include <armnn/Tensor.hpp>
-#include <armnn/utility/Assert.hpp>
 #include "Splitter.hpp"
 
 #include <cmath>
@@ -48,7 +47,9 @@
 
             //Split view extents are defined by the size of (the corresponding) input tensor.
             const TensorInfo& outputInfo = GetTensorInfo(outputs[viewIdx]);
-            ARMNN_ASSERT(outputInfo.GetNumDimensions() == inputInfo.GetNumDimensions());
+            ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
+                outputInfo.GetNumDimensions() == inputInfo.GetNumDimensions(),
+                "The number of output dimensions does not match the number of input dimensions.");
 
             // Check all dimensions to see if this element is inside the given input view.
             bool insideView = true;
diff --git a/src/backends/reference/workloads/Splitter.hpp b/src/backends/reference/workloads/Splitter.hpp
index 730b071..f05f654 100644
--- a/src/backends/reference/workloads/Splitter.hpp
+++ b/src/backends/reference/workloads/Splitter.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -40,7 +40,9 @@
 
             //Split view extents are defined by the size of (the corresponding) input tensor.
             const TensorInfo& outputInfo = GetTensorInfo(outputs[viewIdx]);
-            ARMNN_ASSERT(outputInfo.GetNumDimensions() == inputInfo0.GetNumDimensions());
+            ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
+                outputInfo.GetNumDimensions() == inputInfo0.GetNumDimensions(),
+                "The number of output dimensions does not match the number of input dimensions.");
 
             // Check all dimensions to see if this element is inside the given input view.
             bool insideView = true;
@@ -69,11 +71,7 @@
 
                 //We are within the view, to copy input data to the output corresponding to this view.
                 DataType* outputData = GetOutputTensorData<DataType>(viewIdx, data);
-                ARMNN_ASSERT(outputData);
-
                 const DataType* inputData = GetInputTensorData<DataType>(0, data);
-                ARMNN_ASSERT(inputData);
-
                 outputData[outIndex] = inputData[index];
             }
         }
diff --git a/src/backends/reference/workloads/StridedSlice.cpp b/src/backends/reference/workloads/StridedSlice.cpp
index 68600c9..fcd1c35 100644
--- a/src/backends/reference/workloads/StridedSlice.cpp
+++ b/src/backends/reference/workloads/StridedSlice.cpp
@@ -1,13 +1,10 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include "StridedSlice.hpp"
 
-#include <ResolveType.hpp>
-
-#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/NumericCast.hpp>
 
 #include <cstring>
@@ -20,12 +17,11 @@
 
 void PadParams(StridedSliceDescriptor& p, unsigned int dimCount)
 {
-    ARMNN_ASSERT_MSG(dimCount <= 4, "Expected input with at most 4 dimensions");
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(dimCount <= 4, "Expected input with at most 4 dimensions");
 
     const unsigned int beginIndicesCount =
         armnn::numeric_cast<unsigned int>(p.m_Begin.size());
 
-    ARMNN_ASSERT(dimCount >= beginIndicesCount);
     const unsigned int padCount = dimCount - beginIndicesCount;
 
     p.m_Begin.resize(dimCount);
diff --git a/src/backends/reference/workloads/TensorBufferArrayView.hpp b/src/backends/reference/workloads/TensorBufferArrayView.hpp
index 0b448e6..c6a7571 100644
--- a/src/backends/reference/workloads/TensorBufferArrayView.hpp
+++ b/src/backends/reference/workloads/TensorBufferArrayView.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -9,8 +9,6 @@
 
 #include <armnnUtils/DataLayoutIndexed.hpp>
 
-#include <armnn/utility/Assert.hpp>
-
 namespace armnn
 {
 
@@ -25,7 +23,8 @@
         , m_Data(data)
         , m_DataLayout(dataLayout)
     {
-        ARMNN_ASSERT(m_Shape.GetNumDimensions() == 4);
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_Shape.GetNumDimensions() == 4,
+                                            "Only 4D tensors are supported by TensorBufferArrayView.");
     }
 
     DataType& Get(unsigned int b, unsigned int c, unsigned int h, unsigned int w) const