IVGCVSW-7854 Remove/rewrite asserts in the backends.

* Identify usages of ARMNN_ASSERT that should be proper exceptions
  and replace them (see the sketch below).
* Change ARMNN_ASSERT in Doctests to CHECK.
* Verify any remaining assertions are reasonable.
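
A minimal sketch of the replacement patterns, assuming the usual
debug-only semantics of ARMNN_ASSERT and the
ARMNN_THROW_INVALIDARG_MSG_IF_FALSE macro declared in
armnn/Exceptions.hpp:

    // Before: aborts in debug builds, compiles away in release builds.
    ARMNN_ASSERT_MSG(handle, "Null tensor handle");

    // After, in library code: throws armnn::InvalidArgumentException in
    // all build types, so the failure is catchable and carries a message.
    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
        handle, "Null tensor handle passed to InitializeArmComputeTensorData.");

    // After, in doctest test code: a failed check fails the test case
    // instead of aborting the test binary.
    CHECK(workload != nullptr);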

Signed-off-by: Colm Donelan <colm.donelan@arm.com>
Change-Id: Ifd1f2a5a4bb60135e8654305035ec70e09c4dc2d
diff --git a/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp b/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
index a44acb0..9b88914 100644
--- a/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeSubgraphUtils.hpp
@@ -1,12 +1,11 @@
 //
-// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #pragma once
 
 #include <armnn/backends/OptimizationViews.hpp>
-#include <armnn/utility/Assert.hpp>
 
 #include <aclCommon/ArmComputeUtils.hpp>
 #include <backendsCommon/SubgraphUtils.hpp>
@@ -330,11 +329,6 @@
 
         layers.emplace_back(replacementLayer);
     }
-
-    // Check if the TensorInfo from the last layer equals the inferred output from the original layer.
-    ARMNN_ASSERT(baseLayer->GetOutputSlot(0).GetTensorInfo() ==
-                 PolymorphicDowncast<Layer*>(layers.back())->GetOutputSlot().GetTensorInfo());
-
     return layers;
 }
 
diff --git a/src/backends/aclCommon/ArmComputeTensorUtils.cpp b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
index a11b966..c5b4fa1 100644
--- a/src/backends/aclCommon/ArmComputeTensorUtils.cpp
+++ b/src/backends/aclCommon/ArmComputeTensorUtils.cpp
@@ -2,10 +2,11 @@
 // Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
+
+#include <armnn/Exceptions.hpp>
 #include <aclCommon/ArmComputeTensorUtils.hpp>
 #include <aclCommon/ArmComputeUtils.hpp>
 
-#include "armnn/Exceptions.hpp"
 #include "ArmComputeUtils.hpp"
 #include <armnn/Descriptors.hpp>
 
@@ -43,7 +44,6 @@
         case armnn::DataType::Signed32:
             return arm_compute::DataType::S32;
         default:
-            ARMNN_ASSERT_MSG(false, "Unknown data type");
             return arm_compute::DataType::UNKNOWN;
     }
 }
@@ -75,8 +75,7 @@
         case arm_compute::DataType::S32:
             return armnn::DataType::Signed32;
         default:
-            ARMNN_ASSERT_MSG(false, "Unknown data type");
-            return armnn::DataType::Float32;
+            throw InvalidArgumentException("Unknown arm_compute::DataType data type");
     }
 }
 
diff --git a/src/backends/aclCommon/ArmComputeUtils.hpp b/src/backends/aclCommon/ArmComputeUtils.hpp
index 9a30a74..d7025aa 100644
--- a/src/backends/aclCommon/ArmComputeUtils.hpp
+++ b/src/backends/aclCommon/ArmComputeUtils.hpp
@@ -1,12 +1,12 @@
 //
-// Copyright © 2017-2023 Arm Ltd. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
 
 #include <armnn/Descriptors.hpp>
+#include <armnn/Exceptions.hpp>
 #include <armnn/Tensor.hpp>
-#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/NumericCast.hpp>
 #include <armnn/backends/WorkloadData.hpp>
 #include <armnnUtils/TensorUtils.hpp>
@@ -233,8 +233,7 @@
     }
 
     unsigned int dim = tensor.GetNumDimensions();
-
-    ARMNN_ASSERT(dim != 0);
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(dim != 0, "The number of dimensions in this tensor cannot be zero.");
 
     // Currently ArmNN support axis 1.
     auto aclAxis = (static_cast<T>(dim) - 1);
@@ -274,9 +273,9 @@
 {
     int rank = static_cast<int>(tensor.GetNumDimensions());
 
-    ARMNN_ASSERT(rank != 0);
-    ARMNN_ASSERT((-1 * rank) <= armnnAxis);
-    ARMNN_ASSERT(armnnAxis < rank);
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(rank != 0, "The number of dimensions in this tensor cannot be zero.");
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(armnnAxis < rank, "armnnAxis must be less than the tensor rank.");
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE((-1 * rank) <= armnnAxis, "armnnAxis must not be less than -rank.");
 
     int sign = (armnnAxis < 0) ? -1 : 1;
     int aclAxis = sign * rank - 1  - armnnAxis;
diff --git a/src/backends/aclCommon/BaseMemoryManager.cpp b/src/backends/aclCommon/BaseMemoryManager.cpp
index 206cf9b..50517cb 100644
--- a/src/backends/aclCommon/BaseMemoryManager.cpp
+++ b/src/backends/aclCommon/BaseMemoryManager.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "BaseMemoryManager.hpp"
@@ -18,7 +18,7 @@
 BaseMemoryManager::BaseMemoryManager(std::shared_ptr<arm_compute::IAllocator> alloc,
                                      MemoryAffinity memoryAffinity)
 {
-    ARMNN_ASSERT(alloc);
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(alloc, "A null allocator has been passed to BaseMemoryManager.");
     m_Allocator = std::move(alloc);
 
     m_IntraLayerMemoryMgr = CreateArmComputeMemoryManager(memoryAffinity);
@@ -50,30 +50,24 @@
     static const size_t s_NumPools = 1;
 
     // Allocate memory pools for intra-layer memory manager
-    ARMNN_ASSERT(m_IntraLayerMemoryMgr);
     m_IntraLayerMemoryMgr->populate(*m_Allocator, s_NumPools);
 
     // Allocate memory pools for inter-layer memory manager
-    ARMNN_ASSERT(m_InterLayerMemoryMgr);
     m_InterLayerMemoryMgr->populate(*m_Allocator, s_NumPools);
 
     // Acquire inter-layer memory group. NOTE: This has to come after allocating the pools
-    ARMNN_ASSERT(m_InterLayerMemoryGroup);
     m_InterLayerMemoryGroup->acquire();
 }
 
 void BaseMemoryManager::Release()
 {
     // Release inter-layer memory group. NOTE: This has to come before releasing the pools
-    ARMNN_ASSERT(m_InterLayerMemoryGroup);
     m_InterLayerMemoryGroup->release();
 
     // Release memory pools managed by intra-layer memory manager
-    ARMNN_ASSERT(m_IntraLayerMemoryMgr);
     m_IntraLayerMemoryMgr->clear();
 
     // Release memory pools managed by inter-layer memory manager
-    ARMNN_ASSERT(m_InterLayerMemoryMgr);
     m_InterLayerMemoryMgr->clear();
 }
 #else
diff --git a/src/backends/backendsCommon/LayerSupportRules.hpp b/src/backends/backendsCommon/LayerSupportRules.hpp
index a83fd62..1bd825b 100644
--- a/src/backends/backendsCommon/LayerSupportRules.hpp
+++ b/src/backends/backendsCommon/LayerSupportRules.hpp
@@ -1,11 +1,11 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #pragma once
 
-#include <armnn/utility/Assert.hpp>
+#include <armnn/Exceptions.hpp>
 #include <algorithm>
 
 namespace armnn
@@ -29,7 +29,7 @@
         case armnn::DataType::QSymmS16:
             return armnn::DataType::Signed32;
         default:
-            ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
+            throw InvalidArgumentException("GetBiasTypeFromWeightsType(): Unsupported data type.");
     }
     return armnn::EmptyOptional();
 }
diff --git a/src/backends/backendsCommon/MakeWorkloadHelper.hpp b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
index 5601822..8ed6f05 100644
--- a/src/backends/backendsCommon/MakeWorkloadHelper.hpp
+++ b/src/backends/backendsCommon/MakeWorkloadHelper.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -70,8 +70,7 @@
         case DataType::QSymmS16:
             return nullptr;
         default:
-            ARMNN_ASSERT_MSG(false, "Unknown DataType.");
-            return nullptr;
+            throw InvalidArgumentException("Unknown data type passed to MakeWorkloadHelper");
     }
 }
 
diff --git a/src/backends/backendsCommon/WorkloadData.cpp b/src/backends/backendsCommon/WorkloadData.cpp
index 2d7a5fd..0ddb429 100644
--- a/src/backends/backendsCommon/WorkloadData.cpp
+++ b/src/backends/backendsCommon/WorkloadData.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -40,8 +40,7 @@
         case DataType::QSymmS16:
             return DataType::Signed32;
         default:
-            ARMNN_ASSERT_MSG(false, "Invalid input data type");
-            return DataType::Float32;
+            throw InvalidArgumentException("GetBiasDataType(): Unsupported data type.");
     }
 }
 
diff --git a/src/backends/backendsCommon/WorkloadFactory.cpp b/src/backends/backendsCommon/WorkloadFactory.cpp
index 2538211..1f8d4da 100644
--- a/src/backends/backendsCommon/WorkloadFactory.cpp
+++ b/src/backends/backendsCommon/WorkloadFactory.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -61,7 +61,7 @@
         case armnn::DataType::QSymmS16:
             return armnn::DataType::Signed32;
         default:
-            ARMNN_ASSERT_MSG(false, "GetBiasTypeFromWeightsType(): Unsupported data type.");
+            throw InvalidArgumentException("GetBiasTypeFromWeightsType(): Unsupported data type.");
     }
     return armnn::EmptyOptional();
 }
@@ -262,8 +262,9 @@
             const TensorInfo input  = OverrideDataType(layer.GetInputSlot(0).GetTensorInfo(),
                                                        dataType);
             const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
-            ARMNN_ASSERT_MSG(layer.GetInputSlot(1).GetConnection(),
-                             "Convolution2dLayer: Weights should be connected as a Constant Layer.");
+
+            ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(layer.GetInputSlot(1).GetConnection(),
+                                                "Convolution2dLayer: Weights should be connected as a Constant Layer.");
             const TensorInfo weights = OverrideDataType(layer.GetInputSlot(1).GetTensorInfo(),
                                                         dataType);
 
@@ -273,8 +274,8 @@
             Optional<TensorInfo> biases;
             if (descriptor.m_BiasEnabled)
             {
-                ARMNN_ASSERT_MSG(layer.GetInputSlot(2).GetConnection(),
-                                 "Convolution2dLayer: Bias should be connected as a Constant Layer.");
+                ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(layer.GetInputSlot(2).GetConnection(),
+                                                    "Convolution2dLayer:Bias should be connected as a Constant Layer.");
                 biases = OverrideDataType(layer.GetInputSlot(2).GetTensorInfo(),
                                           GetBiasTypeFromWeightsType(dataType));
             }
@@ -296,8 +297,8 @@
                                                        dataType);
             const TensorInfo output = OverrideDataType(layer.GetOutputSlot(0).GetTensorInfo(), dataType);
 
-            ARMNN_ASSERT_MSG(layer.GetInputSlot(1).GetConnection(),
-                             "Convolution3dLayer: Weights should be connected as a Constant Layer.");
+            ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(layer.GetInputSlot(1).GetConnection(),
+                                                "Convolution3dLayer: Weights should be connected as a Constant Layer.");
             const TensorInfo weights = OverrideDataType(layer.GetInputSlot(1).GetTensorInfo(),
                                                         dataType);
 
@@ -352,8 +353,6 @@
             const TensorInfo& weights = OverrideDataType(layer.GetInputSlot(1).GetTensorInfo(),
                                                          dataType);
 
-            ARMNN_ASSERT(cLayer->GetInputSlot(1).GetConnection() != nullptr);
-
             const DepthwiseConvolution2dDescriptor& descriptor = cLayer->GetParameters();
 
             // Construct optional biases object based on the value of m_BiasEnabled
@@ -524,7 +523,7 @@
                     }
                     default:
                     {
-                        ARMNN_ASSERT_MSG(false, "Unexpected bias type");
+                        throw InvalidArgumentException("Unexpected bias type");
                     }
                 }
             }
@@ -987,9 +986,6 @@
             LstmInputParamsInfo paramsInfo;
 
             // Basic parameters
-            ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToForgetWeights.get() != nullptr);
-            ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToCellWeights.get() != nullptr);
-            ARMNN_ASSERT(cLayer->m_BasicParameters.m_InputToOutputWeights.get() != nullptr);
             paramsInfo.m_InputToForgetWeights = &cLayer->m_BasicParameters.m_InputToForgetWeights->GetTensorInfo();
             paramsInfo.m_InputToCellWeights   = &cLayer->m_BasicParameters.m_InputToCellWeights->GetTensorInfo();
             paramsInfo.m_InputToOutputWeights = &cLayer->m_BasicParameters.m_InputToOutputWeights->GetTensorInfo();
@@ -1431,12 +1427,15 @@
             Optional<TensorInfo> biases;
             if (descriptor.m_BiasEnabled)
             {
-                ARMNN_ASSERT(cLayer->m_Bias.get() != nullptr);
+                ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
+                    cLayer->m_Bias.get() != nullptr,
+                    "TransposeConvolution2d: Bias was enabled in the descriptor but no value was supplied.");
                 biases = OverrideDataType(cLayer->m_Bias->GetTensorInfo(),
                                           GetBiasTypeFromWeightsType(dataType));
             }
 
-            ARMNN_ASSERT(cLayer->m_Weight.get() != nullptr);
+            ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(cLayer->m_Weight.get() != nullptr,
+                                                "TransposeConvolution2d: Weights cannot be null.");
             const TensorInfo weights = OverrideDataType(cLayer->m_Weight->GetTensorInfo(), dataType);
 
             result = layerSupportObject.IsTransposeConvolution2dSupported(input,
@@ -1602,7 +1601,6 @@
         }
         default:
         {
-            ARMNN_ASSERT_MSG(false, "WorkloadFactory did not recognise type of layer.");
             reason.value() = "Unrecognised layer type";
             result = false;
             break;
diff --git a/src/backends/cl/ClBackend.cpp b/src/backends/cl/ClBackend.cpp
index 6d191a5..3073999 100644
--- a/src/backends/cl/ClBackend.cpp
+++ b/src/backends/cl/ClBackend.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -18,7 +18,6 @@
 
 #include <aclCommon/ArmComputeSubgraphUtils.hpp>
 #include <aclCommon/ArmComputeUtils.hpp>
-#include <aclCommon/BaseMemoryManager.hpp>
 
 #include <armnn/backends/IBackendContext.hpp>
 #include <armnn/backends/IMemoryManager.hpp>
diff --git a/src/backends/cl/ClBackendContext.cpp b/src/backends/cl/ClBackendContext.cpp
index adee276..8df8143 100644
--- a/src/backends/cl/ClBackendContext.cpp
+++ b/src/backends/cl/ClBackendContext.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -7,7 +7,6 @@
 #include "ClContextControl.hpp"
 
 #include <armnn/Logging.hpp>
-#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/PolymorphicDowncast.hpp>
 
 #include <arm_compute/core/CL/OpenCL.h>
@@ -94,8 +93,7 @@
                             return TuningLevel::Exhaustive;
                         default:
                         {
-                            ARMNN_ASSERT_MSG(false, "Tuning level not recognised.");
-                            return TuningLevel::None;
+                            throw InvalidArgumentException("Invalid value of tuning level specified.");
                         }
                     }
                 };
diff --git a/src/backends/cl/ClContextControl.cpp b/src/backends/cl/ClContextControl.cpp
index 34eca96..20223ae 100644
--- a/src/backends/cl/ClContextControl.cpp
+++ b/src/backends/cl/ClContextControl.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -9,9 +9,6 @@
 
 #include <LeakChecking.hpp>
 
-#include <armnn/utility/Assert.hpp>
-#include <armnn/utility/IgnoreUnused.hpp>
-
 #include <arm_compute/core/CL/CLKernelLibrary.h>
 #include <arm_compute/runtime/CL/CLScheduler.h>
 
@@ -34,9 +31,6 @@
     , m_HeuristicsHandle(heuristicsHandle)
     , m_ProfilingEnabled(profilingEnabled)
 {
-    // Ignore m_ProfilingEnabled if unused to avoid compiling problems when ArmCompute is disabled.
-    IgnoreUnused(m_ProfilingEnabled);
-
     try
     {
         std::vector<cl::Platform> platforms;
@@ -60,11 +54,9 @@
 
     // Removes the use of global CL context.
     cl::Context::setDefault(cl::Context{});
-    ARMNN_ASSERT(cl::Context::getDefault()() == NULL);
 
     // Removes the use of global CL command queue.
     cl::CommandQueue::setDefault(cl::CommandQueue{});
-    ARMNN_ASSERT(cl::CommandQueue::getDefault()() == NULL);
 
     // Always load the OpenCL runtime.
     LoadOpenClRuntime();
diff --git a/src/backends/cl/ClImportTensorHandle.hpp b/src/backends/cl/ClImportTensorHandle.hpp
index a03a4e9..b863f08 100644
--- a/src/backends/cl/ClImportTensorHandle.hpp
+++ b/src/backends/cl/ClImportTensorHandle.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -244,8 +244,6 @@
         {
             throw MemoryImportException(status.error_description());
         }
-
-        ARMNN_ASSERT(!m_Tensor.info()->is_resizable());
         return imported;
     }
     // Only used for testing
diff --git a/src/backends/cl/ClImportTensorHandleFactory.hpp b/src/backends/cl/ClImportTensorHandleFactory.hpp
index 7e22949..b22eb52 100644
--- a/src/backends/cl/ClImportTensorHandleFactory.hpp
+++ b/src/backends/cl/ClImportTensorHandleFactory.hpp
@@ -4,7 +4,6 @@
 //
 #pragma once
 
-#include <aclCommon/BaseMemoryManager.hpp>
 #include <armnn/MemorySources.hpp>
 #include <armnn/backends/IMemoryManager.hpp>
 #include <armnn/backends/ITensorHandleFactory.hpp>
diff --git a/src/backends/cl/ClTensorHandleFactory.cpp b/src/backends/cl/ClTensorHandleFactory.cpp
index be3ca5e..df99677 100644
--- a/src/backends/cl/ClTensorHandleFactory.cpp
+++ b/src/backends/cl/ClTensorHandleFactory.cpp
@@ -1,11 +1,12 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include "ClTensorHandleFactory.hpp"
 #include "ClTensorHandle.hpp"
 
+#include <armnn/backends/IMemoryManager.hpp>
 #include <armnn/utility/NumericCast.hpp>
 #include <armnn/utility/PolymorphicDowncast.hpp>
 
diff --git a/src/backends/cl/workloads/ClChannelShuffleWorkload.cpp b/src/backends/cl/workloads/ClChannelShuffleWorkload.cpp
index 9ce0571..0e10b37 100644
--- a/src/backends/cl/workloads/ClChannelShuffleWorkload.cpp
+++ b/src/backends/cl/workloads/ClChannelShuffleWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -80,8 +80,8 @@
             aclDataLayout = ConvertDataLayout(armnn::DataLayout::NHWC);
             break;
         default:
-            ARMNN_ASSERT_MSG(false, "Unsupported axis");
-            break;
+            throw InvalidArgumentException("Value for axis: " + std::to_string(descriptor.m_Parameters.m_Axis) +
+                                           " is not valid");
     }
     input.info()->set_data_layout(aclDataLayout);
     output.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/cl/workloads/ClConstantWorkload.cpp b/src/backends/cl/workloads/ClConstantWorkload.cpp
index bbf6476..619c0f8 100644
--- a/src/backends/cl/workloads/ClConstantWorkload.cpp
+++ b/src/backends/cl/workloads/ClConstantWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -61,7 +61,7 @@
     {
         const ConstantQueueDescriptor& data = this->m_Data;
 
-        ARMNN_ASSERT(data.m_LayerOutput != nullptr);
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(data.m_LayerOutput, "Output tensor handle is null.");
         arm_compute::CLTensor& output = static_cast<ClTensorHandle*>(data.m_Outputs[0])->GetTensor();
         arm_compute::DataType computeDataType = static_cast<ClTensorHandle*>(data.m_Outputs[0])->GetDataType();
 
@@ -105,8 +105,7 @@
             }
             default:
             {
-                ARMNN_ASSERT_MSG(false, "Unknown data type");
-                break;
+                throw InvalidArgumentException("Unknown data type.");
             }
         }
 
diff --git a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
index 2fc174c..7ae09e3 100644
--- a/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -99,7 +99,8 @@
         arm_compute::ICLTensor& bias = static_cast<IClTensorHandle*>(m_Data.m_Inputs[2])->GetTensor();
         bias.info()->set_are_values_constant(info.m_InputTensorInfos[2].IsConstant());
         // We assume here that NeonConvolution2dWorkloadValidate has been called before the constructor.
-        ARMNN_ASSERT(info.m_InputTensorInfos[2].IsConstant() == true);
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(info.m_InputTensorInfos[2].IsConstant() == true,
+                                            "The bias tensor must be constant.");
         m_BiasProxy = std::make_unique<ICLTensorProxy>(&bias);
     }
 
diff --git a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
index e5ee9b9..088814b 100644
--- a/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/cl/workloads/ClDepthwiseConvolutionWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -140,7 +140,6 @@
                 activationInfo,
                 aclDilationInfo);
     }
-    ARMNN_ASSERT(m_DepthwiseConvolutionLayer);
 
     // Add details for profiling output
     WorkloadInfo detailsInfo;
@@ -158,7 +157,6 @@
 void ClDepthwiseConvolutionWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID("ClDepthwiseConvolutionWorkload_Execute");
-    ARMNN_ASSERT(m_DepthwiseConvolutionLayer);
 
     RunClFunction(*m_DepthwiseConvolutionLayer, CHECK_LOCATION());
 }
diff --git a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
index 959f430..0b6606f 100644
--- a/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
+++ b/src/backends/cl/workloads/ClFullyConnectedWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -32,7 +32,9 @@
     arm_compute::TensorInfo* optionalAclBiases = nullptr;
     if (descriptor.m_BiasEnabled)
     {
-        ARMNN_ASSERT(biases.has_value());
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
+            biases.has_value(),
+            "ClFullyConnectedWorkload: Bias was enabled in the descriptor but no value was supplied.");
         aclBiases = BuildArmComputeTensorInfo(biases.value());
         aclBiases.set_are_values_constant(biases.value().IsConstant());
         optionalAclBiases = &aclBiases;
diff --git a/src/backends/cl/workloads/ClGatherNdWorkload.cpp b/src/backends/cl/workloads/ClGatherNdWorkload.cpp
index 1351f96..4e9dd75 100644
--- a/src/backends/cl/workloads/ClGatherNdWorkload.cpp
+++ b/src/backends/cl/workloads/ClGatherNdWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -142,8 +142,6 @@
     flattenedCoeff_Info.SetShape({ keyIndices["ND"] });
     BuildArmComputeTensor(m_FlattenedCoeff, flattenedCoeff_Info);
     armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_FlattenedCoeff);
-    ARMNN_ASSERT_MSG(indicesInfo.GetDataType() == DataType::Signed32,
-                     "flattenedCoeff must be same data type as m_FlattenedCoeff");
     CopyArmComputeClTensorData<int32_t>(m_FlattenedCoeff, flattenedCoeff.data());
 
     // Prepare the tensor to store the output of the multiplication
diff --git a/src/backends/cl/workloads/ClSqrtWorkload.cpp b/src/backends/cl/workloads/ClSqrtWorkload.cpp
index e36adf6..d41584e 100644
--- a/src/backends/cl/workloads/ClSqrtWorkload.cpp
+++ b/src/backends/cl/workloads/ClSqrtWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -34,7 +34,10 @@
                                const arm_compute::CLCompileContext& clCompileContext)
     : ClBaseWorkload<ElementwiseUnaryQueueDescriptor>(descriptor, info)
 {
-    ARMNN_ASSERT(descriptor.m_Parameters.m_Operation == UnaryOperation::Sqrt);
+    if (descriptor.m_Parameters.m_Operation != UnaryOperation::Sqrt)
+    {
+        throw InvalidArgumentException("ClSqrtWorkload: The descriptor does not indicate a Sqrt operation.");
+    }
 
     // Report Profiling Details
     ARMNN_REPORT_PROFILING_WORKLOAD_DESC("ClSqrtWorkload_Construct",
diff --git a/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp b/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp
index d3eeade..c3fafd4 100644
--- a/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp
+++ b/src/backends/cl/workloads/ClTransposeConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -38,8 +38,11 @@
 
     if (descriptor.m_BiasEnabled)
     {
-        ARMNN_ASSERT(biases.has_value());
-
+        if (!biases.has_value())
+        {
+            return arm_compute::Status{arm_compute::ErrorCode::RUNTIME_ERROR,
+                                       "ArmNN ClTransposeConvolution2dWorkload has empty bias value."};
+        }
         aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
         optionalAclBiasesInfo = &aclBiasesInfo;
     }
diff --git a/src/backends/cl/workloads/ClWorkloadUtils.hpp b/src/backends/cl/workloads/ClWorkloadUtils.hpp
index 4b491e3..78b09b0 100644
--- a/src/backends/cl/workloads/ClWorkloadUtils.hpp
+++ b/src/backends/cl/workloads/ClWorkloadUtils.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -124,8 +124,7 @@
 inline void InitializeArmComputeClTensorData(arm_compute::CLTensor& clTensor,
                                              const ConstTensorHandle* handle)
 {
-    ARMNN_ASSERT(handle);
-
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(handle, "Null tensor handle passed to InitializeArmComputeTensorData.");
     armcomputetensorutils::InitialiseArmComputeTensorEmpty(clTensor);
     switch(handle->GetTensorInfo().GetDataType())
     {
diff --git a/src/backends/gpuFsa/workloads/GpuFsaConstantWorkload.cpp b/src/backends/gpuFsa/workloads/GpuFsaConstantWorkload.cpp
index 39d3c0d..a68d3e6 100644
--- a/src/backends/gpuFsa/workloads/GpuFsaConstantWorkload.cpp
+++ b/src/backends/gpuFsa/workloads/GpuFsaConstantWorkload.cpp
@@ -57,8 +57,6 @@
     if (!m_RanOnce)
     {
         const ConstantQueueDescriptor& data = this->m_Data;
-
-        ARMNN_ASSERT(data.m_LayerOutput != nullptr);
         arm_compute::CLTensor& output = static_cast<GpuFsaTensorHandle*>(data.m_Outputs[0])->GetTensor();
         arm_compute::DataType computeDataType = static_cast<GpuFsaTensorHandle*>(data.m_Outputs[0])->GetDataType();
 
@@ -102,7 +100,6 @@
             }
             default:
             {
-                ARMNN_ASSERT_MSG(false, "Unknown data type");
-                break;
+                throw InvalidArgumentException("Unknown data type passed to GpuFsaConstantWorkload::Execute()");
             }
         }
diff --git a/src/backends/gpuFsa/workloads/GpuFsaWorkloadUtils.hpp b/src/backends/gpuFsa/workloads/GpuFsaWorkloadUtils.hpp
index 10954b0..567b9e3 100644
--- a/src/backends/gpuFsa/workloads/GpuFsaWorkloadUtils.hpp
+++ b/src/backends/gpuFsa/workloads/GpuFsaWorkloadUtils.hpp
@@ -100,7 +100,7 @@
     inline void InitializeArmComputeClTensorData(arm_compute::CLTensor& clTensor,
                                                  const ConstTensorHandle* handle)
     {
-        ARMNN_ASSERT(handle);
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(handle, "Null tensor handle passed to InitializeArmComputeClTensorData.");
 
         armcomputetensorutils::InitialiseArmComputeTensorEmpty(clTensor);
         switch(handle->GetTensorInfo().GetDataType())
diff --git a/src/backends/neon/NeonTensorHandle.hpp b/src/backends/neon/NeonTensorHandle.hpp
index e5f2107..3036760 100644
--- a/src/backends/neon/NeonTensorHandle.hpp
+++ b/src/backends/neon/NeonTensorHandle.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -8,10 +8,9 @@
 #include <BFloat16.hpp>
 #include <Half.hpp>
 
-#include <armnn/utility/Assert.hpp>
-
 #include <aclCommon/ArmComputeTensorHandle.hpp>
 #include <aclCommon/ArmComputeTensorUtils.hpp>
+#include <armnn/Exceptions.hpp>
 #include <armnn/utility/PolymorphicDowncast.hpp>
 
 #include <arm_compute/runtime/MemoryGroup.h>
@@ -68,7 +67,7 @@
         // If we have enabled Importing, don't manage the tensor
         if (!m_IsImportEnabled)
         {
-            ARMNN_ASSERT(m_MemoryGroup != nullptr);
+            ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_MemoryGroup, "arm_compute::MemoryGroup is null.");
             m_MemoryGroup->manage(&m_Tensor);
         }
     }
diff --git a/src/backends/neon/NeonTimer.cpp b/src/backends/neon/NeonTimer.cpp
index dbb1503..88d8cb0 100644
--- a/src/backends/neon/NeonTimer.cpp
+++ b/src/backends/neon/NeonTimer.cpp
@@ -1,12 +1,11 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include "NeonTimer.hpp"
 #include "NeonInterceptorScheduler.hpp"
 
-#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/PolymorphicDowncast.hpp>
 
 #include <memory>
@@ -21,7 +20,10 @@
 void NeonTimer::Start()
 {
     m_Kernels.clear();
-    ARMNN_ASSERT(g_Interceptor->GetKernels() == nullptr);
+    if (g_Interceptor->GetKernels() != nullptr)
+    {
+        throw RuntimeException("This NeonTimer instance has already been started.");
+    }
     g_Interceptor->SetKernels(&m_Kernels);
 
     m_RealSchedulerType = arm_compute::Scheduler::get_type();
diff --git a/src/backends/neon/test/NeonCreateWorkloadTests.cpp b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
index 96429a8..9c32e32 100644
--- a/src/backends/neon/test/NeonCreateWorkloadTests.cpp
+++ b/src/backends/neon/test/NeonCreateWorkloadTests.cpp
@@ -1,13 +1,11 @@
 //
-// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2023-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include "NeonWorkloadFactoryHelper.hpp"
 
 #include <aclCommon/ArmComputeTensorUtils.hpp>
-#include <armnn/utility/Assert.hpp>
-#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/utility/PolymorphicDowncast.hpp>
 #include <armnn/backends/MemCopyWorkload.hpp>
 
@@ -283,27 +281,20 @@
 TEST_CASE("CreateConvolution2dFastMathEnabledWorkload")
 {
     Graph graph;
-    using ModelOptions = std::vector<BackendOptions>;
+    using ModelOptions        = std::vector<BackendOptions>;
     ModelOptions modelOptions = {};
-    BackendOptions cpuAcc("CpuAcc",
-    {
-        { "FastMathEnabled", true }
-    });
+    BackendOptions cpuAcc("CpuAcc", { { "FastMathEnabled", true } });
     modelOptions.push_back(cpuAcc);
     NeonWorkloadFactory factory =
         NeonWorkloadFactoryHelper::GetFactory(NeonWorkloadFactoryHelper::GetMemoryManager(), modelOptions);
 
-    auto workload =
-        CreateConvolution2dWorkloadFastMathTest<NeonConvolution2dWorkload, armnn::DataType::Float32>(factory,
-                                                                                             graph,
-                                                                                             DataLayout::NCHW,
-                                                                                             modelOptions);
+    auto workload = CreateConvolution2dWorkloadFastMathTest<NeonConvolution2dWorkload, armnn::DataType::Float32>(
+        factory, graph, DataLayout::NCHW, modelOptions);
 
-    ARMNN_ASSERT(workload != nullptr);
+    CHECK(workload != nullptr);
     auto conv2dWorkload = PolymorphicDowncast<NeonConvolution2dWorkload*>(workload.get());
-    IgnoreUnused(conv2dWorkload);
-    ARMNN_ASSERT(conv2dWorkload != nullptr);
-    ARMNN_ASSERT(conv2dWorkload->GetConvolutionMethod() == arm_compute::ConvolutionMethod::WINOGRAD);
+    CHECK(conv2dWorkload != nullptr);
+    CHECK(conv2dWorkload->GetConvolutionMethod() == arm_compute::ConvolutionMethod::WINOGRAD);
 }
 
 template <typename armnn::DataType DataType>
diff --git a/src/backends/neon/test/NeonTensorHandleTests.cpp b/src/backends/neon/test/NeonTensorHandleTests.cpp
index bc8ad5d..d6fd081 100644
--- a/src/backends/neon/test/NeonTensorHandleTests.cpp
+++ b/src/backends/neon/test/NeonTensorHandleTests.cpp
@@ -1,10 +1,12 @@
 //
-// Copyright © 2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2021, 2023-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include <Graph.hpp>
 #include <Network.hpp>
 
+#include <aclCommon/BaseMemoryManager.hpp>
+
 #include <neon/NeonTensorHandle.hpp>
 #include <neon/NeonTensorHandleFactory.hpp>
 
@@ -16,7 +18,6 @@
 #include <CommonTestUtils.hpp>
 
 #include <doctest/doctest.h>
-#include <armnn/utility/Assert.hpp>
 
 TEST_SUITE("NeonTensorHandleTests")
 {
@@ -190,7 +191,7 @@
     NeonTensorHandleFactory handleFactory(memoryManager);
 
     // NeonTensorHandleFactory supports InPlaceComputation
-    ARMNN_ASSERT(handleFactory.SupportsInPlaceComputation());
+    CHECK(handleFactory.SupportsInPlaceComputation());
 }
 
 }
diff --git a/src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp b/src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp
index a44c9aa..8cd36db 100644
--- a/src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp
+++ b/src/backends/neon/workloads/NeonChannelShuffleWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -76,7 +76,7 @@
             aclDataLayout = ConvertDataLayout(armnn::DataLayout::NHWC);
             break;
         default:
-            ARMNN_ASSERT_MSG(false, "Unsupported axis");
+            throw InvalidArgumentException("Value for axis: " + std::to_string(descriptor.m_Parameters.m_Axis) +
+                                           " is not valid");
             break;
     }
     input.info()->set_data_layout(aclDataLayout);
diff --git a/src/backends/neon/workloads/NeonConstantWorkload.cpp b/src/backends/neon/workloads/NeonConstantWorkload.cpp
index f5b0128..270e3fa 100644
--- a/src/backends/neon/workloads/NeonConstantWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConstantWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2018,2020-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -65,7 +65,7 @@
     {
         const ConstantQueueDescriptor& data = this->m_Data;
 
-        ARMNN_ASSERT(data.m_LayerOutput != nullptr);
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(data.m_LayerOutput, "Output tensor handle is null.");
         arm_compute::ITensor& output =
             PolymorphicDowncast<NeonTensorHandle*>(data.m_Outputs[0])->GetTensor();
         arm_compute::DataType computeDataType =
@@ -116,8 +116,7 @@
             }
             default:
             {
-                ARMNN_ASSERT_MSG(false, "Unknown data type");
-                break;
+                throw InvalidArgumentException("Unknown data type.");
             }
         }
 
diff --git a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
index c81022b..fdc52ef 100644
--- a/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -95,8 +95,6 @@
         m_BiasTensor = std::make_unique<arm_compute::Tensor>();
         BuildArmComputeTensor(*m_BiasTensor, info.m_InputTensorInfos[2], m_Data.m_Parameters.m_DataLayout);
         m_BiasTensor->info()->set_are_values_constant(info.m_InputTensorInfos[2].IsConstant());
-        // We assume here that NeonConvolution2dWorkloadValidate has been called before the constructor.
-        ARMNN_ASSERT(info.m_InputTensorInfos[2].IsConstant() == true);
     }
 
     arm_compute::PadStrideInfo padStrideInfo = BuildArmComputePadStrideInfo(m_Data.m_Parameters);
@@ -141,8 +139,6 @@
                                          GetGuid());
 
     m_ConvolutionLayer.reset(convolutionLayer.release());
-
-    ARMNN_ASSERT(m_ConvolutionLayer);
     m_KernelTensorInfo = info.m_InputTensorInfos[1];
 
     if (m_Data.m_Parameters.m_BiasEnabled)
diff --git a/src/backends/neon/workloads/NeonConvolution3dWorkload.cpp b/src/backends/neon/workloads/NeonConvolution3dWorkload.cpp
index 5bf6e10..ef03dde 100644
--- a/src/backends/neon/workloads/NeonConvolution3dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonConvolution3dWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -105,9 +105,6 @@
                                          this->GetGuid());
 
     m_ConvolutionLayer.reset(convolutionLayer.release());
-
-    ARMNN_ASSERT(m_ConvolutionLayer);
-
     m_ConvolutionLayer->prepare();
 }
 
diff --git a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
index b9e9ebb..de6601f 100644
--- a/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
+++ b/src/backends/neon/workloads/NeonDepthwiseConvolutionWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -146,15 +146,12 @@
                                          detailsInfo,
                                          GetGuid());
 
-    ARMNN_ASSERT(m_pDepthwiseConvolutionLayer);
-
     m_pDepthwiseConvolutionLayer->prepare();
 }
 
 void NeonDepthwiseConvolutionWorkload::Execute() const
 {
     ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID("NeonDepthwiseConvolutionWorkload_Execute");
-    ARMNN_ASSERT(m_pDepthwiseConvolutionLayer);
 
     m_pDepthwiseConvolutionLayer->run();
 }
diff --git a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
index 9503abd..d372792 100644
--- a/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
+++ b/src/backends/neon/workloads/NeonFullyConnectedWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -37,7 +37,9 @@
     arm_compute::TensorInfo* optionalAclBiases = nullptr;
     if (descriptor.m_BiasEnabled)
     {
-        ARMNN_ASSERT(biases.has_value());
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
+            biases.has_value(),
+            "NeonFullyConnectedWorkload: Bias was enabled in the descriptor but no value was supplied.");
         aclBiases = BuildArmComputeTensorInfo(biases.value());
         aclBiases.set_are_values_constant(biases.value().IsConstant());
         optionalAclBiases = &aclBiases;
diff --git a/src/backends/neon/workloads/NeonGatherNdWorkload.cpp b/src/backends/neon/workloads/NeonGatherNdWorkload.cpp
index 9388472..59fc20a 100644
--- a/src/backends/neon/workloads/NeonGatherNdWorkload.cpp
+++ b/src/backends/neon/workloads/NeonGatherNdWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -139,8 +139,6 @@
     flattenedCoeff_Info.SetShape({ keyIndices["ND"] });
     BuildArmComputeTensor(m_FlattenedCoeff, flattenedCoeff_Info);
     armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_FlattenedCoeff);
-    ARMNN_ASSERT_MSG(indicesInfo.GetDataType() == DataType::Signed32,
-                     "flattenedCoeff must be same data type as m_FlattenedCoeff");
     CopyArmComputeITensorData<int32_t>(flattenedCoeff.data(), m_FlattenedCoeff);
 
     // Prepare the tensor to store the output of the multiplication
diff --git a/src/backends/neon/workloads/NeonSqrtWorkload.cpp b/src/backends/neon/workloads/NeonSqrtWorkload.cpp
index 9c3d8a0..ee57a01 100644
--- a/src/backends/neon/workloads/NeonSqrtWorkload.cpp
+++ b/src/backends/neon/workloads/NeonSqrtWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -31,7 +31,10 @@
 NeonSqrtWorkload::NeonSqrtWorkload(const ElementwiseUnaryQueueDescriptor& descriptor, const WorkloadInfo& info)
     : NeonBaseWorkload<ElementwiseUnaryQueueDescriptor>(descriptor, info)
 {
-    ARMNN_ASSERT(descriptor.m_Parameters.m_Operation == UnaryOperation::Sqrt);
+    if (descriptor.m_Parameters.m_Operation != UnaryOperation::Sqrt)
+    {
+        throw InvalidArgumentException("NeonSqrtWorkload: The descriptor does not indicate a Sqrt operation.");
+    }
 
     ARMNN_REPORT_PROFILING_WORKLOAD_DESC("NeonSqrtWorkload_Construct",
                                          descriptor.m_Parameters,
diff --git a/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp b/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
index 2fa118b..2eedf98 100644
--- a/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
+++ b/src/backends/neon/workloads/NeonTransposeConvolution2dWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "NeonTransposeConvolution2dWorkload.hpp"
@@ -37,9 +37,10 @@
 
     if (descriptor.m_BiasEnabled)
     {
-        ARMNN_ASSERT(biases.has_value());
-
-        aclBiasesInfo = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
+            biases.has_value(),
+            "NeonTransposeConvolution2dWorkload: Bias was enabled in the descriptor but no value was supplied.");
+        aclBiasesInfo         = BuildArmComputeTensorInfo(biases.value(), descriptor.m_DataLayout);
         optionalAclBiasesInfo = &aclBiasesInfo;
     }
 
@@ -97,8 +98,6 @@
     m_Layer = std::make_unique<arm_compute::NEDeconvolutionLayer>(memoryManager);
     m_Layer->configure(&input, m_KernelTensor.get(), m_BiasTensor.get(), &output, padStrideInfo);
 
-    ARMNN_ASSERT(m_Layer);
-
     InitializeArmComputeTensorData(*m_KernelTensor, m_Data.m_Weight);
 
     if (m_Data.m_Parameters.m_BiasEnabled)
diff --git a/src/backends/neon/workloads/NeonWorkloadUtils.hpp b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
index 694c3ab..2c7cd1b 100644
--- a/src/backends/neon/workloads/NeonWorkloadUtils.hpp
+++ b/src/backends/neon/workloads/NeonWorkloadUtils.hpp
@@ -1,9 +1,10 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
 
+#include <armnn/Exceptions.hpp>
 #include <armnn/backends/Workload.hpp>
 #include <aclCommon/ArmComputeTensorUtils.hpp>
 #include <neon/NeonTensorHandle.hpp>
@@ -69,8 +70,7 @@
                                            TensorInfo tensorInfo,
                                            const ITensorHandle* handle)
 {
-    ARMNN_ASSERT(handle);
-
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(handle, "Null tensor handle passed to InitializeArmComputeTensorData.");
     switch(tensorInfo.GetDataType())
     {
         case DataType::Float16:
@@ -104,8 +104,7 @@
 inline void InitializeArmComputeTensorData(arm_compute::Tensor& tensor,
                                            const ConstTensorHandle* handle)
 {
-    ARMNN_ASSERT(handle);
-
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(handle, "Null tensor handle passed to InitializeArmComputeTensorData.");
     switch(handle->GetTensorInfo().GetDataType())
     {
         case DataType::Float16:
diff --git a/src/backends/reference/RefLayerSupport.cpp b/src/backends/reference/RefLayerSupport.cpp
index 40d243e..f97d03a 100644
--- a/src/backends/reference/RefLayerSupport.cpp
+++ b/src/backends/reference/RefLayerSupport.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -958,7 +958,6 @@
                                   "Reference concatenation: output type not supported");
     for (const TensorInfo* input : inputs)
     {
-        ARMNN_ASSERT(input != nullptr);
         supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
             "Reference concatenation: input type not supported");
 
@@ -2629,7 +2628,6 @@
                                   "Reference stack: output type not supported");
     for (const TensorInfo* input : inputs)
     {
-        ARMNN_ASSERT(input != nullptr);
         supported &= CheckSupportRule(TypeAnyOf(*input, supportedTypes), reasonIfUnsupported,
             "Reference stack: input type not supported");
 
diff --git a/src/backends/reference/RefMemoryManager.cpp b/src/backends/reference/RefMemoryManager.cpp
index 76054e4..80f3531 100644
--- a/src/backends/reference/RefMemoryManager.cpp
+++ b/src/backends/reference/RefMemoryManager.cpp
@@ -1,10 +1,10 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "RefMemoryManager.hpp"
 
-#include <armnn/utility/Assert.hpp>
+#include <armnn/Exceptions.hpp>
 
 #include <algorithm>
 
@@ -35,7 +35,7 @@
 
 void RefMemoryManager::Allocate(RefMemoryManager::Pool* pool)
 {
-    ARMNN_ASSERT(pool);
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(pool, "Null memory manager passed to RefMemoryManager.");
     m_FreePools.push_back(pool);
 }
 
@@ -75,25 +75,29 @@
 
 void* RefMemoryManager::Pool::GetPointer()
 {
-    ARMNN_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::GetPointer() called when memory not acquired");
+    ARMNN_THROW_MSG_IF_FALSE(m_Pointer, RuntimeException,
+                             "RefMemoryManager::Pool::GetPointer() called when memory not acquired");
     return m_Pointer;
 }
 
 void RefMemoryManager::Pool::Reserve(unsigned int numBytes)
 {
-    ARMNN_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Reserve() cannot be called after memory acquired");
+    ARMNN_THROW_MSG_IF_FALSE(!m_Pointer, RuntimeException,
+                             "RefMemoryManager::Pool::Reserve() cannot be called after memory acquired");
     m_Size = std::max(m_Size, numBytes);
 }
 
 void RefMemoryManager::Pool::Acquire()
 {
-    ARMNN_ASSERT_MSG(!m_Pointer, "RefMemoryManager::Pool::Acquire() called when memory already acquired");
+    ARMNN_THROW_MSG_IF_FALSE(!m_Pointer, RuntimeException,
+                             "RefMemoryManager::Pool::Acquire() called when memory already acquired");
     m_Pointer = ::operator new(size_t(m_Size));
 }
 
 void RefMemoryManager::Pool::Release()
 {
-    ARMNN_ASSERT_MSG(m_Pointer, "RefMemoryManager::Pool::Release() called when memory not acquired");
+    ARMNN_THROW_MSG_IF_FALSE(m_Pointer, RuntimeException,
+                             "RefMemoryManager::Pool::Release() called when memory not acquired");
     ::operator delete(m_Pointer);
     m_Pointer = nullptr;
 }
diff --git a/src/backends/reference/RefTensorHandle.cpp b/src/backends/reference/RefTensorHandle.cpp
index 07f497c..1158a14 100644
--- a/src/backends/reference/RefTensorHandle.cpp
+++ b/src/backends/reference/RefTensorHandle.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019-2023 Arm Ltd. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -44,8 +44,8 @@
 
 void RefTensorHandle::Manage()
 {
-    ARMNN_ASSERT_MSG(!m_Pool, "RefTensorHandle::Manage() called twice");
-    ARMNN_ASSERT_MSG(!m_UnmanagedMemory, "RefTensorHandle::Manage() called after Allocate()");
+    ARMNN_THROW_MSG_IF_FALSE(!m_Pool, RuntimeException, "RefTensorHandle::Manage() called twice");
+    ARMNN_THROW_MSG_IF_FALSE(!m_UnmanagedMemory, RuntimeException, "RefTensorHandle::Manage() called after Allocate()");
 
     if (m_MemoryManager)
     {
diff --git a/src/backends/reference/workloads/BaseIterator.hpp b/src/backends/reference/workloads/BaseIterator.hpp
index 694c229..5c5fff3 100644
--- a/src/backends/reference/workloads/BaseIterator.hpp
+++ b/src/backends/reference/workloads/BaseIterator.hpp
@@ -1,12 +1,11 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #pragma once
 
 #include <armnn/TypesUtils.hpp>
-#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/NumericCast.hpp>
 #include <armnnUtils/FloatingPointConverter.hpp>
 #include <armnnUtils/TensorUtils.hpp>
@@ -78,28 +77,28 @@
 
     TypedIterator& operator++() override
     {
-        ARMNN_ASSERT(m_Iterator);
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_Iterator, "TypedIterator: m_Iterator is null!");
         ++m_Iterator;
         return *this;
     }
 
     TypedIterator& operator+=(const unsigned int increment) override
     {
-        ARMNN_ASSERT(m_Iterator);
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_Iterator, "TypedIterator: m_Iterator is null!");
         m_Iterator += increment;
         return *this;
     }
 
     TypedIterator& operator-=(const unsigned int increment) override
     {
-        ARMNN_ASSERT(m_Iterator);
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_Iterator, "TypedIterator: m_Iterator is null!");
         m_Iterator -= increment;
         return *this;
     }
 
     TypedIterator& operator[](const unsigned int index) override
     {
-        ARMNN_ASSERT(m_Iterator);
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_Iterator, "TypedIterator: m_Iterator is null!");
         m_Iterator = m_Start + index;
         return *this;
     }
@@ -763,7 +762,7 @@
 
     inline PerAxisIterator& SetIndexOnMem(const unsigned int index)
     {
-        ARMNN_ASSERT(m_Iterator);
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_Iterator, "PerAxisIterator: m_Iterator is null!");
         m_Iterator = m_Start + index;
         if (index < m_AxisFactor)
         {
diff --git a/src/backends/reference/workloads/BatchMatMulImpl.cpp b/src/backends/reference/workloads/BatchMatMulImpl.cpp
index c592b3b..8e169cb 100644
--- a/src/backends/reference/workloads/BatchMatMulImpl.cpp
+++ b/src/backends/reference/workloads/BatchMatMulImpl.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022, 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -145,7 +145,6 @@
     const auto& dataLayout = (type == DataSlot::InputX) ? params.m_DataLayoutX : params.m_DataLayoutY;
     const auto axesToAdjoint = BatchMatMulDescriptor::GetAxesToMul(dataLayout,inputInfo.GetShape());
 
-    ARMNN_ASSERT(inputInfo.GetShape()[axesToAdjoint.first] == inputInfo.GetShape()[axesToAdjoint.second]);
     // We grab a copy of the tensor data to prevent overwriting
     std::vector<float> inputDataClone = (type == DataSlot::InputX) ? inputXData : inputYData;
 
diff --git a/src/backends/reference/workloads/Concatenate.cpp b/src/backends/reference/workloads/Concatenate.cpp
index a0e0abf..fece43c 100644
--- a/src/backends/reference/workloads/Concatenate.cpp
+++ b/src/backends/reference/workloads/Concatenate.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -40,7 +40,9 @@
 
             //Split view extents are defined by the size of (the corresponding) input tensor.
             const TensorInfo& inputInfo = GetTensorInfo(inputs[viewIdx]);
-            ARMNN_ASSERT(inputInfo.GetNumDimensions() == outputInfo0.GetNumDimensions());
+            ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
+                inputInfo.GetNumDimensions() == outputInfo0.GetNumDimensions(),
+                "The number of output dimensions does not match the number of input dimensions.");
 
             // Check all dimensions to see if this element is inside the given input view.
             bool insideView = true;
diff --git a/src/backends/reference/workloads/ConvImpl.cpp b/src/backends/reference/workloads/ConvImpl.cpp
index 320690e..098c931 100644
--- a/src/backends/reference/workloads/ConvImpl.cpp
+++ b/src/backends/reference/workloads/ConvImpl.cpp
@@ -1,12 +1,10 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include "ConvImpl.hpp"
 
-#include <armnn/utility/Assert.hpp>
-
 #include <cmath>
 #include <limits>
 
@@ -15,7 +13,8 @@
 
 QuantizedMultiplierSmallerThanOne::QuantizedMultiplierSmallerThanOne(float multiplier)
 {
-    ARMNN_ASSERT(multiplier >= 0.0f && multiplier < 1.0f);
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(multiplier >= 0.0f && multiplier < 1.0f,
+                                        "QuantizedMultiplierSmallerThanOne: multiplier must be in [0.0f, 1.0f).");
     if (multiplier == 0.0f)
     {
         m_Multiplier = 0;
@@ -26,14 +25,11 @@
         const double q = std::frexp(multiplier, &m_RightShift);
         m_RightShift = -m_RightShift;
         int64_t qFixed = static_cast<int64_t>(::round(q * (1ll << 31)));
-        ARMNN_ASSERT(qFixed <= (1ll << 31));
         if (qFixed == (1ll << 31))
         {
             qFixed /= 2;
             --m_RightShift;
         }
-        ARMNN_ASSERT(m_RightShift >= 0);
-        ARMNN_ASSERT(qFixed <= std::numeric_limits<int32_t>::max());
         m_Multiplier = static_cast<int32_t>(qFixed);
     }
 }
@@ -61,7 +57,8 @@
 
 int32_t QuantizedMultiplierSmallerThanOne::RoundingDivideByPOT(int32_t x, int exponent)
 {
-    ARMNN_ASSERT(exponent >= 0 && exponent <= 31);
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(exponent >= 0 && exponent <= 31,
+                                        "RoundingDivideByPOT: exponent must be between 0 and 31.");
     int32_t mask = (1 << exponent) - 1;
     int32_t remainder = x & mask;
     int32_t threshold = (mask >> 1) + (x < 0 ? 1 : 0);
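
To make the retained fixed-point math easier to review, here is a self-contained sketch of the same frexp-based decomposition (hypothetical names, illustrative only): a multiplier in [0, 1) is split into a Q31 integer multiplier and a right shift.

    #include <cmath>
    #include <cstdint>
    #include <iostream>

    void QuantizeMultiplierSketch(float multiplier, int32_t& quantized, int& rightShift)
    {
        if (multiplier == 0.0f)
        {
            quantized  = 0;
            rightShift = 0;
            return;
        }
        const double q = std::frexp(multiplier, &rightShift); // q in [0.5, 1)
        rightShift = -rightShift;                             // multiplier == q * 2^-rightShift
        int64_t qFixed = static_cast<int64_t>(std::round(q * (1ll << 31)));
        if (qFixed == (1ll << 31)) // rounding pushed q up to 1.0: halve and shift one less
        {
            qFixed /= 2;
            --rightShift;
        }
        quantized = static_cast<int32_t>(qFixed);
    }

    int main()
    {
        int32_t m = 0;
        int shift = 0;
        QuantizeMultiplierSketch(0.25f, m, shift);
        std::cout << m << " >> " << shift << "\n"; // prints "1073741824 >> 1": (2^30 / 2^31) * 2^-1 == 0.25
    }
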
diff --git a/src/backends/reference/workloads/DepthToSpace.cpp b/src/backends/reference/workloads/DepthToSpace.cpp
index f5e9ec5..60098d1 100644
--- a/src/backends/reference/workloads/DepthToSpace.cpp
+++ b/src/backends/reference/workloads/DepthToSpace.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -8,8 +8,6 @@
 #include <armnnUtils/DataLayoutIndexed.hpp>
 #include <armnnUtils/Permute.hpp>
 
-#include <armnn/utility/Assert.hpp>
-
 using namespace armnnUtils;
 
 namespace armnn
@@ -22,7 +20,6 @@
                   unsigned int dataTypeSize)
 {
     const unsigned int blockSize = descriptor.m_BlockSize;
-    ARMNN_ASSERT(blockSize != 0u);
 
     const TensorShape& inputShape = inputInfo.GetShape();
     const unsigned int batches = inputShape[0];
diff --git a/src/backends/reference/workloads/Dequantize.cpp b/src/backends/reference/workloads/Dequantize.cpp
index fdc8e30..3955458 100644
--- a/src/backends/reference/workloads/Dequantize.cpp
+++ b/src/backends/reference/workloads/Dequantize.cpp
@@ -1,12 +1,10 @@
 //
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include "Dequantize.hpp"
 
-#include <armnn/utility/IgnoreUnused.hpp>
-
 namespace armnn
 {
 
@@ -15,8 +13,9 @@
                 const TensorInfo& inputInfo,
                 const TensorInfo& outputInfo)
 {
-    IgnoreUnused(outputInfo);
-    ARMNN_ASSERT(inputInfo.GetNumElements() == outputInfo.GetNumElements());
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
+        inputInfo.GetNumElements() == outputInfo.GetNumElements(),
+        "Dequantize: The number of elements in the input and output tensors must be the same.");
     for (unsigned int i = 0; i < inputInfo.GetNumElements(); i++)
     {
         // inputDecoder.Get() dequantizes the data element from whatever
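
For context on the check above: the decoder is assumed to apply the usual affine dequantization per element. A sketch for the QAsymmU8 case (illustrative, not the Decoder implementation):

    #include <cstdint>
    #include <vector>

    // real = scale * (quantized - zeroPoint)
    std::vector<float> DequantizeU8(const std::vector<uint8_t>& data, float scale, int32_t zeroPoint)
    {
        std::vector<float> out;
        out.reserve(data.size());
        for (uint8_t q : data)
        {
            out.push_back(scale * static_cast<float>(static_cast<int32_t>(q) - zeroPoint));
        }
        return out;
    }
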
diff --git a/src/backends/reference/workloads/DetectionPostProcess.cpp b/src/backends/reference/workloads/DetectionPostProcess.cpp
index c5ab327..361f886 100644
--- a/src/backends/reference/workloads/DetectionPostProcess.cpp
+++ b/src/backends/reference/workloads/DetectionPostProcess.cpp
@@ -1,12 +1,10 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include "DetectionPostProcess.hpp"
 
-#include <armnn/utility/Assert.hpp>
-#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/utility/NumericCast.hpp>
 
 #include <algorithm>
@@ -140,11 +138,11 @@
 
 void DetectionPostProcess(const TensorInfo& boxEncodingsInfo,
                           const TensorInfo& scoresInfo,
-                          const TensorInfo& anchorsInfo,
+                          const TensorInfo&,
                           const TensorInfo& detectionBoxesInfo,
-                          const TensorInfo& detectionClassesInfo,
-                          const TensorInfo& detectionScoresInfo,
-                          const TensorInfo& numDetectionsInfo,
+                          const TensorInfo&,
+                          const TensorInfo&,
+                          const TensorInfo&,
                           const DetectionPostProcessDescriptor& desc,
                           Decoder<float>& boxEncodings,
                           Decoder<float>& scores,
@@ -154,7 +152,6 @@
                           float* detectionScores,
                           float* numDetections)
 {
-    IgnoreUnused(anchorsInfo, detectionClassesInfo, detectionScoresInfo, numDetectionsInfo);
 
     // Transform center-size format which is (ycenter, xcenter, height, width) to box-corner format,
     // which represents the lower left corner and the upper right corner (ymin, xmin, ymax, xmax)
@@ -212,9 +209,6 @@
         boxCorners[indexH] = yCentre + halfH;
         // xmax
         boxCorners[indexW] = xCentre + halfW;
-
-        ARMNN_ASSERT(boxCorners[indexY] < boxCorners[indexH]);
-        ARMNN_ASSERT(boxCorners[indexX] < boxCorners[indexW]);
     }
 
     unsigned int numClassesWithBg = desc.m_NumClasses + 1;
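
The transform the removed asserts were guarding can be sketched as follows (hypothetical helper; with a non-positive height or width the ymin < ymax and xmin < xmax invariants would indeed not hold):

    #include <array>

    // (yCentre, xCentre, height, width) -> (ymin, xmin, ymax, xmax)
    std::array<float, 4> CenterSizeToCorners(float yCentre, float xCentre, float height, float width)
    {
        const float halfH = 0.5f * height;
        const float halfW = 0.5f * width;
        return { yCentre - halfH,   // ymin
                 xCentre - halfW,   // xmin
                 yCentre + halfH,   // ymax
                 xCentre + halfW }; // xmax
    }
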
diff --git a/src/backends/reference/workloads/FullyConnected.cpp b/src/backends/reference/workloads/FullyConnected.cpp
index 47968f4..19c01b8 100644
--- a/src/backends/reference/workloads/FullyConnected.cpp
+++ b/src/backends/reference/workloads/FullyConnected.cpp
@@ -1,12 +1,10 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include "FullyConnected.hpp"
 
-#include <armnn/utility/Assert.hpp>
-
 #include "RefWorkloadUtils.hpp"
 
 namespace armnn
@@ -31,7 +29,6 @@
 
     const TensorShape biasShape{outputSize};
 
-    ARMNN_ASSERT(!biasEnabled || pBiasDecoder != nullptr);
     const std::vector<float> decodedBiases = biasEnabled ? pBiasDecoder->DecodeTensor(biasShape) : std::vector<float>();
 
 
diff --git a/src/backends/reference/workloads/LogSoftmax.cpp b/src/backends/reference/workloads/LogSoftmax.cpp
index 2b63849..0926894 100644
--- a/src/backends/reference/workloads/LogSoftmax.cpp
+++ b/src/backends/reference/workloads/LogSoftmax.cpp
@@ -1,13 +1,11 @@
 //
-// Copyright © 2019 Arm Ltd. All rights reserved.
+// Copyright © 2019, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include "LogSoftmax.hpp"
 
 #include <armnnUtils/TensorUtils.hpp>
-#include <armnn/utility/Assert.hpp>
-#include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/utility/NumericCast.hpp>
 
 #include <cmath>
@@ -33,10 +31,8 @@
 {
     const unsigned int numDimensions = inputInfo.GetNumDimensions();
 
-    bool axisIsValid = ValidateAxis(descriptor.m_Axis, numDimensions);
-    ARMNN_ASSERT_MSG(axisIsValid,
-        "Axis index is not in range [-numDimensions, numDimensions).");
-    IgnoreUnused(axisIsValid);
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(ValidateAxis(descriptor.m_Axis, numDimensions),
+                                        "Axis index is not in range [-numDimensions, numDimensions).");
 
     unsigned int uAxis = descriptor.m_Axis < 0  ?
         numDimensions - armnn::numeric_cast<unsigned int>(std::abs(descriptor.m_Axis)) :
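
The range ValidateAxis is expected to enforce, and the negative-axis normalisation that follows it, can be sketched as (hypothetical names; the real helper lives elsewhere in the tree):

    // Valid iff axis is in [-numDimensions, numDimensions).
    bool AxisInRange(int axis, unsigned int numDimensions)
    {
        const int n = static_cast<int>(numDimensions);
        return axis >= -n && axis < n;
    }

    // Map a possibly negative axis onto [0, numDimensions).
    unsigned int NormaliseAxis(int axis, unsigned int numDimensions)
    {
        return axis < 0 ? numDimensions - static_cast<unsigned int>(-axis)
                        : static_cast<unsigned int>(axis);
    }
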
diff --git a/src/backends/reference/workloads/MirrorPad.cpp b/src/backends/reference/workloads/MirrorPad.cpp
index 7388fed..de3b74b 100644
--- a/src/backends/reference/workloads/MirrorPad.cpp
+++ b/src/backends/reference/workloads/MirrorPad.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -18,8 +18,8 @@
 {
     unsigned int numOfElements = shape.GetNumElements();
 
-    ARMNN_ASSERT_MSG(index <= numOfElements, "Index has to be in [0, num_elements]");
-    ARMNN_ASSERT_MSG(numOfElements != 0, "Cannot create coordinate from empty shape");
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(index <= numOfElements, "Index has to be in [0, numOfElements].");
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(numOfElements != 0, "Cannot create a coordinate from an empty shape.");
 
     std::vector<unsigned int> coord(shape.GetNumDimensions());
     for(unsigned int i = 0; i < shape.GetNumDimensions(); ++i)
@@ -36,8 +36,8 @@
 // E.g. [0, 0, 2] returns 2.
 inline unsigned int CoordToIndex(const armnn::TensorShape& shape, const std::vector<unsigned int>& coord)
 {
-    ARMNN_ASSERT_MSG(shape.GetNumDimensions() != 0, "Cannot get index from empty shape");
-    ARMNN_ASSERT_MSG(coord.size() != 0, "Cannot get index of empty coordinate");
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(shape.GetNumDimensions() != 0, "Cannot get an index from an empty shape.");
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(coord.size() != 0, "Cannot get the index of an empty coordinate.");
 
     unsigned int index    = 0;
     unsigned int dimSize  = 1;
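
For reviewers, a standalone sketch of the row-major mapping these two helpers implement, using plain vectors in place of TensorShape (illustrative only):

    #include <vector>

    std::vector<unsigned int> IndexToCoordSketch(const std::vector<unsigned int>& shape, unsigned int index)
    {
        std::vector<unsigned int> coord(shape.size());
        for (unsigned int i = 0; i < shape.size(); ++i)
        {
            unsigned int dimSize = 1;
            for (unsigned int j = i + 1; j < shape.size(); ++j)
            {
                dimSize *= shape[j]; // stride of dimension i
            }
            coord[i] = (index / dimSize) % shape[i];
        }
        return coord;
    }

    unsigned int CoordToIndexSketch(const std::vector<unsigned int>& shape, const std::vector<unsigned int>& coord)
    {
        unsigned int index   = 0;
        unsigned int dimSize = 1;
        for (int i = static_cast<int>(shape.size()) - 1; i >= 0; --i)
        {
            index   += coord[static_cast<unsigned int>(i)] * dimSize;
            dimSize *= shape[static_cast<unsigned int>(i)];
        }
        return index;
    }

    // For shape {2, 2, 3}: CoordToIndexSketch({2, 2, 3}, {0, 0, 2}) == 2,
    // matching the example in the comment above.
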
diff --git a/src/backends/reference/workloads/Reduce.cpp b/src/backends/reference/workloads/Reduce.cpp
index 8b28a61..6ea333b 100644
--- a/src/backends/reference/workloads/Reduce.cpp
+++ b/src/backends/reference/workloads/Reduce.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021, 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -153,8 +153,6 @@
     for (unsigned int idx = 0; idx < numResolvedAxis; ++idx)
     {
         unsigned int current = inputDims[resolvedAxis[idx]];
-        ARMNN_ASSERT(armnn::numeric_cast<float>(current) <
-                     (std::numeric_limits<float>::max() / armnn::numeric_cast<float>(numElementsInAxis)));
         numElementsInAxis *= current;
     }
 
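The removed assert expressed an overflow guard on the accumulation of numElementsInAxis. Should an explicit guard ever be wanted again, an integer version could look like this (illustrative only; not part of this patch):

    #include <cstdint>
    #include <limits>
    #include <vector>

    // Multiply extents only while the running product stays in range.
    bool SafeProduct(const std::vector<unsigned int>& extents, uint64_t& product)
    {
        product = 1;
        for (unsigned int e : extents)
        {
            if (e != 0 && product > std::numeric_limits<uint64_t>::max() / e)
            {
                return false; // multiplying by e would overflow
            }
            product *= e;
        }
        return true;
    }
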
diff --git a/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp b/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
index e45d24a..47c537c 100644
--- a/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
+++ b/src/backends/reference/workloads/RefLogSoftmaxWorkload.cpp
@@ -12,8 +12,6 @@
 
 #include <Profiling.hpp>
 
-#include <armnn/utility/Assert.hpp>
-
 namespace armnn
 {
 
@@ -38,9 +36,6 @@
     std::unique_ptr<Decoder<float>> decoder = MakeDecoder<float>(inputInfo, inputs[0]->Map());
     std::unique_ptr<Encoder<float>> encoder = MakeEncoder<float>(outputInfo, outputs[0]->Map());
 
-    ARMNN_ASSERT(decoder != nullptr);
-    ARMNN_ASSERT(encoder != nullptr);
-
     LogSoftmax(*decoder, *encoder, inputInfo, m_Data.m_Parameters);
 }
 
diff --git a/src/backends/reference/workloads/RefStridedSliceWorkload.cpp b/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
index c4a4f7f..1dc95a2 100644
--- a/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
+++ b/src/backends/reference/workloads/RefStridedSliceWorkload.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -31,13 +31,8 @@
     ARMNN_SCOPED_PROFILING_EVENT_REF_NAME_GUID("RefStridedSliceWorkload_Execute");
 
     const TensorInfo& inputInfo  = GetTensorInfo(inputs[0]);
-    const TensorInfo& outputInfo = GetTensorInfo(outputs[0]);
 
     DataType inputDataType  = inputInfo.GetDataType();
-    DataType outputDataType = outputInfo.GetDataType();
-
-    ARMNN_ASSERT(inputDataType == outputDataType);
-    IgnoreUnused(outputDataType);
 
     StridedSlice(inputInfo,
                  m_Data.m_Parameters,
diff --git a/src/backends/reference/workloads/Resize.cpp b/src/backends/reference/workloads/Resize.cpp
index e80a205..7bed6c6 100644
--- a/src/backends/reference/workloads/Resize.cpp
+++ b/src/backends/reference/workloads/Resize.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -72,7 +72,8 @@
             bool halfPixelCenters)
 {
     // alignCorners and halfPixelCenters cannot both be true
-    ARMNN_ASSERT(!(alignCorners && halfPixelCenters));
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(!(alignCorners && halfPixelCenters),
+                                        "Resize: alignCorners and halfPixelCenters cannot both be true.");
 
     // We follow the definition of TensorFlow and AndroidNN: the top-left corner of a texel in the output
     // image is projected into the input image to figure out the interpolants and weights. Note that this
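
The output-to-input projections this comment refers to are conventionally defined as below (TensorFlow/AndroidNN conventions; the exact variant used should be checked against Resize.cpp itself, so treat this as a sketch):

    float SourceCoordinate(unsigned int outIdx, unsigned int inSize, unsigned int outSize,
                           bool alignCorners, bool halfPixelCenters)
    {
        if (alignCorners && outSize > 1)
        {
            // The endpoints of the input and output grids coincide.
            return static_cast<float>(outIdx) * static_cast<float>(inSize - 1)
                   / static_cast<float>(outSize - 1);
        }
        const float scale = static_cast<float>(inSize) / static_cast<float>(outSize);
        if (halfPixelCenters)
        {
            // Pixel centres, rather than top-left corners, are projected.
            return (static_cast<float>(outIdx) + 0.5f) * scale - 0.5f;
        }
        return static_cast<float>(outIdx) * scale;
    }
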
diff --git a/src/backends/reference/workloads/Softmax.cpp b/src/backends/reference/workloads/Softmax.cpp
index 00d496d..d792361 100644
--- a/src/backends/reference/workloads/Softmax.cpp
+++ b/src/backends/reference/workloads/Softmax.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -16,10 +16,10 @@
 /// Computes the softmax function on some inputs, into outputs, with a shape given by tensorInfo.
 void Softmax(Decoder<float>& in, Encoder<float>& out, const TensorInfo& inputTensorInfo, float beta, int axis)
 {
-    ARMNN_ASSERT_MSG(axis < static_cast<int>(inputTensorInfo.GetNumDimensions()),
-                     "Required axis index greater than number of dimensions.");
-    ARMNN_ASSERT_MSG(axis >= -static_cast<int>(inputTensorInfo.GetNumDimensions()),
-                     "Required axis index lower than negative of the number of dimensions");
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(axis < static_cast<int>(inputTensorInfo.GetNumDimensions()),
+                                        "Softmax: axis index must be less than the number of dimensions.");
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(axis >= -static_cast<int>(inputTensorInfo.GetNumDimensions()),
+                                        "Softmax: axis index must be greater than or equal to the negative of the number of dimensions.");
 
     unsigned int uAxis = axis < 0  ?
                          inputTensorInfo.GetNumDimensions() - static_cast<unsigned int>(abs(axis))
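
For reference, the computation performed along the resolved axis is the beta-scaled, max-subtracted softmax. A one-dimensional sketch (the workload itself iterates slices of the tensor along uAxis):

    #include <algorithm>
    #include <cmath>
    #include <cstddef>
    #include <vector>

    // Assumes x is non-empty.
    std::vector<float> SoftmaxSlice(const std::vector<float>& x, float beta)
    {
        const float maxVal = *std::max_element(x.begin(), x.end());
        std::vector<float> out(x.size());
        float sum = 0.0f;
        for (std::size_t i = 0; i < x.size(); ++i)
        {
            out[i] = std::exp(beta * (x[i] - maxVal)); // subtract the max for numerical stability
            sum += out[i];
        }
        for (float& v : out)
        {
            v /= sum;
        }
        return out;
    }
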
diff --git a/src/backends/reference/workloads/Splitter.cpp b/src/backends/reference/workloads/Splitter.cpp
index 695ae8a..963e3aa 100644
--- a/src/backends/reference/workloads/Splitter.cpp
+++ b/src/backends/reference/workloads/Splitter.cpp
@@ -1,12 +1,11 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include "RefWorkloadUtils.hpp"
 #include <armnn/backends/WorkloadData.hpp>
 #include <armnn/Tensor.hpp>
-#include <armnn/utility/Assert.hpp>
 #include "Splitter.hpp"
 
 #include <cmath>
@@ -48,7 +47,9 @@
 
             //Split view extents are defined by the size of (the corresponding) input tensor.
             const TensorInfo& outputInfo = GetTensorInfo(outputs[viewIdx]);
-            ARMNN_ASSERT(outputInfo.GetNumDimensions() == inputInfo.GetNumDimensions());
+            ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
+                outputInfo.GetNumDimensions() == inputInfo.GetNumDimensions(),
+                "The number of output dimensions does not match the number of input dimensions.");
 
             // Check all dimensions to see if this element is inside the given input view.
             bool insideView = true;
diff --git a/src/backends/reference/workloads/Splitter.hpp b/src/backends/reference/workloads/Splitter.hpp
index 730b071..f05f654 100644
--- a/src/backends/reference/workloads/Splitter.hpp
+++ b/src/backends/reference/workloads/Splitter.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -40,7 +40,9 @@
 
             //Split view extents are defined by the size of (the corresponding) input tensor.
             const TensorInfo& outputInfo = GetTensorInfo(outputs[viewIdx]);
-            ARMNN_ASSERT(outputInfo.GetNumDimensions() == inputInfo0.GetNumDimensions());
+            ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(
+                outputInfo.GetNumDimensions() == inputInfo0.GetNumDimensions(),
+                "The number of output dimensions does not match the number of input dimensions.");
 
             // Check all dimensions to see if this element is inside the given input view.
             bool insideView = true;
@@ -69,11 +71,7 @@
 
                 //We are within the view, to copy input data to the output corresponding to this view.
                 DataType* outputData = GetOutputTensorData<DataType>(viewIdx, data);
-                ARMNN_ASSERT(outputData);
-
                 const DataType* inputData = GetInputTensorData<DataType>(0, data);
-                ARMNN_ASSERT(inputData);
-
                 outputData[outIndex] = inputData[index];
             }
         }
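
The "inside view" test that both splitter implementations perform can be sketched independently of the workload plumbing (hypothetical names): an element is copied into a given view only if every coordinate falls within that view's origin and extent.

    #include <cstddef>
    #include <vector>

    bool InsideView(const std::vector<unsigned int>& coord,
                    const std::vector<unsigned int>& viewOrigin,
                    const std::vector<unsigned int>& viewExtent)
    {
        for (std::size_t d = 0; d < coord.size(); ++d)
        {
            // Inside iff origin[d] <= coord[d] < origin[d] + extent[d] in every dimension.
            if (coord[d] < viewOrigin[d] || coord[d] >= viewOrigin[d] + viewExtent[d])
            {
                return false;
            }
        }
        return true;
    }
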
diff --git a/src/backends/reference/workloads/StridedSlice.cpp b/src/backends/reference/workloads/StridedSlice.cpp
index 68600c9..fcd1c35 100644
--- a/src/backends/reference/workloads/StridedSlice.cpp
+++ b/src/backends/reference/workloads/StridedSlice.cpp
@@ -1,13 +1,10 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
 #include "StridedSlice.hpp"
 
-#include <ResolveType.hpp>
-
-#include <armnn/utility/Assert.hpp>
 #include <armnn/utility/NumericCast.hpp>
 
 #include <cstring>
@@ -20,12 +17,11 @@
 
 void PadParams(StridedSliceDescriptor& p, unsigned int dimCount)
 {
-    ARMNN_ASSERT_MSG(dimCount <= 4, "Expected input with at most 4 dimensions");
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(dimCount <= 4, "Expected input with at most 4 dimensions");
 
     const unsigned int beginIndicesCount =
         armnn::numeric_cast<unsigned int>(p.m_Begin.size());
 
-    ARMNN_ASSERT(dimCount >= beginIndicesCount);
     const unsigned int padCount = dimCount - beginIndicesCount;
 
     p.m_Begin.resize(dimCount);
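
PadParams extends the descriptor vectors up to dimCount entries. A sketch of one way to pad a single vector to a given rank (front padding with a neutral value such as begin 0 or stride 1 is assumed here; the in-tree code resizes and fills in place, so consult the rest of PadParams for the exact convention):

    #include <vector>

    std::vector<int> PadToRank(std::vector<int> values, unsigned int dimCount, int neutral)
    {
        // dimCount >= values.size() is guaranteed by the padCount computation above.
        const unsigned int padCount = dimCount - static_cast<unsigned int>(values.size());
        values.insert(values.begin(), padCount, neutral);
        return values;
    }
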
diff --git a/src/backends/reference/workloads/TensorBufferArrayView.hpp b/src/backends/reference/workloads/TensorBufferArrayView.hpp
index 0b448e6..c6a7571 100644
--- a/src/backends/reference/workloads/TensorBufferArrayView.hpp
+++ b/src/backends/reference/workloads/TensorBufferArrayView.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017, 2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -9,8 +9,6 @@
 
 #include <armnnUtils/DataLayoutIndexed.hpp>
 
-#include <armnn/utility/Assert.hpp>
-
 namespace armnn
 {
 
@@ -25,7 +23,8 @@
         , m_Data(data)
         , m_DataLayout(dataLayout)
     {
-        ARMNN_ASSERT(m_Shape.GetNumDimensions() == 4);
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_Shape.GetNumDimensions() == 4,
+                                            "Only 4D tensors are supported by TensorBufferArrayView.");
     }
 
     DataType& Get(unsigned int b, unsigned int c, unsigned int h, unsigned int w) const
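
Get() relies on DataLayoutIndexed to resolve flat offsets. For reference, the row-major offsets it is assumed to compute for the two 4D layouts:

    // NCHW: ((b*C + c)*H + h)*W + w
    // NHWC: ((b*H + h)*W + w)*C + c
    unsigned int Offset4D(bool nchw,
                          unsigned int C, unsigned int H, unsigned int W,
                          unsigned int b, unsigned int c, unsigned int h, unsigned int w)
    {
        return nchw ? ((b * C + c) * H + h) * W + w
                    : ((b * H + h) * W + w) * C + c;
    }
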
diff --git a/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp b/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp
index 6c2b314..a9af249 100644
--- a/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/ElementwiseBinaryOperator.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -88,7 +88,6 @@
         default:
             throw armnn::Exception("ConvertElementwiseBinaryToTosaOperator: Unsupported layer type.");
     }
-    ARMNN_ASSERT(op != nullptr);
 
     std::vector<TosaSerializationTensor*> tensors;
     // Only add input tensors if connected layer is an input layer.
diff --git a/src/backends/tosaCommon/operatorMappings/ElementwiseUnaryOperator.cpp b/src/backends/tosaCommon/operatorMappings/ElementwiseUnaryOperator.cpp
index 17ea64b..02dddab 100644
--- a/src/backends/tosaCommon/operatorMappings/ElementwiseUnaryOperator.cpp
+++ b/src/backends/tosaCommon/operatorMappings/ElementwiseUnaryOperator.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -44,8 +44,6 @@
             throw armnn::Exception("ConvertElementwiseUnaryToTosaOperator: Unsupported layer type.");
     }
 
-    ARMNN_ASSERT(op != nullptr);
-
     std::vector<TosaSerializationTensor*> tensors;
     // Only add input tensor if connected layer is an input layer.
     // As intermediate or constant tensors will be created separately.
diff --git a/src/backends/tosaReference/TosaRefMemoryManager.cpp b/src/backends/tosaReference/TosaRefMemoryManager.cpp
index 745e6be..4384b08 100644
--- a/src/backends/tosaReference/TosaRefMemoryManager.cpp
+++ b/src/backends/tosaReference/TosaRefMemoryManager.cpp
@@ -1,11 +1,10 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022, 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "TosaRefMemoryManager.hpp"
 
-#include <armnn/utility/Assert.hpp>
-
+#include <armnn/Exceptions.hpp>
 #include <algorithm>
 
 namespace armnn
@@ -35,7 +34,7 @@
 
 void TosaRefMemoryManager::Allocate(TosaRefMemoryManager::Pool* pool)
 {
-    ARMNN_ASSERT(pool);
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(pool, "Null Pool pointer passed to TosaRefMemoryManager::Allocate().");
     m_FreePools.push_back(pool);
 }
 
@@ -75,25 +74,29 @@
 
 void* TosaRefMemoryManager::Pool::GetPointer()
 {
-    ARMNN_ASSERT_MSG(m_Pointer, "TosaRefMemoryManager::Pool::GetPointer() called when memory not acquired");
+    ARMNN_THROW_MSG_IF_FALSE(m_Pointer, RuntimeException,
+                             "TosaRefMemoryManager::Pool::GetPointer() called when memory not acquired");
     return m_Pointer;
 }
 
 void TosaRefMemoryManager::Pool::Reserve(unsigned int numBytes)
 {
-    ARMNN_ASSERT_MSG(!m_Pointer, "TosaRefMemoryManager::Pool::Reserve() cannot be called after memory acquired");
+    ARMNN_THROW_MSG_IF_FALSE(!m_Pointer, RuntimeException,
+                             "TosaRefMemoryManager::Pool::Reserve() cannot be called after memory acquired");
     m_Size = std::max(m_Size, numBytes);
 }
 
 void TosaRefMemoryManager::Pool::Acquire()
 {
-    ARMNN_ASSERT_MSG(!m_Pointer, "TosaRefMemoryManager::Pool::Acquire() called when memory already acquired");
+    ARMNN_THROW_MSG_IF_FALSE(!m_Pointer, RuntimeException,
+                             "TosaRefMemoryManager::Pool::Acquire() called when memory already acquired");
     m_Pointer = ::operator new(size_t(m_Size));
 }
 
 void TosaRefMemoryManager::Pool::Release()
 {
-    ARMNN_ASSERT_MSG(m_Pointer, "TosaRefMemoryManager::Pool::Release() called when memory not acquired");
+    ARMNN_THROW_MSG_IF_FALSE(m_Pointer, RuntimeException,
+                             "TosaRefMemoryManager::Pool::Release() called when memory not acquired");
     ::operator delete(m_Pointer);
     m_Pointer = nullptr;
 }
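
The four RuntimeExceptions above enforce a strict pool lifecycle: Reserve() only before Acquire(), GetPointer() and Release() only after. A standalone sketch of the same state machine (not the Arm NN API; std::runtime_error stands in for armnn::RuntimeException):

    #include <algorithm>
    #include <new>
    #include <stdexcept>

    class PoolSketch
    {
    public:
        void Reserve(unsigned int numBytes)
        {
            if (m_Pointer) { throw std::runtime_error("Reserve() after Acquire()"); }
            m_Size = std::max(m_Size, numBytes); // grow the reservation, never shrink it
        }
        void Acquire()
        {
            if (m_Pointer) { throw std::runtime_error("Acquire() called twice"); }
            m_Pointer = ::operator new(m_Size);
        }
        void* GetPointer() const
        {
            if (!m_Pointer) { throw std::runtime_error("GetPointer() before Acquire()"); }
            return m_Pointer;
        }
        void Release()
        {
            if (!m_Pointer) { throw std::runtime_error("Release() before Acquire()"); }
            ::operator delete(m_Pointer);
            m_Pointer = nullptr;
        }
    private:
        void*        m_Pointer = nullptr;
        unsigned int m_Size    = 0;
    };
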
diff --git a/src/backends/tosaReference/TosaRefTensorHandle.cpp b/src/backends/tosaReference/TosaRefTensorHandle.cpp
index aaffc8a..e9dc45e 100644
--- a/src/backends/tosaReference/TosaRefTensorHandle.cpp
+++ b/src/backends/tosaReference/TosaRefTensorHandle.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022, 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "TosaRefTensorHandle.hpp"
@@ -44,9 +44,9 @@
 {
     if (!m_IsImportEnabled)
     {
-        ARMNN_ASSERT_MSG(!m_Pool, "TosaRefTensorHandle::Manage() called twice");
-        ARMNN_ASSERT_MSG(!m_UnmanagedMemory, "TosaRefTensorHandle::Manage() called after Allocate()");
-
+        ARMNN_THROW_MSG_IF_FALSE(!m_Pool, RuntimeException, "TosaRefTensorHandle::Manage() called twice");
+        ARMNN_THROW_MSG_IF_FALSE(!m_UnmanagedMemory, RuntimeException,
+                                 "TosaRefTensorHandle::Manage() called after Allocate()");
         m_Pool = m_MemoryManager->Manage(m_TensorInfo.GetNumBytes());
     }
 }