IVGCVSW-7853 Assert audit and removal

  * src/armnn
  * src/armnn/layers

Signed-off-by: Declan-ARM <decmce01@arm.com>
Change-Id: Ic78cbbb59e90fbb15f893205a358c45264243721
diff --git a/src/armnn/ArmNNProfilingServiceInitialiser.cpp b/src/armnn/ArmNNProfilingServiceInitialiser.cpp
index bbed43a..7ca3fc1 100644
--- a/src/armnn/ArmNNProfilingServiceInitialiser.cpp
+++ b/src/armnn/ArmNNProfilingServiceInitialiser.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd. All rights reserved.
+// Copyright © 2022,2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -40,7 +40,11 @@
                                                                   networkLoads,
                                                                   "The number of networks loaded at runtime",
                                                                   networks);
-        ARMNN_ASSERT(loadedNetworksCounter);
+        if (!loadedNetworksCounter)
+        {
+            throw armnn::NullPointerException("loadedNetworksCounter must not be null.");
+        }
+
         profilingService.InitializeCounterValue(loadedNetworksCounter->m_Uid);
     }
     // Register a counter for the number of unloaded networks
@@ -57,7 +61,11 @@
                                                                   networkUnloads,
                                                                   "The number of networks unloaded at runtime",
                                                                   networks);
-        ARMNN_ASSERT(unloadedNetworksCounter);
+        if (!unloadedNetworksCounter)
+        {
+            throw armnn::NullPointerException("unloadedNetworksCounter must not be null.");
+        }
+
         profilingService.InitializeCounterValue(unloadedNetworksCounter->m_Uid);
     }
     std::string backends("backends");
@@ -75,7 +79,11 @@
                                                                   backendsRegistered,
                                                                   "The number of registered backends",
                                                                   backends);
-        ARMNN_ASSERT(registeredBackendsCounter);
+        if (!registeredBackendsCounter)
+        {
+            throw armnn::NullPointerException("registeredBackendsCounter must not be null.");
+        }
+
         profilingService.InitializeCounterValue(registeredBackendsCounter->m_Uid);
 
         // Due to backends being registered before the profiling service becomes active,
@@ -97,7 +101,11 @@
                                                                   backendsUnregistered,
                                                                   "The number of unregistered backends",
                                                                   backends);
-        ARMNN_ASSERT(unregisteredBackendsCounter);
+        if (!unregisteredBackendsCounter)
+        {
+            throw armnn::NullPointerException("unregisteredBackendsCounter must not be null.");
+        }
+
         profilingService.InitializeCounterValue(unregisteredBackendsCounter->m_Uid);
     }
     // Register a counter for the number of inferences run
@@ -115,7 +119,11 @@
                                                                  inferencesRun,
                                                                  "The number of inferences run",
                                                                  inferences);
-        ARMNN_ASSERT(inferencesRunCounter);
+        if (!inferencesRunCounter)
+        {
+            throw armnn::NullPointerException("inferencesRunCounter must not be null.");
+        }
+
         profilingService.InitializeCounterValue(inferencesRunCounter->m_Uid);
     }
 }
diff --git a/src/armnn/Descriptors.cpp b/src/armnn/Descriptors.cpp
index 5e4628b..b8f4c7a 100644
--- a/src/armnn/Descriptors.cpp
+++ b/src/armnn/Descriptors.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "armnn/Descriptors.hpp"
@@ -203,8 +203,9 @@
 // Reorders the viewOrigins in accordance with the indices presented in newOrdering array.
 void OriginsDescriptor::ReorderOrigins(unsigned int*  newOrdering, unsigned int numNewOrdering)
 {
-    ARMNN_ASSERT_MSG(m_NumViews == numNewOrdering, "number of views must match number of "
-        "elements in the new ordering array");
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_NumViews == numNewOrdering,
+                                        "number of views must match number of elements in the new ordering array");
+
     std::vector<uint32_t*> viewOrigins(&m_ViewOrigins[0], &m_ViewOrigins[m_NumViews]);
 
     for (unsigned int i = 0; i < numNewOrdering; ++i)
diff --git a/src/armnn/Graph.cpp b/src/armnn/Graph.cpp
index f7fbba7..70ecb32 100644
--- a/src/armnn/Graph.cpp
+++ b/src/armnn/Graph.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -207,7 +207,8 @@
 Status Graph::AllocateDynamicBuffers()
 {
     // Layers must be sorted in topological order
-    ARMNN_ASSERT(m_LayersInOrder);
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(m_LayersInOrder, "layers must be in order.");
+
     ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "LoadNetwork_AllocateDynamicBuffers");
 
     std::unordered_set<const ITensorHandle*> preallocatedTensors;
@@ -334,7 +335,10 @@
     auto MayNeedCompatibilityLayer = [](const Layer& layer)
     {
         // All layers should have been associated with a valid compute device at this point.
-        ARMNN_ASSERT(layer.GetBackendId() != Compute::Undefined);
+        if (layer.GetBackendId() == Compute::Undefined)
+        {
+            throw armnn::Exception("AddCompatibilityLayers: All layers must be assigned to a backend at this point.");
+        }
         // Does not need another compatibility layer if a copy or import layer is already present.
         return layer.GetType() != LayerType::MemCopy &&
                layer.GetType() != LayerType::MemImport;
@@ -348,7 +352,7 @@
 
     ForEachLayer([this, &backends, &registry, MayNeedCompatibilityLayer, IsCompatibilityStrategy](Layer* srcLayer)
     {
-        ARMNN_ASSERT(srcLayer);
+        ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(srcLayer, "source layer must not be null.");
 
         if (!MayNeedCompatibilityLayer(*srcLayer))
         {
@@ -365,11 +369,17 @@
             for (unsigned int srcConnectionIndex = 0; srcConnectionIndex < srcConnections.size(); srcConnectionIndex++)
             {
                 InputSlot* dstInputSlot = srcConnections[srcConnectionIndex];
-                ARMNN_ASSERT(dstInputSlot);
+                if (!dstInputSlot)
+                {
+                    throw armnn::Exception("dstInputSlot must not be null.");
+                }
 
                 EdgeStrategy strategy = srcEdgeStrategies[srcConnectionIndex];
-                ARMNN_ASSERT_MSG(strategy != EdgeStrategy::Undefined,
-                                 "Undefined memory strategy found while adding copy layers for compatibility");
+                if (strategy == EdgeStrategy::Undefined)
+                {
+                    throw armnn::Exception("Undefined memory strategy found "
+                                           "while adding copy layers for compatibility");
+                }
 
                 const Layer& dstLayer = dstInputSlot->GetOwningLayer();
                 if (MayNeedCompatibilityLayer(dstLayer) &&
@@ -390,7 +400,11 @@
                     }
                     else
                     {
-                        ARMNN_ASSERT_MSG(strategy == EdgeStrategy::ExportToTarget, "Invalid edge strategy found.");
+                        if (strategy != EdgeStrategy::ExportToTarget)
+                        {
+                            throw armnn::Exception("Invalid edge strategy found.");
+                        }
+
                         compLayer = InsertNewLayer<MemImportLayer>(*dstInputSlot, compLayerName.c_str());
                     }
 
@@ -460,7 +474,7 @@
 
 void Graph::SubstituteSubgraph(SubgraphView& subgraph, IConnectableLayer* substituteLayer)
 {
-    ARMNN_ASSERT(substituteLayer != nullptr);
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(substituteLayer, "substituteLayer should not be null");
 
     // Create a new sub-graph with only the given layer, using
     // the given sub-graph as a reference of which parent graph to use
@@ -491,16 +505,19 @@
 
 void Graph::ReplaceSubgraphConnections(const SubgraphView& subgraph, const SubgraphView& substituteSubgraph)
 {
-    ARMNN_ASSERT_MSG(!substituteSubgraph.GetIConnectableLayers().empty(),
-                     "New sub-graph used for substitution must not be empty");
+    if (substituteSubgraph.GetIConnectableLayers().empty())
+    {
+        throw armnn::Exception("New sub-graph used for substitution must not be empty");
+    }
 
     const SubgraphView::IConnectableLayers& substituteSubgraphLayers = substituteSubgraph.GetIConnectableLayers();
     std::for_each(substituteSubgraphLayers.begin(), substituteSubgraphLayers.end(), [&](IConnectableLayer* layer)
     {
-        IgnoreUnused(layer);
         layer = PolymorphicDowncast<Layer*>(layer);
-        ARMNN_ASSERT_MSG(std::find(m_Layers.begin(), m_Layers.end(), layer) != m_Layers.end(),
-                         "Substitute layer is not a member of graph");
+        if (std::find(m_Layers.begin(), m_Layers.end(), layer) == m_Layers.end())
+        {
+            throw armnn::Exception("Substitute layer is not a member of graph");
+        }
     });
 
     const SubgraphView::IInputSlots& subgraphInputSlots = subgraph.GetIInputSlots();
@@ -512,8 +529,15 @@
     const SubgraphView::IInputSlots& substituteSubgraphInputSlots = substituteSubgraph.GetIInputSlots();
     const SubgraphView::IOutputSlots& substituteSubgraphOutputSlots = substituteSubgraph.GetIOutputSlots();
 
-    ARMNN_ASSERT(subgraphNumInputSlots == substituteSubgraphInputSlots.size());
-    ARMNN_ASSERT(subgraphNumOutputSlots == substituteSubgraphOutputSlots.size());
+    if (subgraphNumInputSlots != substituteSubgraphInputSlots.size())
+    {
+        throw armnn::Exception("subgraph and substitute subgraph input slot sizes must be the same.");
+    }
+
+    if (subgraphNumOutputSlots != substituteSubgraphOutputSlots.size())
+    {
+        throw armnn::Exception("subgraph and substitute subgraph output slot sizes must be the same.");
+    }
 
     // Disconnect the sub-graph and replace it with the substitute sub-graph
 
@@ -521,7 +545,10 @@
     for (unsigned int inputSlotIdx = 0; inputSlotIdx < subgraphNumInputSlots; ++inputSlotIdx)
     {
         IInputSlot* subgraphInputSlot = subgraphInputSlots.at(inputSlotIdx);
-        ARMNN_ASSERT(subgraphInputSlot);
+        if (!subgraphInputSlot)
+        {
+            throw armnn::NullPointerException("subgraphInputSlot must not be null.");
+        }
 
         // Only disconnect if the InputSlot has a connection, this might not be the case when
         // dealing with working copies of SubgraphViews
@@ -532,11 +559,19 @@
             InputSlot* inputSlot = PolymorphicDowncast<InputSlot*>(subgraphInputSlot);
             bool isOverridden = inputSlot->IsTensorInfoOverridden();
 
-            ARMNN_ASSERT(connectedOutputSlot);
+            if (!connectedOutputSlot)
+            {
+                throw armnn::NullPointerException("connectedOutputSlot must not be null.");
+            }
+
             connectedOutputSlot->Disconnect(*subgraphInputSlot);
 
             IInputSlot* substituteInputSlot = substituteSubgraphInputSlots.at(inputSlotIdx);
-            ARMNN_ASSERT(substituteInputSlot);
+            if (!substituteInputSlot)
+            {
+                throw armnn::NullPointerException("substituteInputSlot must not be null.");
+            }
+
             connectedOutputSlot->Connect(*substituteInputSlot);
 
             if (isOverridden)
@@ -553,11 +588,17 @@
     {
         auto subgraphOutputSlot =
                 PolymorphicDowncast<OutputSlot*>(subgraphOutputSlots.at(outputSlotIdx));
-        ARMNN_ASSERT(subgraphOutputSlot);
+        if (!subgraphOutputSlot)
+        {
+            throw armnn::NullPointerException("subgraphOutputSlot must not be null.");
+        }
 
         auto substituteOutputSlot =
                 PolymorphicDowncast<OutputSlot*>(substituteSubgraphOutputSlots.at(outputSlotIdx));
-        ARMNN_ASSERT(substituteOutputSlot);
+        if (!substituteOutputSlot)
+        {
+            throw armnn::NullPointerException("substituteOutputSlot must not be null.");
+        }
 
         subgraphOutputSlot->MoveAllConnections(*substituteOutputSlot);
     }
diff --git a/src/armnn/Graph.hpp b/src/armnn/Graph.hpp
index aa543c1..5999588 100644
--- a/src/armnn/Graph.hpp
+++ b/src/armnn/Graph.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #pragma once
@@ -126,8 +126,15 @@
             otherLayer->Reparent(*this, m_Layers.end());
         });
 
-        ARMNN_ASSERT(other.m_PosInGraphMap.empty());
-        ARMNN_ASSERT(other.m_Layers.empty());
+        if (!other.m_PosInGraphMap.empty())
+        {
+            throw armnn::Exception("assignment positions in graph map must be empty.");
+        }
+
+        if (!other.m_Layers.empty())
+        {
+            throw armnn::Exception("assignment layers must be empty.");
+        }
 
         return *this;
     }
@@ -336,8 +343,10 @@
         graph.m_Layers.erase(layerIt);
 
         const size_t numErased = graph.m_PosInGraphMap.erase(this);
-        IgnoreUnused(numErased);
-        ARMNN_ASSERT(numErased == 1);
+        if (numErased != 1)
+        {
+            throw armnn::Exception("numErased must be \"1\".");
+        }
     }
 
 protected:
@@ -415,7 +424,6 @@
     {
         const size_t numErased = m_Graph->m_InputIds.erase(GetBindingId());
         IgnoreUnused(numErased);
-        ARMNN_ASSERT(numErased == 1);
     }
 };
 
@@ -441,14 +449,16 @@
     {
         const size_t numErased = m_Graph->m_OutputIds.erase(GetBindingId());
         IgnoreUnused(numErased);
-        ARMNN_ASSERT(numErased == 1);
     }
 };
 
 inline Graph::Iterator Graph::GetPosInGraph(Layer& layer)
 {
     auto it = m_PosInGraphMap.find(&layer);
-    ARMNN_ASSERT(it != m_PosInGraphMap.end());
+    if (it == m_PosInGraphMap.end())
+    {
+        throw armnn::Exception("unable to find layer in graph map.");
+    }
     return it->second;
 }
 
@@ -491,7 +501,10 @@
     const Iterator pos = std::next(GetPosInGraph(owningLayer));
     LayerT* const layer = new LayerInGraph<LayerT>(*this, pos, std::forward<Args>(args)...);
 
-    ARMNN_ASSERT(layer->GetNumInputSlots() == 1);
+    if (layer->GetNumInputSlots() != 1)
+    {
+        throw armnn::Exception("layer must have only one input slot.");
+    }
 
     insertAfter.MoveAllConnections(layer->GetOutputSlot());
     insertAfter.Connect(layer->GetInputSlot(0));
@@ -511,7 +524,11 @@
 template <typename LayerT>
 inline void Graph::EraseLayer(LayerT*& layer)
 {
-    ARMNN_ASSERT(layer != nullptr);
+    if (!layer)
+    {
+        throw armnn::NullPointerException("layer must not be null.");
+    }
+
     EraseLayer(GetPosInGraph(*layer));
     layer = nullptr;
 }
diff --git a/src/armnn/InternalTypes.cpp b/src/armnn/InternalTypes.cpp
index aebc721..4a83802 100644
--- a/src/armnn/InternalTypes.cpp
+++ b/src/armnn/InternalTypes.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017,2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -18,7 +18,7 @@
       LIST_OF_LAYER_TYPE
 #undef X
         default:
-            ARMNN_ASSERT_MSG(false, "Unknown layer type");
+            throw armnn::InvalidArgumentException("Unknown layer type");
             return "Unknown";
     }
 }
diff --git a/src/armnn/Layer.cpp b/src/armnn/Layer.cpp
index d2f8f2c..5a1ec9c 100644
--- a/src/armnn/Layer.cpp
+++ b/src/armnn/Layer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "Layer.hpp"
@@ -33,13 +33,18 @@
         case LayerType::DepthwiseConvolution2d:
         case LayerType::FullyConnected:
         {
-            ARMNN_ASSERT(layer.GetNumInputSlots() == 2 ||
-                         layer.GetNumInputSlots() == 3);
+            if (layer.GetNumInputSlots() != 2 && layer.GetNumInputSlots() != 3)
+            {
+                throw armnn::Exception("layer must have either 2 or 3 input slots.");
+            }
             break;
         }
         default:
         {
-            ARMNN_ASSERT(layer.GetNumInputSlots() == 1);
+            if (layer.GetNumInputSlots() != 1)
+            {
+                throw armnn::Exception("layer must have one input slot.");
+            }
             break;
         }
     }
@@ -47,7 +52,10 @@
 
 void InputSlot::Insert(Layer& layer)
 {
-    ARMNN_ASSERT(layer.GetNumOutputSlots() == 1);
+    if (layer.GetNumOutputSlots() != 1)
+    {
+        throw armnn::Exception("layer must have one output slot.");
+    }
 
     OutputSlot* const prevSlot = GetConnectedOutputSlot();
 
@@ -105,7 +113,10 @@
 
 bool OutputSlot::ValidateTensorShape(const TensorShape& shape) const
 {
-    ARMNN_ASSERT_MSG(IsTensorInfoSet(), "TensorInfo must be set in order to validate the shape.");
+    if (!IsTensorInfoSet())
+    {
+        throw armnn::Exception("TensorInfo must be set in order to validate the shape.");
+    }
     return shape == m_OutputHandler.GetTensorInfo().GetShape();
 }
 
@@ -146,8 +157,10 @@
 {
     while (GetNumConnections() > 0)
     {
-        ARMNN_ASSERT_MSG(m_EdgeStrategies[0] == EdgeStrategy::Undefined,
-            "Cannot move connections once memory strategies have be established.");
+        if (m_EdgeStrategies[0] != EdgeStrategy::Undefined)
+        {
+            throw armnn::Exception("Cannot move connections once memory strategies have been established.");
+        }
 
         InputSlot& connection = *GetConnection(0);
         Disconnect(connection);
@@ -165,7 +178,7 @@
             return i;
         }
     }
-    ARMNN_ASSERT_MSG(false, "Did not find slot on owner.");
+    throw armnn::Exception("Did not find slot on owner.");
     return 0; // Error
 }
 
@@ -257,7 +270,10 @@
     for (auto&& inputSlot : GetInputSlots())
     {
         // The graph must be well-formed at this point.
-        ARMNN_ASSERT(inputSlot.GetConnection());
+        if (!inputSlot.GetConnection())
+        {
+            throw armnn::Exception("input slot must have valid connection.");
+        }
         const OutputHandler& outputHandler = inputSlot.GetConnectedOutputSlot()->GetOutputHandler();
 
         if (inputSlot.IsTensorInfoOverridden() && outputHandler.GetData())
@@ -308,7 +324,10 @@
         {
             ITensorHandleFactory* handleFactory;
             handleFactory = registry.GetFactory(factoryId);
-            ARMNN_ASSERT(handleFactory);
+            if (!handleFactory)
+            {
+                throw armnn::NullPointerException("handleFactory must not be null.");
+            }
             handler.CreateTensorHandles(*handleFactory, IsMemoryManaged);
         }
     }
@@ -390,7 +409,10 @@
 
 void Layer::VerifyLayerConnections(unsigned int expectedConnections, const CheckLocation& location) const
 {
-    ARMNN_ASSERT(GetNumInputSlots() == expectedConnections);
+    if (GetNumInputSlots() != expectedConnections)
+    {
+        throw armnn::Exception("input slots must match expected connections.");
+    }
 
     for (unsigned int i=0; i<expectedConnections; ++i)
     {
@@ -409,8 +431,8 @@
 
 std::vector<TensorShape> Layer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(GetNumInputSlots() != 0);
-    ARMNN_ASSERT(GetNumOutputSlots() != 0);
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(GetNumInputSlots()  != 0, "input slots must not be zero.");
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(GetNumOutputSlots() != 0, "output slots must not be zero.");
 
     // By default we return what we got, meaning the output shape(s) are the same as the input(s).
     // This only works if the number of inputs and outputs are the same. Since we are in the Layer
diff --git a/src/armnn/LoadedNetwork.cpp b/src/armnn/LoadedNetwork.cpp
index 91113c5..d0bafc0 100644
--- a/src/armnn/LoadedNetwork.cpp
+++ b/src/armnn/LoadedNetwork.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -60,7 +60,10 @@
     for (auto&& input : layer.GetInputSlots())
     {
         const IOutputSlot* source = input.GetConnectedOutputSlot();
-        ARMNN_ASSERT(source != NULL);
+        if (!source)
+        {
+            throw armnn::NullPointerException("Null source found on input to layer \"" + layerName + "\".");
+        }
         timelineUtils->CreateConnectionRelationship(ProfilingRelationshipType::RetentionLink,
                                                     source->GetOwningLayerGuid(),
                                                     layer.GetGuid());
@@ -643,7 +646,10 @@
         {
             const auto& outSlot = layer->GetOutputSlots()[0];
             const auto factoryId = outSlot.GetTensorHandleFactoryId();
-            ARMNN_ASSERT(factoryId != ITensorHandleFactory::LegacyFactoryId);
+            if (factoryId == ITensorHandleFactory::LegacyFactoryId)
+            {
+                throw armnn::Exception("factoryId must not be of type \"Legacy\".");
+            }
             auto& workloadFactory = GetWorkloadFactory(*layer);
 
             layer->CreateTensorHandles(m_TensorHandleFactoryRegistry, workloadFactory);
@@ -710,7 +716,11 @@
 {
     for (auto&& inputLayer : m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().GetInputLayers())
     {
-        ARMNN_ASSERT_MSG(inputLayer->GetNumOutputSlots() == 1, "Input layer should have exactly 1 output slot");
+        if (inputLayer->GetNumOutputSlots() != 1)
+        {
+            throw armnn::GraphValidationException("Input layer should have exactly 1 output slot");
+        }
+
         if (inputLayer->GetBindingId() == layerId)
         {
             return inputLayer->GetOutputSlot(0).GetTensorInfo();
@@ -724,8 +734,16 @@
 {
     for (auto&& outputLayer : m_OptimizedNetwork->pOptimizedNetworkImpl->GetGraph().GetOutputLayers())
     {
-        ARMNN_ASSERT_MSG(outputLayer->GetNumInputSlots() == 1, "Output layer should have exactly 1 input slot");
-        ARMNN_ASSERT_MSG(outputLayer->GetInputSlot(0).GetConnection(), "Input slot on Output layer must be connected");
+        if (outputLayer->GetNumInputSlots() != 1)
+        {
+            throw armnn::GraphValidationException("Output layer should have exactly 1 input slot");
+        }
+
+        if (!outputLayer->GetInputSlot(0).GetConnection())
+        {
+            throw armnn::GraphValidationException("Input slot on Output layer must be connected");
+        }
+
         if (outputLayer->GetBindingId() == layerId)
         {
             return outputLayer->GetInputSlot(0).GetTensorInfo();
@@ -750,7 +768,10 @@
 
     workloadFactory = it->second.get();
 
-    ARMNN_ASSERT_MSG(workloadFactory, "No workload factory");
+    if (!workloadFactory)
+    {
+        throw armnn::NullPointerException("No workload factory");
+    }
 
     return *workloadFactory;
 }
@@ -962,14 +983,22 @@
                     m_IsOutputImported[outputIndex] = true;
                 }
 
-                ARMNN_ASSERT_MSG(inputTensorHandle != nullptr, "Data should have been allocated.");
+                if (!inputTensorHandle)
+                {
+                    throw armnn::NullPointerException("Data should have been allocated.");
+                }
+
                 MemSyncQueueDescriptor syncDesc;
                 syncDesc.m_Inputs.push_back(inputTensorHandle);
                 WorkloadInfo info;
-                info.m_InputTensorInfos.push_back(
-                        outputLayer->GetInputSlot(0).GetTensorInfo());
+                info.m_InputTensorInfos.push_back(outputLayer->GetInputSlot(0).GetTensorInfo());
+
                 auto syncWorkload = std::make_unique<SyncMemGenericWorkload>(syncDesc, info);
-                ARMNN_ASSERT_MSG(syncWorkload, "No sync workload created");
+                if (!syncWorkload)
+                {
+                    throw armnn::NullPointerException("No sync workload created");
+                }
+
                 m_OutputQueue.push_back(std::move(syncWorkload));
                 importedOutputIdIndex++;
             }
@@ -1058,12 +1087,20 @@
     inputQueueDescriptor.m_Inputs.push_back(tensorHandle);
     info.m_InputTensorInfos.push_back(tensorInfo);
 
-    ARMNN_ASSERT_MSG(layer.GetNumOutputSlots() == 1, "Can only handle Input Layer with one output");
+    if (layer.GetNumOutputSlots() != 1)
+    {
+        throw armnn::GraphValidationException("Can only handle Input Layer with one output");
+    }
+
     const OutputHandler& handler = layer.GetOutputHandler();
     const TensorInfo& outputTensorInfo = handler.GetTensorInfo();
     ITensorHandle* outputTensorHandle = handler.GetData();
-    ARMNN_ASSERT_MSG(outputTensorHandle != nullptr,
-                     "Data should have been allocated.");
+
+    if (!outputTensorHandle)
+    {
+        throw armnn::NullPointerException("Data should have been allocated.");
+    }
+
     inputQueueDescriptor.m_Outputs.push_back(outputTensorHandle);
     info.m_OutputTensorInfos.push_back(outputTensorInfo);
 
@@ -1090,7 +1127,10 @@
         // Create a mem copy workload for input since we did not import
         std::unique_ptr<IWorkload> inputWorkload = std::make_unique<CopyMemGenericWorkload>(inputQueueDescriptor, info);
 
-        ARMNN_ASSERT_MSG(inputWorkload, "No input workload created");
+        if (!inputWorkload)
+        {
+            throw armnn::NullPointerException("No input workload created");
+        }
 
         std::unique_ptr<TimelineUtilityMethods> timelineUtils =
                             TimelineUtilityMethods::GetTimelineUtils(*m_ProfilingService);
@@ -1123,14 +1163,20 @@
     outputQueueDescriptor.m_Outputs.push_back(tensorHandle);
     info.m_OutputTensorInfos.push_back(tensorInfo);
 
-    ARMNN_ASSERT_MSG(layer.GetNumInputSlots() == 1, "Output Layer should have exactly one input.");
+    if (layer.GetNumInputSlots() != 1)
+    {
+        throw armnn::GraphValidationException("Output Layer should have exactly one input.");
+    }
 
     // Gets the output handler from the previous node.
     const OutputHandler& outputHandler = layer.GetInputSlots()[0].GetConnectedOutputSlot()->GetOutputHandler();
 
     const TensorInfo& inputTensorInfo = outputHandler.GetTensorInfo();
     ITensorHandle* inputTensorHandle = outputHandler.GetData();
-    ARMNN_ASSERT_MSG(inputTensorHandle != nullptr, "Data should have been allocated.");
+    if (!inputTensorHandle)
+    {
+        throw armnn::NullPointerException("Data should have been allocated.");
+    }
 
     // Try import the output tensor.
     // Note: We can only import the output pointer if all of the following  hold true:
@@ -1160,7 +1206,10 @@
                     syncDesc.m_Inputs.push_back(inputTensorHandle);
                     info.m_InputTensorInfos.push_back(inputTensorInfo);
                     auto syncWorkload = std::make_unique<SyncMemGenericWorkload>(syncDesc, info);
-                    ARMNN_ASSERT_MSG(syncWorkload, "No sync workload created");
+                    if (!syncWorkload)
+                    {
+                        throw armnn::NullPointerException("No sync workload created");
+                    }
                     m_OutputQueue.push_back(std::move(syncWorkload));
                 }
                 else
@@ -1178,7 +1227,10 @@
 
         std::unique_ptr<IWorkload> outputWorkload =
             std::make_unique<CopyMemGenericWorkload>(outputQueueDescriptor, info);
-        ARMNN_ASSERT_MSG(outputWorkload, "No output workload created");
+        if (!outputWorkload)
+        {
+            throw armnn::NullPointerException("No output workload created");
+        }
 
         std::unique_ptr<TimelineUtilityMethods> timelineUtils =
             TimelineUtilityMethods::GetTimelineUtils(*m_ProfilingService);
@@ -1361,7 +1413,11 @@
 // e) m_IsExportEnabled must be set to true
 void LoadedNetwork::ImportOutputTensor(const Tensor& outputTensor, ITensorHandle* outputTensorHandle)
 {
-    ARMNN_ASSERT_MSG(outputTensorHandle != nullptr, "Data should have been allocated.");
+    if (!outputTensorHandle)
+    {
+        throw armnn::NullPointerException("Data should have been allocated.");
+    }
+
     MemorySourceFlags importFlags = outputTensorHandle->GetImportFlags();
     if (CheckFlag(importFlags, m_NetworkProperties.m_OutputSource))
     {
@@ -1534,7 +1590,10 @@
             const TensorInfo& tensorInfo = outputSlot.GetTensorInfo();
 
             ITensorHandleFactory* handleFactory = m_TensorHandleFactoryRegistry.GetFactory(factoryId);
-            ARMNN_ASSERT(handleFactory);
+            if (!handleFactory)
+            {
+                throw armnn::NullPointerException("handleFactory must not be null.");
+            }
 
             ImportedTensorHandlePin importedTensorHandlePin{layerBindingId,
                                                             handleFactory->CreateTensorHandle(tensorInfo, false)};
@@ -1667,7 +1726,10 @@
         const TensorInfo& tensorInfo = inputSlot.GetTensorInfo();
 
         ITensorHandleFactory* handleFactory = m_TensorHandleFactoryRegistry.GetFactory(factoryId);
-        ARMNN_ASSERT(handleFactory);
+        if (!handleFactory)
+        {
+            throw armnn::NullPointerException("handleFactory must not be null.");
+        }
 
         ImportedTensorHandlePin importedTensorHandlePin{layerBindingId,
                                                         handleFactory->CreateTensorHandle(tensorInfo, false)};
@@ -1987,7 +2049,10 @@
         else
         {
             ITensorHandleFactory* handleFactory = m_TensorHandleFactoryRegistry.GetFactory(factoryId);
-            ARMNN_ASSERT(handleFactory);
+            if (!handleFactory)
+            {
+                throw armnn::NullPointerException("handleFactory must not be null.");
+            }
             return handleFactory->CreateTensorHandle(tensorInfo, false);
         }
     };
@@ -2098,7 +2163,11 @@
         // so that the next tensor handle with a non overlapping lifetime can share its memory.
         for (auto& slot : layer->GetInputSlots())
         {
-            ARMNN_ASSERT(slot.GetConnection());
+            if (!slot.GetConnection())
+            {
+                throw armnn::GraphValidationException("slot must be a valid input slot.");
+            }
+
             auto outputSlot = slot.GetConnectedOutputSlot();
             auto key = outputSlot->GetOwningLayer().GetGuid();
 
diff --git a/src/armnn/Logging.cpp b/src/armnn/Logging.cpp
index 73879e6..bceb110 100644
--- a/src/armnn/Logging.cpp
+++ b/src/armnn/Logging.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019,2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -99,7 +99,7 @@
             SimpleLogger<LogSeverity::Fatal>::Get().Enable(true);
             break;
         default:
-            ARMNN_ASSERT(false);
+            throw armnn::InvalidArgumentException("Unknown LoggingSeverity level.");
     }
 }
 
diff --git a/src/armnn/Network.cpp b/src/armnn/Network.cpp
index 1a4fec5..6f33fb6 100644
--- a/src/armnn/Network.cpp
+++ b/src/armnn/Network.cpp
@@ -1039,7 +1039,11 @@
     // Check if the first preferred backend has FP16 support
     auto firstBackend = availablePreferredBackends[0];
     auto backendObjPtr = backends.find(firstBackend)->second.get();
-    ARMNN_ASSERT(backendObjPtr);
+    if (!backendObjPtr)
+    {
+        throw armnn::NullPointerException("backendObjPtr must not be null.");
+    }
+
     auto hasFp16Capability = BackendOptions::BackendOption{"HasFp16", true};
     auto backendCapabilities = backendObjPtr->GetCapabilities();
 
@@ -1158,10 +1158,6 @@
                 // Note: we don't need to log the error as it would already
                 // be logged in AttemptBackendAssignment().
             }
-            else
-            {
-                ARMNN_ASSERT_MSG(res.IsWarningOnly(), "OptimizationResult in unexpected state.");
-            }
         }
     }
 
@@ -1321,7 +1317,6 @@
     {
         auto backendFactory = backendRegistry.GetFactory(selectedBackend);
         auto backendObjPtr = backendFactory();
-        ARMNN_ASSERT(backendObjPtr);
 
         backendObjPtr->RegisterTensorHandleFactories(handleFactoryRegistry);
 
@@ -1337,7 +1332,6 @@
                                              const ModelOptions& modelOptions,
                                              Optional<std::vector<std::string>&> errMessages)
 {
-    ARMNN_ASSERT(optNetObjPtr);
     ARMNN_SCOPED_PROFILING_EVENT(Compute::Undefined, "Optimizer_ApplyBackendOptimizations")
     OptimizationResult result;
 
@@ -1348,7 +1342,10 @@
     for (auto&& selectedBackend : backendSettings.m_SelectedBackends)
     {
         auto backendObjPtr = backends.find(selectedBackend)->second.get();
-        ARMNN_ASSERT(backendObjPtr);
+        if (!backendObjPtr)
+        {
+            throw armnn::NullPointerException("backendObjPtr must not be null.");
+        }
 
         if (selectedBackend == armnn::Compute::GpuAcc || selectedBackend == armnn::Compute::CpuAcc)
         {
@@ -1379,7 +1376,10 @@
             // Try to optimize the current sub-graph
             ARMNN_SCOPED_PROFILING_EVENT(backendObjPtr->GetId(), "Optimizer_OptimizeSubgraph");
             OptimizationViews optimizationViews = backendObjPtr->OptimizeSubgraphView(*subgraph, modelOptions);
-            ARMNN_ASSERT(optimizationViews.Validate(*subgraph));
+            if (!optimizationViews.Validate(*subgraph))
+            {
+                throw armnn::Exception("optimizationViews must have a valid subgraph.");
+            }
 
             // Optimization attempted, check the resulting optimized sub-graph
             for (auto& substitution : optimizationViews.GetSubstitutions())
@@ -1393,7 +1393,6 @@
                 const SubgraphView::IConnectableLayers& subgraphLayers = replacementSubgraph.GetIConnectableLayers();
                 std::for_each(subgraphLayers.begin(), subgraphLayers.end(), [&selectedBackend](IConnectableLayer* l)
                     {
-                        ARMNN_ASSERT(l);
                         PolymorphicDowncast<Layer*>(l)->SetBackendId(selectedBackend);
                     });
             }
@@ -1487,7 +1486,11 @@
                                                             bool importEnabled)
 {
     Layer& layer = slot.GetOwningLayer();
-    ARMNN_ASSERT(layer.GetType() == LayerType::Input);
+
+    if (layer.GetType() != LayerType::Input)
+    {
+        throw armnn::Exception("layer must be of type \"Input\".");
+    }
 
     // Explicitly select the tensorhandle factory for InputLayer because the rules for it are slightly different. It
     // doesn't matter which backend it is assigned to because they all use the same implementation, which
@@ -1514,7 +1517,10 @@
         const Layer& connectedLayer = connection->GetOwningLayer();
 
         auto toBackend = backends.find(connectedLayer.GetBackendId());
-        ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
+        if (toBackend == backends.end())
+        {
+            throw armnn::Exception("Backend id not found for the connected layer");
+        }
 
         if (!toBackend->second.get()->SupportsTensorAllocatorAPI())
         {
@@ -1672,7 +1678,10 @@
         const Layer& connectedLayer = connection->GetOwningLayer();
 
         auto toBackend = backends.find(connectedLayer.GetBackendId());
-        ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
+        if (toBackend == backends.end())
+        {
+            throw armnn::Exception("Backend id not found for the connected layer");
+        }
 
         auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
         for (auto&& src : srcPrefs)
@@ -1734,7 +1743,10 @@
                                    bool importEnabled)
 {
     auto toBackend = backends.find(connectedLayer.GetBackendId());
-    ARMNN_ASSERT_MSG(toBackend != backends.end(), "Backend id not found for the connected layer");
+    if (toBackend == backends.end())
+    {
+        throw armnn::Exception("Backend id not found for the connected layer");
+    }
 
     auto dstPrefs = toBackend->second.get()->GetHandleFactoryPreferences();
 
@@ -1827,11 +1839,12 @@
 
     optGraph.ForEachLayer([&backends, &registry, &result, &errMessages, importEnabled, exportEnabled](Layer* layer)
     {
-        ARMNN_ASSERT(layer);
-
         // Lets make sure the backend is in our list of supported backends. Something went wrong during backend
         // assignment if this check fails
-        ARMNN_ASSERT(backends.find(layer->GetBackendId()) != backends.end());
+        if (backends.find(layer->GetBackendId()) == backends.end())
+        {
+            throw armnn::Exception("Backend id not found for the layer");
+        }
 
         // Check each output separately
         for (unsigned int slotIdx = 0; slotIdx < layer->GetNumOutputSlots(); slotIdx++)
diff --git a/src/armnn/NetworkUtils.cpp b/src/armnn/NetworkUtils.cpp
index 1d46f02..f711209 100644
--- a/src/armnn/NetworkUtils.cpp
+++ b/src/armnn/NetworkUtils.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022,2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -125,7 +125,11 @@
             graph.InsertNewLayer<DebugLayer>(*outputSlot, debugName.c_str(), toFile);
 
         // Sets output tensor info for the debug layer.
-        ARMNN_ASSERT(debugLayer->GetInputSlot(0).GetConnectedOutputSlot() == &(*outputSlot));
+        if (debugLayer->GetInputSlot(0).GetConnectedOutputSlot() != &(*outputSlot))
+        {
+            throw armnn::Exception("unable to set output tensor info for the debug layer.");
+        }
+
         TensorInfo debugInfo = debugLayer->GetInputSlot(0).GetConnectedOutputSlot()->GetTensorInfo();
 
         debugLayer->GetOutputSlot().SetTensorInfo(debugInfo);
diff --git a/src/armnn/Optimizer.cpp b/src/armnn/Optimizer.cpp
index 1d6a52e..3d1b67e 100644
--- a/src/armnn/Optimizer.cpp
+++ b/src/armnn/Optimizer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017,2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "Optimizer.hpp"
@@ -29,7 +29,11 @@
         --it;
         for (auto&& optimization : optimizations)
         {
-            ARMNN_ASSERT(*it);
+            if (!*it)
+            {
+                throw armnn::NullPointerException("Layer must not be null.");
+            }
+
             optimization->Run(graph, **it);
 
             if ((*it)->IsOutputUnconnected())
diff --git a/src/armnn/Profiling.cpp b/src/armnn/Profiling.cpp
index db2962e..78afb05 100644
--- a/src/armnn/Profiling.cpp
+++ b/src/armnn/Profiling.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017,2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "Profiling.hpp"
@@ -43,7 +43,7 @@
 Measurement FindMeasurement(const std::string& name, const Event* event)
 {
 
-    ARMNN_ASSERT(event != nullptr);
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(event, "event should not be null.");
 
     // Search though the measurements.
     for (const auto& measurement : event->GetMeasurements())
@@ -61,7 +61,7 @@
 
 std::vector<Measurement> FindKernelMeasurements(const Event* event)
 {
-    ARMNN_ASSERT(event != nullptr);
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(event, "event should not be null.");
 
     std::vector<Measurement> measurements;
 
@@ -230,13 +230,24 @@
 {
     event->Stop();
 
-    ARMNN_ASSERT(!m_Parents.empty());
-    ARMNN_ASSERT(event == m_Parents.top());
+    if (m_Parents.empty())
+    {
+        throw armnn::Exception("m_Parents must not be empty.");
+    }
+
+    if (event != m_Parents.top())
+    {
+        throw armnn::Exception("event must match the top of m_Parents.");
+    }
+
     m_Parents.pop();
 
     Event* parent = m_Parents.empty() ? nullptr : m_Parents.top();
-    IgnoreUnused(parent);
-    ARMNN_ASSERT(event->GetParentEvent() == parent);
+
+    if (event->GetParentEvent() != parent)
+    {
+        throw armnn::Exception("parent events must match.");
+    }
 
 #if ARMNN_STREAMLINE_ENABLED
     ANNOTATE_CHANNEL_END(uint32_t(m_Parents.size()));
@@ -305,7 +316,7 @@
                         JsonChildObject& parentObject,
                         std::map<const Event*, std::vector<const Event*>> descendantsMap)
 {
-    ARMNN_ASSERT(parentEvent);
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(parentEvent, "parentEvent must not be null.");
 
     // If profiling GUID is entered, process it
     if (parentEvent->GetProfilingGuid().has_value())
@@ -339,7 +350,10 @@
             measurementObject.SetUnit(instrumentMeasurements[measurementIndex].m_Unit);
             measurementObject.SetType(JsonObjectType::Measurement);
 
-            ARMNN_ASSERT(parentObject.NumChildren() == childIdx);
+            if (parentObject.NumChildren() != childIdx)
+            {
+                throw armnn::Exception("parentObject must have the same number of children as childIdx.");
+            }
             parentObject.AddChild(measurementObject);
         }
         else
diff --git a/src/armnn/Runtime.cpp b/src/armnn/Runtime.cpp
index b0fc550..a8f1eb7 100644
--- a/src/armnn/Runtime.cpp
+++ b/src/armnn/Runtime.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017, 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2022-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -348,10 +348,14 @@
         // Store backend contexts for the supported ones
         try {
            auto factoryFun = BackendRegistryInstance().GetFactory(id);
-            ARMNN_ASSERT(factoryFun != nullptr);
+
+            if (!factoryFun)
+            {
+                throw armnn::NullPointerException("Factory Function should not be null.");
+            }
+
             auto backend = factoryFun();
-            ARMNN_ASSERT(backend != nullptr);
-            ARMNN_ASSERT(backend.get() != nullptr);
+            ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(backend, "backend should not be null.");
 
             auto customAllocatorMapIterator = options.m_CustomAllocatorMap.find(id);
             if (customAllocatorMapIterator != options.m_CustomAllocatorMap.end() &&
diff --git a/src/armnn/SubgraphView.cpp b/src/armnn/SubgraphView.cpp
index 4259c4f..3ede181 100644
--- a/src/armnn/SubgraphView.cpp
+++ b/src/armnn/SubgraphView.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017, 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017, 2019-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -27,14 +27,17 @@
     std::unordered_set<T> duplicateSet;
     std::for_each(container.begin(), container.end(), [&duplicateSet, &errorMessage](const T& i)
     {
-        // Ignore unused for release builds
-        IgnoreUnused(errorMessage);
-
         // Check if the item is valid
-        ARMNN_ASSERT_MSG(i, errorMessage.c_str());
+        if (!i)
+        {
+            throw armnn::GraphValidationException(errorMessage.c_str());
+        }
 
         // Check if a duplicate has been found
-        ARMNN_ASSERT_MSG(duplicateSet.find(i) == duplicateSet.end(), errorMessage.c_str());
+        if (duplicateSet.find(i) != duplicateSet.end())
+        {
+            throw armnn::GraphValidationException(errorMessage.c_str());
+        }
 
         duplicateSet.insert(i);
     });
@@ -493,7 +496,8 @@
 
 void SubgraphView::SubstituteSubgraph(SubgraphView& subgraph, IConnectableLayer* substituteLayer)
 {
-    ARMNN_ASSERT(substituteLayer != nullptr);
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(substituteLayer, "substituteLayer should not be null");
+
     SubgraphView substituteSubgraph(substituteLayer);
 
     SubstituteSubgraph(subgraph, substituteSubgraph);
diff --git a/src/armnn/SubgraphViewSelector.cpp b/src/armnn/SubgraphViewSelector.cpp
index 9fa8252..6a134a3 100644
--- a/src/armnn/SubgraphViewSelector.cpp
+++ b/src/armnn/SubgraphViewSelector.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017, 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -81,14 +81,19 @@
             for (PartialSubgraph* a : m_Antecedents)
             {
                 size_t numErased = a->m_Dependants.erase(this);
-                ARMNN_ASSERT(numErased == 1);
-                IgnoreUnused(numErased);
+                if (numErased != 1)
+                {
+                    throw armnn::Exception("number of dependents erased must only be 1.");
+                }
                 a->m_Dependants.insert(m_Parent);
             }
             for (PartialSubgraph* a : m_Dependants)
             {
                 size_t numErased = a->m_Antecedents.erase(this);
-                ARMNN_ASSERT(numErased == 1);
+                if (numErased != 1)
+                {
+                    throw armnn::Exception("number of antecedents erased must only be 1.");
+                }
                 IgnoreUnused(numErased);
                 a->m_Antecedents.insert(m_Parent);
             }
@@ -200,7 +205,12 @@
              ++slot)
         {
             OutputSlot* parentLayerOutputSlot = slot->GetConnectedOutputSlot();
-            ARMNN_ASSERT_MSG(parentLayerOutputSlot != nullptr, "The input slots must be connected here.");
+
+            if (!parentLayerOutputSlot)
+            {
+                throw armnn::NullPointerException("The input slots must be connected here.");
+            }
+
             if (parentLayerOutputSlot)
             {
                 Layer& parentLayer = parentLayerOutputSlot->GetOwningLayer();
@@ -273,7 +283,10 @@
     for (auto inputSlot : layer.GetInputSlots())
     {
         auto connectedInput = PolymorphicDowncast<OutputSlot*>(inputSlot.GetConnection());
-        ARMNN_ASSERT_MSG(connectedInput, "Dangling input slot detected.");
+        if (!connectedInput)
+        {
+            throw armnn::Exception("Dangling input slot detected.");
+        }
         Layer& inputLayer = connectedInput->GetOwningLayer();
 
         auto parentInfo = layerInfos.find(&inputLayer);
diff --git a/src/armnn/Tensor.cpp b/src/armnn/Tensor.cpp
index ab4ecc9..3b116d9 100644
--- a/src/armnn/Tensor.cpp
+++ b/src/armnn/Tensor.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017,2024 Arm Ltd. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
diff --git a/src/armnn/layers/AbsLayer.cpp b/src/armnn/layers/AbsLayer.cpp
index 6858b36..67654ac 100644
--- a/src/armnn/layers/AbsLayer.cpp
+++ b/src/armnn/layers/AbsLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -42,7 +42,12 @@
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "AbsLayer");
 }
@@ -52,4 +57,4 @@
     strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
 
-} // namespace armnn
\ No newline at end of file
+} // namespace armnn
diff --git a/src/armnn/layers/ActivationLayer.cpp b/src/armnn/layers/ActivationLayer.cpp
index fe4aaa7..999415d 100644
--- a/src/armnn/layers/ActivationLayer.cpp
+++ b/src/armnn/layers/ActivationLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "ActivationLayer.hpp"
@@ -40,7 +40,12 @@
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ActivationLayer");
 }
diff --git a/src/armnn/layers/ArgMinMaxLayer.cpp b/src/armnn/layers/ArgMinMaxLayer.cpp
index 3798657..537d7d1 100644
--- a/src/armnn/layers/ArgMinMaxLayer.cpp
+++ b/src/armnn/layers/ArgMinMaxLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -36,7 +36,11 @@
 
 std::vector<TensorShape> ArgMinMaxLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 1);
+    if (inputShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                                              "\" - should be \"1\".");
+    }
 
     TensorShape inputShape = inputShapes[0];
     auto inputNumDimensions = inputShape.GetNumDimensions();
@@ -44,7 +48,13 @@
     auto axis = m_Param.m_Axis;
     auto unsignedAxis = armnnUtils::GetUnsignedAxis(inputNumDimensions, axis);
 
-    ARMNN_ASSERT(unsignedAxis <= inputNumDimensions);
+    if (unsignedAxis > inputNumDimensions)
+    {
+        throw armnn::LayerValidationException("Axis must not be greater than number of input dimensions (\""
+                                              + std::to_string(unsignedAxis) +
+                                              "\" vs \""
+                                              + std::to_string(inputNumDimensions) + "\").");
+    }
 
     // 1D input shape results in scalar output
     if (inputShape.GetNumDimensions() == 1)
@@ -81,7 +91,12 @@
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ArgMinMaxLayer");
 }
diff --git a/src/armnn/layers/BatchMatMulLayer.cpp b/src/armnn/layers/BatchMatMulLayer.cpp
index 8b2629c..cafb051 100644
--- a/src/armnn/layers/BatchMatMulLayer.cpp
+++ b/src/armnn/layers/BatchMatMulLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "BatchMatMulLayer.hpp"
@@ -32,7 +32,11 @@
 
 std::vector<TensorShape> BatchMatMulLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 2);
+    if (inputShapes.size() != 2)
+    {
+        throw armnn::LayerValidationException("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                                              "\" - should be \"2\".");
+    }
 
     TensorShape inputXShape = inputShapes[0];
     TensorShape inputYShape = inputShapes[1];
@@ -102,9 +106,14 @@
         GetInputSlot(0).GetTensorInfo().GetShape(),
         GetInputSlot(1).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "BatchMatMulLayer");
 }
 
-} // namespace armnn
\ No newline at end of file
+} // namespace armnn
diff --git a/src/armnn/layers/BatchNormalizationLayer.cpp b/src/armnn/layers/BatchNormalizationLayer.cpp
index 17463f8..9936041 100644
--- a/src/armnn/layers/BatchNormalizationLayer.cpp
+++ b/src/armnn/layers/BatchNormalizationLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "BatchNormalizationLayer.hpp"
@@ -21,10 +21,25 @@
 std::unique_ptr<IWorkload> BatchNormalizationLayer::CreateWorkload(const IWorkloadFactory& factory) const
 {
     // on this level constant data should not be released..
-    ARMNN_ASSERT_MSG(m_Mean != nullptr, "BatchNormalizationLayer: Mean data should not be null.");
-    ARMNN_ASSERT_MSG(m_Variance != nullptr, "BatchNormalizationLayer: Variance data should not be null.");
-    ARMNN_ASSERT_MSG(m_Beta != nullptr, "BatchNormalizationLayer: Beta data should not be null.");
-    ARMNN_ASSERT_MSG(m_Gamma != nullptr, "BatchNormalizationLayer: Gamma data should not be null.");
+    if (!m_Mean)
+    {
+        throw armnn::NullPointerException("BatchNormalizationLayer: Mean data should not be null.");
+    }
+
+    if (!m_Variance)
+    {
+        throw armnn::NullPointerException("BatchNormalizationLayer: Variance data should not be null.");
+    }
+
+    if (!m_Beta)
+    {
+        throw armnn::NullPointerException("BatchNormalizationLayer: Beta data should not be null.");
+    }
+
+    if (!m_Gamma)
+    {
+        throw armnn::NullPointerException("BatchNormalizationLayer: Gamma data should not be null.");
+    }
 
     BatchNormalizationQueueDescriptor descriptor;
     SetAdditionalInfo(descriptor);
@@ -59,7 +74,12 @@
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "BatchNormalizationLayer");
 
diff --git a/src/armnn/layers/BatchToSpaceNdLayer.cpp b/src/armnn/layers/BatchToSpaceNdLayer.cpp
index 63817dd..9f60450 100644
--- a/src/armnn/layers/BatchToSpaceNdLayer.cpp
+++ b/src/armnn/layers/BatchToSpaceNdLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -45,7 +45,12 @@
 
     auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetTensorInfo().GetShape()});
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "BatchToSpaceNdLayer");
 }
diff --git a/src/armnn/layers/CastLayer.cpp b/src/armnn/layers/CastLayer.cpp
index fc1ab81..8dff6ba 100644
--- a/src/armnn/layers/CastLayer.cpp
+++ b/src/armnn/layers/CastLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "CastLayer.hpp"
@@ -41,7 +41,12 @@
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "CastLayer");
 }
diff --git a/src/armnn/layers/ChannelShuffleLayer.cpp b/src/armnn/layers/ChannelShuffleLayer.cpp
index ce6c0ba..b05f63c 100644
--- a/src/armnn/layers/ChannelShuffleLayer.cpp
+++ b/src/armnn/layers/ChannelShuffleLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -42,9 +42,14 @@
 
     auto inferredShapes = Layer::InferOutputShapes({GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ChannelShuffleLayer");
 }
 
-}
\ No newline at end of file
+}
diff --git a/src/armnn/layers/ComparisonLayer.cpp b/src/armnn/layers/ComparisonLayer.cpp
index 5d18a58..dc5437b 100644
--- a/src/armnn/layers/ComparisonLayer.cpp
+++ b/src/armnn/layers/ComparisonLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -35,7 +35,12 @@
 
 std::vector<TensorShape> ComparisonLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 2);
+    if (inputShapes.size() != 2)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"2\".");
+    }
+
     TensorShape input0 = inputShapes[0];
     TensorShape input1 = inputShapes[1];
 
@@ -55,8 +60,10 @@
         unsigned int dim1 = input1[i - shiftedDims];
 
         // Validate inputs are broadcast compatible.
-        ARMNN_ASSERT_MSG(dim0 == dim1 || dim0 == 1 || dim1 == 1,
-                         "Dimensions should either match or one should be of size 1.");
+        if (dim0 != dim1 && dim0 != 1 && dim1 != 1)
+        {
+            throw armnn::Exception("Dimensions should either match or one should be of size 1.");
+        }
 
         dims[i] = std::max(dim0, dim1);
     }
@@ -82,7 +89,13 @@
         GetInputSlot(0).GetTensorInfo().GetShape(),
         GetInputSlot(1).GetTensorInfo().GetShape()
     });
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ComparisonLayer");
 }
diff --git a/src/armnn/layers/ConcatLayer.cpp b/src/armnn/layers/ConcatLayer.cpp
index 4629bf2..021e736 100644
--- a/src/armnn/layers/ConcatLayer.cpp
+++ b/src/armnn/layers/ConcatLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "ConcatLayer.hpp"
@@ -164,7 +164,11 @@
                 OutputSlot* slot = currentLayer->GetInputSlot(i).GetConnectedOutputSlot();
                 OutputHandler& outputHandler = slot->GetOutputHandler();
 
-                ARMNN_ASSERT_MSG(subTensor, "ConcatLayer: Expected a valid sub-tensor for substitution.");
+                if (!subTensor)
+                {
+                    throw armnn::Exception("ConcatLayer: Expected a valid sub-tensor for substitution.");
+                }
+
                 outputHandler.SetData(std::move(subTensor));
 
                 Layer& inputLayer = slot->GetOwningLayer();
@@ -193,7 +197,10 @@
     else
     {
         ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
-        ARMNN_ASSERT(handleFactory);
+        if (!handleFactory)
+        {
+            throw armnn::NullPointerException("handleFactory is returning a nullptr.");
+        }
         CreateTensors(registry, *handleFactory, isMemoryManaged);
     }
 }
@@ -205,7 +212,13 @@
 
 std::vector<TensorShape> ConcatLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == m_Param.GetNumViews());
+    if (inputShapes.size() != m_Param.GetNumViews())
+    {
+        throw armnn::Exception("inputShapes' and m_NumViews' sizes do not match (\""
+                               + std::to_string(inputShapes.size()) +
+                               "\" vs \""
+                               + std::to_string(m_Param.GetNumViews()) + "\")");
+    }
 
     unsigned int numDims = m_Param.GetNumDimensions();
     for (unsigned int i=0; i< inputShapes.size(); i++)
@@ -315,7 +328,12 @@
 
     auto inferredShapes = InferOutputShapes(inputShapes);
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::Exception("inferredShapes has "
+                               + std::to_string(inferredShapes.size()) +
+                               " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConcatLayer");
 }
diff --git a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
index 9fefe20..d0b00cb 100644
--- a/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
+++ b/src/armnn/layers/ConvertFp16ToFp32Layer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -42,7 +42,12 @@
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ConvertFp16ToFp32Layer");
 }
diff --git a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
index f1abba3..898ef30 100644
--- a/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
+++ b/src/armnn/layers/ConvertFp32ToFp16Layer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "ConvertFp32ToFp16Layer.hpp"
@@ -42,7 +42,12 @@
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LayerName");
 }
diff --git a/src/armnn/layers/Convolution2dLayer.cpp b/src/armnn/layers/Convolution2dLayer.cpp
index df971a5..2fcc4aa 100644
--- a/src/armnn/layers/Convolution2dLayer.cpp
+++ b/src/armnn/layers/Convolution2dLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -63,15 +63,30 @@
 
 std::vector<TensorShape> Convolution2dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 2);
+    if (inputShapes.size() != 2)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"2\".");
+    }
+
     const TensorShape& inputShape = inputShapes[0];
     const TensorShape filterShape = inputShapes[1];
 
     // If we support multiple batch dimensions in the future, then this assert will need to change.
-    ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Convolutions will always have 4D input.");
+    if (inputShape.GetNumDimensions() != 4)
+    {
+        throw armnn::Exception("Convolutions will always have 4D input.");
+    }
 
-    ARMNN_ASSERT( m_Param.m_StrideX > 0);
-    ARMNN_ASSERT( m_Param.m_StrideY > 0);
+    if (m_Param.m_StrideX == 0)
+    {
+        throw armnn::Exception("m_StrideX cannot be 0.");
+    }
+
+    if (m_Param.m_StrideY == 0)
+    {
+        throw armnn::Exception("m_StrideY cannot be 0.");
+    }
 
     DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
 
@@ -107,14 +122,21 @@
 
     VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
-    ARMNN_ASSERT_MSG(GetInputSlot(1).GetConnection(),
-                     "Convolution2dLayer: Weights should be connected to input slot 1.");
+    if (!GetInputSlot(1).GetConnection())
+    {
+        throw armnn::NullPointerException("Convolution2dLayer: Weights should be connected to input slot 1.");
+    }
 
     std::vector<TensorShape> inferredShapes = InferOutputShapes({
              GetInputSlot(0).GetTensorInfo().GetShape(),
              GetInputSlot(1).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::Exception("inferredShapes has "
+                               + std::to_string(inferredShapes.size()) +
+                               " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Convolution2dLayer");
 }
diff --git a/src/armnn/layers/Convolution3dLayer.cpp b/src/armnn/layers/Convolution3dLayer.cpp
index 2d697be..89ea004 100644
--- a/src/armnn/layers/Convolution3dLayer.cpp
+++ b/src/armnn/layers/Convolution3dLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -61,15 +61,34 @@
 
 std::vector<TensorShape> Convolution3dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 2);
+    if (inputShapes.size() != 2)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"2\".");
+    }
+
     const TensorShape& inputShape = inputShapes[0];
     const TensorShape& filterShape = inputShapes[1];
 
-    ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 5, "Convolutions will always have 5D input.");
+    if (inputShape.GetNumDimensions() != 5)
+    {
+        throw armnn::Exception("Convolutions will always have 5D input.");
+    }
 
-    ARMNN_ASSERT( m_Param.m_StrideX > 0);
-    ARMNN_ASSERT( m_Param.m_StrideY > 0);
-    ARMNN_ASSERT( m_Param.m_StrideZ > 0);
+    if (m_Param.m_StrideX == 0)
+    {
+        throw armnn::Exception("m_StrideX cannot be 0.");
+    }
+
+    if (m_Param.m_StrideY == 0)
+    {
+        throw armnn::Exception("m_StrideY cannot be 0.");
+    }
+
+    if (m_Param.m_StrideZ == 0)
+    {
+        throw armnn::Exception("m_StrideZ cannot be 0.");
+    }
 
     DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
 
@@ -112,14 +131,21 @@
 
     VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
-    ARMNN_ASSERT_MSG(GetInputSlot(1).GetConnection(),
-                     "Convolution3dLayer: Weights should be connected to input slot 1.");
+    if (!GetInputSlot(1).GetConnection())
+    {
+        throw armnn::LayerValidationException("Convolution3dLayer: Weights should be connected to input slot 1.");
+    }
 
     auto inferredShapes = InferOutputShapes({
         GetInputSlot(0).GetTensorInfo().GetShape(),
         GetInputSlot(1).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Convolution3dLayer");
 }
diff --git a/src/armnn/layers/DebugLayer.cpp b/src/armnn/layers/DebugLayer.cpp
index 01c1c7b..ca8215d 100644
--- a/src/armnn/layers/DebugLayer.cpp
+++ b/src/armnn/layers/DebugLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "DebugLayer.hpp"
@@ -53,7 +53,12 @@
     std::vector<TensorShape> inferredShapes = InferOutputShapes({
         GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::Exception("inferredShapes has "
+                               + std::to_string(inferredShapes.size()) +
+                               " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DebugLayer");
 }
diff --git a/src/armnn/layers/DepthToSpaceLayer.cpp b/src/armnn/layers/DepthToSpaceLayer.cpp
index b94eccc..b303474 100644
--- a/src/armnn/layers/DepthToSpaceLayer.cpp
+++ b/src/armnn/layers/DepthToSpaceLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -40,7 +40,11 @@
 
 std::vector<TensorShape> DepthToSpaceLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 1);
+    if (inputShapes.size() != 1)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"1\".");
+    }
 
     TensorShape inputShape = inputShapes[0];
     TensorShape outputShape(inputShape);
@@ -70,7 +74,12 @@
     std::vector<TensorShape> inferredShapes = InferOutputShapes({
         GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DepthToSpaceLayer");
 }
diff --git a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
index 4f08b23..69c3d38 100644
--- a/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
+++ b/src/armnn/layers/DepthwiseConvolution2dLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -64,14 +64,30 @@
 std::vector<TensorShape>
 DepthwiseConvolution2dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 2);
+    if (inputShapes.size() != 2)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"2\".");
+    }
+
     const TensorShape& inputShape  = inputShapes[0];
     const TensorShape& filterShape = inputShapes[1];
 
-    ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Convolutions will always have 4D input.");
+    if (inputShape.GetNumDimensions() != 4)
+    {
+        throw armnn::Exception("Convolutions will always have 4D input.");
+    }
 
-    ARMNN_ASSERT( m_Param.m_StrideX > 0);
-    ARMNN_ASSERT( m_Param.m_StrideY > 0);
+    if (m_Param.m_StrideX == 0)
+    {
+        throw armnn::Exception("m_StrideX cannot be 0.");
+    }
+
+    if (m_Param.m_StrideY == 0)
+    {
+        throw armnn::Exception("m_StrideY cannot be 0.");
+    }
+
 
     DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
 
@@ -110,15 +126,22 @@
 
     VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
-    ARMNN_ASSERT_MSG(GetInputSlot(1).GetConnection(),
-                     "DepthwiseConvolution2dLayer: Weights data should not be null.");
+    if (!GetInputSlot(1).GetConnection())
+    {
+        throw armnn::LayerValidationException("DepthwiseConvolution2dLayer: Weights data should not be null.");
+    }
 
     auto inferredShapes = InferOutputShapes({
         GetInputSlot(0).GetTensorInfo().GetShape(),
         GetInputSlot(1).GetTensorInfo().GetShape()
     });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DepthwiseConvolution2dLayer");
 }
diff --git a/src/armnn/layers/DequantizeLayer.cpp b/src/armnn/layers/DequantizeLayer.cpp
index b398cf6..79ab969 100644
--- a/src/armnn/layers/DequantizeLayer.cpp
+++ b/src/armnn/layers/DequantizeLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "DequantizeLayer.hpp"
@@ -41,7 +41,12 @@
     std::vector<TensorShape> inferredShapes = InferOutputShapes({
         GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DequantizeLayer");
 }
diff --git a/src/armnn/layers/DetectionPostProcessLayer.cpp b/src/armnn/layers/DetectionPostProcessLayer.cpp
index 6bddf51..f71f72a 100644
--- a/src/armnn/layers/DetectionPostProcessLayer.cpp
+++ b/src/armnn/layers/DetectionPostProcessLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -45,19 +45,33 @@
     VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     // on this level constant data should not be released.
-    ARMNN_ASSERT_MSG(m_Anchors != nullptr, "DetectionPostProcessLayer: Anchors data should not be null.");
+    if (!m_Anchors)
+    {
+        throw armnn::LayerValidationException("DetectionPostProcessLayer: Anchors data should not be null.");
+    }
 
-    ARMNN_ASSERT_MSG(GetNumOutputSlots() == 4, "DetectionPostProcessLayer: The layer should return 4 outputs.");
+    if (GetNumOutputSlots() != 4)
+    {
+        throw armnn::LayerValidationException("DetectionPostProcessLayer: The layer should return 4 outputs.");
+    }
 
     std::vector<TensorShape> inferredShapes = InferOutputShapes(
             { GetInputSlot(0).GetTensorInfo().GetShape(),
               GetInputSlot(1).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 4);
-    ARMNN_ASSERT(inferredShapes[0].GetDimensionality() == Dimensionality::Specified);
-    ARMNN_ASSERT(inferredShapes[1].GetDimensionality() == Dimensionality::Specified);
-    ARMNN_ASSERT(inferredShapes[2].GetDimensionality() == Dimensionality::Specified);
-    ARMNN_ASSERT(inferredShapes[3].GetDimensionality() == Dimensionality::Specified);
+    if (inferredShapes.size() != 4)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " element(s) - should only have 4.");
+    }
+
+    if (std::any_of(inferredShapes.begin(), inferredShapes.end(), [] (auto&& inferredShape) {
+        return inferredShape.GetDimensionality() != Dimensionality::Specified;
+    }))
+    {
+        throw armnn::Exception("One of inferredShapes' dimensionalities is not specified.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "DetectionPostProcessLayer");
 
diff --git a/src/armnn/layers/ElementwiseBaseLayer.cpp b/src/armnn/layers/ElementwiseBaseLayer.cpp
index 3cbddfa..e813f48 100644
--- a/src/armnn/layers/ElementwiseBaseLayer.cpp
+++ b/src/armnn/layers/ElementwiseBaseLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2018,2020-2021,2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2018,2020-2021,2023-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -22,7 +22,12 @@
 
 std::vector<TensorShape> ElementwiseBaseLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 2);
+    if (inputShapes.size() != 2)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"2\".");
+    }
+
     TensorShape input0 = inputShapes[0];
     TensorShape input1 = inputShapes[1];
 
@@ -43,8 +48,10 @@
         unsigned int dim1 = input1[i - shiftedDims];
 
         // Validate inputs are broadcast compatible.
-        ARMNN_ASSERT_MSG(dim0 == dim1 || dim0 == 1 || dim1 == 1,
-                         "Dimensions should either match or one should be of size 1.");
+        if (dim0 != dim1 && dim0 != 1 && dim1 != 1)
+        {
+            throw armnn::Exception("Dimensions should either match or one should be of size 1.");
+        }
 
         dims[i] = std::max(dim0, dim1);
     }
@@ -69,7 +76,12 @@
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape(),
                                               GetInputSlot(1).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, GetLayerTypeAsCString(GetType()));
 }
diff --git a/src/armnn/layers/ElementwiseBinaryLayer.cpp b/src/armnn/layers/ElementwiseBinaryLayer.cpp
index 67619fc..5459aaf 100644
--- a/src/armnn/layers/ElementwiseBinaryLayer.cpp
+++ b/src/armnn/layers/ElementwiseBinaryLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -30,7 +30,12 @@
 
 std::vector<TensorShape> ElementwiseBinaryLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 2);
+    if (inputShapes.size() != 2)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"2\".");
+    }
+
     TensorShape input0 = inputShapes[0];
     TensorShape input1 = inputShapes[1];
 
@@ -51,8 +56,10 @@
         unsigned int dim1 = input1[i - shiftedDims];
 
         // Validate inputs are broadcast compatible.
-        ARMNN_ASSERT_MSG(dim0 == dim1 || dim0 == 1 || dim1 == 1,
-                         "Dimensions should either match or one should be of size 1.");
+        if (dim0 != dim1 && dim0 != 1 && dim1 != 1)
+        {
+            throw armnn::Exception("Dimensions should either match or one should be of size 1.");
+        }
 
         dims[i] = std::max(dim0, dim1);
     }
@@ -77,7 +84,12 @@
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape(),
                                               GetInputSlot(1).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, GetLayerTypeAsCString(GetType()));
 }
diff --git a/src/armnn/layers/ElementwiseUnaryLayer.cpp b/src/armnn/layers/ElementwiseUnaryLayer.cpp
index c648f9b..791a3d5 100644
--- a/src/armnn/layers/ElementwiseUnaryLayer.cpp
+++ b/src/armnn/layers/ElementwiseUnaryLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -34,7 +34,12 @@
 std::vector<TensorShape> ElementwiseUnaryLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
     // Should return the shape of the input tensor
-    ARMNN_ASSERT(inputShapes.size() == 1);
+    if (inputShapes.size() != 1)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"1\".");
+    }
+
     const TensorShape& input = inputShapes[0];
 
     return std::vector<TensorShape>({ input });
@@ -50,7 +55,13 @@
 
     std::vector<TensorShape> inferredShapes = InferOutputShapes({
         GetInputSlot(0).GetTensorInfo().GetShape()});
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::Exception("inferredShapes has "
+                               + std::to_string(inferredShapes.size()) +
+                               " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, GetLayerTypeAsCString(GetType()));
 }
diff --git a/src/armnn/layers/FakeQuantizationLayer.cpp b/src/armnn/layers/FakeQuantizationLayer.cpp
index a612b5a..bb9e6a4 100644
--- a/src/armnn/layers/FakeQuantizationLayer.cpp
+++ b/src/armnn/layers/FakeQuantizationLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "FakeQuantizationLayer.hpp"
@@ -41,7 +41,12 @@
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::Exception("inferredShapes has "
+                               + std::to_string(inferredShapes.size()) +
+                               " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "FakeQuantizationLayer");
 }
diff --git a/src/armnn/layers/FillLayer.cpp b/src/armnn/layers/FillLayer.cpp
index af01b99..c40efb3 100644
--- a/src/armnn/layers/FillLayer.cpp
+++ b/src/armnn/layers/FillLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "FillLayer.hpp"
@@ -41,7 +41,12 @@
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::Exception("inferredShapes has "
+                               + std::to_string(inferredShapes.size()) +
+                               " elements - should only have 1.");
+    }
 
     // Cannot validate the output shape from the input shape. but we can validate that the correct dims have been
     // inferred
diff --git a/src/armnn/layers/FloorLayer.cpp b/src/armnn/layers/FloorLayer.cpp
index 2db8d91..1177b93 100644
--- a/src/armnn/layers/FloorLayer.cpp
+++ b/src/armnn/layers/FloorLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "FloorLayer.hpp"
@@ -40,7 +40,13 @@
     VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::Exception("inferredShapes has "
+                               + std::to_string(inferredShapes.size()) +
+                               " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "FloorLayer");
 }
diff --git a/src/armnn/layers/FullyConnectedLayer.cpp b/src/armnn/layers/FullyConnectedLayer.cpp
index 87a8ada..5b6b2a3 100644
--- a/src/armnn/layers/FullyConnectedLayer.cpp
+++ b/src/armnn/layers/FullyConnectedLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "FullyConnectedLayer.hpp"
@@ -34,7 +34,12 @@
 
 std::vector<TensorShape> FullyConnectedLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 2);
+    if (inputShapes.size() != 2)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"2\".");
+    }
+
     const TensorShape& inputShape = inputShapes[0];
     const TensorShape weightShape = inputShapes[1];
 
@@ -55,8 +60,17 @@
             {GetInputSlot(0).GetTensorInfo().GetShape(),
              GetInputSlot(1).GetTensorInfo().GetShape()});
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
-    ARMNN_ASSERT(inferredShapes[0].GetDimensionality() == Dimensionality::Specified);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
+
+    if (inferredShapes[0].GetDimensionality() != Dimensionality::Specified)
+    {
+        throw armnn::LayerValidationException("inferredShapes' dimensionality has not been specified.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "FullyConnectedLayer");
 }
diff --git a/src/armnn/layers/GatherLayer.cpp b/src/armnn/layers/GatherLayer.cpp
index ae5ecd6..359f311 100644
--- a/src/armnn/layers/GatherLayer.cpp
+++ b/src/armnn/layers/GatherLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -33,7 +33,12 @@
 
 std::vector<TensorShape> GatherLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 2);
+    if (inputShapes.size() != 2)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"2\".");
+    }
+
     const TensorShape& params = inputShapes[0];
     const TensorShape& indices = inputShapes[1];
 
@@ -82,9 +87,19 @@
     std::vector<TensorShape> inferredShapes = InferOutputShapes(
             {GetInputSlot(0).GetTensorInfo().GetShape(),
              GetInputSlot(1).GetTensorInfo().GetShape()});
-    ARMNN_ASSERT(inferredShapes.size() == 1);
-    ARMNN_ASSERT(inferredShapes[0].GetDimensionality() == Dimensionality::Specified ||
-                 inferredShapes[0].GetDimensionality() == Dimensionality::Scalar);
+
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
+
+    if (inferredShapes[0].GetDimensionality() != Dimensionality::Specified &&
+        inferredShapes[0].GetDimensionality() != Dimensionality::Scalar)
+    {
+        throw armnn::LayerValidationException("inferredShapes' dimensionality is neither specified nor scalar.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "GatherLayer");
 }
diff --git a/src/armnn/layers/GatherNdLayer.cpp b/src/armnn/layers/GatherNdLayer.cpp
index 0f06946..56e1500 100644
--- a/src/armnn/layers/GatherNdLayer.cpp
+++ b/src/armnn/layers/GatherNdLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -33,7 +33,12 @@
 
 std::vector<TensorShape> GatherNdLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 2);
+    if (inputShapes.size() != 2)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"2\".");
+    }
+
     const TensorShape& params = inputShapes[0];
     const TensorShape& indices = inputShapes[1];
 
@@ -47,7 +52,13 @@
 
     // last dimension of indices
     unsigned int index_depth = indices[indicesDim - 1];
-    ARMNN_ASSERT(index_depth <= paramsDim);
+    if (index_depth > paramsDim)
+    {
+        throw armnn::Exception("index_depth must not be greater than paramsDim (\""
+                               + std::to_string(index_depth) +
+                               "\" vs \""
+                               + std::to_string(paramsDim) + "\")");
+    }
 
     // all but the last dimension of indices
     std::vector<unsigned int> outer_shape;
@@ -86,9 +97,19 @@
     std::vector<TensorShape> inferredShapes = InferOutputShapes(
             {GetInputSlot(0).GetTensorInfo().GetShape(),
              GetInputSlot(1).GetTensorInfo().GetShape()});
-    ARMNN_ASSERT(inferredShapes.size() == 1);
-    ARMNN_ASSERT(inferredShapes[0].GetDimensionality() == Dimensionality::Specified ||
-                 inferredShapes[0].GetDimensionality() == Dimensionality::Scalar);
+
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
+
+    if (inferredShapes[0].GetDimensionality() != Dimensionality::Specified &&
+        inferredShapes[0].GetDimensionality() != Dimensionality::Scalar)
+    {
+        throw armnn::LayerValidationException("inferredShapes' dimensionality is neither specified nor scalar.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "GatherNdLayer");
 }
diff --git a/src/armnn/layers/InstanceNormalizationLayer.cpp b/src/armnn/layers/InstanceNormalizationLayer.cpp
index db6cd20..9cc9745 100644
--- a/src/armnn/layers/InstanceNormalizationLayer.cpp
+++ b/src/armnn/layers/InstanceNormalizationLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "InstanceNormalizationLayer.hpp"
@@ -41,7 +41,12 @@
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "InstanceNormalizationLayer");
 }
diff --git a/src/armnn/layers/L2NormalizationLayer.cpp b/src/armnn/layers/L2NormalizationLayer.cpp
index 2d268dd..14a5f90 100644
--- a/src/armnn/layers/L2NormalizationLayer.cpp
+++ b/src/armnn/layers/L2NormalizationLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "L2NormalizationLayer.hpp"
@@ -41,7 +41,12 @@
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "L2NormalizationLayer");
 }
diff --git a/src/armnn/layers/LogSoftmaxLayer.cpp b/src/armnn/layers/LogSoftmaxLayer.cpp
index 872d422..da82dfe 100644
--- a/src/armnn/layers/LogSoftmaxLayer.cpp
+++ b/src/armnn/layers/LogSoftmaxLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -40,7 +40,13 @@
     VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LogSoftmaxLayer");
 }
diff --git a/src/armnn/layers/LogicalBinaryLayer.cpp b/src/armnn/layers/LogicalBinaryLayer.cpp
index 84a6e8e..a781d6e 100644
--- a/src/armnn/layers/LogicalBinaryLayer.cpp
+++ b/src/armnn/layers/LogicalBinaryLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -33,11 +33,23 @@
 
 std::vector<TensorShape> LogicalBinaryLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 2);
+    if (inputShapes.size() != 2)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"2\".");
+    }
+
     const TensorShape& input0 = inputShapes[0];
     const TensorShape& input1 = inputShapes[1];
 
-    ARMNN_ASSERT(input0.GetNumDimensions() == input1.GetNumDimensions());
+    if (input0.GetNumDimensions() != input1.GetNumDimensions())
+    {
+        throw armnn::Exception("Input dimensions do not match (\""
+                               + std::to_string(input0.GetNumDimensions()) +
+                               "\" vs \""
+                               + std::to_string(input1.GetNumDimensions()) + "\").");
+    }
+
     unsigned int numDims = input0.GetNumDimensions();
 
     std::vector<unsigned int> dims(numDims);
@@ -46,8 +58,10 @@
         unsigned int dim0 = input0[i];
         unsigned int dim1 = input1[i];
 
-        ARMNN_ASSERT_MSG(dim0 == dim1 || dim0 == 1 || dim1 == 1,
-                         "Dimensions should either match or one should be of size 1.");
+        if (dim0 != dim1 && dim0 != 1 && dim1 != 1)
+        {
+            throw armnn::Exception("Dimensions should either match or one should be of size 1.");
+        }
 
         dims[i] = std::max(dim0, dim1);
     }
@@ -67,7 +81,13 @@
         GetInputSlot(0).GetTensorInfo().GetShape(),
         GetInputSlot(1).GetTensorInfo().GetShape()
     });
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LogicalBinaryLayer");
 }
diff --git a/src/armnn/layers/LstmLayer.cpp b/src/armnn/layers/LstmLayer.cpp
index 0e6f3d8..d87ad64 100644
--- a/src/armnn/layers/LstmLayer.cpp
+++ b/src/armnn/layers/LstmLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "LstmLayer.hpp"
@@ -149,7 +149,11 @@
 
 std::vector<TensorShape> LstmLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 3);
+    if (inputShapes.size() != 3)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"3\".");
+    }
 
     // Get input values for validation
     unsigned int batchSize = inputShapes[0][0];
@@ -179,69 +183,148 @@
         GetInputSlot(2).GetTensorInfo().GetShape()
     });
 
-    ARMNN_ASSERT(inferredShapes.size() == 4);
+    if (inferredShapes.size() != 4)
+    {
+        throw armnn::Exception("inferredShapes has "
+                               + std::to_string(inferredShapes.size()) +
+                               " element(s) - should only have 4.");
+    }
 
     // Check if the weights are nullptr
-    ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToForgetWeights != nullptr,
-                     "LstmLayer: m_BasicParameters.m_InputToForgetWeights should not be null.");
-    ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToCellWeights != nullptr,
-                     "LstmLayer: m_BasicParameters.m_InputToCellWeights should not be null.");
-    ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToOutputWeights != nullptr,
-                     "LstmLayer: m_BasicParameters.m_InputToOutputWeights should not be null.");
-    ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToForgetWeights != nullptr,
-                     "LstmLayer: m_BasicParameters.m_RecurrentToForgetWeights should not be null.");
-    ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToCellWeights != nullptr,
-                     "LstmLayer: m_BasicParameters.m_RecurrentToCellWeights should not be null.");
-    ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToOutputWeights != nullptr,
-                     "LstmLayer: m_BasicParameters.m_RecurrentToOutputWeights should not be null.");
-    ARMNN_ASSERT_MSG(m_BasicParameters.m_ForgetGateBias != nullptr,
-                     "LstmLayer: m_BasicParameters.m_ForgetGateBias should not be null.");
-    ARMNN_ASSERT_MSG(m_BasicParameters.m_CellBias != nullptr,
-                     "LstmLayer: m_BasicParameters.m_CellBias should not be null.");
-    ARMNN_ASSERT_MSG(m_BasicParameters.m_OutputGateBias != nullptr,
-                     "LstmLayer: m_BasicParameters.m_OutputGateBias should not be null.");
+    if (!m_BasicParameters.m_InputToForgetWeights)
+    {
+        throw armnn::NullPointerException("LstmLayer: "
+                                          "m_BasicParameters.m_InputToForgetWeights should not be null.");
+    }
+
+    if (!m_BasicParameters.m_InputToCellWeights)
+    {
+        throw armnn::NullPointerException("LstmLayer: "
+                                          "m_BasicParameters.m_InputToCellWeights should not be null.");
+    }
+
+    if (!m_BasicParameters.m_InputToOutputWeights)
+    {
+        throw armnn::NullPointerException("LstmLayer: "
+                                          "m_BasicParameters.m_InputToOutputWeights should not be null.");
+    }
+
+    if (!m_BasicParameters.m_RecurrentToForgetWeights)
+    {
+        throw armnn::NullPointerException("LstmLayer: "
+                                          "m_BasicParameters.m_RecurrentToForgetWeights should not be null.");
+    }
+
+    if (!m_BasicParameters.m_RecurrentToCellWeights)
+    {
+        throw armnn::NullPointerException("LstmLayer: "
+                                          "m_BasicParameters.m_RecurrentToCellWeights should not be null.");
+    }
+
+    if (!m_BasicParameters.m_RecurrentToOutputWeights)
+    {
+        throw armnn::NullPointerException("LstmLayer: "
+                                          "m_BasicParameters.m_RecurrentToOutputWeights should not be null.");
+    }
+
+    if (!m_BasicParameters.m_ForgetGateBias)
+    {
+        throw armnn::NullPointerException("LstmLayer: "
+                                          "m_BasicParameters.m_ForgetGateBias should not be null.");
+    }
+
+    if (!m_BasicParameters.m_CellBias)
+    {
+        throw armnn::NullPointerException("LstmLayer: "
+                                          "m_BasicParameters.m_CellBias should not be null.");
+    }
+
+    if (!m_BasicParameters.m_OutputGateBias)
+    {
+        throw armnn::NullPointerException("LstmLayer: "
+                                          "m_BasicParameters.m_OutputGateBias should not be null.");
+    }
 
     if (!m_Param.m_CifgEnabled)
     {
-        ARMNN_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights != nullptr,
-                         "LstmLayer: m_CifgParameters.m_InputToInputWeights should not be null.");
-        ARMNN_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights != nullptr,
-                         "LstmLayer: m_CifgParameters.m_RecurrentToInputWeights should not be null.");
-        ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr,
-                         "LstmLayer: m_CifgParameters.m_InputGateBias should not be null.");
+        if (!m_CifgParameters.m_InputToInputWeights)
+        {
+            throw armnn::NullPointerException("LstmLayer: "
+                                              "m_CifgParameters.m_InputToInputWeights should not be null.");
+        }
+
+        if (!m_CifgParameters.m_RecurrentToInputWeights)
+        {
+            throw armnn::NullPointerException("LstmLayer: "
+                                              "m_CifgParameters.m_RecurrentToInputWeights should not be null.");
+        }
+
+        if (!m_CifgParameters.m_InputGateBias)
+        {
+            throw armnn::NullPointerException("LstmLayer: "
+                                              "m_CifgParameters.m_InputGateBias should not be null.");
+        }
 
         ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LstmLayer");
     }
     else
     {
-        ARMNN_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights == nullptr,
-            "LstmLayer: m_CifgParameters.m_InputToInputWeights should not have a value when CIFG is enabled.");
-        ARMNN_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights == nullptr,
-            "LstmLayer: m_CifgParameters.m_RecurrentToInputWeights should not have a value when CIFG is enabled.");
-        ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr,
-            "LstmLayer: m_CifgParameters.m_InputGateBias should not have a value when CIFG is enabled.");
+        if (m_CifgParameters.m_InputToInputWeights)
+        {
+            throw armnn::Exception("LstmLayer: "
+                                   "m_CifgParameters.m_InputToInputWeights should not have a value "
+                                   "when CIFG is enabled.");
+        }
+
+        if (m_CifgParameters.m_RecurrentToInputWeights)
+        {
+            throw armnn::Exception("LstmLayer: "
+                                   "m_CifgParameters.m_RecurrentToInputWeights should not have a value "
+                                   "when CIFG is enabled.");
+        }
+
+        if (m_CifgParameters.m_InputGateBias)
+        {
+            throw armnn::Exception("LstmLayer: "
+                                   "m_CifgParameters.m_InputGateBias should not have a value "
+                                   "when CIFG is enabled.");
+        }
 
         ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "LstmLayer");
     }
 
     if (m_Param.m_ProjectionEnabled)
     {
-        ARMNN_ASSERT_MSG(m_ProjectionParameters.m_ProjectionWeights != nullptr,
-                         "LstmLayer: m_ProjectionParameters.m_ProjectionWeights should not be null.");
+        if (!m_ProjectionParameters.m_ProjectionWeights)
+        {
+            throw armnn::NullPointerException("LstmLayer: "
+                                              "m_ProjectionParameters.m_ProjectionWeights should not be null.");
+        }
     }
 
     if (m_Param.m_PeepholeEnabled)
     {
         if (!m_Param.m_CifgEnabled)
         {
-            ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToInputWeights != nullptr,
-                             "LstmLayer: m_PeepholeParameters.m_CellToInputWeights should not be null "
-                             "when Peephole is enabled and CIFG is disabled.");
+            if (!m_PeepholeParameters.m_CellToInputWeights)
+            {
+                throw armnn::NullPointerException("LstmLayer: "
+                                                  "m_PeepholeParameters.m_CellToInputWeights should not be null "
+                                                  "when Peephole is enabled and CIFG is disabled.");
+            }
         }
-        ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToForgetWeights != nullptr,
-                         "LstmLayer: m_PeepholeParameters.m_CellToForgetWeights should not be null.");
-        ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToOutputWeights != nullptr,
-                         "LstmLayer: m_PeepholeParameters.m_CellToOutputWeights should not be null.");
+
+        if (!m_PeepholeParameters.m_CellToForgetWeights)
+        {
+            throw armnn::NullPointerException("LstmLayer: "
+                                              "m_PeepholeParameters.m_CellToForgetWeights should not be null.");
+        }
+
+        if (!m_PeepholeParameters.m_CellToOutputWeights)
+        {
+            throw armnn::NullPointerException("LstmLayer: "
+                                              "m_PeepholeParameters.m_CellToOutputWeights should not be null.");
+        }
     }
 
     ValidateAndCopyShape(
@@ -255,15 +338,30 @@
     {
         if(!m_Param.m_CifgEnabled)
         {
-            ARMNN_ASSERT_MSG(m_LayerNormParameters.m_InputLayerNormWeights != nullptr,
-                             "LstmLayer: m_LayerNormParameters.m_inputLayerNormWeights should not be null.");
+            if (!m_LayerNormParameters.m_InputLayerNormWeights)
+            {
+                throw armnn::NullPointerException("LstmLayer: "
+                                                  "m_LayerNormParameters.m_inputLayerNormWeights should not be null.");
+            }
         }
-        ARMNN_ASSERT_MSG(m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr,
-                         "LstmLayer: m_LayerNormParameters.m_forgetLayerNormWeights should not be null.");
-        ARMNN_ASSERT_MSG(m_LayerNormParameters.m_CellLayerNormWeights != nullptr,
-                         "LstmLayer: m_LayerNormParameters.m_cellLayerNormWeights should not be null.");
-        ARMNN_ASSERT_MSG(m_LayerNormParameters.m_OutputLayerNormWeights != nullptr,
-                         "LstmLayer: m_LayerNormParameters.m_outputLayerNormWeights should not be null.");
+
+        if (!m_LayerNormParameters.m_ForgetLayerNormWeights)
+        {
+            throw armnn::NullPointerException("LstmLayer: "
+                                              "m_LayerNormParameters.m_forgetLayerNormWeights should not be null.");
+        }
+
+        if (!m_LayerNormParameters.m_CellLayerNormWeights)
+        {
+            throw armnn::NullPointerException("LstmLayer: "
+                                              "m_LayerNormParameters.m_cellLayerNormWeights should not be null.");
+        }
+
+        if (!m_LayerNormParameters.m_OutputLayerNormWeights)
+        {
+            throw armnn::NullPointerException("LstmLayer: "
+                                              "m_LayerNormParameters.m_outputLayerNormWeights should not be null.");
+        }
     }
 }
 
diff --git a/src/armnn/layers/MapLayer.cpp b/src/armnn/layers/MapLayer.cpp
index 6141974..71814a2 100644
--- a/src/armnn/layers/MapLayer.cpp
+++ b/src/armnn/layers/MapLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020,2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "MapLayer.hpp"
@@ -38,7 +38,11 @@
 {
     // validates that the input is connected.
     VerifyLayerConnections(1, CHECK_LOCATION());
-    ARMNN_ASSERT(GetNumOutputSlots() == 0);
+    if (GetNumOutputSlots() != 0)
+    {
+        throw armnn::LayerValidationException("Output slots must be \"0\" - currently \""
+                                              + std::to_string(GetNumOutputSlots()) + "\".");
+    }
 }
 
 void MapLayer::ExecuteStrategy(IStrategy& strategy) const
diff --git a/src/armnn/layers/MeanLayer.cpp b/src/armnn/layers/MeanLayer.cpp
index bd49f50..62a39237 100644
--- a/src/armnn/layers/MeanLayer.cpp
+++ b/src/armnn/layers/MeanLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -52,19 +52,35 @@
     std::vector<TensorShape> inferredShapes = InferOutputShapes(
             { GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
-    ARMNN_ASSERT(inferredShapes[0].GetDimensionality() == Dimensionality::Specified);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
+
+    if (inferredShapes[0].GetDimensionality() != Dimensionality::Specified)
+    {
+        throw armnn::LayerValidationException("inferredShapes' dimensionality has not been specified.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MeanLayer");
 }
 
 std::vector<TensorShape> MeanLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 1);
+    if (inputShapes.size() != 1)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"1\".");
+    }
+
     const TensorShape& input = inputShapes[0];
 
-    ARMNN_ASSERT_MSG(input.GetNumDimensions() > 0 && input.GetNumDimensions() <= 4,
-                     "MeanLayer: Mean supports up to 4D input.");
+    if (auto inputDims = input.GetNumDimensions(); inputDims != std::clamp(inputDims, 1u, 4u))
+    {
+        throw armnn::Exception("MeanLayer: Mean supports up to 4D input.");
+    }
 
     unsigned int rank = input.GetNumDimensions();
     unsigned int outputRank = 0;
diff --git a/src/armnn/layers/MemCopyLayer.cpp b/src/armnn/layers/MemCopyLayer.cpp
index 6dd20344..6fc7d73 100644
--- a/src/armnn/layers/MemCopyLayer.cpp
+++ b/src/armnn/layers/MemCopyLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "MemCopyLayer.hpp"
@@ -44,7 +44,12 @@
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MemCopyLayer");
 }
diff --git a/src/armnn/layers/MemImportLayer.cpp b/src/armnn/layers/MemImportLayer.cpp
index a1c92f6..10e6cd4 100644
--- a/src/armnn/layers/MemImportLayer.cpp
+++ b/src/armnn/layers/MemImportLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "MemImportLayer.hpp"
@@ -44,7 +44,12 @@
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MemImportLayer");
 }
diff --git a/src/armnn/layers/MergeLayer.cpp b/src/armnn/layers/MergeLayer.cpp
index a3b098a..e8f9217 100644
--- a/src/armnn/layers/MergeLayer.cpp
+++ b/src/armnn/layers/MergeLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "MergeLayer.hpp"
@@ -40,14 +40,23 @@
         GetInputSlot(1).GetTensorInfo().GetShape(),
     });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "MergeLayer");
 }
 
 std::vector<TensorShape> MergeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 2);
+    if (inputShapes.size() != 2)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"2\".");
+    }
 
     ConditionalThrowIfNotEqual<LayerValidationException>(
         "MergeLayer: TensorShapes set on inputs do not match",
diff --git a/src/armnn/layers/NormalizationLayer.cpp b/src/armnn/layers/NormalizationLayer.cpp
index 24b6788..b604b05 100644
--- a/src/armnn/layers/NormalizationLayer.cpp
+++ b/src/armnn/layers/NormalizationLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "NormalizationLayer.hpp"
@@ -41,7 +41,12 @@
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "NormalizationLayer");
 }
diff --git a/src/armnn/layers/PadLayer.cpp b/src/armnn/layers/PadLayer.cpp
index 0024ba5..4b0b0e1 100644
--- a/src/armnn/layers/PadLayer.cpp
+++ b/src/armnn/layers/PadLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -41,12 +41,28 @@
 
 std::vector<TensorShape> PadLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 1);
+    if (inputShapes.size() != 1)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"1\".");
+    }
+
     const TensorShape& inputShape = inputShapes[0];
 
     unsigned int rank = inputShape.GetNumDimensions();
-    ARMNN_ASSERT(m_Param.m_PadList.size() == rank);
-    ARMNN_ASSERT(rank != 0);
+
+    if (m_Param.m_PadList.size() != rank)
+    {
+        throw armnn::Exception("Mismatch in size of m_PadList and rank (\""
+                               + std::to_string(m_Param.m_PadList.size()) +
+                               "\" vs \""
+                               + std::to_string(rank) + "\").");
+    }
+
+    if (rank == 0)
+    {
+        throw armnn::Exception("rank must not equal 0.");
+    }
 
     std::vector<unsigned int> outputDimensionSizes(rank);
     for (unsigned int i = 0; i < rank; ++i)
@@ -68,7 +84,12 @@
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "PadLayer");
 }
diff --git a/src/armnn/layers/PermuteLayer.cpp b/src/armnn/layers/PermuteLayer.cpp
index f8803a1..3d3efc3 100644
--- a/src/armnn/layers/PermuteLayer.cpp
+++ b/src/armnn/layers/PermuteLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -37,7 +37,12 @@
 
 std::vector<TensorShape> PermuteLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 1);
+    if (inputShapes.size() != 1)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"1\".");
+    }
+
     const TensorShape& inShape = inputShapes[0];
     return std::vector<TensorShape> ({armnnUtils::Permuted(inShape, m_Param.m_DimMappings)});
 }
@@ -52,7 +57,12 @@
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "PermuteLayer");
 }
diff --git a/src/armnn/layers/Pooling2dLayer.cpp b/src/armnn/layers/Pooling2dLayer.cpp
index e423b8b..1003867 100644
--- a/src/armnn/layers/Pooling2dLayer.cpp
+++ b/src/armnn/layers/Pooling2dLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -39,12 +39,20 @@
 
 std::vector<TensorShape> Pooling2dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 1);
+    if (inputShapes.size() != 1)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"1\".");
+    }
+
     const TensorShape& inputShape = inputShapes[0];
     const DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;
 
     // If we support multiple batch dimensions in the future, then this assert will need to change.
-    ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Pooling2dLayer will always have 4D input.");
+    if (inputShape.GetNumDimensions() != 4)
+    {
+        throw armnn::Exception("Pooling2dLayer will always have 4D input.");
+    }
 
     unsigned int inWidth = inputShape[dimensionIndices.GetWidthIndex()];
     unsigned int inHeight = inputShape[dimensionIndices.GetHeightIndex()];
@@ -56,8 +64,10 @@
     unsigned int outHeight = 1;
     if (!isGlobalPooling)
     {
-        ARMNN_ASSERT_MSG(m_Param.m_StrideX!=0 && m_Param.m_StrideY!=0,
-                         "Stride can only be zero when performing global pooling");
+        if (!m_Param.m_StrideX || !m_Param.m_StrideY)
+        {
+            throw armnn::Exception("Stride can only be zero when performing global pooling");
+        }
 
         auto CalcSize = [](auto inSize, auto lowPad, auto highPad, auto poolSize, auto stride, auto outputShapeRounding)
             {
@@ -74,7 +84,7 @@
                         size = static_cast<unsigned int>(floor(div)) + 1;
                         break;
                     default:
-                        ARMNN_ASSERT_MSG(false, "Unsupported Output Shape Rounding");
+                        throw armnn::Exception("Unsupported Output Shape Rounding");
                 }
 
                 // MakeS sure that border operations will start from inside the input and not the padded area.
@@ -112,7 +122,12 @@
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Pooling2dLayer");
 }
diff --git a/src/armnn/layers/Pooling3dLayer.cpp b/src/armnn/layers/Pooling3dLayer.cpp
index ec1ec80..0506efa 100644
--- a/src/armnn/layers/Pooling3dLayer.cpp
+++ b/src/armnn/layers/Pooling3dLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -39,12 +39,20 @@
 
 std::vector<TensorShape> Pooling3dLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 1);
+    if (inputShapes.size() != 1)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"1\".");
+    }
+
     const TensorShape& inputShape = inputShapes[0];
     const DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;
 
     // If we support multiple batch dimensions in the future, then this assert will need to change.
-    ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 5, "Pooling3dLayer will always have 5D input.");
+    if (inputShape.GetNumDimensions() != 5)
+    {
+        throw armnn::Exception("Pooling3dLayer will always have 5D input.");
+    }
 
     unsigned int inWidth = inputShape[dimensionIndices.GetWidthIndex()];
     unsigned int inHeight = inputShape[dimensionIndices.GetHeightIndex()];
@@ -58,8 +66,10 @@
     unsigned int outDepth = 1;
     if (!isGlobalPooling)
     {
-        ARMNN_ASSERT_MSG(m_Param.m_StrideX!=0 && m_Param.m_StrideY!=0 && m_Param.m_StrideZ!=0,
-                         "Stride can only be zero when performing global pooling");
+        if (!m_Param.m_StrideX || !m_Param.m_StrideY || !m_Param.m_StrideZ)
+        {
+            throw armnn::Exception("Stride can only be zero when performing global pooling");
+        }
 
         auto CalcSize = [](auto inSize, auto lowPad, auto highPad, auto poolSize, auto stride, auto outputShapeRounding)
             {
@@ -76,7 +86,7 @@
                         size = static_cast<unsigned int>(floor(div)) + 1;
                         break;
                     default:
-                        ARMNN_ASSERT_MSG(false, "Unsupported Output Shape Rounding");
+                        throw armnn::Exception("Unsupported Output Shape Rounding");
                 }
 
                 // Makes sure that border operations will start from inside the input and not the padded area.
@@ -116,7 +126,12 @@
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "Pooling3dLayer");
 }
diff --git a/src/armnn/layers/PreluLayer.cpp b/src/armnn/layers/PreluLayer.cpp
index a302640..874ee6b 100644
--- a/src/armnn/layers/PreluLayer.cpp
+++ b/src/armnn/layers/PreluLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -37,7 +37,11 @@
 
 std::vector<TensorShape> PreluLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 2);
+    if (inputShapes.size() != 2)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"2\".");
+    }
 
     const TensorShape& inputShape = inputShapes[0];
     const TensorShape& alphaShape = inputShapes[1];
@@ -45,8 +49,16 @@
     const unsigned int inputShapeDimensions = inputShape.GetNumDimensions();
     const unsigned int alphaShapeDimensions = alphaShape.GetNumDimensions();
 
-    ARMNN_ASSERT(inputShapeDimensions > 0);
-    ARMNN_ASSERT(alphaShapeDimensions > 0);
+    if (inputShapeDimensions == 0)
+    {
+        throw armnn::Exception("inputShapeDimensions must be greater than 0.");
+    }
+
+    if (alphaShapeDimensions == 0)
+    {
+        throw armnn::Exception("alphaShapeDimensions must not be zero (\""
+                              + std::to_string(alphaShapeDimensions) + "\")");
+    }
 
     // The size of the output is the maximum size along each dimension of the input operands,
     // it starts with the trailing dimensions, and works its way forward
@@ -66,8 +78,10 @@
         unsigned int alphaDimension = alphaShape[armnn::numeric_cast<unsigned int>(alphaShapeIndex)];
 
         // Check that the inputs are broadcast compatible
-        ARMNN_ASSERT_MSG(inputDimension == alphaDimension || inputDimension == 1 || alphaDimension == 1,
-                         "PreluLayer: Dimensions should either match or one should be of size 1");
+        if (inputDimension != alphaDimension && inputDimension != 1 && alphaDimension != 1)
+        {
+            throw armnn::Exception("PreluLayer: Dimensions should either match or one should be of size 1");
+        }
 
         outputShape[outputShapeIndex] = std::max(inputDimension, alphaDimension);
 
@@ -111,7 +125,12 @@
         GetInputSlot(1).GetTensorInfo().GetShape()
     });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "PreluLayer");
 }
diff --git a/src/armnn/layers/QLstmLayer.cpp b/src/armnn/layers/QLstmLayer.cpp
index eeb01db..e98deb6 100644
--- a/src/armnn/layers/QLstmLayer.cpp
+++ b/src/armnn/layers/QLstmLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "QLstmLayer.hpp"
@@ -152,7 +152,11 @@
 
 std::vector<TensorShape> QLstmLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 3);
+    if (inputShapes.size() != 3)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"3\".");
+    }
 
     // Get input values for validation
     unsigned int batchSize = inputShapes[0][0];
@@ -182,70 +186,147 @@
         GetInputSlot(2).GetTensorInfo().GetShape()  // previousCellStateIn
     });
 
-    ARMNN_ASSERT(inferredShapes.size() == 3);
+    if (inferredShapes.size() != 3)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " element(s) - should only have 3.");
+    }
 
     // Check if the weights are nullptr for basic params
-    ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToForgetWeights != nullptr,
-            "QLstmLayer: m_BasicParameters.m_InputToForgetWeights should not be null.");
-    ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToCellWeights != nullptr,
-            "QLstmLayer: m_BasicParameters.m_InputToCellWeights should not be null.");
-    ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToOutputWeights != nullptr,
-            "QLstmLayer: m_BasicParameters.m_InputToOutputWeights should not be null.");
-    ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToForgetWeights != nullptr,
-            "QLstmLayer: m_BasicParameters.m_RecurrentToForgetWeights should not be null.");
-    ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToCellWeights != nullptr,
-            "QLstmLayer: m_BasicParameters.m_RecurrentToCellWeights should not be null.");
-    ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToOutputWeights != nullptr,
-            "QLstmLayer: m_BasicParameters.m_RecurrentToOutputWeights should not be null.");
-    ARMNN_ASSERT_MSG(m_BasicParameters.m_ForgetGateBias != nullptr,
-            "QLstmLayer: m_BasicParameters.m_ForgetGateBias should not be null.");
-    ARMNN_ASSERT_MSG(m_BasicParameters.m_CellBias != nullptr,
-            "QLstmLayer: m_BasicParameters.m_CellBias should not be null.");
-    ARMNN_ASSERT_MSG(m_BasicParameters.m_OutputGateBias != nullptr,
-            "QLstmLayer: m_BasicParameters.m_OutputGateBias should not be null.");
+    if (!m_BasicParameters.m_InputToForgetWeights)
+    {
+        throw armnn::LayerValidationException("QLstmLayer: "
+                                              "m_BasicParameters.m_InputToForgetWeights should not be null.");
+    }
+
+    if (!m_BasicParameters.m_InputToCellWeights)
+    {
+        throw armnn::LayerValidationException("QLstmLayer: "
+                                              "m_BasicParameters.m_InputToCellWeights should not be null.");
+    }
+
+    if (!m_BasicParameters.m_InputToOutputWeights)
+    {
+        throw armnn::LayerValidationException("QLstmLayer: "
+                                              "m_BasicParameters.m_InputToOutputWeights should not be null.");
+    }
+
+    if (!m_BasicParameters.m_RecurrentToForgetWeights)
+    {
+        throw armnn::LayerValidationException("QLstmLayer: "
+                                              "m_BasicParameters.m_RecurrentToForgetWeights should not be null.");
+    }
+
+    if (!m_BasicParameters.m_RecurrentToCellWeights)
+    {
+        throw armnn::LayerValidationException("QLstmLayer: "
+                                              "m_BasicParameters.m_RecurrentToCellWeights should not be null.");
+    }
+
+    if (!m_BasicParameters.m_RecurrentToOutputWeights)
+    {
+        throw armnn::LayerValidationException("QLstmLayer: "
+                                              "m_BasicParameters.m_RecurrentToOutputWeights should not be null.");
+    }
+
+    if (!m_BasicParameters.m_ForgetGateBias)
+    {
+        throw armnn::LayerValidationException("QLstmLayer: "
+                                              "m_BasicParameters.m_ForgetGateBias should not be null.");
+    }
+
+    if (!m_BasicParameters.m_CellBias)
+    {
+        throw armnn::LayerValidationException("QLstmLayer: "
+                                              "m_BasicParameters.m_CellBias should not be null.");
+    }
+
+    if (!m_BasicParameters.m_OutputGateBias)
+    {
+        throw armnn::LayerValidationException("QLstmLayer: "
+                                              "m_BasicParameters.m_OutputGateBias should not be null.");
+    }
 
     if (!m_Param.m_CifgEnabled)
     {
-        ARMNN_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights != nullptr,
-                "QLstmLayer: m_CifgParameters.m_InputToInputWeights should not be null.");
-        ARMNN_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights != nullptr,
-                "QLstmLayer: m_CifgParameters.m_RecurrentToInputWeights should not be null.");
-        ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr,
-                "QLstmLayer: m_CifgParameters.m_InputGateBias should not be null.");
+        if (!m_CifgParameters.m_InputToInputWeights)
+        {
+            throw armnn::LayerValidationException("QLstmLayer: "
+                                                  "m_CifgParameters.m_InputToInputWeights should not be null.");
+        }
+
+        if (!m_CifgParameters.m_RecurrentToInputWeights)
+        {
+            throw armnn::LayerValidationException("QLstmLayer: "
+                                                  "m_CifgParameters.m_RecurrentToInputWeights should not be null.");
+        }
+
+        if (!m_CifgParameters.m_InputGateBias)
+        {
+            throw armnn::LayerValidationException("QLstmLayer: "
+                                                  "m_CifgParameters.m_InputGateBias should not be null.");
+        }
 
         ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "QLstmLayer");
     }
     else
     {
-        ARMNN_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights == nullptr,
-                "QLstmLayer: m_CifgParameters.m_InputToInputWeights should not have a value when CIFG is enabled.");
-        ARMNN_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights == nullptr,
-                "QLstmLayer: m_CifgParameters.m_RecurrentToInputWeights should "
-                             "not have a value when CIFG is enabled.");
-        ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr,
-                "QLstmLayer: m_CifgParameters.m_InputGateBias should not have a value when CIFG is enabled.");
+        if (m_CifgParameters.m_InputToInputWeights)
+        {
+                throw armnn::LayerValidationException("QLstmLayer: "
+                                                      "m_CifgParameters.m_InputToInputWeights "
+                                                      "should not have a value when CIFG is enabled.");
+        }
+
+        if (m_CifgParameters.m_RecurrentToInputWeights)
+        {
+                throw armnn::LayerValidationException("QLstmLayer: "
+                                                      "m_CifgParameters.m_RecurrentToInputWeights "
+                                                      "should not have a value when CIFG is enabled.");
+        }
+
+        if (m_CifgParameters.m_InputGateBias)
+        {
+                throw armnn::LayerValidationException("QLstmLayer: "
+                                                      "m_CifgParameters.m_InputGateBias "
+                                                      "should not have a value when CIFG is enabled.");
+        }
 
         ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "QLstmLayer");
     }
 
     if (m_Param.m_ProjectionEnabled)
     {
-        ARMNN_ASSERT_MSG(m_ProjectionParameters.m_ProjectionWeights != nullptr,
-                         "QLstmLayer: m_ProjectionParameters.m_ProjectionWeights should not be null.");
+        if (!m_ProjectionParameters.m_ProjectionWeights)
+        {
+            throw armnn::LayerValidationException("QLstmLayer: "
+                                                  "m_ProjectionParameters.m_ProjectionWeights should not be null.");
+        }
     }
 
     if (m_Param.m_PeepholeEnabled)
     {
         if (!m_Param.m_CifgEnabled) {
-            ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToInputWeights != nullptr,
-                    "QLstmLayer: m_PeepholeParameters.m_CellToInputWeights should not be null "
-                    "when Peephole is enabled and CIFG is disabled.");
+            if (!m_PeepholeParameters.m_CellToInputWeights)
+            {
+                throw armnn::LayerValidationException("QLstmLayer: "
+                                                      "m_PeepholeParameters.m_CellToInputWeights should not be null "
+                                                      "when Peephole is enabled and CIFG is disabled.");
+            }
         }
 
-        ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToForgetWeights != nullptr,
-                         "QLstmLayer: m_PeepholeParameters.m_CellToForgetWeights should not be null.");
-        ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToOutputWeights != nullptr,
-                         "QLstmLayer: m_PeepholeParameters.m_CellToOutputWeights should not be null.");
+        if (!m_PeepholeParameters.m_CellToForgetWeights)
+        {
+            throw armnn::LayerValidationException("QLstmLayer: "
+                                                  "m_PeepholeParameters.m_CellToForgetWeights should not be null.");
+        }
+
+        if (!m_PeepholeParameters.m_CellToOutputWeights)
+        {
+            throw armnn::LayerValidationException("QLstmLayer: "
+                                                  "m_PeepholeParameters.m_CellToOutputWeights should not be null.");
+        }
     }
 
     ValidateAndCopyShape(
@@ -255,17 +336,32 @@
 
     if (m_Param.m_LayerNormEnabled)
     {
-        if(!m_Param.m_CifgEnabled)
+        if (!m_Param.m_CifgEnabled)
         {
-            ARMNN_ASSERT_MSG(m_LayerNormParameters.m_InputLayerNormWeights != nullptr,
-                             "QLstmLayer: m_LayerNormParameters.m_InputLayerNormWeights should not be null.");
+            if (!m_LayerNormParameters.m_InputLayerNormWeights)
+            {
+                throw armnn::LayerValidationException("QLstmLayer: m_LayerNormParameters.m_InputLayerNormWeights "
+                                                      "should not be null.");
+            }
         }
-        ARMNN_ASSERT_MSG(m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr,
-                         "QLstmLayer: m_LayerNormParameters.m_ForgetLayerNormWeights should not be null.");
-        ARMNN_ASSERT_MSG(m_LayerNormParameters.m_CellLayerNormWeights != nullptr,
-                         "QLstmLayer: m_LayerNormParameters.m_CellLayerNormWeights should not be null.");
-        ARMNN_ASSERT_MSG(m_LayerNormParameters.m_OutputLayerNormWeights != nullptr,
-                         "QLstmLayer: m_LayerNormParameters.m_UutputLayerNormWeights should not be null.");
+
+        if (!m_LayerNormParameters.m_ForgetLayerNormWeights)
+        {
+            throw armnn::LayerValidationException("QLstmLayer: "
+                                                  "m_LayerNormParameters.m_ForgetLayerNormWeights should not be null.");
+        }
+
+        if (!m_LayerNormParameters.m_CellLayerNormWeights)
+        {
+            throw armnn::LayerValidationException("QLstmLayer: "
+                                                  "m_LayerNormParameters.m_CellLayerNormWeights should not be null.");
+        }
+
+        if (!m_LayerNormParameters.m_OutputLayerNormWeights)
+        {
+            throw armnn::LayerValidationException("QLstmLayer: "
+                                                  "m_LayerNormParameters.m_OutputLayerNormWeights should not be null.");
+        }
     }
 }
 
diff --git a/src/armnn/layers/QuantizedLstmLayer.cpp b/src/armnn/layers/QuantizedLstmLayer.cpp
index c82e34f..ebe3207 100644
--- a/src/armnn/layers/QuantizedLstmLayer.cpp
+++ b/src/armnn/layers/QuantizedLstmLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017,2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017,2019-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "QuantizedLstmLayer.hpp"
@@ -80,7 +80,11 @@
 
 std::vector<TensorShape> QuantizedLstmLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 3);
+    if (inputShapes.size() != 3)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"3\".");
+    }
 
     // Get input values for validation
     unsigned int numBatches = inputShapes[0][0];
@@ -108,35 +112,97 @@
         GetInputSlot(2).GetTensorInfo().GetShape()  // previousOutputIn
     });
 
-    ARMNN_ASSERT(inferredShapes.size() == 2);
+    if (inferredShapes.size() != 2)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " element(s) - should only have 2.");
+    }
 
     // Check weights and bias for nullptr
-    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToInputWeights != nullptr,
-                     "QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToInputWeights should not be null.");
-    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToForgetWeights != nullptr,
-                     "QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToForgetWeights should not be null.");
-    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToCellWeights != nullptr,
-                     "QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToCellWeights should not be null.");
-    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputToOutputWeights != nullptr,
-                     "QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputToOutputWeights should not be null.");
+    if (!m_QuantizedLstmParameters.m_InputToInputWeights)
+    {
+        throw armnn::LayerValidationException("QuantizedLstmLayer: "
+                                              "m_QuantizedLstmParameters.m_InputToInputWeights "
+                                              "should not be null.");
+    }
 
-    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToInputWeights != nullptr,
-                     "QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToInputWeights should not be null.");
-    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToForgetWeights != nullptr,
-                     "QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToForgetWeights should not be null.");
-    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToCellWeights != nullptr,
-                     "QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToCellWeights should not be null.");
-    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_RecurrentToOutputWeights != nullptr,
-                     "QuantizedLstmLayer: m_QuantizedLstmParameters.m_RecurrentToOutputWeights should not be null.");
+    if (!m_QuantizedLstmParameters.m_InputToForgetWeights)
+    {
+        throw armnn::LayerValidationException("QuantizedLstmLayer: "
+                                              "m_QuantizedLstmParameters.m_InputToForgetWeights "
+                                              "should not be null.");
+    }
 
-    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_InputGateBias != nullptr,
-                     "QuantizedLstmLayer: m_QuantizedLstmParameters.m_InputGateBias should not be null.");
-    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_ForgetGateBias != nullptr,
-                     "QuantizedLstmLayer: m_QuantizedLstmParameters.m_ForgetGateBias should not be null.");
-    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_CellBias != nullptr,
-                     "QuantizedLstmLayer: m_QuantizedLstmParameters.m_CellBias should not be null.");
-    ARMNN_ASSERT_MSG(m_QuantizedLstmParameters.m_OutputGateBias != nullptr,
-                     "QuantizedLstmLayer: m_QuantizedLstmParameters.m_OutputGateBias should not be null.");
+    if (!m_QuantizedLstmParameters.m_InputToCellWeights)
+    {
+        throw armnn::LayerValidationException("QuantizedLstmLayer: "
+                                              "m_QuantizedLstmParameters.m_InputToCellWeights "
+                                              "should not be null.");
+    }
+
+    if (!m_QuantizedLstmParameters.m_InputToOutputWeights)
+    {
+        throw armnn::LayerValidationException("QuantizedLstmLayer: "
+                                              "m_QuantizedLstmParameters.m_InputToOutputWeights "
+                                              "should not be null.");
+    }
+
+    if (!m_QuantizedLstmParameters.m_RecurrentToInputWeights)
+    {
+        throw armnn::LayerValidationException("QuantizedLstmLayer: "
+                                              "m_QuantizedLstmParameters.m_RecurrentToInputWeights "
+                                              "should not be null.");
+    }
+
+    if (!m_QuantizedLstmParameters.m_RecurrentToForgetWeights)
+    {
+        throw armnn::LayerValidationException("QuantizedLstmLayer: "
+                                              "m_QuantizedLstmParameters.m_RecurrentToForgetWeights "
+                                              "should not be null.");
+    }
+
+    if (!m_QuantizedLstmParameters.m_RecurrentToCellWeights)
+    {
+        throw armnn::LayerValidationException("QuantizedLstmLayer: "
+                                              "m_QuantizedLstmParameters.m_RecurrentToCellWeights "
+                                              "should not be null.");
+    }
+
+    if (!m_QuantizedLstmParameters.m_RecurrentToOutputWeights)
+    {
+        throw armnn::LayerValidationException("QuantizedLstmLayer: "
+                                              "m_QuantizedLstmParameters.m_RecurrentToOutputWeights "
+                                              "should not be null.");
+    }
+
+    if (!m_QuantizedLstmParameters.m_InputGateBias)
+    {
+        throw armnn::LayerValidationException("QuantizedLstmLayer: "
+                                              "m_QuantizedLstmParameters.m_InputGateBias "
+                                              "should not be null.");
+    }
+
+    if (!m_QuantizedLstmParameters.m_ForgetGateBias)
+    {
+        throw armnn::LayerValidationException("QuantizedLstmLayer: "
+                                              "m_QuantizedLstmParameters.m_ForgetGateBias "
+                                              "should not be null.");
+    }
+
+    if (!m_QuantizedLstmParameters.m_CellBias)
+    {
+        throw armnn::LayerValidationException("QuantizedLstmLayer: "
+                                              "m_QuantizedLstmParameters.m_CellBias "
+                                              "should not be null.");
+    }
+
+    if (!m_QuantizedLstmParameters.m_OutputGateBias)
+    {
+        throw armnn::LayerValidationException("QuantizedLstmLayer: "
+                                              "m_QuantizedLstmParameters.m_OutputGateBias "
+                                              "should not be null.");
+    }
 
     // Check output TensorShape(s) match inferred shape
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "QuantizedLstmLayer");
diff --git a/src/armnn/layers/ReduceLayer.cpp b/src/armnn/layers/ReduceLayer.cpp
index 21095dd..bebd043 100644
--- a/src/armnn/layers/ReduceLayer.cpp
+++ b/src/armnn/layers/ReduceLayer.cpp
@@ -1,6 +1,6 @@
 //
 // Copyright © 2020 Samsung Electronics Co Ltd and Contributors. All rights reserved.
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -51,8 +51,10 @@
 
     const TensorInfo& input = GetInputSlot(0).GetTensorInfo();
 
-    ARMNN_ASSERT_MSG(input.GetNumDimensions() > 0 && input.GetNumDimensions() <= 4,
-                     "ReduceLayer: Reduce supports up to 4D input.");
+    if (auto inputDims = input.GetNumDimensions(); inputDims != std::clamp(inputDims, 1u, 4u))
+    {
+        throw armnn::LayerValidationException("ReduceLayer: Reduce supports up to 4D input.");
+    }
 
     std::vector<TensorShape> inferredShapes = InferOutputShapes( {input.GetShape() });
 
@@ -61,11 +63,18 @@
 
 std::vector<TensorShape> ReduceLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 1);
+    if (inputShapes.size() != 1)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"1\".");
+    }
+
     const TensorShape& input = inputShapes[0];
 
-    ARMNN_ASSERT_MSG(input.GetNumDimensions() > 0 && input.GetNumDimensions() <= 4,
-                     "ReduceLayer: Reduce supports up to 4D input.");
+    if (auto inputDims = input.GetNumDimensions(); inputDims != std::clamp(inputDims, 1u, 4u))
+    {
+        throw armnn::Exception("ReduceLayer: Reduce supports up to 4D input.");
+    }
 
     unsigned int rank = input.GetNumDimensions();
     unsigned int outputRank = 0;
diff --git a/src/armnn/layers/ReshapeLayer.cpp b/src/armnn/layers/ReshapeLayer.cpp
index b786f54..f6480b0 100644
--- a/src/armnn/layers/ReshapeLayer.cpp
+++ b/src/armnn/layers/ReshapeLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "ReshapeLayer.hpp"
@@ -48,8 +48,17 @@
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
-    ARMNN_ASSERT(inferredShapes[0].GetDimensionality() == Dimensionality::Specified);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
+
+    if (inferredShapes[0].GetDimensionality() != Dimensionality::Specified)
+    {
+        throw armnn::LayerValidationException("inferredShapes' dimensionality has not been specified.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ReshapeLayer");
 }
diff --git a/src/armnn/layers/ResizeLayer.cpp b/src/armnn/layers/ResizeLayer.cpp
index 734df0a..0b60db2 100644
--- a/src/armnn/layers/ResizeLayer.cpp
+++ b/src/armnn/layers/ResizeLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -38,7 +38,11 @@
 
 std::vector<TensorShape> ResizeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 1);
+    if (inputShapes.size() != 1)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"1\".");
+    }
 
     const TensorShape& inputShape = inputShapes[0];
     const DataLayoutIndexed dimensionIndices = m_Param.m_DataLayout;
@@ -70,7 +74,12 @@
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ResizeLayer");
 }
diff --git a/src/armnn/layers/ReverseV2Layer.cpp b/src/armnn/layers/ReverseV2Layer.cpp
index e1160b6..1c46b79 100644
--- a/src/armnn/layers/ReverseV2Layer.cpp
+++ b/src/armnn/layers/ReverseV2Layer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -32,7 +32,11 @@
 
 std::vector<TensorShape> ReverseV2Layer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 2);
+    if (inputShapes.size() != 2)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"2\".");
+    }
 
     const auto inputDims = inputShapes[0].GetNumDimensions();
 
@@ -59,7 +63,12 @@
         GetInputSlot(0).GetTensorInfo().GetShape(),
         GetInputSlot(1).GetTensorInfo().GetShape()});
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "ReverseV2Layer");
 }
diff --git a/src/armnn/layers/RsqrtLayer.cpp b/src/armnn/layers/RsqrtLayer.cpp
index 91f6d10..10c05c4 100644
--- a/src/armnn/layers/RsqrtLayer.cpp
+++ b/src/armnn/layers/RsqrtLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -43,7 +43,12 @@
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "RsqrtLayer");
 }
@@ -53,4 +58,4 @@
     strategy.ExecuteStrategy(this, GetParameters(), {}, GetName());
 }
 
-} // namespace armnn
\ No newline at end of file
+} // namespace armnn
diff --git a/src/armnn/layers/ShapeLayer.cpp b/src/armnn/layers/ShapeLayer.cpp
index e7e343c..d810bef 100644
--- a/src/armnn/layers/ShapeLayer.cpp
+++ b/src/armnn/layers/ShapeLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -44,15 +44,23 @@
 
     auto inferredShape = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShape.size() == 1);
+    if (inferredShape.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShape has "
+                                              + std::to_string(inferredShape.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShape[0], m_ShapeInferenceMethod, "ShapeLayer");
 }
 
 std::vector<TensorShape> ShapeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    IgnoreUnused(inputShapes);
-    ARMNN_ASSERT(inputShapes.size() == 1);
+    if (inputShapes.size() != 1)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"1\".");
+    }
 
     TensorShape outputShape({ inputShapes[0].GetNumDimensions()} );
 
diff --git a/src/armnn/layers/SliceLayer.cpp b/src/armnn/layers/SliceLayer.cpp
index a9327c6..428e672 100644
--- a/src/armnn/layers/SliceLayer.cpp
+++ b/src/armnn/layers/SliceLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -44,7 +44,12 @@
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SliceLayer");
 }
@@ -52,7 +57,12 @@
 std::vector<TensorShape> SliceLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
     IgnoreUnused(inputShapes);
-    ARMNN_ASSERT(inputShapes.size() == 1);
+
+    if (inputShapes.size() != 1)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"1\".");
+    }
 
     TensorShape outputShape(armnn::numeric_cast<unsigned int>(m_Param.m_Size.size()), m_Param.m_Size.data());
 
diff --git a/src/armnn/layers/SoftmaxLayer.cpp b/src/armnn/layers/SoftmaxLayer.cpp
index 5f68278..f0d5e4a 100644
--- a/src/armnn/layers/SoftmaxLayer.cpp
+++ b/src/armnn/layers/SoftmaxLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "SoftmaxLayer.hpp"
@@ -41,7 +41,12 @@
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SoftmaxLayer");
 }
diff --git a/src/armnn/layers/SpaceToBatchNdLayer.cpp b/src/armnn/layers/SpaceToBatchNdLayer.cpp
index 277fc44..80728d0 100644
--- a/src/armnn/layers/SpaceToBatchNdLayer.cpp
+++ b/src/armnn/layers/SpaceToBatchNdLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -71,7 +71,12 @@
     std::vector<TensorShape> inferredShapes = InferOutputShapes({
         GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SpaceToBatchNdLayer");
 }
diff --git a/src/armnn/layers/SpaceToDepthLayer.cpp b/src/armnn/layers/SpaceToDepthLayer.cpp
index c86758f..0083ad9 100644
--- a/src/armnn/layers/SpaceToDepthLayer.cpp
+++ b/src/armnn/layers/SpaceToDepthLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -43,7 +43,11 @@
 
 std::vector<TensorShape> SpaceToDepthLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 1);
+    if (inputShapes.size() != 1)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"1\".");
+    }
 
     TensorShape inputShape = inputShapes[0];
     TensorShape outputShape(inputShape);
@@ -72,7 +76,12 @@
     std::vector<TensorShape> inferredShapes = InferOutputShapes({
         GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SpaceToDepthLayer");
 }
diff --git a/src/armnn/layers/SplitterLayer.cpp b/src/armnn/layers/SplitterLayer.cpp
index f8a2ae0..8a24e0d 100644
--- a/src/armnn/layers/SplitterLayer.cpp
+++ b/src/armnn/layers/SplitterLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "SplitterLayer.hpp"
@@ -188,7 +188,10 @@
     else
     {
         ITensorHandleFactory* handleFactory = registry.GetFactory(factoryId);
-        ARMNN_ASSERT(handleFactory);
+        if (!handleFactory)
+        {
+            throw armnn::NullPointerException("handleFactory is returning a nullptr.");
+        }
         CreateTensors(registry, *handleFactory, isMemoryManaged);
     }
 }
@@ -200,8 +203,14 @@
 
 std::vector<TensorShape> SplitterLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    IgnoreUnused(inputShapes);
-    ARMNN_ASSERT(inputShapes.size() ==  m_Param.GetNumViews());
+    if (inputShapes.size() != m_Param.GetNumViews())
+    {
+        throw armnn::Exception("inputShapes' and m_NumViews' sizes do not match (\""
+                               + std::to_string(inputShapes.size()) +
+                               "\" vs \""
+                               + std::to_string(m_Param.GetNumViews()) + "\")");
+    }
+
     std::vector<TensorShape> outShapes;
     //Output shapes must match View shapes.
     for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++)
@@ -228,7 +237,13 @@
 
     auto inferredShapes = InferOutputShapes(views);
 
-    ARMNN_ASSERT(inferredShapes.size() == m_Param.GetNumViews());
+    if (inferredShapes.size() != m_Param.GetNumViews())
+    {
+        throw armnn::LayerValidationException("inferredShapes' size and m_NumViews do not match (\""
+                                              + std::to_string(inferredShapes.size()) +
+                                              "\" vs \""
+                                              + std::to_string(m_Param.GetNumViews()) + "\")");
+    }
 
     for (unsigned int viewIdx = 0; viewIdx < m_Param.GetNumViews(); viewIdx++)
     {
diff --git a/src/armnn/layers/StackLayer.cpp b/src/armnn/layers/StackLayer.cpp
index 3c5a216..ea49949 100644
--- a/src/armnn/layers/StackLayer.cpp
+++ b/src/armnn/layers/StackLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "StackLayer.hpp"
@@ -32,15 +32,19 @@
     return CloneBase<StackLayer>(graph, m_Param, GetName());
 }
 
-std::vector<TensorShape> StackLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
+std::vector<TensorShape> StackLayer::InferOutputShapes(const std::vector<TensorShape>&) const
 {
-    IgnoreUnused(inputShapes);
-
     const TensorShape& inputShape = m_Param.m_InputShape;
     const unsigned int inputNumDimensions = inputShape.GetNumDimensions();
     const unsigned int axis = m_Param.m_Axis;
 
-    ARMNN_ASSERT(axis <= inputNumDimensions);
+    if (axis > inputNumDimensions)
+    {
+        throw armnn::Exception("axis must not be greater than input dimensions (\""
+                               + std::to_string(axis) +
+                               "\" vs \""
+                               + std::to_string(inputNumDimensions) + "\").");
+    }
 
     std::vector<unsigned int> dimensionSizes(inputNumDimensions + 1, 0);
     for (unsigned int i = 0; i < axis; ++i)
@@ -90,7 +94,12 @@
 
     auto inferredShapes = InferOutputShapes(inputShapes);
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "StackLayer");
 }
diff --git a/src/armnn/layers/StridedSliceLayer.cpp b/src/armnn/layers/StridedSliceLayer.cpp
index 16aeab5..c348951 100644
--- a/src/armnn/layers/StridedSliceLayer.cpp
+++ b/src/armnn/layers/StridedSliceLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2018-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2018-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "StridedSliceLayer.hpp"
@@ -47,7 +47,11 @@
 std::vector<TensorShape> StridedSliceLayer::InferOutputShapes(
     const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 1);
+    if (inputShapes.size() != 1)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"1\".");
+    }
 
     TensorShape inputShape = inputShapes[0];
     std::vector<unsigned int> outputShape;
@@ -106,7 +110,12 @@
 
     auto inferredShapes = InferOutputShapes({GetInputSlot(0).GetTensorInfo().GetShape()});
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "StridedSliceLayer");
 }
diff --git a/src/armnn/layers/SwitchLayer.cpp b/src/armnn/layers/SwitchLayer.cpp
index 031dcec..afb2753 100644
--- a/src/armnn/layers/SwitchLayer.cpp
+++ b/src/armnn/layers/SwitchLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "SwitchLayer.hpp"
@@ -37,14 +37,22 @@
 
     VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
-    ARMNN_ASSERT_MSG(GetNumOutputSlots() == 2, "SwitchLayer: The layer should return 2 outputs.");
+    if (GetNumOutputSlots() != 2)
+    {
+        throw armnn::LayerValidationException("SwitchLayer: The layer should return 2 outputs.");
+    }
 
     // Assuming first input is the Input and second input is the Constant
     std::vector<TensorShape> inferredShapes = InferOutputShapes({
         GetInputSlot(0).GetTensorInfo().GetShape(),
         GetInputSlot(1).GetTensorInfo().GetShape()});
 
-    ARMNN_ASSERT(inferredShapes.size() == 2);
+    if (inferredShapes.size() != 2)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " element(s) - should only have 2.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "SwitchLayer");
 
diff --git a/src/armnn/layers/TileLayer.cpp b/src/armnn/layers/TileLayer.cpp
index d362900..8e07478 100644
--- a/src/armnn/layers/TileLayer.cpp
+++ b/src/armnn/layers/TileLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2023-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -31,7 +31,12 @@
 
 std::vector<TensorShape> TileLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 1);
+    if (inputShapes.size() != 1)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"1\".");
+    }
+
     const TensorShape& inputShape = inputShapes[0];
 
     uint32_t numberOfDimensions = inputShape.GetNumDimensions();
@@ -64,9 +69,14 @@
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "TileLayer");
 }
 
-}
\ No newline at end of file
+}
diff --git a/src/armnn/layers/TransposeConvolution2dLayer.cpp b/src/armnn/layers/TransposeConvolution2dLayer.cpp
index 3a7e8b8..21dcf1f 100644
--- a/src/armnn/layers/TransposeConvolution2dLayer.cpp
+++ b/src/armnn/layers/TransposeConvolution2dLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2019-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -24,14 +24,20 @@
 
 std::unique_ptr<IWorkload> TransposeConvolution2dLayer::CreateWorkload(const IWorkloadFactory& factory) const
 {
-    ARMNN_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weights data should not be null.");
+    if (!m_Weight)
+    {
+        throw armnn::NullPointerException("TransposeConvolution2dLayer: Weights data should not be null.");
+    }
 
     TransposeConvolution2dQueueDescriptor descriptor;
     descriptor.m_Weight = m_Weight.get();
 
     if (m_Param.m_BiasEnabled)
     {
-        ARMNN_ASSERT_MSG(m_Bias != nullptr, "TransposeConvolution2dLayer: Bias data should not be null.");
+        if (!m_Bias)
+        {
+            throw armnn::NullPointerException("TransposeConvolution2dLayer: Bias data should not be null.");
+        }
         descriptor.m_Bias = m_Bias.get();
     }
 
@@ -57,11 +63,19 @@
 std::vector<TensorShape> TransposeConvolution2dLayer::InferOutputShapes(
     const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 2);
+    if (inputShapes.size() != 2)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"2\".");
+    }
+
     const TensorShape& inputShape  = inputShapes[0];
     const TensorShape& kernelShape = inputShapes[1];
 
-    ARMNN_ASSERT_MSG(inputShape.GetNumDimensions() == 4, "Transpose convolutions will always have 4D input");
+    if (inputShape.GetNumDimensions() != 4)
+    {
+        throw armnn::Exception("Transpose convolutions will always have 4D input");
+    }
 
     DataLayoutIndexed dataLayoutIndex(m_Param.m_DataLayout);
 
@@ -95,7 +109,10 @@
 
     VerifyShapeInferenceType(outputShape, m_ShapeInferenceMethod);
 
-    ARMNN_ASSERT_MSG(m_Weight != nullptr, "TransposeConvolution2dLayer: Weight data cannot be null.");
+    if (!m_Weight)
+    {
+        throw armnn::LayerValidationException("TransposeConvolution2dLayer: Weight data cannot be null.");
+    }
 
     std::vector<TensorShape> expectedOutputShape;
     std::vector<TensorShape> outputShapeGivenAsInput;
@@ -103,7 +120,12 @@
     expectedOutputShape = InferOutputShapes({GetInputSlot(0).GetTensorInfo().GetShape(),
                                              m_Weight->GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(expectedOutputShape.size() == 1);
+    if (expectedOutputShape.size() != 1)
+    {
+        throw armnn::LayerValidationException("expectedOutputShape' size is "
+                                              + std::to_string(expectedOutputShape.size()) +
+                                              " - should be \"1\".");
+    }
 
     // If output_shape was specified then use it rather than calculate an inferred output shape.
     if (m_Param.m_OutputShapeEnabled)
@@ -112,10 +134,19 @@
             m_Param.m_OutputShape.data());
         outputShapeGivenAsInput.push_back(shapeAsTensorShape);
 
-        ARMNN_ASSERT(outputShapeGivenAsInput.size() == 1);
-        ARMNN_ASSERT_MSG(expectedOutputShape == outputShapeGivenAsInput,
-                         "TransposeConvolution2dLayer: output calculated by InferOutputShapes and "
-                         "the output given as an input parameter to the layer are not matching");
+        if (outputShapeGivenAsInput.size() != 1)
+        {
+            throw armnn::LayerValidationException("outputShapeGivenAsInput' size is "
+                                                  + std::to_string(outputShapeGivenAsInput.size()) +
+                                                  " - should be \"1\".");
+        }
+
+        if (expectedOutputShape != outputShapeGivenAsInput)
+        {
+            throw armnn::LayerValidationException("TransposeConvolution2dLayer: "
+                                                  "output calculated by InferOutputShapes and the output given "
+                                                  "as an input parameter to the layer are not matching");
+        }
     }
 
     ValidateAndCopyShape(outputShape, expectedOutputShape[0], m_ShapeInferenceMethod, "TransposeConvolution2dLayer");
diff --git a/src/armnn/layers/TransposeLayer.cpp b/src/armnn/layers/TransposeLayer.cpp
index 58e570a..f0b7139 100644
--- a/src/armnn/layers/TransposeLayer.cpp
+++ b/src/armnn/layers/TransposeLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -37,7 +37,12 @@
 
 std::vector<TensorShape> TransposeLayer::InferOutputShapes(const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 1);
+    if (inputShapes.size() != 1)
+    {
+        throw armnn::Exception("inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                               "\" - should be \"1\".");
+    }
+
     const TensorShape& inShape = inputShapes[0];
     return std::vector<TensorShape> ({armnnUtils::TransposeTensorShape(inShape, m_Param.m_DimMappings)});
 }
@@ -52,7 +57,12 @@
 
     auto inferredShapes = InferOutputShapes({ GetInputSlot(0).GetTensorInfo().GetShape() });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "TransposeLayer");
 }
diff --git a/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp b/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
index 75f027e..68a0d8e 100644
--- a/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
+++ b/src/armnn/layers/UnidirectionalSequenceLstmLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2021-2023 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2021-2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "UnidirectionalSequenceLstmLayer.hpp"
@@ -150,7 +150,9 @@
 std::vector<TensorShape> UnidirectionalSequenceLstmLayer::InferOutputShapes(
     const std::vector<TensorShape>& inputShapes) const
 {
-    ARMNN_ASSERT(inputShapes.size() == 3);
+    ARMNN_THROW_INVALIDARG_MSG_IF_FALSE(inputShapes.size() == 3,
+                                        "inputShapes' size is \"" + std::to_string(inputShapes.size()) +
+                                        "\" - should be \"3\".");
 
     // Get input values for validation
     unsigned int outputSize = inputShapes[1][1];
@@ -181,94 +183,178 @@
         GetInputSlot(2).GetTensorInfo().GetShape()
     });
 
-    ARMNN_ASSERT(inferredShapes.size() == 1);
+    if (inferredShapes.size() != 1)
+    {
+        throw armnn::LayerValidationException("inferredShapes has "
+                                              + std::to_string(inferredShapes.size()) +
+                                              " elements - should only have 1.");
+    }
 
     // Check if the weights are nullptr
-    ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToForgetWeights != nullptr,
-                     "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_InputToForgetWeights should not be null.");
-    ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToCellWeights != nullptr,
-                     "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_InputToCellWeights should not be null.");
-    ARMNN_ASSERT_MSG(m_BasicParameters.m_InputToOutputWeights != nullptr,
-                     "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_InputToOutputWeights should not be null.");
-    ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToForgetWeights != nullptr,
-                     "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_RecurrentToForgetWeights "
-                     "should not be null.");
-    ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToCellWeights != nullptr,
-                     "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_RecurrentToCellWeights should not be null.");
-    ARMNN_ASSERT_MSG(m_BasicParameters.m_RecurrentToOutputWeights != nullptr,
-                     "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_RecurrentToOutputWeights "
-                     "should not be null.");
-    ARMNN_ASSERT_MSG(m_BasicParameters.m_ForgetGateBias != nullptr,
-                     "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_ForgetGateBias should not be null.");
-    ARMNN_ASSERT_MSG(m_BasicParameters.m_CellBias != nullptr,
-                     "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_CellBias should not be null.");
-    ARMNN_ASSERT_MSG(m_BasicParameters.m_OutputGateBias != nullptr,
-                     "UnidirectionalSequenceLstmLayer: m_BasicParameters.m_OutputGateBias should not be null.");
+    if (!m_BasicParameters.m_InputToForgetWeights)
+    {
+        throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+                                              "m_BasicParameters.m_InputToForgetWeights should not be null.");
+    }
+
+    if (!m_BasicParameters.m_InputToCellWeights)
+    {
+        throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+                                              "m_BasicParameters.m_InputToCellWeights should not be null.");
+    }
+
+    if (!m_BasicParameters.m_InputToOutputWeights)
+    {
+        throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+                                              "m_BasicParameters.m_InputToOutputWeights should not be null.");
+    }
+
+    if (!m_BasicParameters.m_RecurrentToForgetWeights)
+    {
+        throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+                                              "m_BasicParameters.m_RecurrentToForgetWeights should not be null.");
+    }
+
+    if (!m_BasicParameters.m_RecurrentToCellWeights)
+    {
+        throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+                                              "m_BasicParameters.m_RecurrentToCellWeights should not be null.");
+    }
+
+    if (!m_BasicParameters.m_RecurrentToOutputWeights)
+    {
+        throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+                                              "m_BasicParameters.m_RecurrentToOutputWeights should not be null.");
+    }
+
+    if (!m_BasicParameters.m_ForgetGateBias)
+    {
+        throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+                                              "m_BasicParameters.m_ForgetGateBias should not be null.");
+    }
+
+    if (!m_BasicParameters.m_CellBias)
+    {
+        throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+                                              "m_BasicParameters.m_CellBias should not be null.");
+    }
+
+    if (!m_BasicParameters.m_OutputGateBias)
+    {
+        throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+                                              "m_BasicParameters.m_OutputGateBias should not be null.");
+    }
 
     if (!m_Param.m_CifgEnabled)
     {
-        ARMNN_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights != nullptr,
-                         "UnidirectionalSequenceLstmLayer: m_CifgParameters.m_InputToInputWeights should not be null.");
-        ARMNN_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights != nullptr,
-                         "UnidirectionalSequenceLstmLayer: m_CifgParameters.m_RecurrentToInputWeights "
-                         "should not be null.");
-        ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias != nullptr,
-                         "UnidirectionalSequenceLstmLayer: m_CifgParameters.m_InputGateBias should not be null.");
+        if (!m_CifgParameters.m_InputToInputWeights)
+        {
+            throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+                                                  "m_CifgParameters.m_InputToInputWeights should not be null.");
+        }
+
+        if (!m_CifgParameters.m_RecurrentToInputWeights)
+        {
+            throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+                                                  "m_CifgParameters.m_RecurrentToInputWeights should not be null.");
+        }
+
+        if (!m_CifgParameters.m_InputGateBias)
+        {
+            throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+                                                  "m_CifgParameters.m_InputGateBias should not be null.");
+        }
     }
     else
     {
-        ARMNN_ASSERT_MSG(m_CifgParameters.m_InputToInputWeights == nullptr,
-            "UnidirectionalSequenceLstmLayer: m_CifgParameters.m_InputToInputWeights should not have a value "
-            "when CIFG is enabled.");
-        ARMNN_ASSERT_MSG(m_CifgParameters.m_RecurrentToInputWeights == nullptr,
-            "UnidirectionalSequenceLstmLayer: m_CifgParameters.m_RecurrentToInputWeights should not have a value "
-            "when CIFG is enabled.");
-        ARMNN_ASSERT_MSG(m_CifgParameters.m_InputGateBias == nullptr,
-            "UnidirectionalSequenceLstmLayer: m_CifgParameters.m_InputGateBias should not have a value "
-            "when CIFG is enabled.");
+        if (m_CifgParameters.m_InputToInputWeights)
+        {
+            throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+                                                  "m_CifgParameters.m_InputToInputWeights should not have a value "
+                                                  "when CIFG is enabled.");
+        }
+
+        if (m_CifgParameters.m_RecurrentToInputWeights)
+        {
+            throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+                                                  "m_CifgParameters.m_RecurrentToInputWeights should not have a value "
+                                                  "when CIFG is enabled.");
+        }
+
+        if (m_CifgParameters.m_InputGateBias)
+        {
+            throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+                                                  "m_CifgParameters.m_InputGateBias should not have a value "
+                                                  "when CIFG is enabled.");
+        }
     }
 
     if (m_Param.m_ProjectionEnabled)
     {
-        ARMNN_ASSERT_MSG(m_ProjectionParameters.m_ProjectionWeights != nullptr,
-                         "UnidirectionalSequenceLstmLayer: m_ProjectionParameters.m_ProjectionWeights "
-                         "should not be null.");
+        if (!m_ProjectionParameters.m_ProjectionWeights)
+        {
+            throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+                                                  "m_ProjectionParameters.m_ProjectionWeights should not be null.");
+        }
     }
 
     if (m_Param.m_PeepholeEnabled)
     {
         if (!m_Param.m_CifgEnabled)
         {
-            ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToInputWeights != nullptr,
-                             "UnidirectionalSequenceLstmLayer: m_PeepholeParameters.m_CellToInputWeights "
-                             "should not be null "
-                             "when Peephole is enabled and CIFG is disabled.");
+            if (!m_PeepholeParameters.m_CellToInputWeights)
+            {
+                throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+                                                      "m_PeepholeParameters.m_CellToInputWeights should not be null "
+                                                      "when Peephole is enabled and CIFG is disabled.");
+            }
         }
-        ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToForgetWeights != nullptr,
-                         "UnidirectionalSequenceLstmLayer: m_PeepholeParameters.m_CellToForgetWeights "
-                         "should not be null.");
-        ARMNN_ASSERT_MSG(m_PeepholeParameters.m_CellToOutputWeights != nullptr,
-                         "UnidirectionalSequenceLstmLayer: m_PeepholeParameters.m_CellToOutputWeights "
-                         "should not be null.");
+
+        if (!m_PeepholeParameters.m_CellToForgetWeights)
+        {
+            throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+                                                  "m_PeepholeParameters.m_CellToForgetWeights should not be null.");
+        }
+
+        if (!m_PeepholeParameters.m_CellToOutputWeights)
+        {
+            throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+                                                  "m_PeepholeParameters.m_CellToOutputWeights should not be null.");
+        }
     }
 
     if (m_Param.m_LayerNormEnabled)
     {
         if(!m_Param.m_CifgEnabled)
         {
-            ARMNN_ASSERT_MSG(m_LayerNormParameters.m_InputLayerNormWeights != nullptr,
-                             "UnidirectionalSequenceLstmLayer: m_LayerNormParameters.m_inputLayerNormWeights "
-                             "should not be null.");
+            if (!m_LayerNormParameters.m_InputLayerNormWeights)
+            {
+                throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+                                                      "m_LayerNormParameters.m_inputLayerNormWeights "
+                                                      "should not be null.");
+            }
         }
-        ARMNN_ASSERT_MSG(m_LayerNormParameters.m_ForgetLayerNormWeights != nullptr,
-                         "UnidirectionalSequenceLstmLayer: m_LayerNormParameters.m_forgetLayerNormWeights "
-                         "should not be null.");
-        ARMNN_ASSERT_MSG(m_LayerNormParameters.m_CellLayerNormWeights != nullptr,
-                         "UnidirectionalSequenceLstmLayer: m_LayerNormParameters.m_cellLayerNormWeights "
-                         "should not be null.");
-        ARMNN_ASSERT_MSG(m_LayerNormParameters.m_OutputLayerNormWeights != nullptr,
-                         "UnidirectionalSequenceLstmLayer: m_LayerNormParameters.m_outputLayerNormWeights "
-                         "should not be null.");
+
+        if (!m_LayerNormParameters.m_ForgetLayerNormWeights)
+        {
+            throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+                                                  "m_LayerNormParameters.m_forgetLayerNormWeights "
+                                                  "should not be null.");
+        }
+
+        if (!m_LayerNormParameters.m_CellLayerNormWeights)
+        {
+            throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+                                                  "m_LayerNormParameters.m_cellLayerNormWeights "
+                                                  "should not be null.");
+        }
+
+        if (!m_LayerNormParameters.m_OutputLayerNormWeights)
+        {
+            throw armnn::LayerValidationException("UnidirectionalSequenceLstmLayer: "
+                                                  "m_LayerNormParameters.m_outputLayerNormWeights "
+                                                  "should not be null.");
+        }
     }
 
     ValidateAndCopyShape(outputShape, inferredShapes[0], m_ShapeInferenceMethod, "UnidirectionalSequenceLstmLayer");
diff --git a/src/armnn/layers/UnmapLayer.cpp b/src/armnn/layers/UnmapLayer.cpp
index cfbde21..a62536f 100644
--- a/src/armnn/layers/UnmapLayer.cpp
+++ b/src/armnn/layers/UnmapLayer.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020,2024 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 #include "UnmapLayer.hpp"
@@ -38,7 +38,11 @@
 {
     // validates that the input is connected.
     VerifyLayerConnections(1, CHECK_LOCATION());
-    ARMNN_ASSERT(GetNumOutputSlots() == 0);
+    if (GetNumOutputSlots() != 0)
+    {
+        throw armnn::LayerValidationException("Output slots must be \"0\" - currently \""
+                                              + std::to_string(GetNumOutputSlots()) + "\".");
+    }
 }
 
 void UnmapLayer::ExecuteStrategy(IStrategy& strategy) const