IVGCVSW-7855 Remove ASSERTs from shim code

Signed-off-by: Jim Flynn <jim.flynn@arm.com>
Change-Id: I8b34e74800ebdb43e1b3f996eacd6c3360a331eb
diff --git a/shim/sl/canonical/CanonicalUtils.cpp b/shim/sl/canonical/CanonicalUtils.cpp
index 08a728c..5afd8bd 100644
--- a/shim/sl/canonical/CanonicalUtils.cpp
+++ b/shim/sl/canonical/CanonicalUtils.cpp
@@ -366,7 +366,10 @@
         return;
     }
 
-    ARMNN_ASSERT(profiler);
+    if (profiler == nullptr)
+    {
+        throw armnn::InvalidArgumentException("DumpJsonProfilingIfRequired: pointer to profiler handed in is null");
+    }
 
     // Set the name of the output profiling file.
     fs::path dumpPath = dumpDir;
@@ -499,7 +502,7 @@
     return !tensorInfo.GetShape().AreAllDimensionsSpecified();
 }
 
-bool AreDynamicTensorsSupported() 
+bool AreDynamicTensorsSupported()
 {
     return true;
 }
diff --git a/shim/sl/canonical/ConversionUtils.cpp b/shim/sl/canonical/ConversionUtils.cpp
index 8c31a92..ebe3bc4 100644
--- a/shim/sl/canonical/ConversionUtils.cpp
+++ b/shim/sl/canonical/ConversionUtils.cpp
@@ -4,6 +4,7 @@
 //
 
 #include "ConversionUtils.hpp"
+#include <armnn/Exceptions.hpp>
 #include <armnnUtils/Permute.hpp>
 
 ///
@@ -31,7 +32,10 @@
 
 void LayerInputHandle::Connect(armnn::IInputSlot& inputSlot)
 {
-    ARMNN_ASSERT(IsValid());
+    if (!IsValid())
+    {
+        throw armnn::Exception("cannot invoke Connect on an invalid LayerInputHandle");
+    }
     if (m_OutputSlot)
     {
         m_OutputSlot->Connect(inputSlot);
@@ -40,7 +44,10 @@
 
 void LayerInputHandle::Disconnect(armnn::IInputSlot& inputSlot)
 {
-    ARMNN_ASSERT(IsValid());
+    if (!IsValid())
+    {
+        throw armnn::Exception("cannot invoke Disconnect on an invalid LayerInputHandle");
+    }
     if (m_OutputSlot)
     {
         m_OutputSlot->Disconnect(inputSlot);
@@ -643,8 +650,11 @@
     }
 
     armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
+    if (layer == nullptr)
+    {
+        throw armnn::NullPointerException("failed to add activation layer to network");
+    }
     layer->SetBackendId(setBackend);
-    ARMNN_ASSERT(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
     return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
@@ -692,7 +702,10 @@
         }
 
         const Operand* operand = GetInputOperand(operationIt, 0, model);
-        ARMNN_ASSERT(operand);
+        if (operand == nullptr)
+        {
+            throw armnn::Exception("failed to get input operand 0");
+        }
 
         if (!IsQSymm8(*operand))
         {
@@ -716,7 +729,10 @@
         for (size_t i = 0; i < dequantizedBufferLength; ++i)
         {
             float* dstPtr = dequantizedBuffer.get();
-            ARMNN_ASSERT(dstPtr);
+            if (dstPtr == nullptr)
+            {
+                throw armnn::NullPointerException("dequantizedBuffer unique pointer is null");
+            }
             *dstPtr++ = quantizedBuffer[i] * quantizationScale;
         }
 
@@ -892,7 +908,10 @@
                                             armnn::IConnectableLayer* prevLayer,
                                             ConversionData& data)
 {
-    ARMNN_ASSERT(prevLayer->GetNumOutputSlots() == 1);
+    if (prevLayer->GetNumOutputSlots() != 1)
+    {
+        throw armnn::Exception("ProcessActivation: previous layer does not have a single output slot");
+    }
 
     prevLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
 
diff --git a/shim/sl/canonical/ConversionUtils.hpp b/shim/sl/canonical/ConversionUtils.hpp
index d4b4d92..6ee3dbc 100644
--- a/shim/sl/canonical/ConversionUtils.hpp
+++ b/shim/sl/canonical/ConversionUtils.hpp
@@ -9,6 +9,7 @@
 
 #include <armnn/ArmNN.hpp>
 #include <armnn/BackendHelper.hpp>
+#include <armnn/Exceptions.hpp>
 #include <armnn/utility/Assert.hpp>
 #include <armnn/utility/IgnoreUnused.hpp>
 #include <armnn/utility/NumericCast.hpp>
@@ -23,6 +24,7 @@
 #include <armnnUtils/FloatingPointComparison.hpp>
 
 #include <log/log.h>
+#include <sstream>
 #include <vector>
 
 inline const android::nn::Model::Subgraph& getMainModel(const android::nn::Model& model) { return model.main; }
@@ -233,7 +235,10 @@
     reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
 
     armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
-    ARMNN_ASSERT(reshapeLayer != nullptr);
+    if (reshapeLayer == nullptr)
+    {
+        throw armnn::Exception("failed to add reshape layer to network");
+    }
 
     // Attach the input layer to the reshape layer
     inputLayer.Connect(reshapeLayer->GetInputSlot(0));
@@ -280,7 +285,10 @@
                      armnn::IConnectableLayer* startLayer,
                      ConversionData& data)
 {
-    ARMNN_ASSERT(startLayer != nullptr);
+    if (startLayer == nullptr)
+    {
+        throw armnn::InvalidArgumentException("BroadcastTensor: startLayer pointer handed in is null");
+    }
 
     const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
     const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
@@ -337,7 +345,11 @@
         return false;
     }
 
-    ARMNN_ASSERT(data.m_Network != nullptr);
+    if (data.m_Network == nullptr)
+    {
+        throw armnn::InvalidArgumentException(
+            "BroadcastTensor: the conversion data handed in has a null network pointer");
+    }
     armnn::IConnectableLayer& reshapeLayer = AddReshapeLayer(*data.m_Network, smallInputHandle, reshapedInfo);
     reshapeLayer.SetBackendId(setBackend);
 
@@ -468,7 +480,10 @@
     // Add swizzle layer
     armnn::IConnectableLayer* const layer = network.AddTransposeLayer(mappings);
 
-    ARMNN_ASSERT(layer != nullptr);
+    if (layer == nullptr)
+    {
+        throw armnn::Exception("failed to add transpose layer to network");
+    }
 
     // Connect input to swizzle layer
     input.Connect(layer->GetInputSlot(0));
@@ -596,7 +611,11 @@
                                        std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
 {
     bool needPermute = false;
-    ARMNN_ASSERT(numberOfDimensions >= 3);
+    if (numberOfDimensions < 3)
+    {
+        throw armnn::InvalidArgumentException(
+            "CreateConcatPermutationParameters: numberOfDimensions handed in cannot be less than three");
+    }
 
     // ArmNN uses Compute Library subtensors to perform concatenation
     // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
@@ -655,7 +674,14 @@
     }
 
     // Model should have been validated beforehand
-    ARMNN_ASSERT(operation.inputs[inputIndex] < getMainModel(model).operands.size());
+    if (!(operation.inputs[inputIndex] < getMainModel(model).operands.size()))
+    {
+        std::ostringstream os;
+        os << "GetInputOperand: inputIndex [" << inputIndex << "]";
+        os << " is too large. The number of main model operands is [";
+        os <<  getMainModel(model).operands.size() << "]";
+        throw armnn::InvalidArgumentException(os.str());
+    }
     return &getMainModel(model).operands[operation.inputs[inputIndex]];
 }
 
@@ -670,7 +696,14 @@
     }
 
     // Model should have been validated beforehand
-    ARMNN_ASSERT(operation.outputs[outputIndex] < getMainModel(model).operands.size());
+    if (!(operation.outputs[outputIndex] < getMainModel(model).operands.size()))
+    {
+        std::ostringstream os;
+        os << "GetOutputOperand: outputIndex [" << outputIndex << "]";
+        os << " is too large. The number of main model operands is [";
+        os <<  getMainModel(model).operands.size() << "]";
+        throw armnn::InvalidArgumentException(os.str());
+    }
 
     return &getMainModel(model).operands[operation.outputs[outputIndex]];
 }
diff --git a/shim/sl/canonical/Converter.cpp b/shim/sl/canonical/Converter.cpp
index a00a0af..4ef29a1 100644
--- a/shim/sl/canonical/Converter.cpp
+++ b/shim/sl/canonical/Converter.cpp
@@ -5,6 +5,7 @@
 
 #include "Converter.hpp"
 #include <half/half.hpp>
+#include <armnn/Exceptions.hpp>
 #include <armnnUtils/TensorUtils.hpp>
 
 namespace armnn_driver
@@ -890,7 +891,11 @@
         if (isDynamicTensor)
         {
             // Infer the output shapes of concat if outputs are type 1 dynamic
-            ARMNN_ASSERT(layer->GetOutputSlot(0).IsTensorInfoSet());
+            if (!layer->GetOutputSlot(0).IsTensorInfoSet())
+            {
+                throw armnn::Exception(
+                    "tensor info is not set on output slot, cannot process dynamic tensor after input reshape");
+            }
             if (!ValidateConcatOutputShape(inputShapes,
                                            layer->GetOutputSlot(0).GetTensorInfo().GetShape(),
                                            concatDim))
@@ -4534,8 +4539,11 @@
     }
 
     armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(desc);
+    if (layer == nullptr)
+    {
+        throw armnn::NullPointerException("failed to add Activation Layer to network");
+    }
     layer->SetBackendId(setBackend);
-    ARMNN_ASSERT(layer != nullptr);
     input.Connect(layer->GetInputSlot(0));
 
     return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);