IVGCVSW-6430 Clear up Coverity issues

 * Removed unreachable code
    * break after an if/else where both branches return (see the sketch
      after this list)
 * Removed unused operations
    * The result of the dstPtr++ post-increment was never used
 * Fixed possible overflow
    * Assigning axis from the 4-element dimensionSequence array could read
      past its end when rank exceeds 4 (also illustrated in the sketch below)
 * Removed use of old-style casts
 * Fixed spelling mistakes in error messages
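
For reference, a minimal standalone sketch of two of the patterns above; this
is not code from the driver, and Classify / DefaultAxes are made-up names used
only for illustration:

    #include <cstdint>
    #include <vector>

    // Unreachable code: both branches of the if/else return, so a break
    // placed after them can never execute and Coverity reports it as dead code.
    int Classify(int value)
    {
        switch (value)
        {
            case 0:
            {
                if (value >= 0)
                {
                    return 1;
                }
                else
                {
                    return 2;
                }
                // break;   // unreachable; this is the kind of line removed
            }
            default:
            {
                return 0;
            }
        }
    }

    // Possible overflow: assigning from a fixed 4-element array such as
    // { 0, 1, 2, 3 } reads past its end whenever rank > 4. Building the
    // sequence in a loop is safe for any rank.
    std::vector<int32_t> DefaultAxes(unsigned int rank)
    {
        std::vector<int32_t> axis;
        for (unsigned int i = 0; i < rank; ++i)
        {
            axis.push_back(static_cast<int32_t>(i));
        }
        return axis;
    }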

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: If2a7ab63fc1d200cb18b494d99a67bbddb42f0f8
diff --git a/ConversionUtils.hpp b/ConversionUtils.hpp
index 1d182fa..8f7d5b9 100644
--- a/ConversionUtils.hpp
+++ b/ConversionUtils.hpp
@@ -1276,7 +1276,6 @@
                     Fail("%s: invalid operand tensor", __func__);
                     return LayerInputHandle();
                 }
-                break;
             }
             default:
             {
@@ -2139,7 +2138,7 @@
 
     if (inputShapes.size() != inputHandles.size())
     {
-        return Fail("%s: invalid model input shapes size doesn't match input handles sise: %i != %i", __func__,
+        return Fail("%s: invalid model input shapes size doesn't match input handles size: %i != %i", __func__,
                     inputShapes.size(), inputHandles.size());
     }
 
@@ -2258,13 +2257,13 @@
 
     if (static_cast<std::size_t>(numInputSlots) != inputHandles.size())
     {
-        return Fail("%s: invalid model input slots size doesn't match input handles sise: %i != %i", __func__,
+        return Fail("%s: invalid model input slots size doesn't match input handles size: %i != %i", __func__,
                     static_cast<std::size_t>(numInputSlots), inputHandles.size());
     }
     for (int i = 0; i < numInputSlots; ++i)
     {
         // connect the input directly to the merge (concat) layer
-        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
+        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(static_cast<unsigned int>(i)));
     }
 
     // Transpose the output shape
@@ -3019,7 +3018,7 @@
             {
                 return { nullptr, 0, armnn::TensorInfo(), DequantizeStatus::INVALID_OPERAND };
             }
-            *dstPtr++ = quantizedBuffer[i] * quantizationScale;
+            *dstPtr = quantizedBuffer[i] * quantizationScale;
         }
 
         // Construct tensor info for dequantized ConstTensor
@@ -3812,13 +3811,13 @@
     // if the operand index is out of bounds.
     const HalOperand* axisOperand = GetInputOperand<HalPolicy>(operation, 1, model, false);
 
-    const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
-
     std::vector<int32_t> axis;
     if (!axisOperand)
     {
-        axis.assign(dimensionSequence,
-                    dimensionSequence + rank);
+        for (unsigned int i = 0; i < rank; ++i)
+        {
+            axis.push_back(static_cast<unsigned int>(i));
+        }
     }
     else if (!GetTensorInt32Values<HalPolicy>(*axisOperand, axis, model, data))
     {
@@ -4260,7 +4259,8 @@
             return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
         }
 
-        paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
+        paddingList.emplace_back(static_cast<unsigned int>(paddingBeforeInput),
+                                 static_cast<unsigned int>(paddingAfterInput));
     }
 
     armnn::SpaceToBatchNdDescriptor descriptor;
diff --git a/ConversionUtils_1_2.hpp b/ConversionUtils_1_2.hpp
index acf787f..155fdf4 100644
--- a/ConversionUtils_1_2.hpp
+++ b/ConversionUtils_1_2.hpp
@@ -2396,7 +2396,7 @@
     IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
     if (!layer)
     {
-        return Fail("%s: Could not add the SpaceToDephLayer", __func__);
+        return Fail("%s: Could not add the SpaceToDepthLayer", __func__);
     }
     input.Connect(layer->GetInputSlot(0));