IVGCVSW-5829 Segfault in tflite-parser, int8 models

 * Updated ParseSplit TfLiteParser function to read the correct axis data.
 * Improved validation in ParseSplit and ParseSplitV function.
 * Added TensorFlow BOOL support to TfLiteParser.
 * Added supported ElementwiseUnary operators to TfLiteParser,
   e.g. ABS, LOGICAL_NOT and RSQRT.
 * Removed ParseExp and ParseNeg function implementations in favour
   of a reusable ParseElementwiseUnary function.
 * Removed Exp.cpp and Neg.cpp files and moved tests to ElementWiseUnary.cpp.

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Change-Id: Ibce36e3ce4d95755dda88abc2ddde1e07e62c5e2
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 5f8b08b..a68839c 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -394,6 +394,9 @@
         case tflite::TensorType_INT64:
             type = armnn::DataType::Signed64;
             break;
+        case tflite::TensorType_BOOL:
+            type = armnn::DataType::Boolean;
+            break;
         default:
         {
             CheckLocation location = CHECK_LOCATION();
@@ -603,6 +606,7 @@
 , m_ParserFunctions(tflite::BuiltinOperator_MAX+1, &TfLiteParserImpl::ParseUnsupportedOperator)
 {
     // register supported operators
+    m_ParserFunctions[tflite::BuiltinOperator_ABS]                     = &TfLiteParserImpl::ParseAbs;
     m_ParserFunctions[tflite::BuiltinOperator_ADD]                     = &TfLiteParserImpl::ParseAdd;
     m_ParserFunctions[tflite::BuiltinOperator_ARG_MIN]                 = &TfLiteParserImpl::ParseArgMin;
     m_ParserFunctions[tflite::BuiltinOperator_ARG_MAX]                 = &TfLiteParserImpl::ParseArgMax;
@@ -622,6 +626,7 @@
     m_ParserFunctions[tflite::BuiltinOperator_GATHER]                  = &TfLiteParserImpl::ParseGather;
     m_ParserFunctions[tflite::BuiltinOperator_HARD_SWISH]              = &TfLiteParserImpl::ParseHardSwish;
     m_ParserFunctions[tflite::BuiltinOperator_LEAKY_RELU]              = &TfLiteParserImpl::ParseLeakyRelu;
+    m_ParserFunctions[tflite::BuiltinOperator_LOGICAL_NOT]             = &TfLiteParserImpl::ParseLogicalNot;
     m_ParserFunctions[tflite::BuiltinOperator_LOGISTIC]                = &TfLiteParserImpl::ParseLogistic;
     m_ParserFunctions[tflite::BuiltinOperator_L2_NORMALIZATION]        = &TfLiteParserImpl::ParseL2Normalization;
     m_ParserFunctions[tflite::BuiltinOperator_MAX_POOL_2D]             = &TfLiteParserImpl::ParseMaxPool2D;
@@ -640,6 +645,7 @@
     m_ParserFunctions[tflite::BuiltinOperator_RESHAPE]                 = &TfLiteParserImpl::ParseReshape;
     m_ParserFunctions[tflite::BuiltinOperator_RESIZE_BILINEAR]         = &TfLiteParserImpl::ParseResizeBilinear;
     m_ParserFunctions[tflite::BuiltinOperator_RESIZE_NEAREST_NEIGHBOR] = &TfLiteParserImpl::ParseResizeNearestNeighbor;
+    m_ParserFunctions[tflite::BuiltinOperator_RSQRT]                   = &TfLiteParserImpl::ParseRsqrt;
     m_ParserFunctions[tflite::BuiltinOperator_SLICE]                   = &TfLiteParserImpl::ParseSlice;
     m_ParserFunctions[tflite::BuiltinOperator_SOFTMAX]                 = &TfLiteParserImpl::ParseSoftmax;
     m_ParserFunctions[tflite::BuiltinOperator_SPACE_TO_BATCH_ND]       = &TfLiteParserImpl::ParseSpaceToBatchND;
@@ -1090,33 +1096,6 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
 }
 
-void TfLiteParserImpl::ParseExp(size_t subgraphIndex, size_t operatorIndex)
-{
-    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
-
-    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
-    CHECK_VALID_SIZE(inputs.size(), 1);
-
-    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
-    CHECK_VALID_SIZE(outputs.size(), 1);
-
-    auto layerName = fmt::format("Exp:{}:{}", subgraphIndex, operatorIndex);
-
-    ElementwiseUnaryDescriptor desc;
-    desc.m_Operation = UnaryOperation::Exp;
-    IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(desc, layerName.c_str());
-    ARMNN_ASSERT(layer != nullptr);
-
-    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
-    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
-
-    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
-    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
-
-    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
-    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
-}
-
 void TfLiteParserImpl::ParseTranspose(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
@@ -1917,31 +1896,6 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, {outputTensorIndexes[0]});
 }
 
-void TfLiteParserImpl::ParseNeg(size_t subgraphIndex, size_t operatorIndex)
-{
-  CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
-
-  auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
-  CHECK_VALID_SIZE(inputs.size(), 1);
-
-  auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
-  CHECK_VALID_SIZE(outputs.size(), 1);
-
-  auto layerName = fmt::format("Neg:{}:{}", subgraphIndex, operatorIndex);
-  armnn::ElementwiseUnaryDescriptor descriptor(armnn::UnaryOperation::Neg);
-  IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(descriptor, layerName.c_str());
-  ARMNN_ASSERT(layer != nullptr);
-
-  TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
-  layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
-
-  auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
-  RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
-
-  auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
-  RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
-}
-
 void TfLiteParserImpl::ParsePad(size_t subgraphIndex, size_t operatorIndex)
 {
     CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
@@ -2758,15 +2712,35 @@
     auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
     CHECK_VALID_SIZE(outputs.size(), numSplits);
 
-    armnn::TensorInfo inputTensorInfo  = ToTensorInfo(inputs[1]);
-    armnn::TensorInfo axisTensorInfo = ToTensorInfo(inputs[0]);
+    armnn::TensorInfo inputTensorInfo = ToTensorInfo(inputs[1]);
+    armnn::TensorInfo axisTensorInfo  = ToTensorInfo(inputs[0]);
+    ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
 
     BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[0]->buffer);
-    std::vector<unsigned int> axisData(axisTensorInfo.GetNumElements());
-    ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
+    if (axisBufferPtr == nullptr)
+    {
+        throw ParseException(
+                fmt::format("Operation has invalid inputs. Failed to read axis. {}",
+                            CHECK_LOCATION().AsString()));
+    }
 
-    ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
-    const unsigned int splitDim = axisData[0];
+    std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
+    ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
+    int32_t axis = axisData[0];
+
+    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
+    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
+    {
+        // Square bracket denotes inclusive n while parenthesis denotes exclusive n
+        // E.g. Rank 4 tensor can have axis in range [-4, 3)
+        // -1 == 3, -2 == 2, -3 == 1, -4 == 0
+        throw ParseException(
+                fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
+                            axis,
+                            CHECK_LOCATION().AsString()));
+    }
+
+    const unsigned int splitDim = armnnUtils::GetUnsignedAxis(inputTensorInfo.GetNumDimensions(), axis);
 
     auto inputDimSize = inputTensorInfo.GetNumDimensions();
     if (inputDimSize > MaxNumOfTensorDimensions)
@@ -2863,9 +2837,29 @@
 
     // Get split axis
     BufferRawPtr axisBufferPtr = GetBuffer(m_Model, axisTensor->buffer);
+    if (axisBufferPtr == nullptr)
+    {
+        throw ParseException(
+                fmt::format("Operation has invalid inputs. Failed to read axis. {}",
+                            CHECK_LOCATION().AsString()));
+    }
+
     std::vector<int> axisData(axisTensorInfo.GetNumElements());
     ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
-    const unsigned int splitDim = ComputeWrappedIndex(axisData[0], inputTensorInfo.GetNumDimensions());
+    int32_t axis = axisData[0];
+
+    auto inputDimensions = static_cast<int32_t>(inputTensorInfo.GetNumDimensions());
+    if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
+    {
+        // Square bracket denotes inclusive n while parenthesis denotes exclusive n
+        // E.g. Rank 4 tensor can have axis in range [-4, 3)
+        // -1 == 3, -2 == 2, -3 == 1, -4 == 0
+        throw ParseException(
+                fmt::format("Operation has invalid axis: {}. Axis must be in range [-n, n) {}",
+                            axis,
+                            CHECK_LOCATION().AsString()));
+    }
+    const unsigned int splitDim = ComputeWrappedIndex(axis, inputTensorInfo.GetNumDimensions());
 
     // Set split sizes
     CHECK_VALID_SIZE(splitsInfo.GetNumDimensions(), 1);
@@ -2988,6 +2982,7 @@
     armnn::TensorInfo inputTensorInfo  = ToTensorInfo(inputs[0]);
     armnn::TensorInfo axisTensorInfo   = ToTensorInfo(inputs[1]);
     armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0]);
+    ARMNN_ASSERT(axisTensorInfo.GetNumElements() == 1);
 
     // Check if output tensor type is Signed32 or Signed64
     if (outputTensorInfo.GetDataType() != armnn::DataType::Signed32 &&
@@ -3210,6 +3205,59 @@
     RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
 }
 
+void TfLiteParserImpl::ParseAbs(size_t subgraphIndex, size_t operatorIndex)
+{
+    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Abs);
+}
+
+void TfLiteParserImpl::ParseExp(size_t subgraphIndex, size_t operatorIndex)
+{
+    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Exp);
+}
+
+void TfLiteParserImpl::ParseLogicalNot(size_t subgraphIndex, size_t operatorIndex)
+{
+    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::LogicalNot);
+}
+
+void TfLiteParserImpl::ParseNeg(size_t subgraphIndex, size_t operatorIndex)
+{
+    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Neg);
+}
+
+void TfLiteParserImpl::ParseRsqrt(size_t subgraphIndex, size_t operatorIndex)
+{
+    ParseElementwiseUnary(subgraphIndex, operatorIndex, armnn::UnaryOperation::Rsqrt);
+}
+
+void TfLiteParserImpl::ParseElementwiseUnary(size_t subgraphIndex, size_t operatorIndex, UnaryOperation unaryOperation)
+{
+    CHECK_MODEL(m_Model, subgraphIndex, operatorIndex);
+
+    auto inputs = GetInputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(inputs.size(), 1);
+
+    auto outputs = GetOutputs(m_Model, subgraphIndex, operatorIndex);
+    CHECK_VALID_SIZE(outputs.size(), 1);
+
+    std::string layerName = std::string(GetUnaryOperationAsCString(unaryOperation)) + ":{}:{}";
+    std::string layerNameFormatted = fmt::format(layerName, subgraphIndex, operatorIndex);
+
+    ElementwiseUnaryDescriptor desc;
+    desc.m_Operation = unaryOperation;
+    IConnectableLayer* layer = m_Network->AddElementwiseUnaryLayer(desc, layerNameFormatted.c_str());
+    ARMNN_ASSERT(layer != nullptr);
+
+    TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
+    layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+
+    auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
+
+    auto outputTensorIndexes = AsUnsignedVector(GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex));
+    RegisterOutputSlots(subgraphIndex, operatorIndex, layer, outputTensorIndexes);
+}
+
 armnn::IConnectableLayer* TfLiteParserImpl::AddFusedActivationLayer(armnn::IConnectableLayer* prevLayer,
                                                                     unsigned int outputSlot,
                                                                     tflite::ActivationFunctionType activationType)