IVGCVSW-7404 Out of bounds detection

 * Added test to ensure that all inputs and outputs do not go out of
   bounds.

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Ia97e85f71e46cd2203306243e4dcbc23e0f29ec1
diff --git a/ArmnnPreparedModel.cpp b/ArmnnPreparedModel.cpp
index 4174043..d87f9f8 100644
--- a/ArmnnPreparedModel.cpp
+++ b/ArmnnPreparedModel.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -218,6 +218,7 @@
         NotifyCallbackAndCheck(callback, V1_0::ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel::execute");
         return V1_0::ErrorStatus::GENERAL_FAILURE;
     }
+
     // add the inputs and outputs with their data
     try
     {
@@ -225,11 +226,19 @@
         for (unsigned int i = 0; i < request.inputs.size(); i++)
         {
             const auto& inputArg = request.inputs[i];
-
             armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
             // pInputTensors (of type InputTensors) is composed of a vector of ConstTensors.
             // Therefore, set all TensorInfo isConstant parameters of input Tensors to true.
             inputTensorInfo.SetConstant();
+            auto result = ValidateRequestArgument<V1_0::ErrorStatus, V1_0::Request>(request,
+                                                                                    inputTensorInfo,
+                                                                                    inputArg,
+                                                                                    "input");
+            if (result != V1_0::ErrorStatus::NONE)
+            {
+                return result;
+            }
+
             const armnn::Tensor inputTensor = GetTensorForRequestArgument(inputArg, inputTensorInfo, *pMemPools);
             if (inputTensor.GetMemoryArea() == nullptr)
             {
@@ -244,8 +253,17 @@
         for (unsigned int i = 0; i < request.outputs.size(); i++)
         {
             const auto& outputArg = request.outputs[i];
-
             const armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
+            auto result = ValidateRequestArgument<V1_0::ErrorStatus, V1_0::Request>(request,
+                                                                                    outputTensorInfo,
+                                                                                    outputArg,
+                                                                                    "output");
+
+            if (result != V1_0::ErrorStatus::NONE)
+            {
+                return result;
+            }
+
             const armnn::Tensor outputTensor = GetTensorForRequestArgument(outputArg, outputTensorInfo, *pMemPools);
             if (outputTensor.GetMemoryArea() == nullptr)
             {
diff --git a/ArmnnPreparedModel_1_2.cpp b/ArmnnPreparedModel_1_2.cpp
index c54ee35..a401b30 100644
--- a/ArmnnPreparedModel_1_2.cpp
+++ b/ArmnnPreparedModel_1_2.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -312,11 +312,20 @@
     for (unsigned int i = 0; i < request.inputs.size(); i++)
     {
         const auto& inputArg = request.inputs[i];
-
         armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
         // inputs (of type InputTensors) is composed of a vector of ConstTensors.
         // Therefore, set all TensorInfo isConstant parameters of input Tensors to true.
         inputTensorInfo.SetConstant();
+        auto result = ValidateRequestArgument<V1_0::ErrorStatus, V1_0::Request>(request,
+                                                                                inputTensorInfo,
+                                                                                inputArg,
+                                                                                "input");
+
+        if (result != V1_0::ErrorStatus::NONE)
+        {
+            return result;
+        }
+
         const armnn::Tensor inputTensor = GetTensorForRequestArgument(inputArg, inputTensorInfo, memPools);
 
         if (inputTensor.GetMemoryArea() == nullptr)
@@ -342,8 +351,17 @@
     for (unsigned int i = 0; i < request.outputs.size(); i++)
     {
         const auto& outputArg = request.outputs[i];
+        armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
+        auto result = ValidateRequestArgument<V1_0::ErrorStatus, V1_0::Request>(request,
+                                                                                outputTensorInfo,
+                                                                                outputArg,
+                                                                                "output");
 
-        const armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
+        if (result != V1_0::ErrorStatus::NONE)
+        {
+            return result;
+        }
+
         const armnn::Tensor outputTensor = GetTensorForRequestArgument(outputArg, outputTensorInfo, memPools);
         if (outputTensor.GetMemoryArea() == nullptr)
         {
diff --git a/ArmnnPreparedModel_1_3.cpp b/ArmnnPreparedModel_1_3.cpp
index 20b49f5..ceeb3c4 100644
--- a/ArmnnPreparedModel_1_3.cpp
+++ b/ArmnnPreparedModel_1_3.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2020-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 // Note: the ArmnnFencedExecutionCallback and code snippet in the executeFenced() function
@@ -510,11 +510,20 @@
     for (unsigned int i = 0; i < request.inputs.size(); i++)
     {
         const auto& inputArg = request.inputs[i];
-
         armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
         // inputs (of type InputTensors) is composed of a vector of ConstTensors.
         // Therefore, set all TensorInfo isConstant parameters of input Tensors to true.
         inputTensorInfo.SetConstant();
+        auto result = ValidateRequestArgument<V1_3::ErrorStatus, V1_3::Request>(request,
+                                                                                inputTensorInfo,
+                                                                                inputArg,
+                                                                                "input");
+
+        if (result != V1_3::ErrorStatus::NONE)
+        {
+            return result;
+        }
+
         const armnn::Tensor inputTensor = GetTensorForRequestArgument(inputArg, inputTensorInfo, memPools);
 
         if (inputTensor.GetMemoryArea() == nullptr)
@@ -540,15 +549,24 @@
     for (unsigned int i = 0; i < request.outputs.size(); i++)
     {
         const auto& outputArg = request.outputs[i];
-
         armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
+        auto result = ValidateRequestArgument<V1_3::ErrorStatus, V1_3::Request>(request,
+                                                                                outputTensorInfo,
+                                                                                outputArg,
+                                                                                "output");
+
+        if (result != V1_3::ErrorStatus::NONE)
+        {
+            return result;
+        }
+
         const armnn::Tensor outputTensor = GetTensorForRequestArgument(outputArg, outputTensorInfo, memPools);
+
         if (outputTensor.GetMemoryArea() == nullptr)
         {
             ALOGE("Cannot execute request. Error converting request output %u to tensor", i);
             return V1_3::ErrorStatus::GENERAL_FAILURE;
         }
-
         const size_t outputSize = outputTensorInfo.GetNumBytes();
 
         unsigned int count = 0;
diff --git a/Utils.cpp b/Utils.cpp
index 884bed0..13eb84d 100644
--- a/Utils.cpp
+++ b/Utils.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -767,4 +767,67 @@
 #endif
     }
 }
+
+size_t GetSize(const V1_0::Request& request, const V1_0::RequestArgument& requestArgument) // byte size of the memory pool backing this argument
+{
+    return request.pools[requestArgument.location.poolIndex].size(); // poolIndex must be pre-validated by the caller (see ValidateRequestArgument)
+}
+
+#ifdef ARMNN_ANDROID_NN_V1_3
+size_t GetSize(const V1_3::Request& request, const V1_0::RequestArgument& requestArgument) // V1_3 overload: pools are a safe union, not plain hidl_memory
+{
+    if (request.pools[requestArgument.location.poolIndex].getDiscriminator() ==
+        V1_3::Request::MemoryPool::hidl_discriminator::hidlMemory)
+    {
+        return request.pools[requestArgument.location.poolIndex].hidlMemory().size(); // only hidlMemory pools expose a byte size
+    }
+    else
+    {
+        return 0; // NOTE(review): non-hidlMemory pools report size 0, so any argument located in one fails the bounds check — confirm this is intended
+    }
+}
+#endif
+
+template <typename ErrorStatus, typename Request>
+ErrorStatus ValidateRequestArgument(const Request& request,          // verifies the argument lies fully inside its memory pool
+                                    const armnn::TensorInfo& tensorInfo,
+                                    const V1_0::RequestArgument& requestArgument,
+                                    std::string descString)          // "input"/"output"; used only in error messages
+{
+    if (requestArgument.location.poolIndex >= request.pools.size())
+    {
+        std::string err = fmt::format("Invalid {} pool at index {} the pool index is greater than the number "
+                                      "of available pools {}",
+                                      descString, requestArgument.location.poolIndex, request.pools.size());
+        ALOGE("%s", err.c_str()); // never pass a runtime string as the format argument (-Wformat-security)
+        return ErrorStatus::GENERAL_FAILURE;
+    }
+    const size_t size = GetSize(request, requestArgument);
+    size_t totalLength = tensorInfo.GetNumBytes();
+
+    if (static_cast<size_t>(requestArgument.location.offset) + totalLength > size)
+    {
+        std::string err = fmt::format("Invalid {} pool at index {} the offset {} and length {} are greater "
+                                      "than the pool size {}", descString, requestArgument.location.poolIndex,
+                                      requestArgument.location.offset, totalLength, size);
+        ALOGE("%s", err.c_str()); // never pass a runtime string as the format argument (-Wformat-security)
+        return ErrorStatus::GENERAL_FAILURE;
+    }
+    return ErrorStatus::NONE;
+}
+
+template V1_0::ErrorStatus ValidateRequestArgument<V1_0::ErrorStatus, V1_0::Request>( // explicit instantiations: template body lives in this .cpp, declared in Utils.hpp
+        const V1_0::Request& request,
+        const armnn::TensorInfo& tensorInfo,
+        const V1_0::RequestArgument& requestArgument,
+        std::string descString);
+
+#ifdef ARMNN_ANDROID_NN_V1_3
+template V1_3::ErrorStatus ValidateRequestArgument<V1_3::ErrorStatus, V1_3::Request>( // V1_3 variant only exists when building for NNAPI 1.3
+        const V1_3::Request& request,
+        const armnn::TensorInfo& tensorInfo,
+        const V1_0::RequestArgument& requestArgument,
+        std::string descString);
+#endif
+
 } // namespace armnn_driver
diff --git a/Utils.hpp b/Utils.hpp
index 6e733a2..81be984 100644
--- a/Utils.hpp
+++ b/Utils.hpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd. All rights reserved.
+// Copyright © 2017-2021,2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -11,6 +11,8 @@
 #include <NeuralNetworks.h>
 #include <Utils.h>
 
+#include <fmt/format.h>
+
 #include <vector>
 #include <string>
 #include <fstream>
@@ -194,4 +196,9 @@
 
 void CommitPools(std::vector<::android::nn::RunTimePoolInfo>& memPools);
 
+template <typename ErrorStatus, typename Request>
+ErrorStatus ValidateRequestArgument(const Request& request,
+                                    const armnn::TensorInfo& tensorInfo,
+                                    const V1_0::RequestArgument& requestArgument,
+                                    std::string descString);
 } // namespace armnn_driver