IVGCVSW-7404 Out of bounds detection

 * Added checks to ensure that all inputs and outputs do not go out of
   bounds.

Signed-off-by: Mike Kelly <mike.kelly@arm.com>
Change-Id: Ia97e85f71e46cd2203306243e4dcbc23e0f29ec1
diff --git a/ArmnnPreparedModel_1_2.cpp b/ArmnnPreparedModel_1_2.cpp
index c54ee35..a401b30 100644
--- a/ArmnnPreparedModel_1_2.cpp
+++ b/ArmnnPreparedModel_1_2.cpp
@@ -1,5 +1,5 @@
 //
-// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
+// Copyright © 2017-2023 Arm Ltd and Contributors. All rights reserved.
 // SPDX-License-Identifier: MIT
 //
 
@@ -312,11 +312,20 @@
     for (unsigned int i = 0; i < request.inputs.size(); i++)
     {
         const auto& inputArg = request.inputs[i];
-
         armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
         // inputs (of type InputTensors) is composed of a vector of ConstTensors.
         // Therefore, set all TensorInfo isConstant parameters of input Tensors to true.
         inputTensorInfo.SetConstant();
+        auto result = ValidateRequestArgument<V1_0::ErrorStatus, V1_0::Request>(request,
+                                                                                inputTensorInfo,
+                                                                                inputArg,
+                                                                                "input");
+
+        if (result != V1_0::ErrorStatus::NONE)
+        {
+            return result;
+        }
+
         const armnn::Tensor inputTensor = GetTensorForRequestArgument(inputArg, inputTensorInfo, memPools);
 
         if (inputTensor.GetMemoryArea() == nullptr)
@@ -342,8 +351,17 @@
     for (unsigned int i = 0; i < request.outputs.size(); i++)
     {
         const auto& outputArg = request.outputs[i];
+        armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
+        auto result = ValidateRequestArgument<V1_0::ErrorStatus, V1_0::Request>(request,
+                                                                                outputTensorInfo,
+                                                                                outputArg,
+                                                                                "output");
 
-        const armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
+        if (result != V1_0::ErrorStatus::NONE)
+        {
+            return result;
+        }
+
         const armnn::Tensor outputTensor = GetTensorForRequestArgument(outputArg, outputTensorInfo, memPools);
         if (outputTensor.GetMemoryArea() == nullptr)
         {
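
Note on the bounds check: the ValidateRequestArgument<ErrorStatus, Request> template called in both loops above is defined elsewhere in the driver and is not part of this diff. As a rough illustration of the kind of check it performs (verifying that each request argument's declared memory region, and the tensor it must hold, actually fit inside the referenced memory pool), here is a minimal, self-contained C++ sketch. The SimpleRequest, SimpleMemoryPool, SimpleLocation and SimpleRequestArgument types and their field names are illustrative stand-ins, not the NN HAL or Arm NN definitions.

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>
    #include <string>
    #include <vector>

    enum class ErrorStatus { NONE, INVALID_ARGUMENT };

    // Illustrative stand-ins for the request structures (not the HAL types).
    struct SimpleMemoryPool      { size_t size; };                        // backing memory pool
    struct SimpleLocation        { uint32_t poolIndex, offset, length; }; // where the tensor lives
    struct SimpleRequestArgument { SimpleLocation location; };
    struct SimpleRequest         { std::vector<SimpleMemoryPool> pools; };

    // Returns NONE only if the argument's pool index is valid and the region
    // starting at 'offset' that is large enough for the tensor fits in the pool.
    ErrorStatus ValidateRequestArgument(const SimpleRequest& request,
                                        size_t tensorBytes,
                                        const SimpleRequestArgument& arg,
                                        const std::string& descString)
    {
        const auto& loc = arg.location;

        // 1. The referenced memory pool must exist.
        if (loc.poolIndex >= request.pools.size())
        {
            std::fprintf(stderr, "%s: pool index %u is out of range\n",
                         descString.c_str(), loc.poolIndex);
            return ErrorStatus::INVALID_ARGUMENT;
        }

        const size_t poolSize = request.pools[loc.poolIndex].size;

        // 2. Both the declared region and the tensor it is supposed to hold must
        //    fit inside the pool; the subtraction form avoids integer overflow.
        const size_t required = std::max<size_t>(loc.length, tensorBytes);
        if (loc.offset > poolSize || required > poolSize - loc.offset)
        {
            std::fprintf(stderr, "%s: offset %u + %zu bytes exceeds pool size %zu\n",
                         descString.c_str(), loc.offset, required, poolSize);
            return ErrorStatus::INVALID_ARGUMENT;
        }

        return ErrorStatus::NONE;
    }

    int main()
    {
        SimpleRequest request{ { SimpleMemoryPool{64} } };

        // In bounds: 32 bytes at offset 0 of a 64-byte pool.
        SimpleRequestArgument ok { SimpleLocation{0, 0, 32} };
        // Out of bounds: 32 bytes starting at offset 48 would run past the pool.
        SimpleRequestArgument bad{ SimpleLocation{0, 48, 32} };

        std::printf("ok : %d\n", ValidateRequestArgument(request, 32, ok,  "input") == ErrorStatus::NONE);
        std::printf("bad: %d\n", ValidateRequestArgument(request, 32, bad, "input") == ErrorStatus::NONE);
        return 0;
    }

With a single 64-byte pool, the first argument validates cleanly while the second is rejected because offset 48 plus 32 bytes runs past the end of the pool. That is the situation the added checks in the loops above are meant to reject before GetTensorForRequestArgument hands the memory to the network, instead of reading or writing outside the pool at execution time.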