Fix some memory overruns / undefined behaviour in ShapeInferenceTests

In several cases the address of a single float value on the stack was
passed as the data pointer to the constructor of ConstTensor; the
resulting tensor is then copied into a ScopedTensorHandle, which reads
a backing-store of size equal to GetNumBytes().

Fix this by using a std::vector to explicitly create and initialize
the correct number of elements as the backing store.

Signed-off-by: Matthew Bentham <matthew.bentham@arm.com>
Change-Id: I8a1f4bf169bd89983f2d68047173ec901a21e1fb
diff --git a/src/armnn/test/ShapeInferenceTests.cpp b/src/armnn/test/ShapeInferenceTests.cpp
index 1035a3b..333d12a 100644
--- a/src/armnn/test/ShapeInferenceTests.cpp
+++ b/src/armnn/test/ShapeInferenceTests.cpp
@@ -239,8 +239,8 @@
     TensorShape outputShape{ 1, 1, 3, 3 };
     auto layer = BuildGraph<ConstantLayer>(&graph, {}, "constant");
 
-    const float Datum = 0.0f;
-    ConstTensor output0({outputShape, DataType::Float32, 0.0f, 0, true}, &Datum);
+    std::vector<float> data(9, 0.0f);
+    ConstTensor output0({outputShape, DataType::Float32, 0.0f, 0, true}, data);
     layer->m_LayerOutput = std::make_unique<ScopedTensorHandle>(output0);
 
     layer->GetOutputSlot(0).SetTensorInfo({{1, 1, 3, 3}, DataType::Float32});
@@ -343,8 +343,8 @@
     descriptor.m_ScaleH = 5.0;
     descriptor.m_ScaleW = 5.0;
 
-    const float Datum = 0.0f;
-    ConstTensor anchorsTensor({{1, 1, 3, 3}, DataType::Float32, 0.0f, 0, true}, &Datum);
+    std::vector<float> data(9, 0.0f);
+    ConstTensor anchorsTensor({{1, 1, 3, 3}, DataType::Float32, 0.0f, 0, true}, data);
 
     Graph graph;
 
@@ -432,8 +432,8 @@
     Graph graph;
     auto layer = BuildGraph<LstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, descriptor, "lstm");
 
-    float Datum = 0.0f;
-    ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, &Datum);
+    std::vector<float> data(60, 0.0f);
+    ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, data);
 
     layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
     layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
@@ -520,8 +520,8 @@
     Graph graph;
     auto layer = BuildGraph<QLstmLayer>(&graph, {inputShape, inputCellState, inputCellState}, descriptor, "qlstm");
 
-    float Datum = 0.0f;
-    ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, &Datum);
+    std::vector<float> data(60, 0.0f);
+    ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, data);
 
     layer->m_BasicParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
     layer->m_BasicParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
@@ -548,8 +548,8 @@
     Graph graph;
     auto layer = BuildGraph<QuantizedLstmLayer>(&graph, {inputShape, inputCellState, inputCellState},  "quatizedlstm");
 
-    float Datum = 0.0f;
-    ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, &Datum);
+    std::vector<float> data(60, 0.0f);
+    ConstTensor constTensor({{ 2, 5, 3, 2 }, DataType::Float32, 0.0f, 0, true}, data);
 
     layer->m_QuantizedLstmParameters.m_InputToCellWeights = std::make_unique<ScopedTensorHandle>(constTensor);
     layer->m_QuantizedLstmParameters.m_InputToForgetWeights = std::make_unique<ScopedTensorHandle>(constTensor);
@@ -713,4 +713,4 @@
 }
 
 }
-}
\ No newline at end of file
+}