IVGCVSW-5962 Remove boost::multi_array

 * Replaced all instances of boost::multi_array with flat vectors.
 * Updated LayerTestResult struct with new member variables for the actual
   and expected data and their tensor shapes (a sketch follows this list).
 * Updated CompareTensor function to compare flat vectors and their shapes.
 * Removed MakeTensor function from TensorHelpers.hpp.
 * Removed GetTensorShapeAsArray function from LayerTestResult.hpp.
 * Removed boost::array usages.
 * Removed boost::extents usages.
 * Removed boost::random usages.

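For reference, below is a minimal sketch of the reworked LayerTestResult layout
and comparison, inferred from the constructor call visible in the diff
(actual data, expected data, actual shape, expected shape). The type and
member names are illustrative assumptions, not the exact identifiers used in
LayerTestResult.hpp / TensorHelpers.hpp.

    #include <armnn/Tensor.hpp>

    #include <algorithm>
    #include <cstddef>
    #include <vector>

    // Sketch only: mirrors the four-argument construction used in the tests.
    template <typename T, std::size_t NumDims>
    struct LayerTestResultSketch
    {
        LayerTestResultSketch(const std::vector<T>& actualData,
                              const std::vector<T>& expectedData,
                              const armnn::TensorShape& actualShape,
                              const armnn::TensorShape& expectedShape)
            : m_ActualData(actualData)
            , m_ExpectedData(expectedData)
            , m_ActualShape(actualShape)
            , m_ExpectedShape(expectedShape)
        {}

        // Flat storage replaces the old boost::multi_array members; the shapes
        // travel with the data so a comparison helper can check dimensions as
        // well as element values.
        std::vector<T>     m_ActualData;
        std::vector<T>     m_ExpectedData;
        armnn::TensorShape m_ActualShape;
        armnn::TensorShape m_ExpectedShape;
    };

    // Sketch of a flat-vector comparison: shapes first, then element values.
    // Exact equality is used here for brevity; a tolerance-based comparison is
    // more appropriate for floating-point data.
    template <typename T>
    bool CompareFlatTensorsSketch(const std::vector<T>& actual,
                                  const std::vector<T>& expected,
                                  const armnn::TensorShape& actualShape,
                                  const armnn::TensorShape& expectedShape)
    {
        if (!(actualShape == expectedShape) || actual.size() != expected.size())
        {
            return false;
        }
        return std::equal(actual.begin(), actual.end(), expected.begin());
    }
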
Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Iccde9d6640b534940292ff048fb80c00b38c4743
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
index 5fbec56..9946801 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToFp16TestImpl.cpp
@@ -22,14 +22,19 @@
     const armnn::TensorInfo inputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float32);
     const armnn::TensorInfo outputTensorInfo({1, 3, 2, 3}, armnn::DataType::Float16);
 
-    auto input = MakeTensor<float, 4>(inputTensorInfo,
-        { -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
-          1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f });
+    std::vector<float> input =
+        {
+            -37.5f, -15.2f, -8.76f, -2.0f, -1.5f, -1.3f, -0.5f, -0.4f, 0.0f,
+            1.0f, 0.4f, 0.5f, 1.3f, 1.5f, 2.0f, 8.76f, 15.2f, 37.5f
+        };
 
-    LayerTestResult<armnn::Half, 4> ret(outputTensorInfo);
-    ret.outputExpected = MakeTensor<armnn::Half, 4>(outputTensorInfo,
-        { -37.5_h, -15.2_h, -8.76_h, -2.0_h, -1.5_h, -1.3_h, -0.5_h, -0.4_h, 0.0_h,
-          1.0_h, 0.4_h, 0.5_h, 1.3_h, 1.5_h, 2.0_h, 8.76_h, 15.2_h, 37.5_h });
+    std::vector<armnn::Half> expectedOutput =
+        {
+            -37.5_h, -15.2_h, -8.76_h, -2.0_h, -1.5_h, -1.3_h, -0.5_h, -0.4_h, 0.0_h,
+            1.0_h, 0.4_h, 0.5_h, 1.3_h, 1.5_h, 2.0_h, 8.76_h, 15.2_h, 37.5_h
+        };
+
+    std::vector<armnn::Half> actualOutput(outputTensorInfo.GetNumElements());
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -44,11 +49,14 @@
     inputHandle->Allocate();
     outputHandle->Allocate();
 
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle.get(), input.data());
 
     workload->Execute();
 
-    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
 
-    return ret;
+    return LayerTestResult<armnn::Half, 4>(actualOutput,
+                                           expectedOutput,
+                                           outputHandle->GetShape(),
+                                           outputTensorInfo.GetShape());
 }