IVGCVSW-5962 Remove boost::multi_array

 * Replaced all instances of boost::multi_array with flat vectors.
 * Updated LayerTestResult struct with new member variables.
 * Updated CompareTensor function to compare flat vectors as well as the tensor shape.
 * Removed MakeTensor function from TensorHelpers.hpp.
 * Removed GetTensorShapeAsArray function from LayerTestResult.hpp.
 * Removed boost::array usage.
 * Removed boost::extents usages.
 * Removed boost::random usages.

Signed-off-by: Matthew Sloyan <matthew.sloyan@arm.com>
Signed-off-by: Sadik Armagan <sadik.armagan@arm.com>
Change-Id: Iccde9d6640b534940292ff048fb80c00b38c4743
diff --git a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp
index db83259..9ab3746 100644
--- a/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/ConvertFp32ToBf16TestImpl.cpp
@@ -20,8 +20,9 @@
     const armnn::TensorInfo inputTensorInfo({1, 2, 4, 3}, armnn::DataType::Float32);
     const armnn::TensorInfo outputTensorInfo({1, 2, 4, 3}, armnn::DataType::BFloat16);
 
-    auto input = MakeTensor<float, 4>(inputTensorInfo,
-        { -37.5f, -15.2f, -8.76f,
+    std::vector<float> input =
+        {
+          -37.5f, -15.2f, -8.76f,
           -2.0f, -1.5f, -1.3f,
           -0.5f, -0.4f, 0.0f,
           1.0f, 0.4f, 0.5f,
@@ -33,13 +34,13 @@
           -3.8f, // 0xC0733333 Round down
           -3.1055E+29f, // 0xF07ADC3C Round up
           -9.149516E-10f // 0xB07B7FFF Round down
-        });
+        };
 
-    std::vector<armnn::BFloat16> outputValues = armnnUtils::QuantizedVector<armnn::BFloat16>(
+    std::vector<armnn::BFloat16> expectedOutput = armnnUtils::QuantizedVector<armnn::BFloat16>(
         {
-            -37.5f, -15.2f, -8.76f,
-            -2.0f, -1.5f, -1.3f,
-            -0.5f, -0.4f, 0.0f,
+          -37.5f, -15.2f, -8.76f,
+          -2.0f, -1.5f, -1.3f,
+          -0.5f, -0.4f, 0.0f,
           1.0f, 0.4f, 0.5f,
           1.3f, 1.5f, 2.0f,
           8.76f, 15.2f, 37.5f,
@@ -52,8 +53,7 @@
         },
         1.0f, 0);
 
-    LayerTestResult<armnn::BFloat16, 4> ret(outputTensorInfo);
-    ret.outputExpected = MakeTensor<armnn::BFloat16, 4>(outputTensorInfo, outputValues);
+    std::vector<armnn::BFloat16> actualOutput(outputTensorInfo.GetNumElements());
 
     std::unique_ptr<armnn::ITensorHandle> inputHandle = tensorHandleFactory.CreateTensorHandle(inputTensorInfo);
     std::unique_ptr<armnn::ITensorHandle> outputHandle = tensorHandleFactory.CreateTensorHandle(outputTensorInfo);
@@ -68,11 +68,15 @@
     inputHandle->Allocate();
     outputHandle->Allocate();
 
-    CopyDataToITensorHandle(inputHandle.get(), &input[0][0][0][0]);
+    CopyDataToITensorHandle(inputHandle.get(), input.data());
 
     workload->Execute();
 
-    CopyDataFromITensorHandle(&ret.output[0][0][0][0], outputHandle.get());
+    CopyDataFromITensorHandle(actualOutput.data(), outputHandle.get());
 
-    return ret;
+    return LayerTestResult<armnn::BFloat16, 4>(actualOutput,
+                                               expectedOutput,
+                                               outputHandle->GetShape(),
+                                               outputTensorInfo.GetShape());
+
 }