Add BF16 support to reference model

* Upgrade Eigen to 3.4.0 (for bfloat16 support) and add workarounds
  for the reduce.any() and reduce.all() bugs introduced between
  3.3.7 and 3.4.0 (see the reduction sketch below)
* Truncation to bfloat16 is now performed in eval() methods (see the
  truncation sketch below)
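
A minimal sketch of the kind of workaround meant above (illustrative
only, not the reference model's code): since the boolean .any()/.all()
reductions are unreliable in Eigen 3.4.0, the same result can be
obtained by casting to int and reducing with maximum()/minimum():

    #include <unsupported/Eigen/CXX11/Tensor>

    // Hypothetical helper: reduce-any over axis 1 of a rank-2 bool
    // tensor without calling .any(), which misbehaves in Eigen 3.4.0.
    Eigen::Tensor<bool, 1> reduceAnyAxis1(const Eigen::Tensor<bool, 2>& in)
    {
        const Eigen::array<Eigen::Index, 1> dims = { 1 };
        // bool -> int: the maximum along the axis is 1 iff any element was true
        Eigen::Tensor<int, 1> reduced = in.cast<int>().maximum(dims);
        return reduced.cast<bool>();   // convert back to bool for the caller
    }

REDUCE_ALL follows the same pattern with minimum(), which is 0 iff any
element along the reduced axis was false.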
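
For the bfloat16 truncation, a minimal sketch of one possible helper
(hypothetical name, the model's own routine may differ): bfloat16 keeps
the sign, exponent and top 7 mantissa bits of an IEEE float32, so
clearing the low 16 bits of the bit pattern truncates a float to a
bfloat16-representable value. This is also the invariant that the
checkValidBFloat() assertion in the diff below is assumed to verify.

    #include <cstdint>
    #include <cstring>

    // Hypothetical helper: truncate a float to a bfloat16-representable
    // value by clearing the low 16 bits of its bit pattern.
    static float truncateToBF16(float value)
    {
        uint32_t bits;
        std::memcpy(&bits, &value, sizeof(bits));  // safe type-pun
        bits &= 0xFFFF0000u;                       // drop the low 16 mantissa bits
        float out;
        std::memcpy(&out, &bits, sizeof(out));
        return out;
    }

Note that Eigen 3.4.0's own Eigen::bfloat16 converts from float with
round-to-nearest-even rather than truncation.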

Signed-off-by: James Ward <james.ward@arm.com>
Signed-off-by: Jeremy Johnson <jeremy.johnson@arm.com>
Change-Id: If5f5c988d76d3d30790acf3b97081726b89205fe

diff --git a/reference_model/src/subgraph_traverser.cc b/reference_model/src/subgraph_traverser.cc
index ae216d8..112e641 100644
--- a/reference_model/src/subgraph_traverser.cc
+++ b/reference_model/src/subgraph_traverser.cc
@@ -15,6 +15,7 @@
 
 #include "subgraph_traverser.h"
 #include "tosa_model_types.h"
+#include "arith_util.h"
 
 #ifndef SUBGRAPH_ERROR_IF
 #define SUBGRAPH_ERROR_IF(COND, fmt, ...)                                                                              \
@@ -403,6 +404,16 @@
                     tensor->setTensorValueFloat(f16_data.size(), f16_data.data());
                 }
                 break;
+                case DType_BF16:
+                {
+                    std::vector<float> fp32_data;
+                    TosaSerializationHandler::ConvertU8toF32(ts->GetData(), tensor->getElementCount(), fp32_data);
+                    // Ensure valid bfloat16 stored in each float
+                    for (auto f : fp32_data)
+                        ASSERT_MSG(checkValidBFloat(f), "Float value %f not valid bfloat16", f);
+                    tensor->setTensorValueFloat(fp32_data.size(), fp32_data.data());
+                }
+                break;
                 case DType_FP32:
                 {
                     std::vector<float> fp32_data;