Use reference kernels

TensorFlow reference kernels are bit-exact and should be used by the
run_platform.py script to generate the expected OFM data.
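
For reference, a minimal standalone sketch of the same pattern, assuming
hypothetical file names (model.tflite, ifm.npy, expected_ofm.npy); passing
OpResolverType.BUILTIN_REF selects the bit-exact reference kernels:

    # Sketch only: model.tflite, ifm.npy and expected_ofm.npy are placeholder names.
    import numpy as np
    from tensorflow.lite.python.interpreter import Interpreter, OpResolverType

    # Select the TFLite reference (bit-exact) kernels instead of the optimized ones.
    interpreter = Interpreter(model_path="model.tflite",
                              experimental_op_resolver_type=OpResolverType.BUILTIN_REF)
    interpreter.allocate_tensors()

    input_detail = interpreter.get_input_details()[0]
    output_detail = interpreter.get_output_details()[0]

    # Feed the IFM, run inference and dump the expected OFM.
    ifm = np.load("ifm.npy").astype(input_detail["dtype"]).reshape(input_detail["shape"])
    interpreter.set_tensor(input_detail["index"], ifm)
    interpreter.invoke()
    np.save("expected_ofm.npy", interpreter.get_tensor(output_detail["index"]))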

Change-Id: I90e688e753e5330aaaf9002abed23df0493ff99b
diff --git a/scripts/run_platform.py b/scripts/run_platform.py
index 0600828..93ba7cd 100755
--- a/scripts/run_platform.py
+++ b/scripts/run_platform.py
@@ -29,8 +29,7 @@
 import sys
 
 os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
-from tensorflow.lite.python.interpreter import Interpreter
-
+from tensorflow.lite.python.interpreter import Interpreter, OpResolverType
 
 CORE_PLATFORM_PATH = pathlib.Path(__file__).resolve().parents[1]
 
@@ -50,11 +49,11 @@
 
     run_cmd(cmake_cmd)
 
-    make_cmd = ["make", "-C", build_folder, f"-j{multiprocessing.cpu_count()}"]
+    make_cmd = ["make", "-C", build_folder, f"-j{multiprocessing.cpu_count()}", "baremetal_custom"]
     run_cmd(make_cmd)
 
 def generate_reference_data(output_folder, non_optimized_model_path, input_path, expected_output_path):
-    interpreter = Interpreter(model_path=str(non_optimized_model_path.resolve()))
+    interpreter = Interpreter(model_path=str(non_optimized_model_path.resolve()), experimental_op_resolver_type=OpResolverType.BUILTIN_REF)
 
     interpreter.allocate_tensors()
     input_detail  = interpreter.get_input_details()[0]