This application benchmarks TFLite models by reporting the average inference time.

Usage: armnn_tfl_benchmark -m <model.tflite> [options]
-m --model_file <.tflite file path>:  .tflite model to be executed
-b --backend <device>:                preferred backend device on which
                                      to run the layers by default.
                                      Possible choices: CpuAcc, CpuRef
-l --loops <int>:                     number of times the inference
                                      will be executed
                                      (by default nb_loops=1)
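
For example, a run of 10 inferences on the Arm Compute CPU backend
(the model path below is illustrative, not shipped with the tests):

    armnn_tfl_benchmark -m mobilenet_v1.tflite -b CpuAcc -l 10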

Signed-off-by: Vincent ABRIOU <vincent.abriou@st.com>
Signed-off-by: Jim Flynn <jim.flynn@arm.com>
Change-Id: Ia26fafd4f382f0ad03856436dcae6e71b5abbd26
diff --git a/tests/CMakeLists.txt b/tests/CMakeLists.txt
index edea34d..135f649 100644
--- a/tests/CMakeLists.txt
+++ b/tests/CMakeLists.txt
@@ -137,6 +137,10 @@
         addDllCopyCommands(${testName})
     endmacro()
 
+    set(TfLiteBenchmark-Armnn_sources
+        TfLiteBenchmark-Armnn/TfLiteBenchmark-Armnn.cpp)
+    TfLiteParserTest(TfLiteBenchmark-Armnn "${TfLiteBenchmark-Armnn_sources}")
+
     set(TfLiteMobilenetQuantized-Armnn_sources
         TfLiteMobilenetQuantized-Armnn/TfLiteMobilenetQuantized-Armnn.cpp
         ImagePreprocessor.hpp