//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"
#include "ExecuteNetworkProgramOptions.hpp"

#include <armnn/Logging.hpp>
#include <Filesystem.hpp>
#include <InferenceTest.hpp>

#if defined(ARMNN_SERIALIZER)
#include "armnnDeserializer/IDeserializer.hpp"
#endif
#if defined(ARMNN_CAFFE_PARSER)
#include "armnnCaffeParser/ICaffeParser.hpp"
#endif
#if defined(ARMNN_TF_PARSER)
#include "armnnTfParser/ITfParser.hpp"
#endif
#if defined(ARMNN_TF_LITE_PARSER)
#include "armnnTfLiteParser/ITfLiteParser.hpp"
#endif
#if defined(ARMNN_ONNX_PARSER)
#include "armnnOnnxParser/IOnnxParser.hpp"
#endif
#if defined(ARMNN_TFLITE_DELEGATE)
#include <armnn_delegate.hpp>
#include <DelegateOptions.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/optional_debug_tools.h>
#include <tensorflow/lite/kernels/builtin_op_kernels.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#endif

#include <future>
#if defined(ARMNN_TFLITE_DELEGATE)
int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
                           const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
    using namespace tflite;

    std::unique_ptr<tflite::FlatBufferModel> model = tflite::FlatBufferModel::BuildFromFile(params.m_ModelPath.c_str());

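    // Build a TfLite interpreter over the model; the builtin op resolver provides
    // TfLite's reference kernels, which remain available as a fallback for any
    // operator the Arm NN delegate does not take over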
    auto tfLiteInterpreter = std::make_unique<Interpreter>();
    tflite::ops::builtin::BuiltinOpResolver resolver;

    tflite::InterpreterBuilder builder(*model, resolver);
    builder(&tfLiteInterpreter);
    tfLiteInterpreter->AllocateTensors();

    // Create the Arm NN delegate, targeting the compute devices requested on the
    // command line
    armnnDelegate::DelegateOptions delegateOptions(params.m_ComputeDevices);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
                        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                                         armnnDelegate::TfLiteArmnnDelegateDelete);
    // Register the Arm NN delegate with the TfLite interpreter
    int status = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
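    // kTfLiteOk is 0, so a failed delegate registration surfaces in the status
    // returned at the end of this function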

    std::vector<std::string> inputBindings;
    for (const std::string& inputName: params.m_InputNames)
    {
        inputBindings.push_back(inputName);
    }

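    // If tensor data generation was requested, leave the optional empty so that the
    // populate helpers below synthesise dummy input values instead of reading a file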
    armnn::Optional<std::string> dataFile = params.m_GenerateTensorData
                                            ? armnn::EmptyOptional()
                                            : armnn::MakeOptional<std::string>(params.m_InputTensorDataFilePaths[0]);

    const size_t numInputs = inputBindings.size();

    for(unsigned int inputIndex = 0; inputIndex < numInputs; ++inputIndex)
    {
        int input = tfLiteInterpreter->inputs()[inputIndex];
        TfLiteIntArray* inputDims = tfLiteInterpreter->tensor(input)->dims;

        // Element count according to the model's own dims (note: the populate calls
        // below size their data from the user-supplied input shape instead)
        long inputSize = 1;
        for (unsigned int dim = 0; dim < static_cast<unsigned int>(inputDims->size); ++dim)
        {
            inputSize *= inputDims->data[dim];
        }

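        // Fill the input tensor, dispatching on the input type given on the command
        // line; the parsed values are copied straight into the interpreter's buffer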
        if (params.m_InputTypes[inputIndex].compare("float") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<float>(input);

            if(inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<float> tensorData;
            PopulateTensorWithDataGeneric<float>(tensorData,
                                                 params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                 dataFile,
                                                 [](const std::string& s)
                                                 { return std::stof(s); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("int8") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<int8_t>(input);

            if(inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<int8_t> tensorData;
            PopulateTensorWithDataGeneric<int8_t>(tensorData,
                                                  params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                  dataFile,
                                                  [](const std::string& s)
                                                  { return armnn::numeric_cast<int8_t>(std::stoi(s)); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("int") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<int32_t>(input);

            if(inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<int32_t> tensorData;
            PopulateTensorWithDataGeneric<int32_t>(tensorData,
                                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                   dataFile,
                                                   [](const std::string& s)
                                                   { return std::stoi(s); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<uint8_t>(input);

            if(inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<uint8_t> tensorData;
            PopulateTensorWithDataGeneric<uint8_t>(tensorData,
                                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                   dataFile,
                                                   [](const std::string& s)
                                                   { return armnn::numeric_cast<uint8_t>(std::stoi(s)); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else
        {
            ARMNN_LOG(fatal) << "Unsupported input tensor data type \"" << params.m_InputTypes[inputIndex] << "\". ";
            return EXIT_FAILURE;
        }
    }
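
    // Run the requested number of iterations, printing every output tensor each time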
    for (size_t x = 0; x < params.m_Iterations; x++)
    {
        // Run the inference
        tfLiteInterpreter->Invoke();

        // Print out the output
        for (unsigned int outputIndex = 0; outputIndex < params.m_OutputNames.size(); ++outputIndex)
        {
            auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
            TfLiteIntArray* outputDims = tfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;

            // Total number of elements in this output tensor
            long outputSize = 1;
            for (unsigned int dim = 0; dim < static_cast<unsigned int>(outputDims->size); ++dim)
            {
                outputSize *= outputDims->data[dim];
            }

            std::cout << params.m_OutputNames[outputIndex] << ": ";
            if (params.m_OutputTypes[outputIndex].compare("float") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
                if(tfLiteDelegateOutputData == NULL)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    std::cout << tfLiteDelegateOutputData[i] << ", ";
                    if (i % 60 == 0)
                    {
                        std::cout << std::endl;
                    }
                }
            }
            else if (params.m_OutputTypes[outputIndex].compare("int") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
                if(tfLiteDelegateOutputData == NULL)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    std::cout << tfLiteDelegateOutputData[i] << ", ";
                    if (i % 60 == 0)
                    {
                        std::cout << std::endl;
                    }
                }
            }
            else if (params.m_OutputTypes[outputIndex].compare("int8") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<int8_t>(tfLiteDelegateOutputId);
                if(tfLiteDelegateOutputData == NULL)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    // Promote to int so the value prints as a number, not a character
                    std::cout << signed(tfLiteDelegateOutputData[i]) << ", ";
                    if (i % 60 == 0)
                    {
                        std::cout << std::endl;
                    }
                }
            }
            else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
                if(tfLiteDelegateOutputData == NULL)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    // Promote to unsigned int so the value prints as a number, not a character
                    std::cout << unsigned(tfLiteDelegateOutputData[i]) << ", ";
                    if (i % 60 == 0)
                    {
                        std::cout << std::endl;
                    }
                }
            }
            else
            {
                ARMNN_LOG(fatal) << "Unsupported output tensor data type \""
                                 << params.m_OutputTypes[outputIndex] <<
                                    "\". Output type can be specified with -z argument";
                return EXIT_FAILURE;
            }
            std::cout << std::endl;
        }
    }

    return status;
}
#endif
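
// Parser-based path: build an InferenceModel (which parses, optimises and loads the
// network onto an Arm NN runtime) and drive it directly, without the TfLite runtime.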
template<typename TParser, typename TDataType>
int MainImpl(const ExecuteNetworkParams& params,
             const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
    using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;

    std::vector<TContainer> inputDataContainers;

    try
    {
        // Creates an InferenceModel, which will parse the model and load it into an IRuntime.
        typename InferenceModel<TParser, TDataType>::Params inferenceModelParams;
        inferenceModelParams.m_ModelPath                      = params.m_ModelPath;
        inferenceModelParams.m_IsModelBinary                  = params.m_IsModelBinary;
        inferenceModelParams.m_ComputeDevices                 = params.m_ComputeDevices;
        inferenceModelParams.m_DynamicBackendsPath            = params.m_DynamicBackendsPath;
        inferenceModelParams.m_PrintIntermediateLayers        = params.m_PrintIntermediate;
        inferenceModelParams.m_VisualizePostOptimizationModel = params.m_EnableLayerDetails;
        inferenceModelParams.m_ParseUnsupported               = params.m_ParseUnsupported;
        inferenceModelParams.m_InferOutputShape               = params.m_InferOutputShape;
        inferenceModelParams.m_EnableFastMath                 = params.m_EnableFastMath;
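
        // Input and output tensors are addressed by binding name, so forward the
        // names given on the command line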
        for(const std::string& inputName: params.m_InputNames)
        {
            inferenceModelParams.m_InputBindings.push_back(inputName);
        }

        for(unsigned int i = 0; i < params.m_InputTensorShapes.size(); ++i)
        {
            inferenceModelParams.m_InputShapes.push_back(*params.m_InputTensorShapes[i]);
        }

        for(const std::string& outputName: params.m_OutputNames)
        {
            inferenceModelParams.m_OutputBindings.push_back(outputName);
        }

        inferenceModelParams.m_SubgraphId          = params.m_SubgraphId;
        inferenceModelParams.m_EnableFp16TurboMode = params.m_EnableFp16TurboMode;
        inferenceModelParams.m_EnableBf16TurboMode = params.m_EnableBf16TurboMode;

        InferenceModel<TParser, TDataType> model(inferenceModelParams,
                                                 params.m_EnableProfiling,
                                                 params.m_DynamicBackendsPath,
                                                 runtime);

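        // Populate one data container per input, reading from file or generating
        // data, and quantising the values when input quantisation is requested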
        const size_t numInputs = inferenceModelParams.m_InputBindings.size();
        for(unsigned int i = 0; i < numInputs; ++i)
        {
            armnn::Optional<QuantizationParams> qParams = params.m_QuantizeInput ?
                                                          armnn::MakeOptional<QuantizationParams>(
                                                              model.GetInputQuantizationParams()) :
                                                          armnn::EmptyOptional();

            armnn::Optional<std::string> dataFile = params.m_GenerateTensorData ?
                                                    armnn::EmptyOptional() :
                                                    armnn::MakeOptional<std::string>(
                                                        params.m_InputTensorDataFilePaths[i]);

            unsigned int numElements = model.GetInputSize(i);
            if (params.m_InputTensorShapes.size() > i && params.m_InputTensorShapes[i])
            {
                // If the user has provided a tensor shape for the current input,
                // override numElements
                numElements = params.m_InputTensorShapes[i]->GetNumElements();
            }

            TContainer tensorData;
            PopulateTensorWithData(tensorData,
                                   numElements,
                                   params.m_InputTypes[i],
                                   qParams,
                                   dataFile);

            inputDataContainers.push_back(tensorData);
        }
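
        // Pre-allocate an output container per output binding, sized from the model
        // and typed to match the requested output type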
        const size_t numOutputs = inferenceModelParams.m_OutputBindings.size();
        std::vector<TContainer> outputDataContainers;

        for (unsigned int i = 0; i < numOutputs; ++i)
        {
            if (params.m_OutputTypes[i].compare("float") == 0)
            {
                outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("int") == 0)
            {
                outputDataContainers.push_back(std::vector<int>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("qasymm8") == 0)
            {
                outputDataContainers.push_back(std::vector<uint8_t>(model.GetOutputSize(i)));
            }
            else
            {
                ARMNN_LOG(fatal) << "Unsupported tensor data type \"" << params.m_OutputTypes[i] << "\". ";
                return EXIT_FAILURE;
            }
        }

        for (size_t x = 0; x < params.m_Iterations; x++)
        {
            // model.Run returns the inference time elapsed in EnqueueWorkload (in milliseconds)
            auto inference_duration = model.Run(inputDataContainers, outputDataContainers);

            if (params.m_GenerateTensorData)
            {
                ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful";
            }

            // Print output tensors
            const auto& infosOut = model.GetOutputBindingInfos();
            for (size_t i = 0; i < numOutputs; i++)
            {
                const armnn::TensorInfo& infoOut = infosOut[i].second;
                auto outputTensorFile = params.m_OutputTensorFiles.empty() ? "" : params.m_OutputTensorFiles[i];

                TensorPrinter printer(inferenceModelParams.m_OutputBindings[i],
                                      infoOut,
                                      outputTensorFile,
                                      params.m_DequantizeOutput);
                mapbox::util::apply_visitor(printer, outputDataContainers[i]);
            }

            ARMNN_LOG(info) << "\nInference time: " << std::setprecision(2)
                            << std::fixed << inference_duration.count() << " ms\n";

            // If thresholdTime == 0.0 (default), then it hasn't been supplied at command line
            if (params.m_ThresholdTime != 0.0)
            {
                ARMNN_LOG(info) << "Threshold time: " << std::setprecision(2)
                                << std::fixed << params.m_ThresholdTime << " ms";
                auto thresholdMinusInference = params.m_ThresholdTime - inference_duration.count();
                ARMNN_LOG(info) << "Threshold time - Inference time: " << std::setprecision(2)
                                << std::fixed << thresholdMinusInference << " ms" << "\n";

                if (thresholdMinusInference < 0)
                {
                    std::string errorMessage = "Elapsed inference time is greater than provided threshold time.";
                    ARMNN_LOG(fatal) << errorMessage;
                }
            }
        }
    }
    catch (const armnn::Exception& e)
    {
        ARMNN_LOG(fatal) << "Armnn Error: " << e.what();
        return EXIT_FAILURE;
    }

    return EXIT_SUCCESS;
}
436
telsoa01c577f2c2018-08-31 09:22:23 +0100437
James Conroy7b4886f2019-04-11 10:23:58 +0100438// MAIN
telsoa01c577f2c2018-08-31 09:22:23 +0100439int main(int argc, const char* argv[])
440{
441 // Configures logging for both the ARMNN library and this test program.
Jan Eilers45274902020-10-15 18:34:43 +0100442 #ifdef NDEBUG
telsoa01c577f2c2018-08-31 09:22:23 +0100443 armnn::LogSeverity level = armnn::LogSeverity::Info;
Jan Eilers45274902020-10-15 18:34:43 +0100444 #else
telsoa01c577f2c2018-08-31 09:22:23 +0100445 armnn::LogSeverity level = armnn::LogSeverity::Debug;
Jan Eilers45274902020-10-15 18:34:43 +0100446 #endif
telsoa01c577f2c2018-08-31 09:22:23 +0100447 armnn::ConfigureLogging(true, true, level);
telsoa01c577f2c2018-08-31 09:22:23 +0100448
telsoa01c577f2c2018-08-31 09:22:23 +0100449
    // Get ExecuteNetwork parameters and runtime options from the command line
    ProgramOptions ProgramOptions(argc, argv);

    // Create runtime
    std::shared_ptr<armnn::IRuntime> runtime(armnn::IRuntime::Create(ProgramOptions.m_RuntimeOptions));

    std::string modelFormat = ProgramOptions.m_ExNetParams.m_ModelFormat;

    // Forward to implementation based on the parser type
    if (modelFormat.find("armnn") != std::string::npos)
    {
    #if defined(ARMNN_SERIALIZER)
        return MainImpl<armnnDeserializer::IDeserializer, float>(ProgramOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with serialization support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("caffe") != std::string::npos)
    {
    #if defined(ARMNN_CAFFE_PARSER)
        return MainImpl<armnnCaffeParser::ICaffeParser, float>(ProgramOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with Caffe parser support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("onnx") != std::string::npos)
    {
    #if defined(ARMNN_ONNX_PARSER)
        return MainImpl<armnnOnnxParser::IOnnxParser, float>(ProgramOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with Onnx parser support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("tensorflow") != std::string::npos)
    {
    #if defined(ARMNN_TF_PARSER)
        return MainImpl<armnnTfParser::ITfParser, float>(ProgramOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with Tensorflow parser support.";
        return EXIT_FAILURE;
    #endif
    }
    else if(modelFormat.find("tflite") != std::string::npos)
    {
        if (ProgramOptions.m_ExNetParams.m_EnableDelegate)
        {
        // Use the same macro as the TfLiteDelegateMainImpl definition guard above,
        // otherwise the delegate path could never be taken
        #if defined(ARMNN_TFLITE_DELEGATE)
            return TfLiteDelegateMainImpl(ProgramOptions.m_ExNetParams, runtime);
        #else
            ARMNN_LOG(fatal) << "Not built with Arm NN Tensorflow-Lite delegate support.";
            return EXIT_FAILURE;
        #endif
        }
    #if defined(ARMNN_TF_LITE_PARSER)
        return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(ProgramOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with Tensorflow-Lite parser support.";
        return EXIT_FAILURE;
    #endif
    }
    else
    {
        ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat
                         << "'. Please include 'armnn', 'caffe', 'tensorflow', 'tflite' or 'onnx'";
        return EXIT_FAILURE;
    }
}