//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"
#include "ExecuteNetworkProgramOptions.hpp"

#include <armnn/Logging.hpp>
#include <Filesystem.hpp>
#include <InferenceTest.hpp>

#if defined(ARMNN_SERIALIZER)
#include "armnnDeserializer/IDeserializer.hpp"
#endif
#if defined(ARMNN_CAFFE_PARSER)
#include "armnnCaffeParser/ICaffeParser.hpp"
#endif
#if defined(ARMNN_TF_PARSER)
#include "armnnTfParser/ITfParser.hpp"
#endif
#if defined(ARMNN_TF_LITE_PARSER)
#include "armnnTfLiteParser/ITfLiteParser.hpp"
#endif
#if defined(ARMNN_ONNX_PARSER)
#include "armnnOnnxParser/IOnnxParser.hpp"
#endif
#if defined(ARMNN_TFLITE_DELEGATE)
#include <armnn_delegate.hpp>
#include <DelegateOptions.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/optional_debug_tools.h>
#include <tensorflow/lite/kernels/builtin_op_kernels.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#endif

#include <future>
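
// ExecuteNetwork has two execution paths. When built with the Arm NN TfLite
// delegate (ARMNN_TFLITE_DELEGATE), a .tflite model can be run through the stock
// TfLite interpreter, with supported subgraphs handed to Arm NN via the delegate
// (TfLiteDelegateMainImpl below). Otherwise the model is parsed with a
// format-specific parser and loaded into an Arm NN runtime (MainImpl).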
#if defined(ARMNN_TFLITE_DELEGATE)
int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
                           const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
    using namespace tflite;

    std::unique_ptr<tflite::FlatBufferModel> model =
        tflite::FlatBufferModel::BuildFromFile(params.m_ModelPath.c_str());
    // BuildFromFile returns nullptr on failure; guard before handing the model
    // to the InterpreterBuilder.
    if (model == nullptr)
    {
        ARMNN_LOG(fatal) << "Failed to load model from file: " << params.m_ModelPath;
        return EXIT_FAILURE;
    }

    auto tfLiteInterpreter = std::make_unique<Interpreter>();
    tflite::ops::builtin::BuiltinOpResolver resolver;

    tflite::InterpreterBuilder builder(*model, resolver);
    builder(&tfLiteInterpreter);
    tfLiteInterpreter->AllocateTensors();

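    // The delegate options carry the compute devices (e.g. CpuAcc, GpuAcc, CpuRef)
    // on which Arm NN will run the subgraphs it claims from the interpreter.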
    // Create the Arm NN delegate
    armnnDelegate::DelegateOptions delegateOptions(params.m_ComputeDevices);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
    // Register the Arm NN delegate with the TfLite interpreter
    int status = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
    if (status == kTfLiteError)
    {
        ARMNN_LOG(fatal) << "Could not register the Arm NN TfLite delegate with the TfLite interpreter!";
        return EXIT_FAILURE;
    }

    std::vector<std::string> inputBindings;
    for (const std::string& inputName: params.m_InputNames)
    {
        inputBindings.push_back(inputName);
    }

    armnn::Optional<std::string> dataFile = params.m_GenerateTensorData
                                            ? armnn::EmptyOptional()
                                            : armnn::MakeOptional<std::string>(params.m_InputTensorDataFilePaths[0]);

    const size_t numInputs = inputBindings.size();

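    // Populate each input tensor, either with generated dummy data (when
    // m_GenerateTensorData is set) or with values parsed from file. Note that this
    // path reads every input from the first entry of m_InputTensorDataFilePaths.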
    for (unsigned int inputIndex = 0; inputIndex < numInputs; ++inputIndex)
    {
        int input = tfLiteInterpreter->inputs()[inputIndex];
        TfLiteIntArray* inputDims = tfLiteInterpreter->tensor(input)->dims;

        long inputSize = 1;
        for (unsigned int dim = 0; dim < static_cast<unsigned int>(inputDims->size); ++dim)
        {
            inputSize *= inputDims->data[dim];
        }

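        // Fill this input from the data file (or generated data), converting to the
        // requested element type; inputSize, computed above from the interpreter's
        // own tensor dims, gives the number of elements to populate.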
        if (params.m_InputTypes[inputIndex].compare("float") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<float>(input);

            if (inputData == nullptr)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<float> tensorData;
            PopulateTensorWithDataGeneric<float>(tensorData,
                                                 static_cast<unsigned int>(inputSize),
                                                 dataFile,
                                                 [](const std::string& s)
                                                 { return std::stof(s); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("int8") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<int8_t>(input);

            if (inputData == nullptr)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<int8_t> tensorData;
            PopulateTensorWithDataGeneric<int8_t>(tensorData,
                                                  static_cast<unsigned int>(inputSize),
                                                  dataFile,
                                                  [](const std::string& s)
                                                  { return armnn::numeric_cast<int8_t>(std::stoi(s)); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("int") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<int32_t>(input);

            if (inputData == nullptr)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<int32_t> tensorData;
            PopulateTensorWithDataGeneric<int32_t>(tensorData,
                                                   static_cast<unsigned int>(inputSize),
                                                   dataFile,
                                                   [](const std::string& s)
                                                   { return std::stoi(s); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<uint8_t>(input);

            if (inputData == nullptr)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<uint8_t> tensorData;
            PopulateTensorWithDataGeneric<uint8_t>(tensorData,
                                                   static_cast<unsigned int>(inputSize),
                                                   dataFile,
                                                   [](const std::string& s)
                                                   { return armnn::numeric_cast<uint8_t>(std::stoi(s)); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else
        {
            ARMNN_LOG(fatal) << "Unsupported input tensor data type \"" << params.m_InputTypes[inputIndex] << "\".";
            return EXIT_FAILURE;
        }
    }

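    // Run the requested number of inferences, printing every output tensor to
    // stdout after each run (values are flushed in blocks of 60 per line).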
    for (size_t x = 0; x < params.m_Iterations; x++)
    {
        // Run the inference
        tfLiteInterpreter->Invoke();

        // Print out the output
        for (unsigned int outputIndex = 0; outputIndex < params.m_OutputNames.size(); ++outputIndex)
        {
            auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
            TfLiteIntArray* outputDims = tfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;

            long outputSize = 1;
            for (unsigned int dim = 0; dim < static_cast<unsigned int>(outputDims->size); ++dim)
            {
                outputSize *= outputDims->data[dim];
            }

            std::cout << params.m_OutputNames[outputIndex] << ": ";
            if (params.m_OutputTypes[outputIndex].compare("float") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == nullptr)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    std::cout << tfLiteDelegateOutputData[i] << ", ";
                    if (i % 60 == 0)
                    {
                        std::cout << std::endl;
                    }
                }
            }
            else if (params.m_OutputTypes[outputIndex].compare("int") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == nullptr)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    std::cout << tfLiteDelegateOutputData[i] << ", ";
                    if (i % 60 == 0)
                    {
                        std::cout << std::endl;
                    }
                }
            }
            else if (params.m_OutputTypes[outputIndex].compare("int8") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<int8_t>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == nullptr)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    std::cout << signed(tfLiteDelegateOutputData[i]) << ", ";
                    if (i % 60 == 0)
                    {
                        std::cout << std::endl;
                    }
                }
            }
            else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == nullptr)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    std::cout << unsigned(tfLiteDelegateOutputData[i]) << ", ";
                    if (i % 60 == 0)
                    {
                        std::cout << std::endl;
                    }
                }
            }
            else
            {
                ARMNN_LOG(fatal) << "Unsupported output tensor data type \"" << params.m_OutputTypes[outputIndex] <<
                                    "\". Output type can be specified with the -z argument.";
                return EXIT_FAILURE;
            }
            std::cout << std::endl;
        }
    }

    return status;
}
#endif
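
// Parser-based path: parse the model with a format-specific parser, load it into
// an Arm NN runtime via InferenceModel, run it, then print and optionally save
// the output tensors.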
template<typename TParser, typename TDataType>
int MainImpl(const ExecuteNetworkParams& params,
             const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
    using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;

    std::vector<TContainer> inputDataContainers;

    try
    {
        // Creates an InferenceModel, which will parse the model and load it into an IRuntime.
        typename InferenceModel<TParser, TDataType>::Params inferenceModelParams;
        inferenceModelParams.m_ModelPath                      = params.m_ModelPath;
        inferenceModelParams.m_IsModelBinary                  = params.m_IsModelBinary;
        inferenceModelParams.m_ComputeDevices                 = params.m_ComputeDevices;
        inferenceModelParams.m_DynamicBackendsPath            = params.m_DynamicBackendsPath;
        inferenceModelParams.m_PrintIntermediateLayers        = params.m_PrintIntermediate;
        inferenceModelParams.m_VisualizePostOptimizationModel = params.m_EnableLayerDetails;
        inferenceModelParams.m_ParseUnsupported               = params.m_ParseUnsupported;
        inferenceModelParams.m_InferOutputShape               = params.m_InferOutputShape;
        inferenceModelParams.m_EnableFastMath                 = params.m_EnableFastMath;
        inferenceModelParams.m_SaveCachedNetwork              = params.m_SaveCachedNetwork;
        inferenceModelParams.m_CachedNetworkFilePath          = params.m_CachedNetworkFilePath;
        inferenceModelParams.m_NumberOfThreads                = params.m_NumberOfThreads;
        inferenceModelParams.m_MLGOTuningFilePath             = params.m_MLGOTuningFilePath;

        for (const std::string& inputName: params.m_InputNames)
        {
            inferenceModelParams.m_InputBindings.push_back(inputName);
        }

        for (unsigned int i = 0; i < params.m_InputTensorShapes.size(); ++i)
        {
            inferenceModelParams.m_InputShapes.push_back(*params.m_InputTensorShapes[i]);
        }

        for (const std::string& outputName: params.m_OutputNames)
        {
            inferenceModelParams.m_OutputBindings.push_back(outputName);
        }

        inferenceModelParams.m_SubgraphId          = params.m_SubgraphId;
        inferenceModelParams.m_EnableFp16TurboMode = params.m_EnableFp16TurboMode;
        inferenceModelParams.m_EnableBf16TurboMode = params.m_EnableBf16TurboMode;

        InferenceModel<TParser, TDataType> model(inferenceModelParams,
                                                 params.m_EnableProfiling,
                                                 params.m_DynamicBackendsPath,
                                                 runtime);

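        // Fill one container per input, either with generated dummy data or with
        // values parsed from the per-input data files.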
        const size_t numInputs = inferenceModelParams.m_InputBindings.size();
        for (unsigned int i = 0; i < numInputs; ++i)
        {
            armnn::Optional<QuantizationParams> qParams = params.m_QuantizeInput ?
                                                          armnn::MakeOptional<QuantizationParams>(
                                                              model.GetInputQuantizationParams()) :
                                                          armnn::EmptyOptional();

            armnn::Optional<std::string> dataFile = params.m_GenerateTensorData ?
                                                    armnn::EmptyOptional() :
                                                    armnn::MakeOptional<std::string>(
                                                        params.m_InputTensorDataFilePaths[i]);

            unsigned int numElements = model.GetInputSize(i);
            if (params.m_InputTensorShapes.size() > i && params.m_InputTensorShapes[i])
            {
                // If the user has provided a tensor shape for the current input,
                // override numElements.
                numElements = params.m_InputTensorShapes[i]->GetNumElements();
            }

            TContainer tensorData;
            PopulateTensorWithData(tensorData,
                                   numElements,
                                   params.m_InputTypes[i],
                                   qParams,
                                   dataFile);

            inputDataContainers.push_back(tensorData);
        }

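        // Pre-allocate one correctly-typed container per output so model.Run can
        // write its results directly into them.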
        const size_t numOutputs = inferenceModelParams.m_OutputBindings.size();
        std::vector<TContainer> outputDataContainers;

        for (unsigned int i = 0; i < numOutputs; ++i)
        {
            if (params.m_OutputTypes[i].compare("float") == 0)
            {
                outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("int") == 0)
            {
                outputDataContainers.push_back(std::vector<int>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("qasymm8") == 0)
            {
                outputDataContainers.push_back(std::vector<uint8_t>(model.GetOutputSize(i)));
            }
            else
            {
                ARMNN_LOG(fatal) << "Unsupported tensor data type \"" << params.m_OutputTypes[i] << "\".";
                return EXIT_FAILURE;
            }
        }

        for (size_t x = 0; x < params.m_Iterations; x++)
        {
            // model.Run returns the inference time elapsed in EnqueueWorkload (in milliseconds)
            auto inference_duration = model.Run(inputDataContainers, outputDataContainers);

            if (params.m_GenerateTensorData)
            {
                ARMNN_LOG(warning) << "The input data was generated; note that the output will not be useful.";
            }

            // Print output tensors
            const auto& infosOut = model.GetOutputBindingInfos();
            for (size_t i = 0; i < numOutputs; i++)
            {
                const armnn::TensorInfo& infoOut = infosOut[i].second;
                auto outputTensorFile = params.m_OutputTensorFiles.empty() ? "" : params.m_OutputTensorFiles[i];

                TensorPrinter printer(inferenceModelParams.m_OutputBindings[i],
                                      infoOut,
                                      outputTensorFile,
                                      params.m_DequantizeOutput);
                mapbox::util::apply_visitor(printer, outputDataContainers[i]);
            }

            ARMNN_LOG(info) << "\nInference time: " << std::setprecision(2)
                            << std::fixed << inference_duration.count() << " ms\n";

            // If thresholdTime == 0.0 (default), it was not supplied on the command line.
            if (params.m_ThresholdTime != 0.0)
            {
                ARMNN_LOG(info) << "Threshold time: " << std::setprecision(2)
                                << std::fixed << params.m_ThresholdTime << " ms";
                auto thresholdMinusInference = params.m_ThresholdTime - inference_duration.count();
                ARMNN_LOG(info) << "Threshold time - Inference time: " << std::setprecision(2)
                                << std::fixed << thresholdMinusInference << " ms" << "\n";

                if (thresholdMinusInference < 0)
                {
                    std::string errorMessage = "Elapsed inference time is greater than provided threshold time.";
                    ARMNN_LOG(fatal) << errorMessage;
                }
            }
        }
    }
    catch (const armnn::Exception& e)
    {
        ARMNN_LOG(fatal) << "Arm NN error: " << e.what();
        return EXIT_FAILURE;
    }

    return EXIT_SUCCESS;
}

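// Example invocation (a sketch; the authoritative option list lives in
// ExecuteNetworkProgramOptions.hpp and flags may differ between versions):
//
//   ExecuteNetwork -f tflite-binary -m model.tflite -i input -o output -c CpuAcc
//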
// MAIN
int main(int argc, const char* argv[])
{
    // Configures logging for both the Arm NN library and this test program.
    #ifdef NDEBUG
    armnn::LogSeverity level = armnn::LogSeverity::Info;
    #else
    armnn::LogSeverity level = armnn::LogSeverity::Debug;
    #endif
    armnn::ConfigureLogging(true, true, level);

    // Get ExecuteNetwork parameters and runtime options from the command line
    ProgramOptions programOptions(argc, argv);

    // Create the runtime
    std::shared_ptr<armnn::IRuntime> runtime(armnn::IRuntime::Create(programOptions.m_RuntimeOptions));

    std::string modelFormat = programOptions.m_ExNetParams.m_ModelFormat;

    // Forward to the implementation matching the model format
    if (modelFormat.find("armnn") != std::string::npos)
    {
    #if defined(ARMNN_SERIALIZER)
        return MainImpl<armnnDeserializer::IDeserializer, float>(programOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with serialization support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("caffe") != std::string::npos)
    {
    #if defined(ARMNN_CAFFE_PARSER)
        return MainImpl<armnnCaffeParser::ICaffeParser, float>(programOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with Caffe parser support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("onnx") != std::string::npos)
    {
    #if defined(ARMNN_ONNX_PARSER)
        return MainImpl<armnnOnnxParser::IOnnxParser, float>(programOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with ONNX parser support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("tensorflow") != std::string::npos)
    {
    #if defined(ARMNN_TF_PARSER)
        return MainImpl<armnnTfParser::ITfParser, float>(programOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with TensorFlow parser support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("tflite") != std::string::npos)
    {
        if (programOptions.m_ExNetParams.m_EnableDelegate)
        {
    #if defined(ARMNN_TFLITE_DELEGATE)
            return TfLiteDelegateMainImpl(programOptions.m_ExNetParams, runtime);
    #else
            ARMNN_LOG(fatal) << "Not built with Arm NN TensorFlow Lite delegate support.";
            return EXIT_FAILURE;
    #endif
        }
    #if defined(ARMNN_TF_LITE_PARSER)
        return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(programOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with TensorFlow Lite parser support.";
        return EXIT_FAILURE;
    #endif
    }
    else
    {
        ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat
                         << "'. Please include 'armnn', 'caffe', 'onnx', 'tensorflow' or 'tflite'";
        return EXIT_FAILURE;
    }
}