//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"
#include "ExecuteNetworkProgramOptions.hpp"

#include <armnn/Logging.hpp>
#include <Filesystem.hpp>
#include <InferenceTest.hpp>

#if defined(ARMNN_SERIALIZER)
#include "armnnDeserializer/IDeserializer.hpp"
#endif
#if defined(ARMNN_TF_PARSER)
#include "armnnTfParser/ITfParser.hpp"
#endif
#if defined(ARMNN_TF_LITE_PARSER)
#include "armnnTfLiteParser/ITfLiteParser.hpp"
#endif
#if defined(ARMNN_ONNX_PARSER)
#include "armnnOnnxParser/IOnnxParser.hpp"
#endif
#if defined(ARMNN_TFLITE_DELEGATE)
#include <armnn_delegate.hpp>
#include <DelegateOptions.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/optional_debug_tools.h>
#include <tensorflow/lite/kernels/builtin_op_kernels.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#endif

#include <future>
#if defined(ARMNN_TFLITE_DELEGATE)
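// Runs the network through the stock TfLite interpreter, optionally with the
// Arm NN delegate registered so that supported operators run on Arm NN
// backends while the remainder fall back to the reference TfLite kernels.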
int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
                           const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
    using namespace tflite;

    std::unique_ptr<tflite::FlatBufferModel> model =
        tflite::FlatBufferModel::BuildFromFile(params.m_ModelPath.c_str());
    if (model == nullptr)
    {
        // BuildFromFile returns nullptr if the file cannot be read or parsed.
        ARMNN_LOG(fatal) << "Failed to load model from file: " << params.m_ModelPath;
        return EXIT_FAILURE;
    }

    auto tfLiteInterpreter = std::make_unique<Interpreter>();
    tflite::ops::builtin::BuiltinOpResolver resolver;

    tflite::InterpreterBuilder builder(*model, resolver);
    builder(&tfLiteInterpreter);
    tfLiteInterpreter->AllocateTensors();
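    // AllocateTensors() sizes and allocates the input/output buffers; it must
    // run before any tensor data is written below.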

    int status = 0;
    if (params.m_TfLiteExecutor == ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate)
    {
        // Create the Arm NN delegate
        armnnDelegate::DelegateOptions delegateOptions(params.m_ComputeDevices);
        std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
                        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                                         armnnDelegate::TfLiteArmnnDelegateDelete);
        // Register the Arm NN delegate with the TfLite interpreter
        status = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
        if (status == kTfLiteError)
        {
            ARMNN_LOG(fatal) << "Could not register ArmNN TfLite Delegate to TfLiteInterpreter!";
            return EXIT_FAILURE;
        }
    }
    else
    {
        std::cout << "Running on TfLite without ArmNN delegate\n";
    }

    std::vector<std::string> inputBindings;
    for (const std::string& inputName: params.m_InputNames)
    {
        inputBindings.push_back(inputName);
    }

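    // When data is not being generated, every input in this path is read from
    // the first data file; per-input files are only honoured by MainImpl below.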
    armnn::Optional<std::string> dataFile = params.m_GenerateTensorData
                                            ? armnn::EmptyOptional()
                                            : armnn::MakeOptional<std::string>(params.m_InputTensorDataFilePaths[0]);

    const size_t numInputs = inputBindings.size();

    for (unsigned int inputIndex = 0; inputIndex < numInputs; ++inputIndex)
    {
        int input = tfLiteInterpreter->inputs()[inputIndex];
        TfLiteIntArray* inputDims = tfLiteInterpreter->tensor(input)->dims;

        long inputSize = 1;
        for (unsigned int dim = 0; dim < static_cast<unsigned int>(inputDims->size); ++dim)
        {
            inputSize *= inputDims->data[dim];
        }

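        // Note: inputSize reflects the tensor's own dims, but the population
        // code below sizes its buffer from the shape supplied on the command line.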
        if (params.m_InputTypes[inputIndex].compare("float") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<float>(input);

            if (inputData == nullptr)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<float> tensorData;
            PopulateTensorWithDataGeneric<float>(tensorData,
                                                 params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                 dataFile,
                                                 [](const std::string& s)
                                                 { return std::stof(s); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("qsymms8") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<int8_t>(input);

            if (inputData == nullptr)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<int8_t> tensorData;
            PopulateTensorWithDataGeneric<int8_t>(tensorData,
                                                  params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                  dataFile,
                                                  [](const std::string& s)
                                                  { return armnn::numeric_cast<int8_t>(std::stoi(s)); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("int") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<int32_t>(input);

            if (inputData == nullptr)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<int32_t> tensorData;
            PopulateTensorWithDataGeneric<int32_t>(tensorData,
                                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                   dataFile,
                                                   [](const std::string& s)
                                                   { return std::stoi(s); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<uint8_t>(input);

            if (inputData == nullptr)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<uint8_t> tensorData;
            PopulateTensorWithDataGeneric<uint8_t>(tensorData,
                                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                   dataFile,
                                                   [](const std::string& s)
                                                   { return armnn::numeric_cast<uint8_t>(std::stoi(s)); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else
        {
            ARMNN_LOG(fatal) << "Unsupported input tensor data type \"" << params.m_InputTypes[inputIndex] << "\".";
            return EXIT_FAILURE;
        }
    }

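    // Run the requested number of iterations, printing every output tensor
    // after each run.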
    for (size_t x = 0; x < params.m_Iterations; x++)
    {
        // Run the inference
        status = tfLiteInterpreter->Invoke();
        if (status != kTfLiteOk)
        {
            // Invoke() returns kTfLiteOk on success; bail out rather than
            // print output buffers that were never written.
            ARMNN_LOG(fatal) << "TfLite Invoke() failed on iteration " << x << ".";
            return EXIT_FAILURE;
        }

        // Print out the output
        for (unsigned int outputIndex = 0; outputIndex < params.m_OutputNames.size(); ++outputIndex)
        {
            auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
            TfLiteIntArray* outputDims = tfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;

            long outputSize = 1;
            for (unsigned int dim = 0; dim < static_cast<unsigned int>(outputDims->size); ++dim)
            {
                outputSize *= outputDims->data[dim];
            }

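            // Print the raw buffer as space-separated values, interpreted
            // according to the output type requested on the command line.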
            std::cout << params.m_OutputNames[outputIndex] << ": ";
            if (params.m_OutputTypes[outputIndex].compare("float") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == nullptr)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    printf("%f ", tfLiteDelegateOutputData[i]);
                }
            }
            else if (params.m_OutputTypes[outputIndex].compare("int") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == nullptr)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    printf("%d ", tfLiteDelegateOutputData[i]);
                }
            }
            else if (params.m_OutputTypes[outputIndex].compare("qsymms8") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<int8_t>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == nullptr)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    printf("%d ", tfLiteDelegateOutputData[i]);
                }
            }
            else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == nullptr)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    printf("%u ", tfLiteDelegateOutputData[i]);
                }
            }
            else
            {
                ARMNN_LOG(fatal) << "Unsupported output tensor data type \""
                                 << params.m_OutputTypes[outputIndex] <<
                                 "\". Output type can be specified with the -z argument.";
                return EXIT_FAILURE;
            }
            std::cout << std::endl;
        }
    }

    return status;
}
#endif
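
// Parser path: creates an InferenceModel, which parses the model file and
// loads it into the IRuntime, then feeds it TContainer-packed input data.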
template<typename TParser, typename TDataType>
int MainImpl(const ExecuteNetworkParams& params,
             const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
    using TContainer =
        mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;

    std::vector<TContainer> inputDataContainers;

    try
    {
        // Creates an InferenceModel, which will parse the model and load it into an IRuntime.
        typename InferenceModel<TParser, TDataType>::Params inferenceModelParams;
        inferenceModelParams.m_ModelPath                      = params.m_ModelPath;
        inferenceModelParams.m_IsModelBinary                  = params.m_IsModelBinary;
        inferenceModelParams.m_ComputeDevices                 = params.m_ComputeDevices;
        inferenceModelParams.m_DynamicBackendsPath            = params.m_DynamicBackendsPath;
        inferenceModelParams.m_PrintIntermediateLayers        = params.m_PrintIntermediate;
        inferenceModelParams.m_VisualizePostOptimizationModel = params.m_EnableLayerDetails;
        inferenceModelParams.m_ParseUnsupported               = params.m_ParseUnsupported;
        inferenceModelParams.m_InferOutputShape               = params.m_InferOutputShape;
        inferenceModelParams.m_EnableFastMath                 = params.m_EnableFastMath;
        inferenceModelParams.m_SaveCachedNetwork              = params.m_SaveCachedNetwork;
        inferenceModelParams.m_CachedNetworkFilePath          = params.m_CachedNetworkFilePath;
        inferenceModelParams.m_NumberOfThreads                = params.m_NumberOfThreads;
        inferenceModelParams.m_MLGOTuningFilePath             = params.m_MLGOTuningFilePath;

        for (const std::string& inputName: params.m_InputNames)
        {
            inferenceModelParams.m_InputBindings.push_back(inputName);
        }

        for (unsigned int i = 0; i < params.m_InputTensorShapes.size(); ++i)
        {
            inferenceModelParams.m_InputShapes.push_back(*params.m_InputTensorShapes[i]);
        }

        for (const std::string& outputName: params.m_OutputNames)
        {
            inferenceModelParams.m_OutputBindings.push_back(outputName);
        }

        inferenceModelParams.m_SubgraphId          = params.m_SubgraphId;
        inferenceModelParams.m_EnableFp16TurboMode = params.m_EnableFp16TurboMode;
        inferenceModelParams.m_EnableBf16TurboMode = params.m_EnableBf16TurboMode;

        InferenceModel<TParser, TDataType> model(inferenceModelParams,
                                                 params.m_EnableProfiling,
                                                 params.m_DynamicBackendsPath,
                                                 runtime);

        const size_t numInputs = inferenceModelParams.m_InputBindings.size();
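        // Populate one TContainer per input, optionally quantizing the data
        // and/or reading it from a per-input data file.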
        for (unsigned int i = 0; i < numInputs; ++i)
        {
            armnn::Optional<QuantizationParams> qParams = params.m_QuantizeInput ?
                                                          armnn::MakeOptional<QuantizationParams>(
                                                              model.GetInputQuantizationParams()) :
                                                          armnn::EmptyOptional();

            armnn::Optional<std::string> dataFile = params.m_GenerateTensorData ?
                                                    armnn::EmptyOptional() :
                                                    armnn::MakeOptional<std::string>(
                                                        params.m_InputTensorDataFilePaths[i]);

            unsigned int numElements = model.GetInputSize(i);
            if (params.m_InputTensorShapes.size() > i && params.m_InputTensorShapes[i])
            {
                // If the user has provided a tensor shape for the current input,
                // override numElements.
                numElements = params.m_InputTensorShapes[i]->GetNumElements();
            }

            TContainer tensorData;
            PopulateTensorWithData(tensorData,
                                   numElements,
                                   params.m_InputTypes[i],
                                   qParams,
                                   dataFile);

            inputDataContainers.push_back(tensorData);
        }

        const size_t numOutputs = inferenceModelParams.m_OutputBindings.size();
        std::vector<TContainer> outputDataContainers;

        for (unsigned int i = 0; i < numOutputs; ++i)
        {
            if (params.m_OutputTypes[i].compare("float") == 0)
            {
                outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("int") == 0)
            {
                outputDataContainers.push_back(std::vector<int>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("qasymm8") == 0)
            {
                outputDataContainers.push_back(std::vector<uint8_t>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("qsymms8") == 0)
            {
                outputDataContainers.push_back(std::vector<int8_t>(model.GetOutputSize(i)));
            }
            else
            {
                ARMNN_LOG(fatal) << "Unsupported tensor data type \"" << params.m_OutputTypes[i] << "\".";
                return EXIT_FAILURE;
            }
        }

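        // Run the requested number of iterations, printing the outputs and
        // timing information after each one.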
        for (size_t x = 0; x < params.m_Iterations; x++)
        {
            // model.Run returns the inference time elapsed in EnqueueWorkload (in milliseconds).
            auto inference_duration = model.Run(inputDataContainers, outputDataContainers);

            if (params.m_GenerateTensorData)
            {
                ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful";
            }

            // Print output tensors
            const auto& infosOut = model.GetOutputBindingInfos();
            for (size_t i = 0; i < numOutputs; i++)
            {
                const armnn::TensorInfo& infoOut = infosOut[i].second;
                auto outputTensorFile = params.m_OutputTensorFiles.empty() ? "" : params.m_OutputTensorFiles[i];

                TensorPrinter printer(inferenceModelParams.m_OutputBindings[i],
                                      infoOut,
                                      outputTensorFile,
                                      params.m_DequantizeOutput);
                mapbox::util::apply_visitor(printer, outputDataContainers[i]);
            }

            ARMNN_LOG(info) << "\nInference time: " << std::setprecision(2)
                            << std::fixed << inference_duration.count() << " ms\n";

            // If thresholdTime == 0.0 (default), then it hasn't been supplied at the command line.
            if (params.m_ThresholdTime != 0.0)
            {
                ARMNN_LOG(info) << "Threshold time: " << std::setprecision(2)
                                << std::fixed << params.m_ThresholdTime << " ms";
                auto thresholdMinusInference = params.m_ThresholdTime - inference_duration.count();
                ARMNN_LOG(info) << "Threshold time - Inference time: " << std::setprecision(2)
                                << std::fixed << thresholdMinusInference << " ms" << "\n";

                if (thresholdMinusInference < 0)
                {
                    std::string errorMessage = "Elapsed inference time is greater than provided threshold time.";
                    ARMNN_LOG(fatal) << errorMessage;
                }
            }
        }
    }
    catch (const armnn::Exception& e)
    {
        ARMNN_LOG(fatal) << "Armnn Error: " << e.what();
        return EXIT_FAILURE;
    }

    return EXIT_SUCCESS;
}

// MAIN
int main(int argc, const char* argv[])
{
    // Configures logging for both the ARMNN library and this test program.
    #ifdef NDEBUG
    armnn::LogSeverity level = armnn::LogSeverity::Info;
    #else
    armnn::LogSeverity level = armnn::LogSeverity::Debug;
    #endif
    armnn::ConfigureLogging(true, true, level);

    // Get ExecuteNetwork parameters and runtime options from the command line.
    ProgramOptions ProgramOptions(argc, argv);

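    // A typical invocation looks like the following (illustrative only; see
    // ExecuteNetworkProgramOptions.cpp for the authoritative option list):
    //   ExecuteNetwork -m model.tflite -f tflite-binary -i input -o output -c CpuAcc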
    // Create runtime
    std::shared_ptr<armnn::IRuntime> runtime(armnn::IRuntime::Create(ProgramOptions.m_RuntimeOptions));

    std::string modelFormat = ProgramOptions.m_ExNetParams.m_ModelFormat;

    // Forward to the implementation based on the parser type.
    if (modelFormat.find("armnn") != std::string::npos)
    {
    #if defined(ARMNN_SERIALIZER)
        return MainImpl<armnnDeserializer::IDeserializer, float>(ProgramOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with serialization support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("onnx") != std::string::npos)
    {
    #if defined(ARMNN_ONNX_PARSER)
        return MainImpl<armnnOnnxParser::IOnnxParser, float>(ProgramOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with Onnx parser support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("tensorflow") != std::string::npos)
    {
    #if defined(ARMNN_TF_PARSER)
        return MainImpl<armnnTfParser::ITfParser, float>(ProgramOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with Tensorflow parser support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("tflite") != std::string::npos)
    {
        if (ProgramOptions.m_ExNetParams.m_TfLiteExecutor == ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteParser)
        {
        #if defined(ARMNN_TF_LITE_PARSER)
            return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(ProgramOptions.m_ExNetParams, runtime);
        #else
            ARMNN_LOG(fatal) << "Not built with Tensorflow-Lite parser support.";
            return EXIT_FAILURE;
        #endif
        }
        else if (ProgramOptions.m_ExNetParams.m_TfLiteExecutor ==
                     ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate ||
                 ProgramOptions.m_ExNetParams.m_TfLiteExecutor ==
                     ExecuteNetworkParams::TfLiteExecutor::TfliteInterpreter)
        {
        // Guard must match the one around TfLiteDelegateMainImpl above.
        #if defined(ARMNN_TFLITE_DELEGATE)
            return TfLiteDelegateMainImpl(ProgramOptions.m_ExNetParams, runtime);
        #else
            ARMNN_LOG(fatal) << "Not built with Arm NN Tensorflow-Lite delegate support.";
            return EXIT_FAILURE;
        #endif
        }
    }
    else
    {
        ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat
                         << "'. Please include 'armnn', 'tensorflow', 'tflite' or 'onnx'";
        return EXIT_FAILURE;
    }
}