//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"
#include "ExecuteNetworkProgramOptions.hpp"

#include <armnn/Logging.hpp>
#include <Filesystem.hpp>
#include <InferenceTest.hpp>

#if defined(ARMNN_SERIALIZER)
#include "armnnDeserializer/IDeserializer.hpp"
#endif
#if defined(ARMNN_TF_LITE_PARSER)
#include "armnnTfLiteParser/ITfLiteParser.hpp"
#endif
#if defined(ARMNN_ONNX_PARSER)
#include "armnnOnnxParser/IOnnxParser.hpp"
#endif
#if defined(ARMNN_TFLITE_DELEGATE)
#include <armnn_delegate.hpp>
#include <DelegateOptions.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/optional_debug_tools.h>
#include <tensorflow/lite/kernels/builtin_op_kernels.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#endif

#include <future>
#if defined(ARMNN_TFLITE_DELEGATE)
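// Runs the network through the standalone TfLite interpreter, registering the
// Arm NN delegate first when the ArmNNTfLiteDelegate executor is selected.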
int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
                           const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
    using namespace tflite;

    std::unique_ptr<tflite::FlatBufferModel> model =
            tflite::FlatBufferModel::BuildFromFile(params.m_ModelPath.c_str());
    // BuildFromFile returns nullptr on failure; guard before dereferencing the model.
    if (model == nullptr)
    {
        ARMNN_LOG(fatal) << "Failed to load TfLite model from: " << params.m_ModelPath;
        return EXIT_FAILURE;
    }

    auto tfLiteInterpreter = std::make_unique<Interpreter>();
    tflite::ops::builtin::BuiltinOpResolver resolver;

    tflite::InterpreterBuilder builder(*model, resolver);
    builder(&tfLiteInterpreter);
    tfLiteInterpreter->AllocateTensors();

    int status = 0;
    if (params.m_TfLiteExecutor == ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate)
    {
        // Create the Arm NN delegate
        armnnDelegate::DelegateOptions delegateOptions(params.m_ComputeDevices);
        std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
                theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                                 armnnDelegate::TfLiteArmnnDelegateDelete);
        // Register the Arm NN delegate with the TfLite interpreter
        status = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
        if (status == kTfLiteError)
        {
            ARMNN_LOG(fatal) << "Could not register the Arm NN TfLite delegate with the TfLite interpreter!";
            return EXIT_FAILURE;
        }
    }
    else
    {
        std::cout << "Running on TfLite without ArmNN delegate\n";
    }

    std::vector<std::string> inputBindings;
    for (const std::string& inputName: params.m_InputNames)
    {
        inputBindings.push_back(inputName);
    }

    armnn::Optional<std::string> dataFile = params.m_GenerateTensorData
                                            ? armnn::EmptyOptional()
                                            : armnn::MakeOptional<std::string>(params.m_InputTensorDataFilePaths[0]);

    const size_t numInputs = inputBindings.size();

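    // Fill each input tensor with data read from file (or generated data),
    // converting the text values according to the declared input type.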
    for(unsigned int inputIndex = 0; inputIndex < numInputs; ++inputIndex)
    {
        int input = tfLiteInterpreter->inputs()[inputIndex];
        TfLiteIntArray* inputDims = tfLiteInterpreter->tensor(input)->dims;

        long inputSize = 1;
        for (unsigned int dim = 0; dim < static_cast<unsigned int>(inputDims->size); ++dim)
        {
            inputSize *= inputDims->data[dim];
        }

        if (params.m_InputTypes[inputIndex].compare("float") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<float>(input);

            if (inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<float> tensorData;
            PopulateTensorWithDataGeneric<float>(tensorData,
                                                 params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                 dataFile,
                                                 [](const std::string& s)
                                                 { return std::stof(s); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("qsymms8") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<int8_t>(input);

            if (inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<int8_t> tensorData;
            PopulateTensorWithDataGeneric<int8_t>(tensorData,
                                                  params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                  dataFile,
                                                  [](const std::string& s)
                                                  { return armnn::numeric_cast<int8_t>(std::stoi(s)); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("int") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<int32_t>(input);

            if (inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<int32_t> tensorData;
            PopulateTensorWithDataGeneric<int32_t>(tensorData,
                                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                   dataFile,
                                                   [](const std::string& s)
                                                   { return std::stoi(s); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<uint8_t>(input);

            if (inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<uint8_t> tensorData;
            PopulateTensorWithDataGeneric<uint8_t>(tensorData,
                                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                   dataFile,
                                                   [](const std::string& s)
                                                   { return armnn::numeric_cast<uint8_t>(std::stoi(s)); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else
        {
            ARMNN_LOG(fatal) << "Unsupported input tensor data type \"" << params.m_InputTypes[inputIndex] << "\". ";
            return EXIT_FAILURE;
        }
    }

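    // Run inference params.m_Iterations times, printing every output tensor after each run.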
    for (size_t x = 0; x < params.m_Iterations; x++)
    {
        // Run the inference
        status = tfLiteInterpreter->Invoke();

        // Print out the output
        for (unsigned int outputIndex = 0; outputIndex < params.m_OutputNames.size(); ++outputIndex)
        {
            auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
            TfLiteIntArray* outputDims = tfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;

            long outputSize = 1;
            for (unsigned int dim = 0; dim < static_cast<unsigned int>(outputDims->size); ++dim)
            {
                outputSize *= outputDims->data[dim];
            }

            std::cout << params.m_OutputNames[outputIndex] << ": ";
            if (params.m_OutputTypes[outputIndex].compare("float") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == NULL)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    printf("%f ", tfLiteDelegateOutputData[i]);
                }
            }
            else if (params.m_OutputTypes[outputIndex].compare("int") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == NULL)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    printf("%d ", tfLiteDelegateOutputData[i]);
                }
            }
            else if (params.m_OutputTypes[outputIndex].compare("qsymms8") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<int8_t>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == NULL)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    printf("%d ", tfLiteDelegateOutputData[i]);
                }
            }
            else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == NULL)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    printf("%u ", tfLiteDelegateOutputData[i]);
                }
            }
            else
            {
                ARMNN_LOG(fatal) << "Unsupported output tensor data type \"" << params.m_OutputTypes[outputIndex]
                                 << "\". Output type can be specified with -z argument";
                return EXIT_FAILURE;
            }
            std::cout << std::endl;
        }
    }

    return status;
}
#endif
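
// Parses the model with the given TParser, loads it onto the requested backends
// through an Arm NN InferenceModel, and runs inference params.m_Iterations times.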
template<typename TParser, typename TDataType>
int MainImpl(const ExecuteNetworkParams& params,
             const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
    using TContainer =
            mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;

    std::vector<TContainer> inputDataContainers;

    try
    {
        // Creates an InferenceModel, which will parse the model and load it into an IRuntime.
        typename InferenceModel<TParser, TDataType>::Params inferenceModelParams;
        inferenceModelParams.m_ModelPath = params.m_ModelPath;
        inferenceModelParams.m_IsModelBinary = params.m_IsModelBinary;
        inferenceModelParams.m_ComputeDevices = params.m_ComputeDevices;
        inferenceModelParams.m_DynamicBackendsPath = params.m_DynamicBackendsPath;
        inferenceModelParams.m_PrintIntermediateLayers = params.m_PrintIntermediate;
        inferenceModelParams.m_VisualizePostOptimizationModel = params.m_EnableLayerDetails;
        inferenceModelParams.m_ParseUnsupported = params.m_ParseUnsupported;
        inferenceModelParams.m_InferOutputShape = params.m_InferOutputShape;
        inferenceModelParams.m_EnableFastMath = params.m_EnableFastMath;
        inferenceModelParams.m_SaveCachedNetwork = params.m_SaveCachedNetwork;
        inferenceModelParams.m_CachedNetworkFilePath = params.m_CachedNetworkFilePath;
        inferenceModelParams.m_NumberOfThreads = params.m_NumberOfThreads;
        inferenceModelParams.m_MLGOTuningFilePath = params.m_MLGOTuningFilePath;

        for(const std::string& inputName: params.m_InputNames)
        {
            inferenceModelParams.m_InputBindings.push_back(inputName);
        }

        for(unsigned int i = 0; i < params.m_InputTensorShapes.size(); ++i)
        {
            inferenceModelParams.m_InputShapes.push_back(*params.m_InputTensorShapes[i]);
        }

        for(const std::string& outputName: params.m_OutputNames)
        {
            inferenceModelParams.m_OutputBindings.push_back(outputName);
        }

        inferenceModelParams.m_SubgraphId = params.m_SubgraphId;
        inferenceModelParams.m_EnableFp16TurboMode = params.m_EnableFp16TurboMode;
        inferenceModelParams.m_EnableBf16TurboMode = params.m_EnableBf16TurboMode;

        InferenceModel<TParser, TDataType> model(inferenceModelParams,
                                                 params.m_EnableProfiling,
                                                 params.m_DynamicBackendsPath,
                                                 runtime);

        const size_t numInputs = inferenceModelParams.m_InputBindings.size();
        for(unsigned int i = 0; i < numInputs; ++i)
        {
            armnn::Optional<QuantizationParams> qParams = params.m_QuantizeInput ?
                                                          armnn::MakeOptional<QuantizationParams>(
                                                              model.GetInputQuantizationParams()) :
                                                          armnn::EmptyOptional();

            armnn::Optional<std::string> dataFile = params.m_GenerateTensorData ?
                                                    armnn::EmptyOptional() :
                                                    armnn::MakeOptional<std::string>(
                                                        params.m_InputTensorDataFilePaths[i]);

            unsigned int numElements = model.GetInputSize(i);
            if (params.m_InputTensorShapes.size() > i && params.m_InputTensorShapes[i])
            {
                // If the user has provided a tensor shape for the current input,
                // override numElements
                numElements = params.m_InputTensorShapes[i]->GetNumElements();
            }

            TContainer tensorData;
            PopulateTensorWithData(tensorData,
                                   numElements,
                                   params.m_InputTypes[i],
                                   qParams,
                                   dataFile);

            inputDataContainers.push_back(tensorData);
        }

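        // Pre-allocate an output container of the declared type for each output binding.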
        const size_t numOutputs = inferenceModelParams.m_OutputBindings.size();
        std::vector<TContainer> outputDataContainers;

        for (unsigned int i = 0; i < numOutputs; ++i)
        {
            if (params.m_OutputTypes[i].compare("float") == 0)
            {
                outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("int") == 0)
            {
                outputDataContainers.push_back(std::vector<int>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("qasymm8") == 0)
            {
                outputDataContainers.push_back(std::vector<uint8_t>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("qsymms8") == 0)
            {
                outputDataContainers.push_back(std::vector<int8_t>(model.GetOutputSize(i)));
            }
            else
            {
                ARMNN_LOG(fatal) << "Unsupported tensor data type \"" << params.m_OutputTypes[i] << "\". ";
                return EXIT_FAILURE;
            }
        }

        for (size_t x = 0; x < params.m_Iterations; x++)
        {
            // model.Run returns the inference time elapsed in EnqueueWorkload (in milliseconds)
            auto inference_duration = model.Run(inputDataContainers, outputDataContainers);

            if (params.m_GenerateTensorData)
            {
                ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful";
            }

            // Print output tensors
            const auto& infosOut = model.GetOutputBindingInfos();
            for (size_t i = 0; i < numOutputs; i++)
            {
                const armnn::TensorInfo& infoOut = infosOut[i].second;
                auto outputTensorFile = params.m_OutputTensorFiles.empty() ? "" : params.m_OutputTensorFiles[i];

                TensorPrinter printer(inferenceModelParams.m_OutputBindings[i],
                                      infoOut,
                                      outputTensorFile,
                                      params.m_DequantizeOutput);
                mapbox::util::apply_visitor(printer, outputDataContainers[i]);
            }

            ARMNN_LOG(info) << "\nInference time: " << std::setprecision(2)
                            << std::fixed << inference_duration.count() << " ms\n";

            // If thresholdTime == 0.0 (default), then it hasn't been supplied at command line
            if (params.m_ThresholdTime != 0.0)
            {
                ARMNN_LOG(info) << "Threshold time: " << std::setprecision(2)
                                << std::fixed << params.m_ThresholdTime << " ms";
                auto thresholdMinusInference = params.m_ThresholdTime - inference_duration.count();
                ARMNN_LOG(info) << "Threshold time - Inference time: " << std::setprecision(2)
                                << std::fixed << thresholdMinusInference << " ms" << "\n";

                if (thresholdMinusInference < 0)
                {
                    std::string errorMessage = "Elapsed inference time is greater than provided threshold time.";
                    ARMNN_LOG(fatal) << errorMessage;
                }
            }
        }
    }
    catch (const armnn::Exception& e)
    {
        ARMNN_LOG(fatal) << "Armnn Error: " << e.what();
        return EXIT_FAILURE;
    }

    return EXIT_SUCCESS;
}

// MAIN
int main(int argc, const char* argv[])
{
    // Configures logging for both the ARMNN library and this test program.
    #ifdef NDEBUG
    armnn::LogSeverity level = armnn::LogSeverity::Info;
    #else
    armnn::LogSeverity level = armnn::LogSeverity::Debug;
    #endif
    armnn::ConfigureLogging(true, true, level);

    // Get ExecuteNetwork parameters and runtime options from command line
    ProgramOptions ProgramOptions(argc, argv);

    // Create runtime
    std::shared_ptr<armnn::IRuntime> runtime(armnn::IRuntime::Create(ProgramOptions.m_RuntimeOptions));

    std::string modelFormat = ProgramOptions.m_ExNetParams.m_ModelFormat;

    // Forward to implementation based on the parser type
    if (modelFormat.find("armnn") != std::string::npos)
    {
    #if defined(ARMNN_SERIALIZER)
        return MainImpl<armnnDeserializer::IDeserializer, float>(ProgramOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with serialization support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("onnx") != std::string::npos)
    {
    #if defined(ARMNN_ONNX_PARSER)
        return MainImpl<armnnOnnxParser::IOnnxParser, float>(ProgramOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with Onnx parser support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("tflite") != std::string::npos)
    {
        if (ProgramOptions.m_ExNetParams.m_TfLiteExecutor == ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteParser)
        {
        #if defined(ARMNN_TF_LITE_PARSER)
            return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(ProgramOptions.m_ExNetParams, runtime);
        #else
            ARMNN_LOG(fatal) << "Not built with Tensorflow-Lite parser support.";
            return EXIT_FAILURE;
        #endif
        }
        else if (ProgramOptions.m_ExNetParams.m_TfLiteExecutor ==
                     ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate ||
                 ProgramOptions.m_ExNetParams.m_TfLiteExecutor ==
                     ExecuteNetworkParams::TfLiteExecutor::TfliteInterpreter)
        {
        #if defined(ARMNN_TFLITE_DELEGATE)
            return TfLiteDelegateMainImpl(ProgramOptions.m_ExNetParams, runtime);
        #else
            ARMNN_LOG(fatal) << "Not built with Arm NN Tensorflow-Lite delegate support.";
            return EXIT_FAILURE;
        #endif
        }
    }
    else
    {
        ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat
                         << "'. Please include 'armnn', 'tflite' or 'onnx'";
        return EXIT_FAILURE;
    }
}