//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"
#include "ExecuteNetworkProgramOptions.hpp"

#include <armnn/Logging.hpp>
#include <Filesystem.hpp>
#include <InferenceTest.hpp>

#if defined(ARMNN_SERIALIZER)
#include "armnnDeserializer/IDeserializer.hpp"
#endif
#if defined(ARMNN_CAFFE_PARSER)
#include "armnnCaffeParser/ICaffeParser.hpp"
#endif
#if defined(ARMNN_TF_PARSER)
#include "armnnTfParser/ITfParser.hpp"
#endif
#if defined(ARMNN_TF_LITE_PARSER)
#include "armnnTfLiteParser/ITfLiteParser.hpp"
#endif
#if defined(ARMNN_ONNX_PARSER)
#include "armnnOnnxParser/IOnnxParser.hpp"
#endif
#if defined(ARMNN_TFLITE_DELEGATE)
#include <armnn_delegate.hpp>
#include <DelegateOptions.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/optional_debug_tools.h>
#include <tensorflow/lite/kernels/builtin_op_kernels.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#endif

#include <future>
#if defined(ARMNN_TFLITE_DELEGATE)
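// Runs the network with the standalone TfLite interpreter, optionally accelerated by the
// Arm NN delegate: load the .tflite model, build an interpreter, fill the input tensors,
// invoke, and print each output tensor.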
int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
                           const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
    using namespace tflite;

    std::unique_ptr<tflite::FlatBufferModel> model =
        tflite::FlatBufferModel::BuildFromFile(params.m_ModelPath.c_str());

    // BuildFromFile returns nullptr when the model cannot be read; fail early rather
    // than dereference a null model below.
    if (model == nullptr)
    {
        ARMNN_LOG(fatal) << "Failed to load model from file: " << params.m_ModelPath;
        return EXIT_FAILURE;
    }

    auto tfLiteInterpreter = std::make_unique<Interpreter>();
    tflite::ops::builtin::BuiltinOpResolver resolver;

    tflite::InterpreterBuilder builder(*model, resolver);
    builder(&tfLiteInterpreter);
    tfLiteInterpreter->AllocateTensors();

    int status = 0;
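    // Either register the Arm NN delegate with the interpreter or fall through and run on
    // the reference TfLite kernels.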
    if (params.m_TfLiteExecutor == ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate)
    {
        // Create the Armnn Delegate
        armnnDelegate::DelegateOptions delegateOptions(params.m_ComputeDevices);
        std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
                theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                                 armnnDelegate::TfLiteArmnnDelegateDelete);
        // Register armnn_delegate to TfLiteInterpreter
        status = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
        if (status == kTfLiteError)
        {
            ARMNN_LOG(fatal) << "Could not register ArmNN TfLite Delegate to TfLiteInterpreter!";
            return EXIT_FAILURE;
        }
    }
    else
    {
        std::cout << "Running on TfLite without ArmNN delegate\n";
    }

    std::vector<std::string> inputBindings;
    for (const std::string& inputName: params.m_InputNames)
    {
        inputBindings.push_back(inputName);
    }

    armnn::Optional<std::string> dataFile = params.m_GenerateTensorData
                                            ? armnn::EmptyOptional()
                                            : armnn::MakeOptional<std::string>(params.m_InputTensorDataFilePaths[0]);

    const size_t numInputs = inputBindings.size();

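    // Populate each input tensor, dispatching on the user-supplied type string; data is
    // read from file unless generated data was requested (m_GenerateTensorData).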
    for (unsigned int inputIndex = 0; inputIndex < numInputs; ++inputIndex)
    {
        int input = tfLiteInterpreter->inputs()[inputIndex];
        TfLiteIntArray* inputDims = tfLiteInterpreter->tensor(input)->dims;

        long inputSize = 1;
        for (unsigned int dim = 0; dim < static_cast<unsigned int>(inputDims->size); ++dim)
        {
            inputSize *= inputDims->data[dim];
        }

        if (params.m_InputTypes[inputIndex].compare("float") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<float>(input);

            if (inputData == nullptr)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<float> tensorData;
            PopulateTensorWithDataGeneric<float>(tensorData,
                                                 params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                 dataFile,
                                                 [](const std::string& s)
                                                 { return std::stof(s); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("qsymms8") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<int8_t>(input);

            if (inputData == nullptr)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<int8_t> tensorData;
            PopulateTensorWithDataGeneric<int8_t>(tensorData,
                                                  params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                  dataFile,
                                                  [](const std::string& s)
                                                  { return armnn::numeric_cast<int8_t>(std::stoi(s)); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("int") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<int32_t>(input);

            if (inputData == nullptr)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<int32_t> tensorData;
            PopulateTensorWithDataGeneric<int32_t>(tensorData,
                                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                   dataFile,
                                                   [](const std::string& s)
                                                   { return std::stoi(s); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<uint8_t>(input);

            if (inputData == nullptr)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<uint8_t> tensorData;
            PopulateTensorWithDataGeneric<uint8_t>(tensorData,
                                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                   dataFile,
                                                   [](const std::string& s)
                                                   { return armnn::numeric_cast<uint8_t>(std::stoi(s)); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else
        {
            ARMNN_LOG(fatal) << "Unsupported input tensor data type \"" << params.m_InputTypes[inputIndex] << "\". ";
            return EXIT_FAILURE;
        }
    }

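    // Run the requested number of inferences, printing every output tensor after each
    // run; the status of the last Invoke() call is what this function returns.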
    for (size_t x = 0; x < params.m_Iterations; x++)
    {
        // Run the inference
        status = tfLiteInterpreter->Invoke();

        // Print out the output
        for (unsigned int outputIndex = 0; outputIndex < params.m_OutputNames.size(); ++outputIndex)
        {
            auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
            TfLiteIntArray* outputDims = tfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;

            long outputSize = 1;
            for (unsigned int dim = 0; dim < static_cast<unsigned int>(outputDims->size); ++dim)
            {
                outputSize *= outputDims->data[dim];
            }

            std::cout << params.m_OutputNames[outputIndex] << ": ";
            if (params.m_OutputTypes[outputIndex].compare("float") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == nullptr)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    printf("%f ", tfLiteDelegateOutputData[i]);
                }
            }
            else if (params.m_OutputTypes[outputIndex].compare("int") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == nullptr)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    printf("%d ", tfLiteDelegateOutputData[i]);
                }
            }
            else if (params.m_OutputTypes[outputIndex].compare("qsymms8") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<int8_t>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == nullptr)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    printf("%d ", tfLiteDelegateOutputData[i]);
                }
            }
            else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == nullptr)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    printf("%u ", tfLiteDelegateOutputData[i]);
                }
            }
            else
            {
                ARMNN_LOG(fatal) << "Unsupported output tensor data type \""
                                 << params.m_OutputTypes[outputIndex] <<
                                 "\". Output type can be specified with -z argument";
                return EXIT_FAILURE;
            }
            std::cout << std::endl;
        }
    }

    return status;
}
#endif
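
// Parser-based path: builds an InferenceModel from the command-line parameters, feeds it
// the input tensors, runs the requested number of inferences and prints the outputs.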
template<typename TParser, typename TDataType>
int MainImpl(const ExecuteNetworkParams& params,
             const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
    using TContainer =
        mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>, std::vector<int8_t>>;

    std::vector<TContainer> inputDataContainers;

    try
    {
        // Creates an InferenceModel, which will parse the model and load it into an IRuntime.
        typename InferenceModel<TParser, TDataType>::Params inferenceModelParams;
        inferenceModelParams.m_ModelPath                      = params.m_ModelPath;
        inferenceModelParams.m_IsModelBinary                  = params.m_IsModelBinary;
        inferenceModelParams.m_ComputeDevices                 = params.m_ComputeDevices;
        inferenceModelParams.m_DynamicBackendsPath            = params.m_DynamicBackendsPath;
        inferenceModelParams.m_PrintIntermediateLayers        = params.m_PrintIntermediate;
        inferenceModelParams.m_VisualizePostOptimizationModel = params.m_EnableLayerDetails;
        inferenceModelParams.m_ParseUnsupported               = params.m_ParseUnsupported;
        inferenceModelParams.m_InferOutputShape               = params.m_InferOutputShape;
        inferenceModelParams.m_EnableFastMath                 = params.m_EnableFastMath;
        inferenceModelParams.m_SaveCachedNetwork              = params.m_SaveCachedNetwork;
        inferenceModelParams.m_CachedNetworkFilePath          = params.m_CachedNetworkFilePath;
        inferenceModelParams.m_NumberOfThreads                = params.m_NumberOfThreads;
        inferenceModelParams.m_MLGOTuningFilePath             = params.m_MLGOTuningFilePath;

        for (const std::string& inputName: params.m_InputNames)
        {
            inferenceModelParams.m_InputBindings.push_back(inputName);
        }

        for (unsigned int i = 0; i < params.m_InputTensorShapes.size(); ++i)
        {
            inferenceModelParams.m_InputShapes.push_back(*params.m_InputTensorShapes[i]);
        }

        for (const std::string& outputName: params.m_OutputNames)
        {
            inferenceModelParams.m_OutputBindings.push_back(outputName);
        }

        inferenceModelParams.m_SubgraphId          = params.m_SubgraphId;
        inferenceModelParams.m_EnableFp16TurboMode = params.m_EnableFp16TurboMode;
        inferenceModelParams.m_EnableBf16TurboMode = params.m_EnableBf16TurboMode;

        InferenceModel<TParser, TDataType> model(inferenceModelParams,
                                                 params.m_EnableProfiling,
                                                 params.m_DynamicBackendsPath,
                                                 runtime);

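        // Populate one container per input, quantizing when requested (m_QuantizeInput);
        // data is read from the supplied file(s) unless generated data was requested.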
        const size_t numInputs = inferenceModelParams.m_InputBindings.size();
        for (unsigned int i = 0; i < numInputs; ++i)
        {
            armnn::Optional<QuantizationParams> qParams = params.m_QuantizeInput ?
                                                          armnn::MakeOptional<QuantizationParams>(
                                                              model.GetInputQuantizationParams()) :
                                                          armnn::EmptyOptional();

            armnn::Optional<std::string> dataFile = params.m_GenerateTensorData ?
                                                    armnn::EmptyOptional() :
                                                    armnn::MakeOptional<std::string>(
                                                        params.m_InputTensorDataFilePaths[i]);

            unsigned int numElements = model.GetInputSize(i);
            if (params.m_InputTensorShapes.size() > i && params.m_InputTensorShapes[i])
            {
                // If the user has provided a tensor shape for the current input,
                // override numElements
                numElements = params.m_InputTensorShapes[i]->GetNumElements();
            }

            TContainer tensorData;
            PopulateTensorWithData(tensorData,
                                   numElements,
                                   params.m_InputTypes[i],
                                   qParams,
                                   dataFile);

            inputDataContainers.push_back(tensorData);
        }

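        // Pre-allocate an output container of the matching element type and size for each
        // output binding; the requested output types must match the model's outputs.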
        const size_t numOutputs = inferenceModelParams.m_OutputBindings.size();
        std::vector<TContainer> outputDataContainers;

        for (unsigned int i = 0; i < numOutputs; ++i)
        {
            if (params.m_OutputTypes[i].compare("float") == 0)
            {
                outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("int") == 0)
            {
                outputDataContainers.push_back(std::vector<int>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("qasymm8") == 0)
            {
                outputDataContainers.push_back(std::vector<uint8_t>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("qsymms8") == 0)
            {
                outputDataContainers.push_back(std::vector<int8_t>(model.GetOutputSize(i)));
            }
            else
            {
                ARMNN_LOG(fatal) << "Unsupported tensor data type \"" << params.m_OutputTypes[i] << "\". ";
                return EXIT_FAILURE;
            }
        }

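        // Run the inference the requested number of times, printing the output tensors
        // and checking against the optional threshold time after each run.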
        for (size_t x = 0; x < params.m_Iterations; x++)
        {
            // model.Run returns the inference time elapsed in EnqueueWorkload (in milliseconds)
            auto inference_duration = model.Run(inputDataContainers, outputDataContainers);

            if (params.m_GenerateTensorData)
            {
                ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful";
            }

            // Print output tensors
            const auto& infosOut = model.GetOutputBindingInfos();
            for (size_t i = 0; i < numOutputs; i++)
            {
                const armnn::TensorInfo& infoOut = infosOut[i].second;
                auto outputTensorFile = params.m_OutputTensorFiles.empty() ? "" : params.m_OutputTensorFiles[i];

                TensorPrinter printer(inferenceModelParams.m_OutputBindings[i],
                                      infoOut,
                                      outputTensorFile,
                                      params.m_DequantizeOutput);
                mapbox::util::apply_visitor(printer, outputDataContainers[i]);
            }

            ARMNN_LOG(info) << "\nInference time: " << std::setprecision(2)
                            << std::fixed << inference_duration.count() << " ms\n";

            // If thresholdTime == 0.0 (default), then it hasn't been supplied at command line
            if (params.m_ThresholdTime != 0.0)
            {
                ARMNN_LOG(info) << "Threshold time: " << std::setprecision(2)
                                << std::fixed << params.m_ThresholdTime << " ms";
                auto thresholdMinusInference = params.m_ThresholdTime - inference_duration.count();
                ARMNN_LOG(info) << "Threshold time - Inference time: " << std::setprecision(2)
                                << std::fixed << thresholdMinusInference << " ms" << "\n";

                if (thresholdMinusInference < 0)
                {
                    std::string errorMessage = "Elapsed inference time is greater than provided threshold time.";
                    ARMNN_LOG(fatal) << errorMessage;
                }
            }
        }
    }
    catch (const armnn::Exception& e)
    {
        ARMNN_LOG(fatal) << "Armnn Error: " << e.what();
        return EXIT_FAILURE;
    }

    return EXIT_SUCCESS;
}

// MAIN
int main(int argc, const char* argv[])
{
    // Configures logging for both the ARMNN library and this test program.
    #ifdef NDEBUG
    armnn::LogSeverity level = armnn::LogSeverity::Info;
    #else
    armnn::LogSeverity level = armnn::LogSeverity::Debug;
    #endif
    armnn::ConfigureLogging(true, true, level);

    // Get ExecuteNetwork parameters and runtime options from command line
    ProgramOptions ProgramOptions(argc, argv);

    // Create runtime
    std::shared_ptr<armnn::IRuntime> runtime(armnn::IRuntime::Create(ProgramOptions.m_RuntimeOptions));

    std::string modelFormat = ProgramOptions.m_ExNetParams.m_ModelFormat;

    // Forward to implementation based on the parser type
    if (modelFormat.find("armnn") != std::string::npos)
    {
    #if defined(ARMNN_SERIALIZER)
        return MainImpl<armnnDeserializer::IDeserializer, float>(ProgramOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with serialization support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("caffe") != std::string::npos)
    {
    #if defined(ARMNN_CAFFE_PARSER)
        return MainImpl<armnnCaffeParser::ICaffeParser, float>(ProgramOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with Caffe parser support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("onnx") != std::string::npos)
    {
    #if defined(ARMNN_ONNX_PARSER)
        return MainImpl<armnnOnnxParser::IOnnxParser, float>(ProgramOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with Onnx parser support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("tensorflow") != std::string::npos)
    {
    #if defined(ARMNN_TF_PARSER)
        return MainImpl<armnnTfParser::ITfParser, float>(ProgramOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with Tensorflow parser support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("tflite") != std::string::npos)
    {
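        // tflite models can be run three ways: via the Arm NN TfLite parser, via the
        // Arm NN TfLite delegate, or on the stock TfLite interpreter without Arm NN.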
        if (ProgramOptions.m_ExNetParams.m_TfLiteExecutor == ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteParser)
        {
        #if defined(ARMNN_TF_LITE_PARSER)
            return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(ProgramOptions.m_ExNetParams, runtime);
        #else
            ARMNN_LOG(fatal) << "Not built with Tensorflow-Lite parser support.";
            return EXIT_FAILURE;
        #endif
        }
        else if (ProgramOptions.m_ExNetParams.m_TfLiteExecutor ==
                     ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate ||
                 ProgramOptions.m_ExNetParams.m_TfLiteExecutor ==
                     ExecuteNetworkParams::TfLiteExecutor::TfliteInterpreter)
        {
        #if defined(ARMNN_TF_LITE_DELEGATE)
            return TfLiteDelegateMainImpl(ProgramOptions.m_ExNetParams, runtime);
        #else
            ARMNN_LOG(fatal) << "Not built with Arm NN Tensorflow-Lite delegate support.";
            return EXIT_FAILURE;
        #endif
        }
    }
    else
    {
        ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat
                         << "'. Please include 'armnn', 'caffe', 'tensorflow', 'tflite' or 'onnx'";
        return EXIT_FAILURE;
    }
}