//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"
#include "ExecuteNetworkProgramOptions.hpp"
#include <armnn/IAsyncExecutionCallback.hpp>
#include <AsyncExecutionCallback.hpp>

#include <armnn/Logging.hpp>
#include <armnnUtils/Filesystem.hpp>
#include <InferenceTest.hpp>

#if defined(ARMNN_SERIALIZER)
#include "armnnDeserializer/IDeserializer.hpp"
#endif
#if defined(ARMNN_TF_LITE_PARSER)
#include "armnnTfLiteParser/ITfLiteParser.hpp"
#endif
#if defined(ARMNN_ONNX_PARSER)
#include "armnnOnnxParser/IOnnxParser.hpp"
#endif
#if defined(ARMNN_TFLITE_DELEGATE)
#include <armnn_delegate.hpp>
#include <DelegateOptions.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/optional_debug_tools.h>
#include <tensorflow/lite/kernels/builtin_op_kernels.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#endif

#include <future>
#if defined(ARMNN_TFLITE_DELEGATE)
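// Runs the model through the stock TfLite interpreter; when the
// ArmNNTfLiteDelegate executor is selected, supported operators are
// offloaded to the Arm NN delegate instead.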
int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
                           const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
    using namespace tflite;

    std::unique_ptr<tflite::FlatBufferModel> model = tflite::FlatBufferModel::BuildFromFile(params.m_ModelPath.c_str());

    auto tfLiteInterpreter = std::make_unique<Interpreter>();
    tflite::ops::builtin::BuiltinOpResolver resolver;

    tflite::InterpreterBuilder builder(*model, resolver);
    builder(&tfLiteInterpreter);
    tfLiteInterpreter->AllocateTensors();

    int status = 0;
    if (params.m_TfLiteExecutor == ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate)
    {
        // Create the Armnn Delegate
        armnnDelegate::DelegateOptions delegateOptions(params.m_ComputeDevices);
        std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
            theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                             armnnDelegate::TfLiteArmnnDelegateDelete);
        // Register armnn_delegate to TfLiteInterpreter
        status = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));
        if (status == kTfLiteError)
        {
            ARMNN_LOG(fatal) << "Could not register ArmNN TfLite Delegate to TfLiteInterpreter!";
            return EXIT_FAILURE;
        }
    }
    else
    {
        std::cout << "Running on TfLite without ArmNN delegate\n";
    }

    std::vector<std::string> inputBindings;
    for (const std::string& inputName: params.m_InputNames)
    {
        inputBindings.push_back(inputName);
    }

    armnn::Optional<std::string> dataFile = params.m_GenerateTensorData
                                            ? armnn::EmptyOptional()
                                            : armnn::MakeOptional<std::string>(params.m_InputTensorDataFilePaths[0]);

    const size_t numInputs = inputBindings.size();

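    // Populate each input tensor, either with generated data or with values
    // parsed from the data file, converted according to the user-specified input type.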
    for (unsigned int inputIndex = 0; inputIndex < numInputs; ++inputIndex)
    {
        int input = tfLiteInterpreter->inputs()[inputIndex];
        TfLiteIntArray* inputDims = tfLiteInterpreter->tensor(input)->dims;

        long inputSize = 1;
        for (unsigned int dim = 0; dim < static_cast<unsigned int>(inputDims->size); ++dim)
        {
            inputSize *= inputDims->data[dim];
        }

        if (params.m_InputTypes[inputIndex].compare("float") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<float>(input);

            if (inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<float> tensorData;
            PopulateTensorWithDataGeneric<float>(tensorData,
                                                 params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                 dataFile,
                                                 [](const std::string& s)
                                                 { return std::stof(s); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("qsymms8") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<int8_t>(input);

            if (inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<int8_t> tensorData;
            PopulateTensorWithDataGeneric<int8_t>(tensorData,
                                                  params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                  dataFile,
                                                  [](const std::string& s)
                                                  { return armnn::numeric_cast<int8_t>(std::stoi(s)); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("int") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<int32_t>(input);

            if (inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<int32_t> tensorData;
            PopulateTensorWithDataGeneric<int32_t>(tensorData,
                                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                   dataFile,
                                                   [](const std::string& s)
                                                   { return std::stoi(s); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0 ||
                 params.m_InputTypes[inputIndex].compare("qasymmu8") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<uint8_t>(input);

            if (inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<uint8_t> tensorData;
            PopulateTensorWithDataGeneric<uint8_t>(tensorData,
                                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                   dataFile,
                                                   [](const std::string& s)
                                                   { return armnn::numeric_cast<uint8_t>(std::stoi(s)); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else if (params.m_InputTypes[inputIndex].compare("qasymms8") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<int8_t>(input);

            if (inputData == NULL)
            {
                ARMNN_LOG(fatal) << "Input tensor is null, input type: "
                                    "\"" << params.m_InputTypes[inputIndex] << "\" may be incorrect.";
                return EXIT_FAILURE;
            }

            std::vector<int8_t> tensorData;
            PopulateTensorWithDataGeneric<int8_t>(tensorData,
                                                  params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                                  dataFile,
                                                  [](const std::string& s)
                                                  { return armnn::numeric_cast<int8_t>(std::stoi(s)); });

            std::copy(tensorData.begin(), tensorData.end(), inputData);
        }
        else
        {
            ARMNN_LOG(fatal) << "Unsupported input tensor data type \"" << params.m_InputTypes[inputIndex] << "\". ";
            return EXIT_FAILURE;
        }
    }

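    // Run the requested number of iterations and print each output tensor to stdout.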
    for (size_t x = 0; x < params.m_Iterations; x++)
    {
        // Run the inference
        status = tfLiteInterpreter->Invoke();

        // Print out the output
        for (unsigned int outputIndex = 0; outputIndex < params.m_OutputNames.size(); ++outputIndex)
        {
            auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
            TfLiteIntArray* outputDims = tfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;

            long outputSize = 1;
            for (unsigned int dim = 0; dim < static_cast<unsigned int>(outputDims->size); ++dim)
            {
                outputSize *= outputDims->data[dim];
            }

            std::cout << params.m_OutputNames[outputIndex] << ": ";
            if (params.m_OutputTypes[outputIndex].compare("float") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == NULL)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    printf("%f ", tfLiteDelegateOutputData[i]);
                }
            }
            else if (params.m_OutputTypes[outputIndex].compare("int") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == NULL)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    printf("%d ", tfLiteDelegateOutputData[i]);
                }
            }
            else if (params.m_OutputTypes[outputIndex].compare("qsymms8") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<int8_t>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == NULL)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    printf("%d ", tfLiteDelegateOutputData[i]);
                }
            }
            else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0 ||
                     params.m_OutputTypes[outputIndex].compare("qasymmu8") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == NULL)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (int i = 0; i < outputSize; ++i)
                {
                    printf("%u ", tfLiteDelegateOutputData[i]);
                }
            }
            else
            {
                ARMNN_LOG(fatal) << "Unsupported output tensor data type \"" << params.m_OutputTypes[outputIndex]
                                 << "\". Output type can be specified with the -z argument.";
                return EXIT_FAILURE;
            }
            std::cout << std::endl;
        }
    }

    return status;
}
#endif
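// Parses the model with the given TParser, loads it into an Arm NN runtime via
// InferenceModel and runs params.m_Iterations inferences, either synchronously
// or asynchronously depending on the options.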
template<typename TParser, typename TDataType>
int MainImpl(const ExecuteNetworkParams& params,
             const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
    using namespace std::chrono;

    std::vector<std::vector<TContainer>> inputs;
    std::vector<std::vector<TContainer>> outputs;

    try
    {
        // Creates an InferenceModel, which will parse the model and load it into an IRuntime.
        typename InferenceModel<TParser, TDataType>::Params inferenceModelParams;
        inferenceModelParams.m_ModelPath = params.m_ModelPath;
        inferenceModelParams.m_IsModelBinary = params.m_IsModelBinary;
        inferenceModelParams.m_ComputeDevices = params.m_ComputeDevices;
        inferenceModelParams.m_DynamicBackendsPath = params.m_DynamicBackendsPath;
        inferenceModelParams.m_PrintIntermediateLayers = params.m_PrintIntermediate;
        inferenceModelParams.m_VisualizePostOptimizationModel = params.m_EnableLayerDetails;
        inferenceModelParams.m_ParseUnsupported = params.m_ParseUnsupported;
        inferenceModelParams.m_InferOutputShape = params.m_InferOutputShape;
        inferenceModelParams.m_EnableFastMath = params.m_EnableFastMath;
        inferenceModelParams.m_SaveCachedNetwork = params.m_SaveCachedNetwork;
        inferenceModelParams.m_CachedNetworkFilePath = params.m_CachedNetworkFilePath;
        inferenceModelParams.m_NumberOfThreads = params.m_NumberOfThreads;
        inferenceModelParams.m_MLGOTuningFilePath = params.m_MLGOTuningFilePath;
        inferenceModelParams.m_AsyncEnabled = params.m_Concurrent;
        inferenceModelParams.m_ThreadPoolSize = params.m_ThreadPoolSize;
        inferenceModelParams.m_OutputDetailsToStdOut = params.m_OutputDetailsToStdOut;
        inferenceModelParams.m_OutputDetailsOnlyToStdOut = params.m_OutputDetailsOnlyToStdOut;

        for (const std::string& inputName: params.m_InputNames)
        {
            inferenceModelParams.m_InputBindings.push_back(inputName);
        }

        for (unsigned int i = 0; i < params.m_InputTensorShapes.size(); ++i)
        {
            inferenceModelParams.m_InputShapes.push_back(*params.m_InputTensorShapes[i]);
        }

        for (const std::string& outputName: params.m_OutputNames)
        {
            inferenceModelParams.m_OutputBindings.push_back(outputName);
        }

        inferenceModelParams.m_SubgraphId = params.m_SubgraphId;
        inferenceModelParams.m_EnableFp16TurboMode = params.m_EnableFp16TurboMode;
        inferenceModelParams.m_EnableBf16TurboMode = params.m_EnableBf16TurboMode;

        InferenceModel<TParser, TDataType> model(inferenceModelParams,
                                                 params.m_EnableProfiling,
                                                 params.m_DynamicBackendsPath,
                                                 runtime);

        const size_t numInputs = inferenceModelParams.m_InputBindings.size();

        armnn::Optional<QuantizationParams> qParams = params.m_QuantizeInput ?
                                                      armnn::MakeOptional<QuantizationParams>(
                                                          model.GetInputQuantizationParams()) :
                                                      armnn::EmptyOptional();

        if (params.m_InputTensorDataFilePaths.size() > numInputs)
        {
            ARMNN_LOG(info) << "Given network has " << numInputs << " input/s. One input-tensor-data file is "
                            << "required for each input. The user provided "
                            << params.m_InputTensorDataFilePaths.size()
                            << " input-tensor-data file/s which will be used to fill the input/s.\n";
        }

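        // Prepare one set of input containers per iteration up front, reusing
        // input files round-robin when fewer were supplied than needed.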
        for (unsigned int j = 0; j < params.m_Iterations; ++j)
        {
            std::vector<TContainer> inputDataContainers;
            for (unsigned int i = 0; i < numInputs; ++i)
            {
                // If fewer input files are given than required for params.m_Iterations
                // executions, we simply start with the first input file again
                size_t inputFileIndex = j * numInputs + i;
                if (!params.m_InputTensorDataFilePaths.empty())
                {
                    inputFileIndex = inputFileIndex % params.m_InputTensorDataFilePaths.size();
                }

                armnn::Optional<std::string> dataFile = params.m_GenerateTensorData ?
                                                        armnn::EmptyOptional() :
                                                        armnn::MakeOptional<std::string>(
                                                            params.m_InputTensorDataFilePaths.at(inputFileIndex));

                unsigned int numElements = model.GetInputSize(i);
                if (params.m_InputTensorShapes.size() > i && params.m_InputTensorShapes[i])
                {
                    // If the user has provided a tensor shape for the current input,
                    // override numElements
                    numElements = params.m_InputTensorShapes[i]->GetNumElements();
                }

                TContainer tensorData;
                PopulateTensorWithData(tensorData,
                                       numElements,
                                       params.m_InputTypes[i],
                                       qParams,
                                       dataFile);

                inputDataContainers.push_back(tensorData);
            }
            inputs.push_back(inputDataContainers);
        }

        const size_t numOutputs = inferenceModelParams.m_OutputBindings.size();

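        // Pre-allocate output containers of the requested data type for every iteration.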
        for (unsigned int j = 0; j < params.m_Iterations; ++j)
        {
            std::vector<TContainer> outputDataContainers;
            for (unsigned int i = 0; i < numOutputs; ++i)
            {
                if (params.m_OutputTypes[i].compare("float") == 0)
                {
                    outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
                }
                else if (params.m_OutputTypes[i].compare("int") == 0)
                {
                    outputDataContainers.push_back(std::vector<int>(model.GetOutputSize(i)));
                }
                else if (params.m_OutputTypes[i].compare("qasymm8") == 0 ||
                         params.m_OutputTypes[i].compare("qasymmu8") == 0)
                {
                    outputDataContainers.push_back(std::vector<uint8_t>(model.GetOutputSize(i)));
                }
                else if (params.m_OutputTypes[i].compare("qasymms8") == 0)
                {
                    outputDataContainers.push_back(std::vector<int8_t>(model.GetOutputSize(i)));
                }
                else
                {
                    ARMNN_LOG(fatal) << "Unsupported tensor data type \"" << params.m_OutputTypes[i] << "\". ";
                    return EXIT_FAILURE;
                }
            }
            outputs.push_back(outputDataContainers);
        }

        if (params.m_Iterations > 1)
        {
            std::stringstream msg;
            msg << "Network will be executed " << params.m_Iterations;
            if (params.m_Concurrent)
            {
                msg << " times in an asynchronous manner. ";
            }
            else
            {
                msg << " times successively. ";
            }
            msg << "The input-tensor-data files will be reused recursively if the user didn't provide enough to "
                   "cover each execution.";
            ARMNN_LOG(info) << msg.str();
        }

        // Synchronous execution
        if (!params.m_Concurrent)
        {
            for (size_t x = 0; x < params.m_Iterations; x++)
            {
                // model.Run returns the inference time elapsed in EnqueueWorkload (in milliseconds)
                auto inference_duration = model.Run(inputs[x], outputs[x]);

                if (params.m_GenerateTensorData)
                {
                    ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful";
                }

                // Print output tensors
                const auto& infosOut = model.GetOutputBindingInfos();
                for (size_t i = 0; i < numOutputs; i++)
                {
                    const armnn::TensorInfo& infoOut = infosOut[i].second;

                    // The number of output files was validated earlier: either it equals numOutputs,
                    // in which case each iteration overwrites the same files (only the result of the
                    // last iteration is kept), or there are enough output files for every output of
                    // every iteration.
                    size_t outputFileIndex = x * numOutputs + i;
                    if (!params.m_OutputTensorFiles.empty())
                    {
                        outputFileIndex = outputFileIndex % params.m_OutputTensorFiles.size();
                        ARMNN_LOG(info) << "Writing output " << i << " named: '"
                                        << inferenceModelParams.m_OutputBindings[i]
                                        << "' of iteration: " << x+1 << " to file: '"
                                        << params.m_OutputTensorFiles[outputFileIndex] << "'";
                    }
                    auto outputTensorFile = params.m_OutputTensorFiles.empty()
                                            ? ""
                                            : params.m_OutputTensorFiles[outputFileIndex];

                    TensorPrinter printer(inferenceModelParams.m_OutputBindings[i],
                                          infoOut,
                                          outputTensorFile,
                                          params.m_DequantizeOutput);
                    mapbox::util::apply_visitor(printer, outputs[x][i]);
                }

                ARMNN_LOG(info) << "\nInference time: " << std::setprecision(2)
                                << std::fixed << inference_duration.count() << " ms\n";

                // If thresholdTime == 0.0 (default), then it hasn't been supplied at command line
                if (params.m_ThresholdTime != 0.0)
                {
                    ARMNN_LOG(info) << "Threshold time: " << std::setprecision(2)
                                    << std::fixed << params.m_ThresholdTime << " ms";
                    auto thresholdMinusInference = params.m_ThresholdTime - inference_duration.count();
                    ARMNN_LOG(info) << "Threshold time - Inference time: " << std::setprecision(2)
                                    << std::fixed << thresholdMinusInference << " ms" << "\n";

                    if (thresholdMinusInference < 0)
                    {
                        std::string errorMessage = "Elapsed inference time is greater than provided threshold time.";
                        ARMNN_LOG(fatal) << errorMessage;
                    }
                }
            }
        }
        // Asynchronous execution using the Arm NN thread pool
        else if (params.m_ThreadPoolSize >= 1)
        {
            try
            {
                ARMNN_LOG(info) << "Asynchronous execution with Arm NN thread pool... \n";
                armnn::AsyncCallbackManager callbackManager;
                std::unordered_map<armnn::InferenceId, std::vector<TContainer>&> inferenceOutputMap;

                // Declare the latest and earliest inference times here to be used when calculating overall time
                std::chrono::high_resolution_clock::time_point earliestStartTime;
                std::chrono::high_resolution_clock::time_point latestEndTime =
                    std::chrono::high_resolution_clock::now();

                // For the asynchronous execution, we are adding a pool of working memory handles (1 per thread) in the
                // LoadedNetwork with each scheduled inference having a specific priority
                for (size_t i = 0; i < params.m_Iterations; ++i)
                {
                    std::shared_ptr<armnn::AsyncExecutionCallback> cb = callbackManager.GetNewCallback();
                    inferenceOutputMap.insert({cb->GetInferenceId(), outputs[i]});
                    model.RunAsync(inputs[i], outputs[i], cb);
                }

                // Check the results
                unsigned int j = 0;
                for (size_t iteration = 0; iteration < params.m_Iterations; ++iteration)
                {
                    auto cb = callbackManager.GetNotifiedCallback();

                    // Get the results
                    auto endTime = time_point_cast<std::chrono::milliseconds>(cb->GetEndTime());
                    auto startTime = time_point_cast<std::chrono::milliseconds>(cb->GetStartTime());
                    auto inferenceDuration = endTime - startTime;

                    if (latestEndTime < cb->GetEndTime())
                    {
                        latestEndTime = cb->GetEndTime();
                    }

                    if (earliestStartTime.time_since_epoch().count() == 0)
                    {
                        earliestStartTime = cb->GetStartTime();
                    }
                    else if (earliestStartTime > cb->GetStartTime())
                    {
                        earliestStartTime = cb->GetStartTime();
                    }

                    if (params.m_GenerateTensorData)
                    {
                        ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful";
                    }

                    // Print output tensors
                    const auto& infosOut = model.GetOutputBindingInfos();
                    for (size_t i = 0; i < numOutputs; i++)
                    {
                        // The number of output files was validated earlier: either it equals numOutputs,
                        // in which case each iteration overwrites the same files (only the result of the
                        // last iteration is kept), or there are enough output files for every output of
                        // every iteration.
                        size_t outputFileIndex = iteration * numOutputs + i;
                        if (!params.m_OutputTensorFiles.empty())
                        {
                            outputFileIndex = outputFileIndex % params.m_OutputTensorFiles.size();
                            ARMNN_LOG(info) << "Writing output " << i << " named: '"
                                            << inferenceModelParams.m_OutputBindings[i]
                                            << "' of iteration: " << iteration+1 << " to file: '"
                                            << params.m_OutputTensorFiles[outputFileIndex] << "'";
                        }

                        const armnn::TensorInfo& infoOut = infosOut[i].second;
                        auto outputTensorFile = params.m_OutputTensorFiles.empty()
                                                ? ""
                                                : params.m_OutputTensorFiles[outputFileIndex];

                        TensorPrinter printer(inferenceModelParams.m_OutputBindings[i],
                                              infoOut,
                                              outputTensorFile,
                                              params.m_DequantizeOutput);
                        mapbox::util::apply_visitor(printer, inferenceOutputMap.at(cb->GetInferenceId())[i]);
                    }

                    ARMNN_LOG(info) << "\nInference time: " << std::setprecision(2)
                                    << std::fixed << inferenceDuration.count() << " ms\n";

                    // If thresholdTime == 0.0 (default), then it hasn't been supplied at command line
                    if (params.m_ThresholdTime != 0.0)
                    {
                        ARMNN_LOG(info) << "Threshold time: " << std::setprecision(2)
                                        << std::fixed << params.m_ThresholdTime << " ms";
                        auto thresholdMinusInference =
                            params.m_ThresholdTime - duration<double, std::milli>(inferenceDuration).count();
                        ARMNN_LOG(info) << "Threshold time - Inference time: " << std::setprecision(2)
                                        << std::fixed << thresholdMinusInference << " ms" << "\n";

                        if (thresholdMinusInference < 0)
                        {
                            ARMNN_LOG(fatal) << "Elapsed inference time is greater than provided threshold time. \n";
                        }
                    }
                    ++j;
                }
                // Print the duration difference between overallStartTime and overallEndTime
                auto overallEndTime = time_point_cast<std::chrono::milliseconds>(latestEndTime);
                auto overallStartTime = time_point_cast<std::chrono::milliseconds>(earliestStartTime);
                auto totalInferenceDuration = overallEndTime - overallStartTime;
                ARMNN_LOG(info) << "\nOverall Inference time: " << std::setprecision(2)
                                << std::fixed << totalInferenceDuration.count() << " ms\n";
            }
            catch (const armnn::Exception& e)
            {
                ARMNN_LOG(fatal) << "Armnn Error: " << e.what();
                return EXIT_FAILURE;
            }
        }
        // Asynchronous execution using std::launch::async
        else
        {
            try
            {
                ARMNN_LOG(info) << "Asynchronous Execution with std::launch::async... \n";
                std::vector<std::future<std::tuple<unsigned int,
                    std::chrono::duration<double, std::milli>>>> inferenceResults;
                inferenceResults.reserve(params.m_Iterations);

                // Create WorkingMemHandles for each inference
                std::vector<std::unique_ptr<armnn::experimental::IWorkingMemHandle>> workingMemHandles;
                workingMemHandles.reserve(params.m_Iterations);
                for (unsigned int i = 0; i < params.m_Iterations; ++i)
                {
                    workingMemHandles.push_back(model.CreateWorkingMemHandle());
                }

                // Run each inference in its own thread
                // start a timer
                const auto start_time = armnn::GetTimeNow();
                for (unsigned int i = 0; i < params.m_Iterations; ++i)
                {
                    armnn::experimental::IWorkingMemHandle& workingMemHandleRef = *workingMemHandles[i].get();

                    inferenceResults.push_back(std::async(
                        std::launch::async, [&model, &workingMemHandleRef, &inputs, &outputs, i]() {
                            return model.RunAsync(workingMemHandleRef, inputs[i], outputs[i], i);
                        }
                    ));
                }

                // Check the results
                for (unsigned int j = 0; j < inferenceResults.size(); ++j)
                {
                    // Get the results
                    auto inferenceResult = inferenceResults[j].get();
                    auto inferenceDuration = std::get<1>(inferenceResult);
                    auto inferenceID = std::get<0>(inferenceResult);

                    if (params.m_GenerateTensorData)
                    {
                        ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful";
                    }

                    // Print output tensors
                    const auto& infosOut = model.GetOutputBindingInfos();
                    for (size_t i = 0; i < numOutputs; i++)
                    {
                        // The number of output files was validated earlier: either it equals numOutputs,
                        // in which case each iteration overwrites the same files (only the result of the
                        // last iteration is kept), or there are enough output files for every output of
                        // every iteration.
                        size_t outputFileIndex = j * numOutputs + i;
                        if (!params.m_OutputTensorFiles.empty())
                        {
                            outputFileIndex = outputFileIndex % params.m_OutputTensorFiles.size();
                            ARMNN_LOG(info) << "Writing output " << i << " named: '"
                                            << inferenceModelParams.m_OutputBindings[i]
                                            << "' of iteration: " << j+1 << " to file: '"
                                            << params.m_OutputTensorFiles[outputFileIndex] << "'";
                        }
                        const armnn::TensorInfo& infoOut = infosOut[i].second;
                        auto outputTensorFile = params.m_OutputTensorFiles.empty()
                                                ? ""
                                                : params.m_OutputTensorFiles[outputFileIndex];

                        TensorPrinter printer(inferenceModelParams.m_OutputBindings[i],
                                              infoOut,
                                              outputTensorFile,
                                              params.m_DequantizeOutput);
                        mapbox::util::apply_visitor(printer, outputs[j][i]);
                    }

                    ARMNN_LOG(info) << "\nInference time: " << std::setprecision(2)
                                    << std::fixed << inferenceDuration.count() << " ms\n";

                    // If thresholdTime == 0.0 (default), then it hasn't been supplied at command line
                    if (params.m_ThresholdTime != 0.0)
                    {
                        ARMNN_LOG(info) << "Threshold time: " << std::setprecision(2)
                                        << std::fixed << params.m_ThresholdTime << " ms";
                        auto thresholdMinusInference = params.m_ThresholdTime - inferenceDuration.count();
                        ARMNN_LOG(info) << "Threshold time - Inference time: " << std::setprecision(2)
                                        << std::fixed << thresholdMinusInference << " ms" << "\n";

                        if (thresholdMinusInference < 0)
                        {
                            ARMNN_LOG(fatal) << "Elapsed inference time is greater than provided threshold time. \n";
                        }
                    }
                    ARMNN_LOG(info) << "Asynchronous Execution is finished for Inference ID: " << inferenceID << " \n";
                }
                // finish timer
                const auto duration = armnn::GetTimeDuration(start_time);
                ARMNN_LOG(info) << "\nOverall Inference time: " << std::setprecision(2)
                                << std::fixed << duration.count() << " ms\n";
            }
            catch (const armnn::Exception& e)
            {
                ARMNN_LOG(fatal) << "Armnn Error: " << e.what();
                return EXIT_FAILURE;
            }
        }
    }
    catch (const armnn::Exception& e)
    {
        ARMNN_LOG(fatal) << "Armnn Error: " << e.what();
        return EXIT_FAILURE;
    }

    return EXIT_SUCCESS;
}

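// Entry point: configures logging, parses the command line, creates the
// runtime and dispatches to the implementation matching the model format.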
// MAIN
int main(int argc, const char* argv[])
{
    // Configures logging for both the ARMNN library and this test program.
    #ifdef NDEBUG
    armnn::LogSeverity level = armnn::LogSeverity::Info;
    #else
    armnn::LogSeverity level = armnn::LogSeverity::Debug;
    #endif
    armnn::ConfigureLogging(true, true, level);

    // Get ExecuteNetwork parameters and runtime options from command line
    // This might throw an InvalidArgumentException if the user provided invalid inputs
    ProgramOptions ProgramOptions;
    try
    {
        ProgramOptions.ParseOptions(argc, argv);
    }
    catch (const std::exception& e)
    {
        ARMNN_LOG(fatal) << e.what();
        return EXIT_FAILURE;
    }

    if ((ProgramOptions.m_ExNetParams.m_OutputDetailsToStdOut ||
         ProgramOptions.m_ExNetParams.m_OutputDetailsOnlyToStdOut)
        && !ProgramOptions.m_ExNetParams.m_EnableProfiling)
    {
        ARMNN_LOG(fatal) << "You must enable profiling if you would like to output layer details";
        return EXIT_FAILURE;
    }

    // Create runtime
    std::shared_ptr<armnn::IRuntime> runtime(armnn::IRuntime::Create(ProgramOptions.m_RuntimeOptions));

    std::string modelFormat = ProgramOptions.m_ExNetParams.m_ModelFormat;

    // Forward to implementation based on the parser type
    if (modelFormat.find("armnn") != std::string::npos)
    {
    #if defined(ARMNN_SERIALIZER)
        return MainImpl<armnnDeserializer::IDeserializer, float>(ProgramOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with serialization support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("onnx") != std::string::npos)
    {
    #if defined(ARMNN_ONNX_PARSER)
        return MainImpl<armnnOnnxParser::IOnnxParser, float>(ProgramOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with Onnx parser support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("tflite") != std::string::npos)
    {
        if (ProgramOptions.m_ExNetParams.m_TfLiteExecutor == ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteParser)
        {
        #if defined(ARMNN_TF_LITE_PARSER)
            return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(ProgramOptions.m_ExNetParams, runtime);
        #else
            ARMNN_LOG(fatal) << "Not built with Tensorflow-Lite parser support.";
            return EXIT_FAILURE;
        #endif
        }
        else if (ProgramOptions.m_ExNetParams.m_TfLiteExecutor ==
                     ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate ||
                 ProgramOptions.m_ExNetParams.m_TfLiteExecutor ==
                     ExecuteNetworkParams::TfLiteExecutor::TfliteInterpreter)
        {
        #if defined(ARMNN_TF_LITE_DELEGATE)
            return TfLiteDelegateMainImpl(ProgramOptions.m_ExNetParams, runtime);
        #else
            ARMNN_LOG(fatal) << "Not built with Arm NN Tensorflow-Lite delegate support.";
            return EXIT_FAILURE;
        #endif
        }
    }
    else
    {
        ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat
                         << "'. Please include 'armnn', 'tflite' or 'onnx'";
        return EXIT_FAILURE;
    }
}