blob: ad35092c1dde451df9225dc915fd9eb741313c24 [file] [log] [blame]
Jan Eilers45274902020-10-15 18:34:43 +01001//
2// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "ExecuteNetworkProgramOptions.hpp"
7#include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"
8#include "InferenceTest.hpp"
9
10#include <armnn/BackendRegistry.hpp>
11#include <armnn/Exceptions.hpp>
12#include <armnn/utility/Assert.hpp>
13#include <armnn/utility/StringUtils.hpp>
14#include <armnn/Logging.hpp>
15
16#include <fmt/format.h>
17
18bool CheckOption(const cxxopts::ParseResult& result,
19 const char* option)
20{
21 // Check that the given option is valid.
22 if (option == nullptr)
23 {
24 return false;
25 }
26
27 // Check whether 'option' is provided.
28 return ((result.count(option)) ? true : false);
29}
30
31void CheckOptionDependency(const cxxopts::ParseResult& result,
32 const char* option,
33 const char* required)
34{
35 // Check that the given options are valid.
36 if (option == nullptr || required == nullptr)
37 {
38 throw cxxopts::OptionParseException("Invalid option to check dependency for");
39 }
40
41 // Check that if 'option' is provided, 'required' is also provided.
42 if (CheckOption(result, option) && !result[option].has_default())
43 {
44 if (CheckOption(result, required) == 0 || result[required].has_default())
45 {
46 throw cxxopts::OptionParseException(
47 std::string("Option '") + option + "' requires option '" + required + "'.");
48 }
49 }
50}
51
52void CheckOptionDependencies(const cxxopts::ParseResult& result)
53{
54 CheckOptionDependency(result, "model-path", "model-format");
55 CheckOptionDependency(result, "input-tensor-shape", "model-path");
56 CheckOptionDependency(result, "tuning-level", "tuning-path");
57}
58
59void RemoveDuplicateDevices(std::vector<armnn::BackendId>& computeDevices)
60{
61 // Mark the duplicate devices as 'Undefined'.
62 for (auto i = computeDevices.begin(); i != computeDevices.end(); ++i)
63 {
64 for (auto j = std::next(i); j != computeDevices.end(); ++j)
65 {
66 if (*j == *i)
67 {
68 *j = armnn::Compute::Undefined;
69 }
70 }
71 }
72
73 // Remove 'Undefined' devices.
74 computeDevices.erase(std::remove(computeDevices.begin(), computeDevices.end(), armnn::Compute::Undefined),
75 computeDevices.end());
76}
77
Jan Eilersc5b84b52021-02-16 12:40:43 +000078/// Takes a vector of backend strings and returns a vector of backendIDs.
79/// Removes duplicate entries.
80/// Can handle backend strings that contain multiple backends separated by comma e.g "CpuRef,CpuAcc"
81std::vector<armnn::BackendId> GetBackendIDs(const std::vector<std::string>& backendStringsVec)
Jan Eilers45274902020-10-15 18:34:43 +010082{
83 std::vector<armnn::BackendId> backendIDs;
Jan Eilersc5b84b52021-02-16 12:40:43 +000084 for (const auto& backendStrings : backendStringsVec)
Jan Eilers45274902020-10-15 18:34:43 +010085 {
Jan Eilersc5b84b52021-02-16 12:40:43 +000086 // Each backendStrings might contain multiple backends separated by comma e.g "CpuRef,CpuAcc"
87 std::vector<std::string> backendStringVec = ParseStringList(backendStrings, ",");
88 for (const auto& b : backendStringVec)
89 {
90 backendIDs.push_back(armnn::BackendId(b));
91 }
Jan Eilers45274902020-10-15 18:34:43 +010092 }
93
94 RemoveDuplicateDevices(backendIDs);
95
96 return backendIDs;
97}
98
99/// Provides a segfault safe way to get cxxopts option values by checking if the option was defined.
100/// If the option wasn't defined it returns an empty object.
101template<typename optionType>
102optionType GetOptionValue(std::string&& optionName, const cxxopts::ParseResult& result)
103{
104 optionType out;
105 if(result.count(optionName))
106 {
107 out = result[optionName].as<optionType>();
108 }
109 return out;
110}
111
112void LogAndThrowFatal(std::string errorMessage)
113{
114 throw armnn::InvalidArgumentException (errorMessage);
115}
116
117void CheckRequiredOptions(const cxxopts::ParseResult& result)
118{
119
120 // For each option in option-group "a) Required
121 std::vector<std::string> requiredOptions{"compute",
122 "model-format",
123 "model-path",
124 "input-name",
125 "output-name"};
126
127 bool requiredMissing = false;
128 for(auto const& str : requiredOptions)
129 {
130 if(!(result.count(str) > 0))
131 {
132 ARMNN_LOG(error) << fmt::format("The program option '{}' is mandatory but wasn't provided.", str);
133 requiredMissing = true;
134 }
135 }
136 if(requiredMissing)
137 {
138 throw armnn::InvalidArgumentException ("Some required arguments are missing");
139 }
140}
141
Jan Eilersf17fcd52021-07-26 22:20:00 +0100142void CheckForDeprecatedOptions(const cxxopts::ParseResult& result)
143{
144 if(result.count("simultaneous-iterations") > 0)
145 {
146 ARMNN_LOG(warning) << "DEPRECATED: The program option 'simultaneous-iterations' is deprecated and will be "
147 "removed soon. Please use the option 'iterations' combined with 'concurrent' instead.";
148 }
149 if(result.count("armnn-tflite-delegate") > 0)
150 {
151 ARMNN_LOG(warning) << "DEPRECATED: The program option 'armnn-tflite-delegate' is deprecated and will be "
152 "removed soon. Please use the option 'tflite-executor' instead.";
153 }
154}
155
/// Validates the parsed ExecuteNetwork parameters by delegating to
/// ExecuteNetworkParams::ValidateParams(); throws on invalid combinations.
void ProgramOptions::ValidateExecuteNetworkParams()
{
    m_ExNetParams.ValidateParams();
}
160
161void ProgramOptions::ValidateRuntimeOptions()
162{
163 if (m_RuntimeOptions.m_ProfilingOptions.m_TimelineEnabled &&
164 !m_RuntimeOptions.m_ProfilingOptions.m_EnableProfiling)
165 {
166 LogAndThrowFatal("Timeline profiling requires external profiling to be turned on");
167 }
168}
169
170
171ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
172 "Executes a neural network model using the provided input "
173 "tensor. Prints the resulting output tensor."}
174{
175 try
176 {
177 // cxxopts doesn't provide a mechanism to ensure required options are given. There is a
178 // separate function CheckRequiredOptions() for that.
179 m_CxxOptions.add_options("a) Required")
180 ("c,compute",
Jan Eilersc5b84b52021-02-16 12:40:43 +0000181 "Which device to run layers on by default. If a single device doesn't support all layers in the model "
182 "you can specify a second or third to fall back on. Possible choices: "
Jan Eilers45274902020-10-15 18:34:43 +0100183 + armnn::BackendRegistryInstance().GetBackendIdsAsString()
Jan Eilersc5b84b52021-02-16 12:40:43 +0000184 + " NOTE: Multiple compute devices need to be passed as a comma separated list without whitespaces "
185 "e.g. GpuAcc,CpuAcc,CpuRef or by repeating the program option e.g. '-c Cpuacc -c CpuRef'. "
186 "Duplicates are ignored.",
Jan Eilers3dda41d2020-11-11 11:44:14 +0000187 cxxopts::value<std::vector<std::string>>())
Jan Eilers45274902020-10-15 18:34:43 +0100188
189 ("f,model-format",
Nikhil Raj5d955cf2021-04-19 16:59:48 +0100190 "armnn-binary, onnx-binary, onnx-text, tflite-binary",
Jan Eilers45274902020-10-15 18:34:43 +0100191 cxxopts::value<std::string>())
192
193 ("m,model-path",
Nikhil Raj6dd178f2021-04-02 22:04:39 +0100194 "Path to model file, e.g. .armnn, , .prototxt, .tflite, .onnx",
Jan Eilers45274902020-10-15 18:34:43 +0100195 cxxopts::value<std::string>(m_ExNetParams.m_ModelPath))
196
197 ("i,input-name",
198 "Identifier of the input tensors in the network separated by comma.",
199 cxxopts::value<std::string>())
200
201 ("o,output-name",
202 "Identifier of the output tensors in the network separated by comma.",
203 cxxopts::value<std::string>());
204
205 m_CxxOptions.add_options("b) General")
206 ("b,dynamic-backends-path",
207 "Path where to load any available dynamic backend from. "
208 "If left empty (the default), dynamic backends will not be used.",
209 cxxopts::value<std::string>(m_RuntimeOptions.m_DynamicBackendsPath))
210
Sadik Armagana04a9d72021-04-27 10:02:10 +0100211 ("n,concurrent",
Kevin Mayb4b3ac92021-05-21 16:42:21 +0100212 "This option is for Arm NN internal asynchronous testing purposes. "
Jan Eilersf17fcd52021-07-26 22:20:00 +0100213 "False by default. If set to true will use std::launch::async or the Arm NN thread pool, "
214 "if 'thread-pool-size' is greater than 0, for asynchronous execution.",
Sadik Armagana04a9d72021-04-27 10:02:10 +0100215 cxxopts::value<bool>(m_ExNetParams.m_Concurrent)->default_value("false")->implicit_value("true"))
216
Jan Eilers45274902020-10-15 18:34:43 +0100217 ("d,input-tensor-data",
218 "Path to files containing the input data as a flat array separated by whitespace. "
Jan Eilersf17fcd52021-07-26 22:20:00 +0100219 "Several paths can be passed by separating them with a comma if the network has multiple inputs "
220 "or you wish to run the model multiple times with different input data using the 'iterations' option. "
221 "If not specified, the network will be run with dummy data (useful for profiling).",
Jan Eilers45274902020-10-15 18:34:43 +0100222 cxxopts::value<std::string>()->default_value(""))
223
224 ("h,help", "Display usage information")
225
226 ("infer-output-shape",
227 "Infers output tensor shape from input tensor shape and validate where applicable (where supported by "
228 "parser)",
229 cxxopts::value<bool>(m_ExNetParams.m_InferOutputShape)->default_value("false")->implicit_value("true"))
230
Mike Kelly21fe06f2022-05-16 23:10:42 +0100231 ("allow-expanded-dims",
232 "If true will disregard dimensions with a size of 1 when validating tensor shapes. Tensor sizes must "
233 "still match. This is an Experimental parameter that is incompatible with infer-output-shape. "
234 "This parameter may be removed in a later update. ",
235 cxxopts::value<bool>(m_ExNetParams.m_AllowExpandedDims)->default_value("false")
236 ->implicit_value("true"))
237
Jan Eilers45274902020-10-15 18:34:43 +0100238 ("iterations",
Jan Eilersf17fcd52021-07-26 22:20:00 +0100239 "Number of iterations to run the network for, default is set to 1. "
240 "If you wish to run the model with different input data for every execution you can do so by "
241 "supplying more input file paths to the 'input-tensor-data' option. "
242 "Note: The number of input files provided must be divisible by the number of inputs of the model. "
243 "e.g. Your model has 2 inputs and you supply 4 input files. If you set 'iterations' to 6 the first "
244 "run will consume the first two inputs, the second the next two and the last will begin from the "
245 "start and use the first two inputs again. "
246 "Note: If the 'concurrent' option is enabled all iterations will be run asynchronously.",
Jan Eilers45274902020-10-15 18:34:43 +0100247 cxxopts::value<size_t>(m_ExNetParams.m_Iterations)->default_value("1"))
248
249 ("l,dequantize-output",
250 "If this option is enabled, all quantized outputs will be dequantized to float. "
251 "If unset, default to not get dequantized. "
Colm Donelan3cff15a2021-10-12 15:06:19 +0100252 "Accepted values (true or false)"
253 " (Not available when executing ArmNNTfLiteDelegate or TfliteInterpreter)",
Jan Eilers45274902020-10-15 18:34:43 +0100254 cxxopts::value<bool>(m_ExNetParams.m_DequantizeOutput)->default_value("false")->implicit_value("true"))
255
256 ("p,print-intermediate-layers",
257 "If this option is enabled, the output of every graph layer will be printed.",
258 cxxopts::value<bool>(m_ExNetParams.m_PrintIntermediate)->default_value("false")
259 ->implicit_value("true"))
260
261 ("parse-unsupported",
262 "Add unsupported operators as stand-in layers (where supported by parser)",
263 cxxopts::value<bool>(m_ExNetParams.m_ParseUnsupported)->default_value("false")->implicit_value("true"))
264
Ryan OSheadfbec2d2022-03-28 10:55:48 +0100265 ("N,do-not-print-output",
Jan Eilers284b5d12021-09-07 12:46:15 +0100266 "The default behaviour of ExecuteNetwork is to print the resulting outputs on the console. "
267 "This behaviour can be changed by adding this flag to your command.",
268 cxxopts::value<bool>(m_ExNetParams.m_DontPrintOutputs)->default_value("false")->implicit_value("true"))
269
Jan Eilers45274902020-10-15 18:34:43 +0100270 ("q,quantize-input",
Mike Kellyd7ed6d42021-07-21 09:42:43 +0100271 "If this option is enabled, all float inputs will be quantized as appropriate for the model's inputs. "
Colm Donelan3cff15a2021-10-12 15:06:19 +0100272 "If unset, default to not quantized. Accepted values (true or false)"
273 " (Not available when executing ArmNNTfLiteDelegate or TfliteInterpreter)",
Jan Eilers45274902020-10-15 18:34:43 +0100274 cxxopts::value<bool>(m_ExNetParams.m_QuantizeInput)->default_value("false")->implicit_value("true"))
Jan Eilers45274902020-10-15 18:34:43 +0100275 ("r,threshold-time",
276 "Threshold time is the maximum allowed time for inference measured in milliseconds. If the actual "
277 "inference time is greater than the threshold time, the test will fail. By default, no threshold "
278 "time is used.",
279 cxxopts::value<double>(m_ExNetParams.m_ThresholdTime)->default_value("0.0"))
280
281 ("s,input-tensor-shape",
282 "The shape of the input tensors in the network as a flat array of integers separated by comma."
283 "Several shapes can be passed by separating them with a colon (:).",
284 cxxopts::value<std::string>())
285
286 ("v,visualize-optimized-model",
287 "Enables built optimized model visualizer. If unset, defaults to off.",
288 cxxopts::value<bool>(m_ExNetParams.m_EnableLayerDetails)->default_value("false")
289 ->implicit_value("true"))
290
291 ("w,write-outputs-to-file",
292 "Comma-separated list of output file paths keyed with the binding-id of the output slot. "
293 "If left empty (the default), the output tensors will not be written to a file.",
294 cxxopts::value<std::string>())
295
296 ("x,subgraph-number",
Colm Donelan3cff15a2021-10-12 15:06:19 +0100297 "Id of the subgraph to be executed. Defaults to 0."
298 " (Not available when executing ArmNNTfLiteDelegate or TfliteInterpreter)",
Jan Eilers45274902020-10-15 18:34:43 +0100299 cxxopts::value<size_t>(m_ExNetParams.m_SubgraphId)->default_value("0"))
300
301 ("y,input-type",
302 "The type of the input tensors in the network separated by comma. "
303 "If unset, defaults to \"float\" for all defined inputs. "
David Monahan67cc5fc2021-11-03 12:56:41 +0000304 "Accepted values (float, int, qasymms8 or qasymmu8).",
Jan Eilers45274902020-10-15 18:34:43 +0100305 cxxopts::value<std::string>())
306
307 ("z,output-type",
308 "The type of the output tensors in the network separated by comma. "
309 "If unset, defaults to \"float\" for all defined outputs. "
David Monahan67cc5fc2021-11-03 12:56:41 +0000310 "Accepted values (float, int, qasymms8 or qasymmu8).",
Finn Williamsf806c4d2021-02-22 15:13:12 +0000311 cxxopts::value<std::string>())
312
313 ("T,tflite-executor",
314 "Set the executor for the tflite model: parser, delegate, tflite"
315 "parser is the ArmNNTfLiteParser, "
316 "delegate is the ArmNNTfLiteDelegate, "
317 "tflite is the TfliteInterpreter",
318 cxxopts::value<std::string>()->default_value("parser"))
319
320 ("D,armnn-tflite-delegate",
321 "Enable Arm NN TfLite delegate. "
Jan Eilersf17fcd52021-07-26 22:20:00 +0100322 "DEPRECATED: This option is deprecated please use tflite-executor instead",
Sadik Armagana04a9d72021-04-27 10:02:10 +0100323 cxxopts::value<bool>(m_ExNetParams.m_EnableDelegate)->default_value("false")->implicit_value("true"))
324
Kevin Mayb4b3ac92021-05-21 16:42:21 +0100325 ("simultaneous-iterations",
326 "Number of simultaneous iterations to async-run the network for, default is set to 1 (disabled). "
Jan Eilersf17fcd52021-07-26 22:20:00 +0100327 "When thread-pool-size is set the Arm NN thread pool is used. Otherwise std::launch::async is used."
328 "DEPRECATED: This option is deprecated and will be removed soon. "
329 "Please use the option 'iterations' combined with 'concurrent' instead.",
Kevin Mayb4b3ac92021-05-21 16:42:21 +0100330 cxxopts::value<size_t>(m_ExNetParams.m_SimultaneousIterations)->default_value("1"))
331
332 ("thread-pool-size",
333 "Number of Arm NN threads to use when running the network asynchronously via the Arm NN thread pool. "
Jan Eilersf17fcd52021-07-26 22:20:00 +0100334 "The default is set to 0 which equals disabled. If 'thread-pool-size' is greater than 0 the "
335 "'concurrent' option is automatically set to true.",
Kevin May94dd4db2021-05-26 16:01:08 +0100336 cxxopts::value<size_t>(m_ExNetParams.m_ThreadPoolSize)->default_value("0"));
Jan Eilers45274902020-10-15 18:34:43 +0100337
338 m_CxxOptions.add_options("c) Optimization")
339 ("bf16-turbo-mode",
340 "If this option is enabled, FP32 layers, "
341 "weights and biases will be converted to BFloat16 where the backend supports it",
342 cxxopts::value<bool>(m_ExNetParams.m_EnableBf16TurboMode)
343 ->default_value("false")->implicit_value("true"))
344
345 ("enable-fast-math",
346 "Enables fast_math options in backends that support it. Using the fast_math flag can lead to "
347 "performance improvements but may result in reduced or different precision.",
348 cxxopts::value<bool>(m_ExNetParams.m_EnableFastMath)->default_value("false")->implicit_value("true"))
349
Matthew Sloyan0a7dc6b2021-02-10 16:50:53 +0000350 ("number-of-threads",
351 "Assign the number of threads used by the CpuAcc backend. "
352 "Input value must be between 1 and 64. "
353 "Default is set to 0 (Backend will decide number of threads to use).",
354 cxxopts::value<unsigned int>(m_ExNetParams.m_NumberOfThreads)->default_value("0"))
355
Matthew Sloyan42432112021-01-08 10:30:51 +0000356 ("save-cached-network",
Matthew Sloyan9d7a3322021-01-12 16:19:43 +0000357 "Enables saving of the cached network to a file given with the cached-network-filepath option. "
Matthew Sloyan42432112021-01-08 10:30:51 +0000358 "See also --cached-network-filepath",
359 cxxopts::value<bool>(m_ExNetParams.m_SaveCachedNetwork)
360 ->default_value("false")->implicit_value("true"))
361
362 ("cached-network-filepath",
Matthew Sloyan9d7a3322021-01-12 16:19:43 +0000363 "If non-empty, the given file will be used to load/save the cached network. "
364 "If save-cached-network is given then the cached network will be saved to the given file. "
365 "To save the cached network a file must already exist. "
366 "If save-cached-network is not given then the cached network will be loaded from the given file. "
367 "This will remove initial compilation time of kernels and speed up the first execution.",
Matthew Sloyan42432112021-01-08 10:30:51 +0000368 cxxopts::value<std::string>(m_ExNetParams.m_CachedNetworkFilePath)->default_value(""))
369
Jan Eilers45274902020-10-15 18:34:43 +0100370 ("fp16-turbo-mode",
371 "If this option is enabled, FP32 layers, "
372 "weights and biases will be converted to FP16 where the backend supports it",
373 cxxopts::value<bool>(m_ExNetParams.m_EnableFp16TurboMode)
374 ->default_value("false")->implicit_value("true"))
375
376 ("tuning-level",
377 "Sets the tuning level which enables a tuning run which will update/create a tuning file. "
378 "Available options are: 1 (Rapid), 2 (Normal), 3 (Exhaustive). "
379 "Requires tuning-path to be set, default is set to 0 (No tuning run)",
380 cxxopts::value<int>(m_ExNetParams.m_TuningLevel)->default_value("0"))
381
382 ("tuning-path",
383 "Path to tuning file. Enables use of CL tuning",
Finn Williams40646322021-02-11 16:16:42 +0000384 cxxopts::value<std::string>(m_ExNetParams.m_TuningPath))
385
386 ("MLGOTuningFilePath",
387 "Path to tuning file. Enables use of CL MLGO tuning",
Ryan OSheadfbec2d2022-03-28 10:55:48 +0100388 cxxopts::value<std::string>(m_ExNetParams.m_MLGOTuningFilePath))
389
390 ("R, reuse-buffers",
391 "If enabled then the IO buffers will be reused for each inference",
392 cxxopts::value<bool>(m_ExNetParams.m_ReuseBuffers)->default_value("false")->implicit_value("true"));
Jan Eilers45274902020-10-15 18:34:43 +0100393
394 m_CxxOptions.add_options("d) Profiling")
395 ("a,enable-external-profiling",
396 "If enabled external profiling will be switched on",
397 cxxopts::value<bool>(m_RuntimeOptions.m_ProfilingOptions.m_EnableProfiling)
398 ->default_value("false")->implicit_value("true"))
399
400 ("e,event-based-profiling",
401 "Enables built in profiler. If unset, defaults to off.",
402 cxxopts::value<bool>(m_ExNetParams.m_EnableProfiling)->default_value("false")->implicit_value("true"))
403
404 ("g,file-only-external-profiling",
405 "If enabled then the 'file-only' test mode of external profiling will be enabled",
406 cxxopts::value<bool>(m_RuntimeOptions.m_ProfilingOptions.m_FileOnly)
407 ->default_value("false")->implicit_value("true"))
408
409 ("file-format",
410 "If profiling is enabled specifies the output file format",
411 cxxopts::value<std::string>(m_RuntimeOptions.m_ProfilingOptions.m_FileFormat)->default_value("binary"))
412
413 ("j,outgoing-capture-file",
414 "If specified the outgoing external profiling packets will be captured in this binary file",
415 cxxopts::value<std::string>(m_RuntimeOptions.m_ProfilingOptions.m_OutgoingCaptureFile))
416
417 ("k,incoming-capture-file",
418 "If specified the incoming external profiling packets will be captured in this binary file",
419 cxxopts::value<std::string>(m_RuntimeOptions.m_ProfilingOptions.m_IncomingCaptureFile))
420
421 ("timeline-profiling",
422 "If enabled timeline profiling will be switched on, requires external profiling",
423 cxxopts::value<bool>(m_RuntimeOptions.m_ProfilingOptions.m_TimelineEnabled)
424 ->default_value("false")->implicit_value("true"))
425
426 ("u,counter-capture-period",
427 "If profiling is enabled in 'file-only' mode this is the capture period that will be used in the test",
Keith Davisf4874862021-08-09 16:49:18 +0100428 cxxopts::value<uint32_t>(m_RuntimeOptions.m_ProfilingOptions.m_CapturePeriod)->default_value("150"))
429
430 ("output-network-details",
Keith Davis4914d0c2021-08-18 17:14:05 +0100431 "Outputs layer tensor infos and descriptors to std out along with profiling events. Defaults to off.",
Keith Davisf4874862021-08-09 16:49:18 +0100432 cxxopts::value<bool>(m_ExNetParams.m_OutputDetailsToStdOut)->default_value("false")
Keith Davis4914d0c2021-08-18 17:14:05 +0100433 ->implicit_value("true"))
434 ("output-network-details-only",
435 "Outputs layer tensor infos and descriptors to std out without profiling events. Defaults to off.",
436 cxxopts::value<bool>(m_ExNetParams.m_OutputDetailsOnlyToStdOut)->default_value("false")
Jim Flynn15425812022-02-15 16:53:13 +0000437 ->implicit_value("true"))
Keith Davis4914d0c2021-08-18 17:14:05 +0100438
Jim Flynn15425812022-02-15 16:53:13 +0000439 ("import-inputs-if-aligned",
440 "In & Out tensors will be imported per inference if the memory alignment allows. Defaults to false.",
441 cxxopts::value<bool>(m_ExNetParams.m_ImportInputsIfAligned)->default_value("false")
442 ->implicit_value("true"));
Jan Eilers45274902020-10-15 18:34:43 +0100443 }
444 catch (const std::exception& e)
445 {
446 ARMNN_ASSERT_MSG(false, "Caught unexpected exception");
447 ARMNN_LOG(fatal) << "Fatal internal error: " << e.what();
448 exit(EXIT_FAILURE);
449 }
450}
451
/// Convenience constructor: builds the option definitions (delegating
/// constructor) and immediately parses the given command line.
/// Note: may exit the process on --help or on fatal parse errors.
ProgramOptions::ProgramOptions(int ac, const char* av[]): ProgramOptions()
{
    ParseOptions(ac, av);
}
456
/// Parses the command line, validates option combinations and fills
/// m_ExNetParams / m_RuntimeOptions. Exits the process when --help is given
/// or when no arguments are supplied; throws on invalid arguments.
/// The ordering below matters: required/dependency checks run before any
/// post-processing, and ExecuteNetwork params are validated before the CL
/// tuning options are copied into the runtime options.
void ProgramOptions::ParseOptions(int ac, const char* av[])
{
    // Parses the command-line.
    m_CxxResult = m_CxxOptions.parse(ac, av);

    // No arguments at all is treated the same as --help.
    if (m_CxxResult.count("help") || ac <= 1)
    {
        std::cout << m_CxxOptions.help() << std::endl;
        exit(EXIT_SUCCESS);
    }

    CheckRequiredOptions(m_CxxResult);
    CheckOptionDependencies(m_CxxResult);
    CheckForDeprecatedOptions(m_CxxResult);

    // Some options can't be assigned directly because they need some post-processing:
    auto computeDevices = GetOptionValue<std::vector<std::string>>("compute", m_CxxResult);
    m_ExNetParams.m_ComputeDevices = GetBackendIDs(computeDevices);
    m_ExNetParams.m_ModelFormat =
            armnn::stringUtils::StringTrimCopy(GetOptionValue<std::string>("model-format", m_CxxResult));
    m_ExNetParams.m_InputNames =
            ParseStringList(GetOptionValue<std::string>("input-name", m_CxxResult), ",");
    m_ExNetParams.m_InputTensorDataFilePaths =
            ParseStringList(GetOptionValue<std::string>("input-tensor-data", m_CxxResult), ",");
    m_ExNetParams.m_OutputNames =
            ParseStringList(GetOptionValue<std::string>("output-name", m_CxxResult), ",");
    m_ExNetParams.m_InputTypes =
            ParseStringList(GetOptionValue<std::string>("input-type", m_CxxResult), ",");
    m_ExNetParams.m_OutputTypes =
            ParseStringList(GetOptionValue<std::string>("output-type", m_CxxResult), ",");
    m_ExNetParams.m_OutputTensorFiles =
            ParseStringList(GetOptionValue<std::string>("write-outputs-to-file", m_CxxResult), ",");
    // Dummy input data is generated only when no input files were supplied.
    m_ExNetParams.m_GenerateTensorData =
            m_ExNetParams.m_InputTensorDataFilePaths.empty();
    m_ExNetParams.m_DynamicBackendsPath = m_RuntimeOptions.m_DynamicBackendsPath;

    // GPU kernel profiling follows the built-in profiler switch.
    m_RuntimeOptions.m_EnableGpuProfiling = m_ExNetParams.m_EnableProfiling;

    // Map the textual 'tflite-executor' choice onto the executor enum.
    std::string tfliteExecutor = GetOptionValue<std::string>("tflite-executor", m_CxxResult);

    if (tfliteExecutor.size() == 0 || tfliteExecutor == "parser")
    {
        m_ExNetParams.m_TfLiteExecutor = ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteParser;
    }
    else if (tfliteExecutor == "delegate")
    {
        m_ExNetParams.m_TfLiteExecutor = ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate;
    }
    else if (tfliteExecutor == "tflite")
    {
        m_ExNetParams.m_TfLiteExecutor = ExecuteNetworkParams::TfLiteExecutor::TfliteInterpreter;
    }
    else
    {
        ARMNN_LOG(info) << fmt::format("Invalid tflite-executor option '{}'.", tfliteExecutor);
        throw armnn::InvalidArgumentException ("Invalid tflite-executor option");
    }

    // For backwards compatibility when deprecated options are used
    if (m_ExNetParams.m_EnableDelegate)
    {
        m_ExNetParams.m_TfLiteExecutor = ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate;
    }
    if (m_ExNetParams.m_SimultaneousIterations > 1)
    {
        // 'simultaneous-iterations' maps onto 'iterations' + 'concurrent'.
        m_ExNetParams.m_Iterations = m_ExNetParams.m_SimultaneousIterations;
        m_ExNetParams.m_Concurrent = true;
    }

    // Set concurrent to true if the user expects to run inferences asynchronously
    if (m_ExNetParams.m_ThreadPoolSize > 0)
    {
        m_ExNetParams.m_Concurrent = true;
    }

    // Parse input tensor shape from the string we got from the command-line.
    // Shapes are colon-separated; each shape is a comma-separated dim list.
    std::vector<std::string> inputTensorShapesVector =
            ParseStringList(GetOptionValue<std::string>("input-tensor-shape", m_CxxResult), ":");

    if (!inputTensorShapesVector.empty())
    {
        m_ExNetParams.m_InputTensorShapes.reserve(inputTensorShapesVector.size());

        for(const std::string& shape : inputTensorShapesVector)
        {
            std::stringstream ss(shape);
            std::vector<unsigned int> dims = ParseArray(ss);

            m_ExNetParams.m_InputTensorShapes.push_back(
                    std::make_unique<armnn::TensorShape>(static_cast<unsigned int>(dims.size()), dims.data()));
        }
    }

    // We have to validate ExecuteNetworkParams first so that the tuning path and level is validated
    ValidateExecuteNetworkParams();

    // Parse CL tuning parameters to runtime options
    if (!m_ExNetParams.m_TuningPath.empty())
    {
        m_RuntimeOptions.m_BackendOptions.emplace_back(
            armnn::BackendOptions
            {
                "GpuAcc",
                {
                    {"TuningLevel", m_ExNetParams.m_TuningLevel},
                    {"TuningFile", m_ExNetParams.m_TuningPath.c_str()},
                    {"KernelProfilingEnabled", m_ExNetParams.m_EnableProfiling},
                    {"MLGOTuningFilePath", m_ExNetParams.m_MLGOTuningFilePath}
                }
            }
        );
    }

    ValidateRuntimeOptions();
}
572