//
// Copyright © 2022-2023 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ExecuteNetworkProgramOptions.hpp"
#include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"

#include <armnn/BackendRegistry.hpp>
#include <armnn/Exceptions.hpp>
#include <armnn/utility/Assert.hpp>
#include <armnn/utility/StringUtils.hpp>
#include <armnn/Logging.hpp>

#include <fmt/format.h>

bool CheckOption(const cxxopts::ParseResult& result,
                 const char* option)
{
    // Check that the given option is valid.
    if (option == nullptr)
    {
        return false;
    }

    // Check whether 'option' is provided.
    return result.count(option) > 0;
}

void CheckOptionDependency(const cxxopts::ParseResult& result,
                           const char* option,
                           const char* required)
{
    // Check that the given options are valid.
    if (option == nullptr || required == nullptr)
    {
        throw cxxopts::OptionParseException("Invalid option to check dependency for");
    }

    // Check that if 'option' is provided, 'required' is also provided.
    if (CheckOption(result, option) && !result[option].has_default())
    {
        if (!CheckOption(result, required) || result[required].has_default())
        {
            throw cxxopts::OptionParseException(
                std::string("Option '") + option + "' requires option '" + required + "'.");
        }
    }
}

void CheckOptionDependencies(const cxxopts::ParseResult& result)
{
    CheckOptionDependency(result, "tuning-level", "tuning-path");
}

void RemoveDuplicateDevices(std::vector<armnn::BackendId>& computeDevices)
{
    // Mark the duplicate devices as 'Undefined'.
    for (auto i = computeDevices.begin(); i != computeDevices.end(); ++i)
    {
        for (auto j = std::next(i); j != computeDevices.end(); ++j)
        {
            if (*j == *i)
            {
                *j = armnn::Compute::Undefined;
            }
        }
    }

    // Remove 'Undefined' devices.
    computeDevices.erase(std::remove(computeDevices.begin(), computeDevices.end(), armnn::Compute::Undefined),
                         computeDevices.end());
}

/// Takes a vector of backend strings and returns a vector of backend IDs.
/// Removes duplicate entries.
/// Can handle backend strings that contain multiple backends separated by comma, e.g. "CpuRef,CpuAcc".
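/// For example, {"GpuAcc,CpuAcc", "CpuAcc"} resolves to {GpuAcc, CpuAcc}.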
std::vector<armnn::BackendId> GetBackendIDs(const std::vector<std::string>& backendStringsVec)
{
    std::vector<armnn::BackendId> backendIDs;
    for (const auto& backendStrings : backendStringsVec)
    {
        // Each entry might contain multiple backends separated by comma, e.g. "CpuRef,CpuAcc".
        std::vector<std::string> backendStringVec = ParseStringList(backendStrings, ",");
        for (const auto& b : backendStringVec)
        {
            backendIDs.push_back(armnn::BackendId(b));
        }
    }

    RemoveDuplicateDevices(backendIDs);

    return backendIDs;
}

/// Provides a segfault-safe way to get cxxopts option values by checking if the option was defined.
/// If the option wasn't defined it returns an empty object.
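/// Example: auto modelFormat = GetOptionValue<std::string>("model-format", result);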
template<typename optionType>
optionType GetOptionValue(std::string&& optionName, const cxxopts::ParseResult& result)
{
    optionType out;
    if(result.count(optionName))
    {
        out = result[optionName].as<optionType>();
    }
    return out;
}

void LogAndThrowFatal(std::string errorMessage)
{
    // Log the message before throwing so it is visible even if the exception is caught upstream.
    ARMNN_LOG(fatal) << errorMessage;
    throw armnn::InvalidArgumentException(errorMessage);
}

void CheckRequiredOptions(const cxxopts::ParseResult& result)
{
    // For each option in option-group "a) Required"
    std::vector<std::string> requiredOptions{"compute",
                                             "model-path"
                                            };

    bool requiredMissing = false;
    for(auto const& str : requiredOptions)
    {
        if(!(result.count(str) > 0))
        {
            ARMNN_LOG(error) << fmt::format("The program option '{}' is mandatory but wasn't provided.", str);
            requiredMissing = true;
        }
    }
    if(requiredMissing)
    {
        throw armnn::InvalidArgumentException("Some required arguments are missing");
    }
}

void CheckForDeprecatedOptions(const cxxopts::ParseResult& result)
{
    if(result.count("armnn-tflite-delegate") > 0)
    {
        ARMNN_LOG(warning) << "DEPRECATED: The program option 'armnn-tflite-delegate' is deprecated and will be "
                              "removed soon. Please use the option 'tflite-executor' instead.";
    }
    if(result.count("concurrent") > 0)
    {
        ARMNN_LOG(warning) << "DEPRECATED: The program option 'concurrent' is deprecated and will be "
                              "removed soon. Please use the option '\"P, thread-pool-size\"' instead.";
    }
    if(result.count("input-type") > 0)
    {
        ARMNN_LOG(warning) << "DEPRECATED: The program option 'input-type' is deprecated and will be "
                              "removed soon. The input-types are now automatically set.";
    }
    if(result.count("input-name") > 0)
    {
        ARMNN_LOG(warning) << "DEPRECATED: The program option 'input-name' is deprecated and will be "
                              "removed soon. The input-names are now automatically set.";
    }
    if(result.count("output-type") > 0)
    {
        ARMNN_LOG(warning) << "DEPRECATED: The program option 'output-type' is deprecated and will be "
                              "removed soon. The output-types are now automatically set.";
    }
    if(result.count("output-name") > 0)
    {
        ARMNN_LOG(warning) << "DEPRECATED: The program option 'output-name' is deprecated and will be "
                              "removed soon. The output-names are now automatically set.";
    }
    if(result.count("model-format") > 0)
    {
        ARMNN_LOG(warning) << "DEPRECATED: The program option 'model-format' is deprecated and will be "
                              "removed soon. The model-format is now automatically set.";
    }
}

void ProgramOptions::ValidateExecuteNetworkParams()
{
    m_ExNetParams.ValidateParams();
}

void ProgramOptions::ValidateRuntimeOptions()
{
    if (m_RuntimeOptions.m_ProfilingOptions.m_TimelineEnabled &&
        !m_RuntimeOptions.m_ProfilingOptions.m_EnableProfiling)
    {
        LogAndThrowFatal("Timeline profiling requires external profiling to be turned on");
    }
}

ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
                                                "Executes a neural network model using the provided input "
                                                "tensor. Prints the resulting output tensor."}
{
    try
    {
        // cxxopts doesn't provide a mechanism to ensure required options are given. There is a
        // separate function CheckRequiredOptions() for that.
        m_CxxOptions.add_options("a) Required")
                ("c,compute",
                 "Which device to run layers on by default. If a single device doesn't support all layers in the model "
                 "you can specify a second or third to fall back on. Possible choices: "
                 + armnn::BackendRegistryInstance().GetBackendIdsAsString()
                 + " NOTE: Multiple compute devices need to be passed as a comma separated list without whitespaces "
                 "e.g. GpuAcc,CpuAcc,CpuRef or by repeating the program option e.g. '-c CpuAcc -c CpuRef'. "
                 "Duplicates are ignored.",
                 cxxopts::value<std::vector<std::string>>())

                ("f,model-format",
                 "armnn-binary, onnx-binary, onnx-text, tflite-binary. "
                 "DEPRECATED: The program option 'model-format' is deprecated and will be "
                 "removed soon. The model-format is now automatically set.",
                 cxxopts::value<std::string>())

                ("m,model-path",
                 "Path to model file, e.g. .armnn, .prototxt, .tflite, .onnx",
                 cxxopts::value<std::string>(m_ExNetParams.m_ModelPath))

                ("i,input-name",
                 "Identifier of the input tensors in the network separated by comma. "
                 "This option is not required, but can be used to set the order of inputs.",
                 cxxopts::value<std::string>())

                ("o,output-name",
                 "Identifier of the output tensors in the network separated by comma. "
                 "This option is not required, but can be used to set the order of outputs.",
                 cxxopts::value<std::string>());

        m_CxxOptions.add_options("b) General")
                ("b,dynamic-backends-path",
                 "Path where to load any available dynamic backend from. "
                 "If left empty (the default), dynamic backends will not be used.",
                 cxxopts::value<std::string>(m_RuntimeOptions.m_DynamicBackendsPath))

                ("P, thread-pool-size",
                 "Run the network using the Arm NN thread pool with the number of threads provided. ",
                 cxxopts::value<size_t>(m_ExNetParams.m_ThreadPoolSize)->default_value("0"))

                ("n,concurrent",
                 "This option is for Arm NN internal asynchronous testing purposes. "
                 "False by default. If set to true will use std::launch::async or the Arm NN thread pool, "
                 "if 'thread-pool-size' is greater than 0, for asynchronous execution. "
                 "DEPRECATED: The program option 'concurrent' is deprecated and will be "
                 "removed soon. Please use the option '\"P, thread-pool-size\"' instead.",
                 cxxopts::value<bool>(m_ExNetParams.m_Concurrent)->default_value("false")->implicit_value("true"))

                ("d,input-tensor-data",
                 "Path to files containing the input data as a flat array separated by whitespace. "
                 "Several paths can be passed by separating them with a comma if the network has multiple inputs "
                 "or you wish to run the model multiple times with different input data using the 'iterations' option. "
                 "If not specified, the network will be run with dummy data (useful for profiling).",
                 cxxopts::value<std::string>()->default_value(""))

                ("h,help", "Display usage information")

                ("infer-output-shape",
                 "Infers output tensor shape from input tensor shape and validates where applicable (where supported "
                 "by parser)",
                 cxxopts::value<bool>(m_ExNetParams.m_InferOutputShape)->default_value("false")->implicit_value("true"))

                ("allow-expanded-dims",
                 "If true will disregard dimensions with a size of 1 when validating tensor shapes. Tensor sizes must "
                 "still match. This is an experimental parameter that is incompatible with infer-output-shape. "
                 "This parameter may be removed in a later update. ",
                 cxxopts::value<bool>(m_ExNetParams.m_AllowExpandedDims)->default_value("false")
                 ->implicit_value("true"))

                ("I,iterations",
                 "Number of iterations to run the network for, default is set to 1. "
                 "If you wish to run the model with different input data for every execution you can do so by "
                 "supplying more input file paths to the 'input-tensor-data' option. "
                 "Note: The number of input files provided must be divisible by the number of inputs of the model. "
                 "e.g. Your model has 2 inputs and you supply 4 input files. If you set 'iterations' to 6 the first "
                 "run will consume the first two inputs, the second the next two and the last will begin from the "
                 "start and use the first two inputs again. "
                 "Note: If the 'concurrent' option is enabled all iterations will be run asynchronously.",
                 cxxopts::value<size_t>(m_ExNetParams.m_Iterations)->default_value("1"))

                ("l,dequantize-output",
                 "If this option is enabled, all quantized outputs will be dequantized to float. "
                 "If unset, default to not get dequantized. "
                 "Accepted values (true or false)"
                 " (Not available when executing ArmNNTfLiteDelegate or TfliteInterpreter)",
                 cxxopts::value<bool>(m_ExNetParams.m_DequantizeOutput)->default_value("false")->implicit_value("true"))

                ("p,print-intermediate-layers",
                 "If this option is enabled, the output of every graph layer will be printed.",
                 cxxopts::value<bool>(m_ExNetParams.m_PrintIntermediate)->default_value("false")
                 ->implicit_value("true"))

                ("F,print-intermediate-layers-to-file",
                 "If this option is enabled, the output of every graph layer will be printed within separate files.",
                 cxxopts::value<bool>(m_ExNetParams.m_PrintIntermediateOutputsToFile)->default_value("false")
                 ->implicit_value("true"))

                ("parse-unsupported",
                 "Add unsupported operators as stand-in layers (where supported by parser)",
                 cxxopts::value<bool>(m_ExNetParams.m_ParseUnsupported)->default_value("false")->implicit_value("true"))

                ("N,do-not-print-output",
                 "The default behaviour of ExecuteNetwork is to print the resulting outputs on the console. "
                 "This behaviour can be changed by adding this flag to your command.",
                 cxxopts::value<bool>(m_ExNetParams.m_DontPrintOutputs)->default_value("false")->implicit_value("true"))

                ("q,quantize-input",
                 "If this option is enabled, all float inputs will be quantized as appropriate for the model's inputs. "
                 "If unset, default to not quantized. Accepted values (true or false)"
                 " (Not available when executing ArmNNTfLiteDelegate or TfliteInterpreter)",
                 cxxopts::value<bool>(m_ExNetParams.m_QuantizeInput)->default_value("false")->implicit_value("true"))

                ("r,threshold-time",
                 "Threshold time is the maximum allowed time for inference measured in milliseconds. If the actual "
                 "inference time is greater than the threshold time, the test will fail. By default, no threshold "
                 "time is used.",
                 cxxopts::value<double>(m_ExNetParams.m_ThresholdTime)->default_value("0.0"))

                ("s,input-tensor-shape",
                 "The shape of the input tensors in the network as a flat array of integers separated by comma. "
                 "Several shapes can be passed by separating them with a colon (:).",
                 cxxopts::value<std::string>())

                ("v,visualize-optimized-model",
                 "Enables built optimized model visualizer. If unset, defaults to off.",
                 cxxopts::value<bool>(m_ExNetParams.m_EnableLayerDetails)->default_value("false")
                 ->implicit_value("true"))

                ("w,write-outputs-to-file",
                 "Comma-separated list of output file paths keyed with the binding-id of the output slot. "
                 "If left empty (the default), the output tensors will not be written to a file.",
                 cxxopts::value<std::string>())

                ("x,subgraph-number",
                 "Id of the subgraph to be executed. Defaults to 0."
                 " (Not available when executing ArmNNTfLiteDelegate or TfliteInterpreter)",
                 cxxopts::value<size_t>(m_ExNetParams.m_SubgraphId)->default_value("0"))

                ("y,input-type",
                 "The type of the input tensors in the network separated by comma. "
                 "If unset, defaults to \"float\" for all defined inputs. "
                 "Accepted values (float, int, qasymms8 or qasymmu8). "
                 "DEPRECATED: The program option 'input-type' is deprecated and will be "
                 "removed soon. The input-types are now automatically set.",
                 cxxopts::value<std::string>())

                ("z,output-type",
                 "The type of the output tensors in the network separated by comma. "
                 "If unset, defaults to \"float\" for all defined outputs. "
                 "Accepted values (float, int, qasymms8 or qasymmu8). "
                 "DEPRECATED: The program option 'output-type' is deprecated and will be "
                 "removed soon. The output-types are now automatically set.",
                 cxxopts::value<std::string>())

                ("T,tflite-executor",
                 "Set the executor for the tflite model: parser, delegate, tflite. "
                 "parser is the ArmNNTfLiteParser, "
                 "delegate is the ArmNNTfLiteDelegate, "
                 "tflite is the TfliteInterpreter",
                 cxxopts::value<std::string>()->default_value("parser"))

                ("C, compare-output",
                 "Compare the output of the network with an output file that has been previously "
                 "produced by running a network through ExecuteNetwork. See --write-outputs-to-file "
                 "to produce an output file for an execution.",
                 cxxopts::value<std::string>(m_ExNetParams.m_ComparisonFile))

                ("B, compare-output-with-backend",
                 "Compare the output of the network with a different backend.",
                 cxxopts::value<std::vector<std::string>>())

                ("A, compare-with-tflite",
                 "Compare the output of the network with the tflite ref model.",
                 cxxopts::value<bool>(m_ExNetParams.m_CompareWithTflite)->default_value("false")
                 ->implicit_value("true"));

        m_CxxOptions.add_options("c) Optimization")
                ("bf16-turbo-mode",
                 "This option is no longer being used. In order to use bf16 please set enable-fast-math "
                 "to true",
                 cxxopts::value<bool>(m_ExNetParams.m_EnableBf16TurboMode)
                 ->default_value("false")->implicit_value("true"))

                ("enable-fast-math",
                 "Enables fast_math options in backends that support it. Using the fast_math flag can lead to "
                 "performance improvements but may result in reduced or different precision.",
                 cxxopts::value<bool>(m_ExNetParams.m_EnableFastMath)->default_value("false")->implicit_value("true"))

                ("number-of-threads",
                 "Assign the number of threads used by the CpuAcc backend. "
                 "Input value must be between 1 and 64. "
                 "Default is set to 0 (Backend will decide number of threads to use).",
                 cxxopts::value<unsigned int>(m_ExNetParams.m_NumberOfThreads)->default_value("0"))

                ("save-cached-network",
                 "Enables saving of the cached network to a file given with the cached-network-filepath option. "
                 "See also --cached-network-filepath",
                 cxxopts::value<bool>(m_ExNetParams.m_SaveCachedNetwork)
                 ->default_value("false")->implicit_value("true"))

                ("cached-network-filepath",
                 "If non-empty, the given file will be used to load/save the cached network. "
                 "If save-cached-network is given then the cached network will be saved to the given file. "
                 "To save the cached network a file must already exist. "
                 "If save-cached-network is not given then the cached network will be loaded from the given file. "
                 "This will remove initial compilation time of kernels and speed up the first execution.",
                 cxxopts::value<std::string>(m_ExNetParams.m_CachedNetworkFilePath)->default_value(""))

                ("fp16-turbo-mode",
                 "If this option is enabled, FP32 layers, "
                 "weights and biases will be converted to FP16 where the backend supports it",
                 cxxopts::value<bool>(m_ExNetParams.m_EnableFp16TurboMode)
                 ->default_value("false")->implicit_value("true"))

                ("tuning-level",
                 "Sets the tuning level, which enables a tuning run that will update/create a tuning file. "
                 "Available options are: 1 (Rapid), 2 (Normal), 3 (Exhaustive). "
                 "Requires tuning-path to be set, default is set to 0 (No tuning run)",
                 cxxopts::value<int>(m_ExNetParams.m_TuningLevel)->default_value("0"))

                ("tuning-path",
                 "Path to tuning file. Enables use of CL tuning",
                 cxxopts::value<std::string>(m_ExNetParams.m_TuningPath))

                ("MLGOTuningFilePath",
                 "Path to tuning file. Enables use of CL MLGO tuning",
                 cxxopts::value<std::string>(m_ExNetParams.m_MLGOTuningFilePath))

                ("R, reuse-buffers",
                 "If enabled then the IO buffers will be reused for each inference",
                 cxxopts::value<bool>(m_ExNetParams.m_ReuseBuffers)->default_value("false")->implicit_value("true"));

        m_CxxOptions.add_options("d) Profiling")
                ("a,enable-external-profiling",
                 "If enabled external profiling will be switched on",
                 cxxopts::value<bool>(m_RuntimeOptions.m_ProfilingOptions.m_EnableProfiling)
                 ->default_value("false")->implicit_value("true"))

                ("e,event-based-profiling",
                 "Enables built in profiler. If unset, defaults to off.",
                 cxxopts::value<bool>(m_ExNetParams.m_EnableProfiling)->default_value("false")->implicit_value("true"))

                ("g,file-only-external-profiling",
                 "If enabled then the 'file-only' test mode of external profiling will be enabled",
                 cxxopts::value<bool>(m_RuntimeOptions.m_ProfilingOptions.m_FileOnly)
                 ->default_value("false")->implicit_value("true"))

                ("file-format",
                 "If profiling is enabled specifies the output file format",
                 cxxopts::value<std::string>(m_RuntimeOptions.m_ProfilingOptions.m_FileFormat)->default_value("binary"))

                ("j,outgoing-capture-file",
                 "If specified the outgoing external profiling packets will be captured in this binary file",
                 cxxopts::value<std::string>(m_RuntimeOptions.m_ProfilingOptions.m_OutgoingCaptureFile))

                ("k,incoming-capture-file",
                 "If specified the incoming external profiling packets will be captured in this binary file",
                 cxxopts::value<std::string>(m_RuntimeOptions.m_ProfilingOptions.m_IncomingCaptureFile))

                ("timeline-profiling",
                 "If enabled timeline profiling will be switched on, requires external profiling",
                 cxxopts::value<bool>(m_RuntimeOptions.m_ProfilingOptions.m_TimelineEnabled)
                 ->default_value("false")->implicit_value("true"))

                ("u,counter-capture-period",
                 "If profiling is enabled in 'file-only' mode this is the capture period that will be used in the test",
                 cxxopts::value<uint32_t>(m_RuntimeOptions.m_ProfilingOptions.m_CapturePeriod)->default_value("150"))

                ("output-network-details",
                 "Outputs layer tensor infos and descriptors to std out along with profiling events. Defaults to off.",
                 cxxopts::value<bool>(m_ExNetParams.m_OutputDetailsToStdOut)->default_value("false")
                 ->implicit_value("true"))

                ("output-network-details-only",
                 "Outputs layer tensor infos and descriptors to std out without profiling events. Defaults to off.",
                 cxxopts::value<bool>(m_ExNetParams.m_OutputDetailsOnlyToStdOut)->default_value("false")
                 ->implicit_value("true"))

                ("import-inputs-if-aligned",
                 "In & Out tensors will be imported per inference if the memory alignment allows. Defaults to false.",
                 cxxopts::value<bool>(m_ExNetParams.m_ImportInputsIfAligned)->default_value("false")
                 ->implicit_value("true"));
    }
    catch (const std::exception& e)
    {
        ARMNN_ASSERT_MSG(false, "Caught unexpected exception");
        ARMNN_LOG(fatal) << "Fatal internal error: " << e.what();
        exit(EXIT_FAILURE);
    }
}

ProgramOptions::ProgramOptions(int ac, const char* av[]): ProgramOptions()
{
    ParseOptions(ac, av);
}

void ProgramOptions::ParseOptions(int ac, const char* av[])
{
    // Parses the command-line.
    m_CxxResult = m_CxxOptions.parse(ac, av);

    if (m_CxxResult.count("help") || ac <= 1)
    {
        std::cout << m_CxxOptions.help() << std::endl;
        exit(EXIT_SUCCESS);
    }

    CheckRequiredOptions(m_CxxResult);
    CheckOptionDependencies(m_CxxResult);
    CheckForDeprecatedOptions(m_CxxResult);

    if ((m_ExNetParams.m_OutputDetailsToStdOut ||
         m_ExNetParams.m_OutputDetailsOnlyToStdOut) &&
        !m_ExNetParams.m_EnableProfiling)
    {
        throw cxxopts::OptionParseException("You must enable profiling if you would like to output layer details");
    }

    // Some options can't be assigned directly because they need some post-processing:
    auto computeDevices = GetOptionValue<std::vector<std::string>>("compute", m_CxxResult);
    m_ExNetParams.m_ComputeDevices = GetBackendIDs(computeDevices);
    m_ExNetParams.m_InputNames =
            ParseStringList(GetOptionValue<std::string>("input-name", m_CxxResult), ",");
    m_ExNetParams.m_InputTensorDataFilePaths =
            ParseStringList(GetOptionValue<std::string>("input-tensor-data", m_CxxResult), ",");
    m_ExNetParams.m_OutputNames =
            ParseStringList(GetOptionValue<std::string>("output-name", m_CxxResult), ",");
    m_ExNetParams.m_OutputTensorFiles =
            ParseStringList(GetOptionValue<std::string>("write-outputs-to-file", m_CxxResult), ",");
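    // If no input data files were provided, dummy input data will be generated (see 'input-tensor-data').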
    m_ExNetParams.m_GenerateTensorData = m_ExNetParams.m_InputTensorDataFilePaths.empty();
    m_ExNetParams.m_DynamicBackendsPath = m_RuntimeOptions.m_DynamicBackendsPath;

    m_RuntimeOptions.m_EnableGpuProfiling = m_ExNetParams.m_EnableProfiling;

    std::string tfliteExecutor = GetOptionValue<std::string>("tflite-executor", m_CxxResult);

    if (tfliteExecutor.empty() || tfliteExecutor == "parser")
    {
        m_ExNetParams.m_TfLiteExecutor = ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteParser;
    }
    else if (tfliteExecutor == "delegate")
    {
        m_ExNetParams.m_TfLiteExecutor = ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate;
    }
    else if (tfliteExecutor == "tflite")
    {
        m_ExNetParams.m_TfLiteExecutor = ExecuteNetworkParams::TfLiteExecutor::TfliteInterpreter;
    }
    else
    {
        ARMNN_LOG(info) << fmt::format("Invalid tflite-executor option '{}'.", tfliteExecutor);
        throw armnn::InvalidArgumentException("Invalid tflite-executor option");
    }

    // For backwards compatibility when deprecated options are used
    if (m_ExNetParams.m_EnableDelegate)
    {
        m_ExNetParams.m_TfLiteExecutor = ExecuteNetworkParams::TfLiteExecutor::ArmNNTfLiteDelegate;
    }

    // If concurrent execution was requested, make sure the thread pool contains at least one thread.
    if (m_ExNetParams.m_Concurrent)
    {
        m_ExNetParams.m_ThreadPoolSize = 1;
    }

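    // Conversely, set concurrent to true if the user picked a thread pool size, since those
    // inferences are expected to run asynchronously.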
    if (m_ExNetParams.m_ThreadPoolSize > 0)
    {
        m_ExNetParams.m_Concurrent = true;
    }

    // Parse input tensor shape from the string we got from the command-line.
    std::vector<std::string> inputTensorShapesVector =
            ParseStringList(GetOptionValue<std::string>("input-tensor-shape", m_CxxResult), ":");

    if (!inputTensorShapesVector.empty())
    {
        m_ExNetParams.m_InputTensorShapes.reserve(inputTensorShapesVector.size());

        for(const std::string& shape : inputTensorShapesVector)
        {
            std::stringstream ss(shape);
            std::vector<unsigned int> dims = ParseArray(ss);

            m_ExNetParams.m_InputTensorShapes.push_back(
                armnn::TensorShape{static_cast<unsigned int>(dims.size()), dims.data()});
        }
    }

    // We have to validate ExecuteNetworkParams first so that the tuning path and level are validated.
    ValidateExecuteNetworkParams();

    // Parse CL tuning parameters to runtime options
    if (!m_ExNetParams.m_TuningPath.empty())
    {
        m_RuntimeOptions.m_BackendOptions.emplace_back(
            armnn::BackendOptions
            {
                "GpuAcc",
                {
                    {"TuningLevel", m_ExNetParams.m_TuningLevel},
                    {"TuningFile", m_ExNetParams.m_TuningPath.c_str()},
                    {"KernelProfilingEnabled", m_ExNetParams.m_EnableProfiling},
                    {"MLGOTuningFilePath", m_ExNetParams.m_MLGOTuningFilePath}
                }
            }
        );
    }

    ValidateRuntimeOptions();

    auto comparisonComputeDevices =
            GetOptionValue<std::vector<std::string>>("compare-output-with-backend", m_CxxResult);

    if (!comparisonComputeDevices.empty())
    {
        m_ExNetParams.m_ComparisonComputeDevices = GetBackendIDs(comparisonComputeDevices);
    }
}