blob: 72b784c72ef3651db3b9adda93ed97d149a98c9f [file] [log] [blame]
//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
5
6#include "ExecuteNetworkProgramOptions.hpp"
7#include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"
8#include "InferenceTest.hpp"
9
10#include <armnn/BackendRegistry.hpp>
11#include <armnn/Exceptions.hpp>
12#include <armnn/utility/Assert.hpp>
13#include <armnn/utility/StringUtils.hpp>
14#include <armnn/Logging.hpp>
15
16#include <fmt/format.h>
17
18bool CheckOption(const cxxopts::ParseResult& result,
19 const char* option)
20{
21 // Check that the given option is valid.
22 if (option == nullptr)
23 {
24 return false;
25 }
26
27 // Check whether 'option' is provided.
28 return ((result.count(option)) ? true : false);
29}
30
31void CheckOptionDependency(const cxxopts::ParseResult& result,
32 const char* option,
33 const char* required)
34{
35 // Check that the given options are valid.
36 if (option == nullptr || required == nullptr)
37 {
38 throw cxxopts::OptionParseException("Invalid option to check dependency for");
39 }
40
41 // Check that if 'option' is provided, 'required' is also provided.
42 if (CheckOption(result, option) && !result[option].has_default())
43 {
44 if (CheckOption(result, required) == 0 || result[required].has_default())
45 {
46 throw cxxopts::OptionParseException(
47 std::string("Option '") + option + "' requires option '" + required + "'.");
48 }
49 }
50}
51
52void CheckOptionDependencies(const cxxopts::ParseResult& result)
53{
54 CheckOptionDependency(result, "model-path", "model-format");
55 CheckOptionDependency(result, "input-tensor-shape", "model-path");
56 CheckOptionDependency(result, "tuning-level", "tuning-path");
57}
58
59void RemoveDuplicateDevices(std::vector<armnn::BackendId>& computeDevices)
60{
61 // Mark the duplicate devices as 'Undefined'.
62 for (auto i = computeDevices.begin(); i != computeDevices.end(); ++i)
63 {
64 for (auto j = std::next(i); j != computeDevices.end(); ++j)
65 {
66 if (*j == *i)
67 {
68 *j = armnn::Compute::Undefined;
69 }
70 }
71 }
72
73 // Remove 'Undefined' devices.
74 computeDevices.erase(std::remove(computeDevices.begin(), computeDevices.end(), armnn::Compute::Undefined),
75 computeDevices.end());
76}
77
78/// Takes a vector of backend strings and returns a vector of backendIDs. Removes duplicate entries.
79std::vector<armnn::BackendId> GetBackendIDs(const std::vector<std::string>& backendStrings)
80{
81 std::vector<armnn::BackendId> backendIDs;
82 for (const auto& b : backendStrings)
83 {
84 backendIDs.push_back(armnn::BackendId(b));
85 }
86
87 RemoveDuplicateDevices(backendIDs);
88
89 return backendIDs;
90}
91
92/// Provides a segfault safe way to get cxxopts option values by checking if the option was defined.
93/// If the option wasn't defined it returns an empty object.
94template<typename optionType>
95optionType GetOptionValue(std::string&& optionName, const cxxopts::ParseResult& result)
96{
97 optionType out;
98 if(result.count(optionName))
99 {
100 out = result[optionName].as<optionType>();
101 }
102 return out;
103}
104
105void LogAndThrowFatal(std::string errorMessage)
106{
107 throw armnn::InvalidArgumentException (errorMessage);
108}
109
110void CheckRequiredOptions(const cxxopts::ParseResult& result)
111{
112
113 // For each option in option-group "a) Required
114 std::vector<std::string> requiredOptions{"compute",
115 "model-format",
116 "model-path",
117 "input-name",
118 "output-name"};
119
120 bool requiredMissing = false;
121 for(auto const& str : requiredOptions)
122 {
123 if(!(result.count(str) > 0))
124 {
125 ARMNN_LOG(error) << fmt::format("The program option '{}' is mandatory but wasn't provided.", str);
126 requiredMissing = true;
127 }
128 }
129 if(requiredMissing)
130 {
131 throw armnn::InvalidArgumentException ("Some required arguments are missing");
132 }
133}
134
// Delegates validation of the network-execution parameters to the parameter
// struct itself; presumably throws on inconsistent settings — see
// ExecuteNetworkParams::ValidateParams for the actual checks.
void ProgramOptions::ValidateExecuteNetworkParams()
{
    m_ExNetParams.ValidateParams();
}
139
140void ProgramOptions::ValidateRuntimeOptions()
141{
142 if (m_RuntimeOptions.m_ProfilingOptions.m_TimelineEnabled &&
143 !m_RuntimeOptions.m_ProfilingOptions.m_EnableProfiling)
144 {
145 LogAndThrowFatal("Timeline profiling requires external profiling to be turned on");
146 }
147}
148
149
// Default constructor: declares every command-line option ExecuteNetwork
// accepts, grouped for the generated help text. Options bound directly to a
// member (via cxxopts::value<T>(member)) are assigned during parse(); the
// rest are read and post-processed in ParseOptions().
ProgramOptions::ProgramOptions() : m_CxxOptions{"ExecuteNetwork",
                                                "Executes a neural network model using the provided input "
                                                "tensor. Prints the resulting output tensor."}
{
    try
    {
        // cxxopts doesn't provide a mechanism to ensure required options are given. There is a
        // separate function CheckRequiredOptions() for that.
        m_CxxOptions.add_options("a) Required")
                ("c,compute",
                 "Which device to run layers on by default. Possible choices: "
                 + armnn::BackendRegistryInstance().GetBackendIdsAsString()
                 + " NOTE: Compute devices need to be passed as a comma separated list without whitespaces "
                   "e.g. CpuRef,CpuAcc",
                 cxxopts::value<std::string>())

                ("f,model-format",
                 "armnn-binary, caffe-binary, caffe-text, onnx-binary, onnx-text, tflite-binary, tensorflow-binary or "
                 "tensorflow-text.",
                 cxxopts::value<std::string>())

                ("m,model-path",
                 "Path to model file, e.g. .armnn, .caffemodel, .prototxt, .tflite, .onnx",
                 cxxopts::value<std::string>(m_ExNetParams.m_ModelPath))

                ("i,input-name",
                 "Identifier of the input tensors in the network separated by comma.",
                 cxxopts::value<std::string>())

                ("o,output-name",
                 "Identifier of the output tensors in the network separated by comma.",
                 cxxopts::value<std::string>());

        // General execution options; most are bound straight into
        // m_ExNetParams / m_RuntimeOptions members.
        m_CxxOptions.add_options("b) General")
                ("b,dynamic-backends-path",
                 "Path where to load any available dynamic backend from. "
                 "If left empty (the default), dynamic backends will not be used.",
                 cxxopts::value<std::string>(m_RuntimeOptions.m_DynamicBackendsPath))

                ("d,input-tensor-data",
                 "Path to files containing the input data as a flat array separated by whitespace. "
                 "Several paths can be passed by separating them with a comma. If not specified, the network will be "
                 "run with dummy data (useful for profiling).",
                 cxxopts::value<std::string>()->default_value(""))

                ("h,help", "Display usage information")

                ("infer-output-shape",
                 "Infers output tensor shape from input tensor shape and validate where applicable (where supported by "
                 "parser)",
                 cxxopts::value<bool>(m_ExNetParams.m_InferOutputShape)->default_value("false")->implicit_value("true"))

                ("iterations",
                 "Number of iterations to run the network for, default is set to 1",
                 cxxopts::value<size_t>(m_ExNetParams.m_Iterations)->default_value("1"))

                ("l,dequantize-output",
                 "If this option is enabled, all quantized outputs will be dequantized to float. "
                 "If unset, default to not get dequantized. "
                 "Accepted values (true or false)",
                 cxxopts::value<bool>(m_ExNetParams.m_DequantizeOutput)->default_value("false")->implicit_value("true"))

                ("p,print-intermediate-layers",
                 "If this option is enabled, the output of every graph layer will be printed.",
                 cxxopts::value<bool>(m_ExNetParams.m_PrintIntermediate)->default_value("false")
                 ->implicit_value("true"))

                ("parse-unsupported",
                 "Add unsupported operators as stand-in layers (where supported by parser)",
                 cxxopts::value<bool>(m_ExNetParams.m_ParseUnsupported)->default_value("false")->implicit_value("true"))

                ("q,quantize-input",
                 "If this option is enabled, all float inputs will be quantized to qasymm8. "
                 "If unset, default to not quantized. Accepted values (true or false)",
                 cxxopts::value<bool>(m_ExNetParams.m_QuantizeInput)->default_value("false")->implicit_value("true"))

                ("r,threshold-time",
                 "Threshold time is the maximum allowed time for inference measured in milliseconds. If the actual "
                 "inference time is greater than the threshold time, the test will fail. By default, no threshold "
                 "time is used.",
                 cxxopts::value<double>(m_ExNetParams.m_ThresholdTime)->default_value("0.0"))

                ("s,input-tensor-shape",
                 "The shape of the input tensors in the network as a flat array of integers separated by comma."
                 "Several shapes can be passed by separating them with a colon (:).",
                 cxxopts::value<std::string>())

                ("v,visualize-optimized-model",
                 "Enables built optimized model visualizer. If unset, defaults to off.",
                 cxxopts::value<bool>(m_ExNetParams.m_EnableLayerDetails)->default_value("false")
                 ->implicit_value("true"))

                ("w,write-outputs-to-file",
                 "Comma-separated list of output file paths keyed with the binding-id of the output slot. "
                 "If left empty (the default), the output tensors will not be written to a file.",
                 cxxopts::value<std::string>())

                ("x,subgraph-number",
                 "Id of the subgraph to be executed. Defaults to 0.",
                 cxxopts::value<size_t>(m_ExNetParams.m_SubgraphId)->default_value("0"))

                ("y,input-type",
                 "The type of the input tensors in the network separated by comma. "
                 "If unset, defaults to \"float\" for all defined inputs. "
                 "Accepted values (float, int or qasymm8).",
                 cxxopts::value<std::string>())

                ("z,output-type",
                 "The type of the output tensors in the network separated by comma. "
                 "If unset, defaults to \"float\" for all defined outputs. "
                 "Accepted values (float, int or qasymm8).",
                 cxxopts::value<std::string>());

        // Optimization-related options forwarded to the backends/optimizer.
        m_CxxOptions.add_options("c) Optimization")
                ("bf16-turbo-mode",
                 "If this option is enabled, FP32 layers, "
                 "weights and biases will be converted to BFloat16 where the backend supports it",
                 cxxopts::value<bool>(m_ExNetParams.m_EnableBf16TurboMode)
                 ->default_value("false")->implicit_value("true"))

                ("enable-fast-math",
                 "Enables fast_math options in backends that support it. Using the fast_math flag can lead to "
                 "performance improvements but may result in reduced or different precision.",
                 cxxopts::value<bool>(m_ExNetParams.m_EnableFastMath)->default_value("false")->implicit_value("true"))

                ("fp16-turbo-mode",
                 "If this option is enabled, FP32 layers, "
                 "weights and biases will be converted to FP16 where the backend supports it",
                 cxxopts::value<bool>(m_ExNetParams.m_EnableFp16TurboMode)
                 ->default_value("false")->implicit_value("true"))

                ("tuning-level",
                 "Sets the tuning level which enables a tuning run which will update/create a tuning file. "
                 "Available options are: 1 (Rapid), 2 (Normal), 3 (Exhaustive). "
                 "Requires tuning-path to be set, default is set to 0 (No tuning run)",
                 cxxopts::value<int>(m_ExNetParams.m_TuningLevel)->default_value("0"))

                ("tuning-path",
                 "Path to tuning file. Enables use of CL tuning",
                 cxxopts::value<std::string>(m_ExNetParams.m_TuningPath));

        // Profiling options (both the built-in profiler and external profiling).
        m_CxxOptions.add_options("d) Profiling")
                ("a,enable-external-profiling",
                 "If enabled external profiling will be switched on",
                 cxxopts::value<bool>(m_RuntimeOptions.m_ProfilingOptions.m_EnableProfiling)
                 ->default_value("false")->implicit_value("true"))

                ("e,event-based-profiling",
                 "Enables built in profiler. If unset, defaults to off.",
                 cxxopts::value<bool>(m_ExNetParams.m_EnableProfiling)->default_value("false")->implicit_value("true"))

                ("g,file-only-external-profiling",
                 "If enabled then the 'file-only' test mode of external profiling will be enabled",
                 cxxopts::value<bool>(m_RuntimeOptions.m_ProfilingOptions.m_FileOnly)
                 ->default_value("false")->implicit_value("true"))

                ("file-format",
                 "If profiling is enabled specifies the output file format",
                 cxxopts::value<std::string>(m_RuntimeOptions.m_ProfilingOptions.m_FileFormat)->default_value("binary"))

                ("j,outgoing-capture-file",
                 "If specified the outgoing external profiling packets will be captured in this binary file",
                 cxxopts::value<std::string>(m_RuntimeOptions.m_ProfilingOptions.m_OutgoingCaptureFile))

                ("k,incoming-capture-file",
                 "If specified the incoming external profiling packets will be captured in this binary file",
                 cxxopts::value<std::string>(m_RuntimeOptions.m_ProfilingOptions.m_IncomingCaptureFile))

                ("timeline-profiling",
                 "If enabled timeline profiling will be switched on, requires external profiling",
                 cxxopts::value<bool>(m_RuntimeOptions.m_ProfilingOptions.m_TimelineEnabled)
                 ->default_value("false")->implicit_value("true"))

                ("u,counter-capture-period",
                 "If profiling is enabled in 'file-only' mode this is the capture period that will be used in the test",
                 cxxopts::value<uint32_t>(m_RuntimeOptions.m_ProfilingOptions.m_CapturePeriod)->default_value("150"));
    }
    catch (const std::exception& e)
    {
        // An exception here means the option table itself is malformed — a
        // programming error, not a user error; abort rather than continue.
        ARMNN_ASSERT_MSG(false, "Caught unexpected exception");
        ARMNN_LOG(fatal) << "Fatal internal error: " << e.what();
        exit(EXIT_FAILURE);
    }
}
334
// Convenience constructor: builds the option definitions (via the delegating
// default constructor) and immediately parses the given command-line.
ProgramOptions::ProgramOptions(int ac, const char* av[]): ProgramOptions()
{
    ParseOptions(ac, av);
}
339
// Parses the command-line, validates it, and post-processes the options that
// cannot be bound directly to members (comma/colon-separated lists, backend
// ids, tensor shapes). Exits the process for --help or an empty command-line;
// otherwise throws on invalid input via the Check*/Validate* helpers.
void ProgramOptions::ParseOptions(int ac, const char* av[])
{
    // Parses the command-line.
    m_CxxResult = m_CxxOptions.parse(ac, av);

    // ac <= 1 means no arguments at all: print usage instead of failing.
    if (m_CxxResult.count("help") || ac <= 1)
    {
        std::cout << m_CxxOptions.help() << std::endl;
        exit(EXIT_SUCCESS);
    }

    CheckRequiredOptions(m_CxxResult);
    CheckOptionDependencies(m_CxxResult);

    // Some options can't be assigned directly because they need some post-processing:
    auto computeDevices = GetOptionValue<std::string>("compute", m_CxxResult);
    m_ExNetParams.m_ComputeDevices =
            GetBackendIDs(ParseStringList(computeDevices, ","));
    m_ExNetParams.m_ModelFormat =
            armnn::stringUtils::StringTrimCopy(GetOptionValue<std::string>("model-format", m_CxxResult));
    m_ExNetParams.m_InputNames =
            ParseStringList(GetOptionValue<std::string>("input-name", m_CxxResult), ",");
    m_ExNetParams.m_InputTensorDataFilePaths =
            ParseStringList(GetOptionValue<std::string>("input-tensor-data", m_CxxResult), ",");
    m_ExNetParams.m_OutputNames =
            ParseStringList(GetOptionValue<std::string>("output-name", m_CxxResult), ",");
    m_ExNetParams.m_InputTypes =
            ParseStringList(GetOptionValue<std::string>("input-type", m_CxxResult), ",");
    m_ExNetParams.m_OutputTypes =
            ParseStringList(GetOptionValue<std::string>("output-type", m_CxxResult), ",");
    m_ExNetParams.m_OutputTensorFiles =
            ParseStringList(GetOptionValue<std::string>("write-outputs-to-file", m_CxxResult), ",");
    // With no input data files the network is run with generated dummy data.
    m_ExNetParams.m_GenerateTensorData =
            m_ExNetParams.m_InputTensorDataFilePaths.empty();
    m_ExNetParams.m_DynamicBackendsPath = m_RuntimeOptions.m_DynamicBackendsPath;

    // Parse input tensor shape from the string we got from the command-line.
    // Shapes are colon-separated; each shape is a comma-separated dim list.
    std::vector<std::string> inputTensorShapesVector =
            ParseStringList(GetOptionValue<std::string>("input-tensor-shape", m_CxxResult), ":");

    if (!inputTensorShapesVector.empty())
    {
        m_ExNetParams.m_InputTensorShapes.reserve(inputTensorShapesVector.size());

        for(const std::string& shape : inputTensorShapesVector)
        {
            std::stringstream ss(shape);
            std::vector<unsigned int> dims = ParseArray(ss);

            m_ExNetParams.m_InputTensorShapes.push_back(
                    std::make_unique<armnn::TensorShape>(static_cast<unsigned int>(dims.size()), dims.data()));
        }
    }

    // We have to validate ExecuteNetworkParams first so that the tuning path and level is validated
    ValidateExecuteNetworkParams();

    // Parse CL tuning parameters to runtime options
    if (!m_ExNetParams.m_TuningPath.empty())
    {
        m_RuntimeOptions.m_BackendOptions.emplace_back(
            armnn::BackendOptions
            {
                "GpuAcc",
                {
                    {"TuningLevel", m_ExNetParams.m_TuningLevel},
                    {"TuningFile", m_ExNetParams.m_TuningPath.c_str()},
                    {"KernelProfilingEnabled", m_ExNetParams.m_EnableProfiling}
                }
            }
        );
    }

    ValidateRuntimeOptions();
}
415