//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "../NetworkExecutionUtils/NetworkExecutionUtils.hpp"

// MAIN
telsoa01c577f2c2018-08-31 09:22:23 +01009int main(int argc, const char* argv[])
10{
11 // Configures logging for both the ARMNN library and this test program.
12#ifdef NDEBUG
13 armnn::LogSeverity level = armnn::LogSeverity::Info;
14#else
15 armnn::LogSeverity level = armnn::LogSeverity::Debug;
16#endif
17 armnn::ConfigureLogging(true, true, level);
18 armnnUtils::ConfigureLogging(boost::log::core::get().get(), true, true, level);
19
20 std::string testCasesFile;
21
22 std::string modelFormat;
23 std::string modelPath;
Ferran Balaguerc602f292019-02-08 17:09:55 +000024 std::string inputNames;
25 std::string inputTensorShapes;
26 std::string inputTensorDataFilePaths;
27 std::string outputNames;
28 std::string inputTypes;
Éanna Ó Catháinb3d481a2019-02-26 11:26:24 +000029 std::string outputTypes;
Matteo Martincigh00dda4a2019-08-14 11:42:30 +010030 std::string dynamicBackendsPath;
Sadik Armagan77086282019-09-02 11:46:28 +010031 std::string outputTensorFiles;
telsoa01c577f2c2018-08-31 09:22:23 +010032
James Conroy7b4886f2019-04-11 10:23:58 +010033 double thresholdTime = 0.0;
34
telsoa01c577f2c2018-08-31 09:22:23 +010035 size_t subgraphId = 0;
36
Aron Virginas-Tar5cc8e562018-10-23 15:14:46 +010037 const std::string backendsMessage = "Which device to run layers on by default. Possible choices: "
38 + armnn::BackendRegistryInstance().GetBackendIdsAsString();
39
telsoa01c577f2c2018-08-31 09:22:23 +010040 po::options_description desc("Options");
41 try
42 {
43 desc.add_options()
44 ("help", "Display usage information")
45 ("test-cases,t", po::value(&testCasesFile), "Path to a CSV file containing test cases to run. "
46 "If set, further parameters -- with the exception of compute device and concurrency -- will be ignored, "
47 "as they are expected to be defined in the file for each test in particular.")
48 ("concurrent,n", po::bool_switch()->default_value(false),
49 "Whether or not the test cases should be executed in parallel")
Matteo Martincigh49124022019-01-11 13:25:59 +000050 ("model-format,f", po::value(&modelFormat)->required(),
Aron Virginas-Tar64e4ccb2019-02-12 11:27:53 +000051 "armnn-binary, caffe-binary, caffe-text, onnx-binary, onnx-text, tflite-binary, tensorflow-binary or "
52 "tensorflow-text.")
53 ("model-path,m", po::value(&modelPath)->required(), "Path to model file, e.g. .armnn, .caffemodel, "
54 ".prototxt, .tflite, .onnx")
David Beckf0b48452018-10-19 15:20:56 +010055 ("compute,c", po::value<std::vector<std::string>>()->multitoken(),
Aron Virginas-Tar5cc8e562018-10-23 15:14:46 +010056 backendsMessage.c_str())
Matteo Martincigh00dda4a2019-08-14 11:42:30 +010057 ("dynamic-backends-path,b", po::value(&dynamicBackendsPath),
58 "Path where to load any available dynamic backend from. "
59 "If left empty (the default), dynamic backends will not be used.")
Ferran Balaguerc602f292019-02-08 17:09:55 +000060 ("input-name,i", po::value(&inputNames),
61 "Identifier of the input tensors in the network separated by comma.")
telsoa01c577f2c2018-08-31 09:22:23 +010062 ("subgraph-number,x", po::value<size_t>(&subgraphId)->default_value(0), "Id of the subgraph to be executed."
63 "Defaults to 0")
Ferran Balaguerc602f292019-02-08 17:09:55 +000064 ("input-tensor-shape,s", po::value(&inputTensorShapes),
65 "The shape of the input tensors in the network as a flat array of integers separated by comma. "
66 "Several shapes can be passed separating them by semicolon. "
telsoa01c577f2c2018-08-31 09:22:23 +010067 "This parameter is optional, depending on the network.")
Ferran Balaguerc602f292019-02-08 17:09:55 +000068 ("input-tensor-data,d", po::value(&inputTensorDataFilePaths),
69 "Path to files containing the input data as a flat array separated by whitespace. "
Matteo Martincigh00dda4a2019-08-14 11:42:30 +010070 "Several paths can be passed separating them by comma.")
Ferran Balaguerc602f292019-02-08 17:09:55 +000071 ("input-type,y",po::value(&inputTypes), "The type of the input tensors in the network separated by comma. "
72 "If unset, defaults to \"float\" for all defined inputs. "
Éanna Ó Catháinb3d481a2019-02-26 11:26:24 +000073 "Accepted values (float, int or qasymm8)")
Narumol Prangnawarat610256f2019-06-26 15:10:46 +010074 ("quantize-input,q",po::bool_switch()->default_value(false),
75 "If this option is enabled, all float inputs will be quantized to qasymm8. "
76 "If unset, default to not quantized. "
77 "Accepted values (true or false)")
Éanna Ó Catháinb3d481a2019-02-26 11:26:24 +000078 ("output-type,z",po::value(&outputTypes),
79 "The type of the output tensors in the network separated by comma. "
80 "If unset, defaults to \"float\" for all defined outputs. "
81 "Accepted values (float, int or qasymm8).")
Ferran Balaguerc602f292019-02-08 17:09:55 +000082 ("output-name,o", po::value(&outputNames),
83 "Identifier of the output tensors in the network separated by comma.")
Sadik Armagan77086282019-09-02 11:46:28 +010084 ("write-outputs-to-file,w", po::value(&outputTensorFiles),
85 "Comma-separated list of output file paths keyed with the binding-id of the output slot. "
86 "If left empty (the default), the output tensors will not be written to a file.")
telsoa01c577f2c2018-08-31 09:22:23 +010087 ("event-based-profiling,e", po::bool_switch()->default_value(false),
Ruomei Yan2fcce082019-04-02 16:47:34 +010088 "Enables built in profiler. If unset, defaults to off.")
89 ("fp16-turbo-mode,h", po::bool_switch()->default_value(false), "If this option is enabled, FP32 layers, "
James Conroy7b4886f2019-04-11 10:23:58 +010090 "weights and biases will be converted to FP16 where the backend supports it")
91 ("threshold-time,r", po::value<double>(&thresholdTime)->default_value(0.0),
92 "Threshold time is the maximum allowed time for inference measured in milliseconds. If the actual "
93 "inference time is greater than the threshold time, the test will fail. By default, no threshold "
Matthew Jackson54658b92019-08-27 15:35:59 +010094 "time is used.")
95 ("print-intermediate-layers,p", po::bool_switch()->default_value(false),
96 "If this option is enabled, the output of every graph layer will be printed.");
telsoa01c577f2c2018-08-31 09:22:23 +010097 }
98 catch (const std::exception& e)
99 {
100 // Coverity points out that default_value(...) can throw a bad_lexical_cast,
101 // and that desc.add_options() can throw boost::io::too_few_args.
102 // They really won't in any of these cases.
103 BOOST_ASSERT_MSG(false, "Caught unexpected exception");
104 BOOST_LOG_TRIVIAL(fatal) << "Fatal internal error: " << e.what();
105 return EXIT_FAILURE;
106 }
107
108 // Parses the command-line.
109 po::variables_map vm;
110 try
111 {
112 po::store(po::parse_command_line(argc, argv, desc), vm);
113
114 if (CheckOption(vm, "help") || argc <= 1)
115 {
116 std::cout << "Executes a neural network model using the provided input tensor. " << std::endl;
117 std::cout << "Prints the resulting output tensor." << std::endl;
118 std::cout << std::endl;
119 std::cout << desc << std::endl;
120 return EXIT_SUCCESS;
121 }
122
123 po::notify(vm);
124 }
125 catch (const po::error& e)
126 {
127 std::cerr << e.what() << std::endl << std::endl;
128 std::cerr << desc << std::endl;
129 return EXIT_FAILURE;
130 }
131
132 // Get the value of the switch arguments.
133 bool concurrent = vm["concurrent"].as<bool>();
134 bool enableProfiling = vm["event-based-profiling"].as<bool>();
Ruomei Yan2fcce082019-04-02 16:47:34 +0100135 bool enableFp16TurboMode = vm["fp16-turbo-mode"].as<bool>();
Narumol Prangnawarat610256f2019-06-26 15:10:46 +0100136 bool quantizeInput = vm["quantize-input"].as<bool>();
Matthew Jackson54658b92019-08-27 15:35:59 +0100137 bool printIntermediate = vm["print-intermediate-layers"].as<bool>();
telsoa01c577f2c2018-08-31 09:22:23 +0100138
139 // Check whether we have to load test cases from a file.
140 if (CheckOption(vm, "test-cases"))
141 {
142 // Check that the file exists.
143 if (!boost::filesystem::exists(testCasesFile))
144 {
145 BOOST_LOG_TRIVIAL(fatal) << "Given file \"" << testCasesFile << "\" does not exist";
146 return EXIT_FAILURE;
147 }
148
149 // Parse CSV file and extract test cases
150 armnnUtils::CsvReader reader;
151 std::vector<armnnUtils::CsvRow> testCases = reader.ParseFile(testCasesFile);
152
153 // Check that there is at least one test case to run
154 if (testCases.empty())
155 {
156 BOOST_LOG_TRIVIAL(fatal) << "Given file \"" << testCasesFile << "\" has no test cases";
157 return EXIT_FAILURE;
158 }
159
160 // Create runtime
161 armnn::IRuntime::CreationOptions options;
Nina Drozd549ae372018-09-10 14:26:44 +0100162 options.m_EnableGpuProfiling = enableProfiling;
163
telsoa01c577f2c2018-08-31 09:22:23 +0100164 std::shared_ptr<armnn::IRuntime> runtime(armnn::IRuntime::Create(options));
165
166 const std::string executableName("ExecuteNetwork");
167
168 // Check whether we need to run the test cases concurrently
169 if (concurrent)
170 {
171 std::vector<std::future<int>> results;
172 results.reserve(testCases.size());
173
174 // Run each test case in its own thread
175 for (auto& testCase : testCases)
176 {
177 testCase.values.insert(testCase.values.begin(), executableName);
Nina Drozd549ae372018-09-10 14:26:44 +0100178 results.push_back(std::async(std::launch::async, RunCsvTest, std::cref(testCase), std::cref(runtime),
Matthew Jackson54658b92019-08-27 15:35:59 +0100179 enableProfiling, enableFp16TurboMode, thresholdTime, printIntermediate));
telsoa01c577f2c2018-08-31 09:22:23 +0100180 }
181
182 // Check results
183 for (auto& result : results)
184 {
185 if (result.get() != EXIT_SUCCESS)
186 {
187 return EXIT_FAILURE;
188 }
189 }
190 }
191 else
192 {
193 // Run tests sequentially
194 for (auto& testCase : testCases)
195 {
196 testCase.values.insert(testCase.values.begin(), executableName);
Matthew Jackson54658b92019-08-27 15:35:59 +0100197 if (RunCsvTest(testCase, runtime, enableProfiling,
198 enableFp16TurboMode, thresholdTime, printIntermediate) != EXIT_SUCCESS)
telsoa01c577f2c2018-08-31 09:22:23 +0100199 {
200 return EXIT_FAILURE;
201 }
202 }
203 }
204
205 return EXIT_SUCCESS;
206 }
207 else // Run single test
208 {
Aron Virginas-Tar382e21c2019-01-22 14:10:39 +0000209 // Get the preferred order of compute devices. If none are specified, default to using CpuRef
210 const std::string computeOption("compute");
Matteo Martincigh00dda4a2019-08-14 11:42:30 +0100211 std::vector<std::string> computeDevicesAsStrings =
212 CheckOption(vm, computeOption.c_str()) ?
213 vm[computeOption].as<std::vector<std::string>>() :
214 std::vector<std::string>();
Matteo Martincigh067112f2018-10-29 11:01:09 +0000215 std::vector<armnn::BackendId> computeDevices(computeDevicesAsStrings.begin(), computeDevicesAsStrings.end());
telsoa01c577f2c2018-08-31 09:22:23 +0100216
217 // Remove duplicates from the list of compute devices.
218 RemoveDuplicateDevices(computeDevices);
219
telsoa01c577f2c2018-08-31 09:22:23 +0100220 try
221 {
222 CheckOptionDependencies(vm);
223 }
224 catch (const po::error& e)
225 {
226 std::cerr << e.what() << std::endl << std::endl;
227 std::cerr << desc << std::endl;
228 return EXIT_FAILURE;
229 }
230
Matteo Martincigh00dda4a2019-08-14 11:42:30 +0100231 return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
Narumol Prangnawarat610256f2019-06-26 15:10:46 +0100232 inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames,
Sadik Armagan77086282019-09-02 11:46:28 +0100233 outputTensorFiles, enableProfiling, enableFp16TurboMode, thresholdTime, printIntermediate,
234 subgraphId);
telsoa014fcda012018-03-09 14:13:49 +0000235 }
236}