//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include <armnn/ArmNN.hpp>
#include <armnn/TypesUtils.hpp>

#if defined(ARMNN_SERIALIZER)
#include "armnnDeserializer/IDeserializer.hpp"
#endif
#if defined(ARMNN_CAFFE_PARSER)
#include "armnnCaffeParser/ICaffeParser.hpp"
#endif
#if defined(ARMNN_TF_PARSER)
#include "armnnTfParser/ITfParser.hpp"
#endif
#if defined(ARMNN_TF_LITE_PARSER)
#include "armnnTfLiteParser/ITfLiteParser.hpp"
#endif
#if defined(ARMNN_ONNX_PARSER)
#include "armnnOnnxParser/IOnnxParser.hpp"
#endif
#include "CsvReader.hpp"
#include "../InferenceTest.hpp"

#include <Logging.hpp>
#include <Profiling.hpp>
#include <ResolveType.hpp>

#include <boost/algorithm/string/trim.hpp>
#include <boost/algorithm/string/split.hpp>
#include <boost/algorithm/string/classification.hpp>
#include <boost/program_options.hpp>
#include <boost/variant.hpp>

#include <iostream>
#include <fstream>
#include <functional>
#include <future>
#include <algorithm>
#include <iterator>

namespace
{

// Configure boost::program_options for command-line parsing and validation.
namespace po = boost::program_options;

template<typename T, typename TParseElementFunc>
std::vector<T> ParseArrayImpl(std::istream& stream, TParseElementFunc parseElementFunc, const char* chars = "\t ,:")
{
    std::vector<T> result;
    // Processes line-by-line.
    std::string line;
    while (std::getline(stream, line))
    {
        std::vector<std::string> tokens;
        try
        {
            // Coverity fix: boost::split() may throw an exception of type boost::bad_function_call.
            boost::split(tokens, line, boost::algorithm::is_any_of(chars), boost::token_compress_on);
        }
        catch (const std::exception& e)
        {
            BOOST_LOG_TRIVIAL(error) << "An error occurred when splitting tokens: " << e.what();
            continue;
        }
        for (const std::string& token : tokens)
        {
            if (!token.empty()) // See https://stackoverflow.com/questions/10437406/
            {
                try
                {
                    result.push_back(parseElementFunc(token));
                }
                catch (const std::exception&)
                {
                    BOOST_LOG_TRIVIAL(error) << "'" << token << "' is not a valid number. It has been ignored.";
                }
            }
        }
    }

    return result;
}
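
// Illustrative sketch (not part of the original file): with the default delimiters
// "\t ,:", ParseArrayImpl tokenizes every line and applies parseElementFunc to each
// non-empty token, skipping tokens that fail to parse:
//
//     std::stringstream ss("1, 2 3\n4:x 5");
//     std::vector<int> v = ParseArrayImpl<int>(ss, [](const std::string& s) { return std::stoi(s); });
//     // v == {1, 2, 3, 4, 5}; "x" is logged as invalid and ignored.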

bool CheckOption(const po::variables_map& vm,
                 const char* option)
{
    // Check that the given option is valid.
    if (option == nullptr)
    {
        return false;
    }

    // Check whether 'option' is provided.
    return vm.find(option) != vm.end();
}

void CheckOptionDependency(const po::variables_map& vm,
                           const char* option,
                           const char* required)
{
    // Check that the given options are valid.
    if (option == nullptr || required == nullptr)
    {
        throw po::error("Invalid option to check dependency for");
    }

    // Check that if 'option' is provided, 'required' is also provided.
    if (CheckOption(vm, option) && !vm[option].defaulted())
    {
        if (!CheckOption(vm, required) || vm[required].defaulted())
        {
            throw po::error(std::string("Option '") + option + "' requires option '" + required + "'.");
        }
    }
}

void CheckOptionDependencies(const po::variables_map& vm)
{
    CheckOptionDependency(vm, "model-path", "model-format");
    CheckOptionDependency(vm, "model-path", "input-name");
    CheckOptionDependency(vm, "model-path", "output-name");
    CheckOptionDependency(vm, "input-tensor-shape", "model-path");
}
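
// Illustrative note (hypothetical flags): if "model-path" is given on the command line
// but "input-name" is not, CheckOptionDependency throws a po::error reading
// "Option 'model-path' requires option 'input-name'.", which RunCsvTest below catches
// and reports alongside the usage text.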

template<armnn::DataType NonQuantizedType>
auto ParseDataArray(std::istream& stream);

template<armnn::DataType QuantizedType>
auto ParseDataArray(std::istream& stream,
                    const float& quantizationScale,
                    const int32_t& quantizationOffset);

template<>
auto ParseDataArray<armnn::DataType::Float32>(std::istream& stream)
{
    return ParseArrayImpl<float>(stream, [](const std::string& s) { return std::stof(s); });
}

template<>
auto ParseDataArray<armnn::DataType::Signed32>(std::istream& stream)
{
    return ParseArrayImpl<int>(stream, [](const std::string& s) { return std::stoi(s); });
}

template<>
auto ParseDataArray<armnn::DataType::QuantisedAsymm8>(std::istream& stream)
{
    return ParseArrayImpl<uint8_t>(stream,
                                   [](const std::string& s) { return boost::numeric_cast<uint8_t>(std::stoi(s)); });
}

template<>
auto ParseDataArray<armnn::DataType::QuantisedAsymm8>(std::istream& stream,
                                                      const float& quantizationScale,
                                                      const int32_t& quantizationOffset)
{
    return ParseArrayImpl<uint8_t>(stream,
                                   [&quantizationScale, &quantizationOffset](const std::string& s)
                                   {
                                       return boost::numeric_cast<uint8_t>(
                                           armnn::Quantize<uint8_t>(std::stof(s),
                                                                    quantizationScale,
                                                                    quantizationOffset));
                                   });
}
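
// Illustrative sketch (hypothetical values): armnn::Quantize<uint8_t> maps
// round(value / scale) + offset into the uint8 range, so with scale 0.5 and
// offset 10 an input line "1.0 2.5" parses to {12, 15}.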

std::vector<unsigned int> ParseArray(std::istream& stream)
{
    return ParseArrayImpl<unsigned int>(stream,
        [](const std::string& s) { return boost::numeric_cast<unsigned int>(std::stoi(s)); });
}

std::vector<std::string> ParseStringList(const std::string& inputString, const char* delimiter)
{
    std::stringstream stream(inputString);
    return ParseArrayImpl<std::string>(stream, [](const std::string& s) { return boost::trim_copy(s); }, delimiter);
}
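
// Illustrative sketch: ParseStringList splits on the given delimiter and trims each
// token, e.g. ParseStringList(" input_a , input_b ", ",") returns {"input_a", "input_b"};
// this is how the comma- and colon-separated command-line options below are unpacked.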

void RemoveDuplicateDevices(std::vector<armnn::BackendId>& computeDevices)
{
    // Mark the duplicate devices as 'Undefined'.
    for (auto i = computeDevices.begin(); i != computeDevices.end(); ++i)
    {
        for (auto j = std::next(i); j != computeDevices.end(); ++j)
        {
            if (*j == *i)
            {
                *j = armnn::Compute::Undefined;
            }
        }
    }

    // Remove 'Undefined' devices.
    computeDevices.erase(std::remove(computeDevices.begin(), computeDevices.end(), armnn::Compute::Undefined),
                         computeDevices.end());
}
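
// Illustrative sketch (hypothetical input): {CpuAcc, GpuAcc, CpuAcc, CpuRef} has its
// second CpuAcc marked Undefined and then erased, yielding {CpuAcc, GpuAcc, CpuRef}
// with the user's preference order preserved.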

struct TensorPrinter : public boost::static_visitor<>
{
    TensorPrinter(const std::string& binding, const armnn::TensorInfo& info, const std::string& outputTensorFile)
        : m_OutputBinding(binding)
        , m_Scale(info.GetQuantizationScale())
        , m_Offset(info.GetQuantizationOffset())
        , m_OutputTensorFile(outputTensorFile)
    {}

    void operator()(const std::vector<float>& values)
    {
        ForEachValue(values, [](float value)
        {
            printf("%f ", value);
        });
        WriteToFile(values);
    }

    void operator()(const std::vector<uint8_t>& values)
    {
        auto& scale = m_Scale;
        auto& offset = m_Offset;
        std::vector<float> dequantizedValues;
        ForEachValue(values, [&scale, &offset, &dequantizedValues](uint8_t value)
        {
            auto dequantizedValue = armnn::Dequantize(value, scale, offset);
            printf("%f ", dequantizedValue);
            dequantizedValues.push_back(dequantizedValue);
        });
        WriteToFile(dequantizedValues);
    }

    void operator()(const std::vector<int>& values)
    {
        ForEachValue(values, [](int value)
        {
            printf("%d ", value);
        });
        WriteToFile(values);
    }

private:
    template<typename Container, typename Delegate>
    void ForEachValue(const Container& c, Delegate delegate)
    {
        std::cout << m_OutputBinding << ": ";
        for (const auto& value : c)
        {
            delegate(value);
        }
        printf("\n");
    }

    template<typename T>
    void WriteToFile(const std::vector<T>& values)
    {
        if (!m_OutputTensorFile.empty())
        {
            std::ofstream outputTensorFile;
            outputTensorFile.open(m_OutputTensorFile, std::ofstream::out | std::ofstream::trunc);
            if (outputTensorFile.is_open())
            {
                outputTensorFile << m_OutputBinding << ": ";
                std::copy(values.begin(), values.end(), std::ostream_iterator<T>(outputTensorFile, " "));
            }
            else
            {
                BOOST_LOG_TRIVIAL(info) << "Output Tensor File: " << m_OutputTensorFile << " could not be opened!";
            }
            outputTensorFile.close();
        }
    }

    std::string m_OutputBinding;
    float m_Scale = 0.0f;
    int m_Offset = 0;
    std::string m_OutputTensorFile;
};
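
// Illustrative sketch (hypothetical binding and TensorInfo): the printer is dispatched
// through boost::apply_visitor, so the operator() overload matching the element type
// currently held by the output variant is selected at runtime:
//
//     TensorPrinter printer("softmax:0", outputInfo, "");  // empty path: print only, no file
//     boost::apply_visitor(printer, outputContainer);      // outputContainer holds one of the vector types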

template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
std::vector<T> GenerateDummyTensorData(unsigned int numElements)
{
    return std::vector<T>(numElements, static_cast<T>(0));
}

using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
using QuantizationParams = std::pair<float, int32_t>;

void PopulateTensorWithData(TContainer& tensorData,
                            unsigned int numElements,
                            const std::string& dataTypeStr,
                            const armnn::Optional<QuantizationParams>& qParams,
                            const armnn::Optional<std::string>& dataFile)
{
    const bool readFromFile = dataFile.has_value() && !dataFile.value().empty();
    const bool quantizeData = qParams.has_value();

    std::ifstream inputTensorFile;
    if (readFromFile)
    {
        inputTensorFile = std::ifstream(dataFile.value());
    }

    if (dataTypeStr.compare("float") == 0)
    {
        if (quantizeData)
        {
            const float qScale  = qParams.value().first;
            const int   qOffset = qParams.value().second;

            tensorData = readFromFile ?
                ParseDataArray<armnn::DataType::QuantisedAsymm8>(inputTensorFile, qScale, qOffset) :
                GenerateDummyTensorData<armnn::DataType::QuantisedAsymm8>(numElements);
        }
        else
        {
            tensorData = readFromFile ?
                ParseDataArray<armnn::DataType::Float32>(inputTensorFile) :
                GenerateDummyTensorData<armnn::DataType::Float32>(numElements);
        }
    }
    else if (dataTypeStr.compare("int") == 0)
    {
        tensorData = readFromFile ?
            ParseDataArray<armnn::DataType::Signed32>(inputTensorFile) :
            GenerateDummyTensorData<armnn::DataType::Signed32>(numElements);
    }
    else if (dataTypeStr.compare("qasymm8") == 0)
    {
        tensorData = readFromFile ?
            ParseDataArray<armnn::DataType::QuantisedAsymm8>(inputTensorFile) :
            GenerateDummyTensorData<armnn::DataType::QuantisedAsymm8>(numElements);
    }
    else
    {
        std::string errorMessage = "Unsupported tensor data type " + dataTypeStr;
        BOOST_LOG_TRIVIAL(fatal) << errorMessage;

        inputTensorFile.close();
        throw armnn::Exception(errorMessage);
    }

    inputTensorFile.close();
}
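
// Illustrative sketch (hypothetical file name): populating a float input of
// 1 * 224 * 224 * 3 elements, read from "input.txt" when a data file is supplied,
// or zero-filled via GenerateDummyTensorData when the optional is empty:
//
//     TContainer input;
//     PopulateTensorWithData(input, 1 * 224 * 224 * 3, "float",
//                            armnn::EmptyOptional(),  // no quantization
//                            armnn::MakeOptional<std::string>("input.txt"));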

} // anonymous namespace

bool generateTensorData = true;

struct ExecuteNetworkParams
{
    using TensorShapePtr = std::unique_ptr<armnn::TensorShape>;

    const char* m_ModelPath;
    bool m_IsModelBinary;
    std::vector<armnn::BackendId> m_ComputeDevices;
    std::string m_DynamicBackendsPath;
    std::vector<std::string> m_InputNames;
    std::vector<TensorShapePtr> m_InputTensorShapes;
    std::vector<std::string> m_InputTensorDataFilePaths;
    std::vector<std::string> m_InputTypes;
    bool m_QuantizeInput;
    std::vector<std::string> m_OutputTypes;
    std::vector<std::string> m_OutputNames;
    std::vector<std::string> m_OutputTensorFiles;
    bool m_EnableProfiling;
    bool m_EnableFp16TurboMode;
    double m_ThresholdTime;
    bool m_PrintIntermediate;
    size_t m_SubgraphId;
    bool m_EnableLayerDetails = false;
    bool m_GenerateTensorData;
    bool m_ParseUnsupported = false;
};

template<typename TParser, typename TDataType>
int MainImpl(const ExecuteNetworkParams& params,
             const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
    using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;

    std::vector<TContainer> inputDataContainers;

    try
    {
        // Creates an InferenceModel, which will parse the model and load it into an IRuntime.
        typename InferenceModel<TParser, TDataType>::Params inferenceModelParams;
        inferenceModelParams.m_ModelPath = params.m_ModelPath;
        inferenceModelParams.m_IsModelBinary = params.m_IsModelBinary;
        inferenceModelParams.m_ComputeDevices = params.m_ComputeDevices;
        inferenceModelParams.m_DynamicBackendsPath = params.m_DynamicBackendsPath;
        inferenceModelParams.m_PrintIntermediateLayers = params.m_PrintIntermediate;
        inferenceModelParams.m_VisualizePostOptimizationModel = params.m_EnableLayerDetails;
        inferenceModelParams.m_ParseUnsupported = params.m_ParseUnsupported;

        for (const std::string& inputName : params.m_InputNames)
        {
            inferenceModelParams.m_InputBindings.push_back(inputName);
        }

        for (unsigned int i = 0; i < params.m_InputTensorShapes.size(); ++i)
        {
            inferenceModelParams.m_InputShapes.push_back(*params.m_InputTensorShapes[i]);
        }

        for (const std::string& outputName : params.m_OutputNames)
        {
            inferenceModelParams.m_OutputBindings.push_back(outputName);
        }

        inferenceModelParams.m_SubgraphId = params.m_SubgraphId;
        inferenceModelParams.m_EnableFp16TurboMode = params.m_EnableFp16TurboMode;

        InferenceModel<TParser, TDataType> model(inferenceModelParams,
                                                 params.m_EnableProfiling,
                                                 params.m_DynamicBackendsPath,
                                                 runtime);

        const size_t numInputs = inferenceModelParams.m_InputBindings.size();
        for (unsigned int i = 0; i < numInputs; ++i)
        {
            armnn::Optional<QuantizationParams> qParams = params.m_QuantizeInput ?
                armnn::MakeOptional<QuantizationParams>(model.GetInputQuantizationParams()) :
                armnn::EmptyOptional();

            armnn::Optional<std::string> dataFile = params.m_GenerateTensorData ?
                armnn::EmptyOptional() :
                armnn::MakeOptional<std::string>(params.m_InputTensorDataFilePaths[i]);

            unsigned int numElements = model.GetInputSize(i);
            if (params.m_InputTensorShapes.size() > i && params.m_InputTensorShapes[i])
            {
                // If the user has provided a tensor shape for the current input,
                // override numElements.
                numElements = params.m_InputTensorShapes[i]->GetNumElements();
            }

            TContainer tensorData;
            PopulateTensorWithData(tensorData,
                                   numElements,
                                   params.m_InputTypes[i],
                                   qParams,
                                   dataFile);

            inputDataContainers.push_back(tensorData);
        }

        const size_t numOutputs = inferenceModelParams.m_OutputBindings.size();
        std::vector<TContainer> outputDataContainers;

        for (unsigned int i = 0; i < numOutputs; ++i)
        {
            if (params.m_OutputTypes[i].compare("float") == 0)
            {
                outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("int") == 0)
            {
                outputDataContainers.push_back(std::vector<int>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("qasymm8") == 0)
            {
                outputDataContainers.push_back(std::vector<uint8_t>(model.GetOutputSize(i)));
            }
            else
            {
                BOOST_LOG_TRIVIAL(fatal) << "Unsupported tensor data type \"" << params.m_OutputTypes[i] << "\". ";
                return EXIT_FAILURE;
            }
        }

        // model.Run returns the inference time elapsed in EnqueueWorkload (in milliseconds).
        auto inference_duration = model.Run(inputDataContainers, outputDataContainers);

        if (params.m_GenerateTensorData)
        {
            BOOST_LOG_TRIVIAL(warning) << "The input data was generated, note that the output will not be useful";
        }

        // Print output tensors
        const auto& infosOut = model.GetOutputBindingInfos();
        for (size_t i = 0; i < numOutputs; i++)
        {
            const armnn::TensorInfo& infoOut = infosOut[i].second;
            auto outputTensorFile = params.m_OutputTensorFiles.empty() ? "" : params.m_OutputTensorFiles[i];

            TensorPrinter printer(inferenceModelParams.m_OutputBindings[i], infoOut, outputTensorFile);
            boost::apply_visitor(printer, outputDataContainers[i]);
        }

        BOOST_LOG_TRIVIAL(info) << "\nInference time: " << std::setprecision(2)
                                << std::fixed << inference_duration.count() << " ms";

        // If thresholdTime == 0.0 (default), then it hasn't been supplied at command line.
        if (params.m_ThresholdTime != 0.0)
        {
            BOOST_LOG_TRIVIAL(info) << "Threshold time: " << std::setprecision(2)
                                    << std::fixed << params.m_ThresholdTime << " ms";
            auto thresholdMinusInference = params.m_ThresholdTime - inference_duration.count();
            BOOST_LOG_TRIVIAL(info) << "Threshold time - Inference time: " << std::setprecision(2)
                                    << std::fixed << thresholdMinusInference << " ms" << "\n";

            if (thresholdMinusInference < 0)
            {
                BOOST_LOG_TRIVIAL(fatal) << "Elapsed inference time is greater than provided threshold time.\n";
                return EXIT_FAILURE;
            }
        }
    }
    catch (armnn::Exception const& e)
    {
        BOOST_LOG_TRIVIAL(fatal) << "Armnn Error: " << e.what();
        return EXIT_FAILURE;
    }

    return EXIT_SUCCESS;
}
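
// Illustrative sketch (hypothetical values): RunTest below performs this dispatch from
// the model-format string, but MainImpl can also be instantiated directly for a known
// parser, with the remaining fields filled in as RunTest does:
//
//     ExecuteNetworkParams params;
//     params.m_ModelPath      = "model.tflite";  // hypothetical path
//     params.m_IsModelBinary  = true;
//     params.m_ComputeDevices = { armnn::Compute::CpuAcc };
//     params.m_InputNames     = { "input" };
//     params.m_OutputNames    = { "output" };
//     MainImpl<armnnTfLiteParser::ITfLiteParser, float>(params);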

// This will run a test
int RunTest(const std::string& format,
            const std::string& inputTensorShapesStr,
            const std::vector<armnn::BackendId>& computeDevices,
            const std::string& dynamicBackendsPath,
            const std::string& path,
            const std::string& inputNames,
            const std::string& inputTensorDataFilePaths,
            const std::string& inputTypes,
            bool quantizeInput,
            const std::string& outputTypes,
            const std::string& outputNames,
            const std::string& outputTensorFiles,
            bool enableProfiling,
            bool enableFp16TurboMode,
            const double& thresholdTime,
            bool printIntermediate,
            const size_t subgraphId,
            bool enableLayerDetails = false,
            bool parseUnsupported = false,
            const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
    std::string modelFormat = boost::trim_copy(format);
    std::string modelPath = boost::trim_copy(path);
    std::vector<std::string> inputNamesVector = ParseStringList(inputNames, ",");
    std::vector<std::string> inputTensorShapesVector = ParseStringList(inputTensorShapesStr, ":");
    std::vector<std::string> inputTensorDataFilePathsVector = ParseStringList(
        inputTensorDataFilePaths, ",");
    std::vector<std::string> outputNamesVector = ParseStringList(outputNames, ",");
    std::vector<std::string> inputTypesVector = ParseStringList(inputTypes, ",");
    std::vector<std::string> outputTypesVector = ParseStringList(outputTypes, ",");
    std::vector<std::string> outputTensorFilesVector = ParseStringList(outputTensorFiles, ",");

    // Parse the model binary flag from the model-format string we got from the command-line.
    bool isModelBinary;
    if (modelFormat.find("bin") != std::string::npos)
    {
        isModelBinary = true;
    }
    else if (modelFormat.find("txt") != std::string::npos || modelFormat.find("text") != std::string::npos)
    {
        isModelBinary = false;
    }
    else
    {
        BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat << "'. Please include 'binary' or 'text'";
        return EXIT_FAILURE;
    }

    if ((inputTensorShapesVector.size() != 0) && (inputTensorShapesVector.size() != inputNamesVector.size()))
    {
        BOOST_LOG_TRIVIAL(fatal) << "input-name and input-tensor-shape must have the same amount of elements.";
        return EXIT_FAILURE;
    }

    if ((inputTensorDataFilePathsVector.size() != 0) &&
        (inputTensorDataFilePathsVector.size() != inputNamesVector.size()))
    {
        BOOST_LOG_TRIVIAL(fatal) << "input-name and input-tensor-data must have the same amount of elements.";
        return EXIT_FAILURE;
    }

    if ((outputTensorFilesVector.size() != 0) &&
        (outputTensorFilesVector.size() != outputNamesVector.size()))
    {
        BOOST_LOG_TRIVIAL(fatal) << "output-name and write-outputs-to-file must have the same amount of elements.";
        return EXIT_FAILURE;
    }

    if (inputTypesVector.size() == 0)
    {
        // Defaults the value of all inputs to "float".
        inputTypesVector.assign(inputNamesVector.size(), "float");
    }
    else if ((inputTypesVector.size() != 0) && (inputTypesVector.size() != inputNamesVector.size()))
    {
        BOOST_LOG_TRIVIAL(fatal) << "input-name and input-type must have the same amount of elements.";
        return EXIT_FAILURE;
    }

    if (outputTypesVector.size() == 0)
    {
        // Defaults the value of all outputs to "float".
        outputTypesVector.assign(outputNamesVector.size(), "float");
    }
    else if ((outputTypesVector.size() != 0) && (outputTypesVector.size() != outputNamesVector.size()))
    {
        BOOST_LOG_TRIVIAL(fatal) << "output-name and output-type must have the same amount of elements.";
        return EXIT_FAILURE;
    }

    // Parse input tensor shape from the string we got from the command-line.
    std::vector<std::unique_ptr<armnn::TensorShape>> inputTensorShapes;

    if (!inputTensorShapesVector.empty())
    {
        inputTensorShapes.reserve(inputTensorShapesVector.size());

        for (const std::string& shape : inputTensorShapesVector)
        {
            std::stringstream ss(shape);
            std::vector<unsigned int> dims = ParseArray(ss);

            try
            {
                // Coverity fix: An exception of type armnn::InvalidArgumentException is thrown and never caught.
                inputTensorShapes.push_back(std::make_unique<armnn::TensorShape>(dims.size(), dims.data()));
            }
            catch (const armnn::InvalidArgumentException& e)
            {
                BOOST_LOG_TRIVIAL(fatal) << "Cannot create tensor shape: " << e.what();
                return EXIT_FAILURE;
            }
        }
    }

    // Check that the threshold time is not less than zero.
    if (thresholdTime < 0)
    {
        BOOST_LOG_TRIVIAL(fatal) << "Threshold time supplied as a command line argument is less than zero.";
        return EXIT_FAILURE;
    }

    ExecuteNetworkParams params;
    params.m_ModelPath = modelPath.c_str();
    params.m_IsModelBinary = isModelBinary;
    params.m_ComputeDevices = computeDevices;
    params.m_DynamicBackendsPath = dynamicBackendsPath;
    params.m_InputNames = inputNamesVector;
    params.m_InputTensorShapes = std::move(inputTensorShapes);
    params.m_InputTensorDataFilePaths = inputTensorDataFilePathsVector;
    params.m_InputTypes = inputTypesVector;
    params.m_QuantizeInput = quantizeInput;
    params.m_OutputTypes = outputTypesVector;
    params.m_OutputNames = outputNamesVector;
    params.m_OutputTensorFiles = outputTensorFilesVector;
    params.m_EnableProfiling = enableProfiling;
    params.m_EnableFp16TurboMode = enableFp16TurboMode;
    params.m_ThresholdTime = thresholdTime;
    params.m_PrintIntermediate = printIntermediate;
    params.m_SubgraphId = subgraphId;
    params.m_EnableLayerDetails = enableLayerDetails;
    params.m_GenerateTensorData = inputTensorDataFilePathsVector.empty();
    params.m_ParseUnsupported = parseUnsupported;

    // Warn if ExecuteNetwork will generate dummy input data.
    if (params.m_GenerateTensorData)
    {
        BOOST_LOG_TRIVIAL(warning) << "No input files provided, input tensors will be filled with 0s.";
    }

    // Forward to the implementation based on the parser type.
    if (modelFormat.find("armnn") != std::string::npos)
    {
#if defined(ARMNN_SERIALIZER)
        return MainImpl<armnnDeserializer::IDeserializer, float>(params, runtime);
#else
        BOOST_LOG_TRIVIAL(fatal) << "Not built with serialization support.";
        return EXIT_FAILURE;
#endif
    }
    else if (modelFormat.find("caffe") != std::string::npos)
    {
#if defined(ARMNN_CAFFE_PARSER)
        return MainImpl<armnnCaffeParser::ICaffeParser, float>(params, runtime);
#else
        BOOST_LOG_TRIVIAL(fatal) << "Not built with Caffe parser support.";
        return EXIT_FAILURE;
#endif
    }
    else if (modelFormat.find("onnx") != std::string::npos)
    {
#if defined(ARMNN_ONNX_PARSER)
        return MainImpl<armnnOnnxParser::IOnnxParser, float>(params, runtime);
#else
        BOOST_LOG_TRIVIAL(fatal) << "Not built with Onnx parser support.";
        return EXIT_FAILURE;
#endif
    }
    else if (modelFormat.find("tensorflow") != std::string::npos)
    {
#if defined(ARMNN_TF_PARSER)
        return MainImpl<armnnTfParser::ITfParser, float>(params, runtime);
#else
        BOOST_LOG_TRIVIAL(fatal) << "Not built with Tensorflow parser support.";
        return EXIT_FAILURE;
#endif
    }
    else if (modelFormat.find("tflite") != std::string::npos)
    {
#if defined(ARMNN_TF_LITE_PARSER)
        if (!isModelBinary)
        {
            BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat
                                     << "'. Only 'binary' format supported for tflite files";
            return EXIT_FAILURE;
        }
        return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(params, runtime);
#else
        BOOST_LOG_TRIVIAL(fatal) << "Not built with TfLite parser support.";
        return EXIT_FAILURE;
#endif
    }
    else
    {
        BOOST_LOG_TRIVIAL(fatal) << "Unknown model format: '" << modelFormat
                                 << "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'";
        return EXIT_FAILURE;
    }
}
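
// Illustrative sketch (hypothetical paths and names): a typical invocation that reaches
// the tflite branch above:
//
//     ExecuteNetwork -f tflite-binary -m model.tflite -c CpuAcc,CpuRef \
//                    -i input -s 1,224,224,3 -d input.txt -o output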

int RunCsvTest(const armnnUtils::CsvRow& csvRow, const std::shared_ptr<armnn::IRuntime>& runtime,
               const bool enableProfiling, const bool enableFp16TurboMode, const double& thresholdTime,
               const bool printIntermediate, bool enableLayerDetails = false, bool parseUnsupported = false)
{
    std::string modelFormat;
    std::string modelPath;
    std::string inputNames;
    std::string inputTensorShapes;
    std::string inputTensorDataFilePaths;
    std::string outputNames;
    std::string inputTypes;
    std::string outputTypes;
    std::string dynamicBackendsPath;
    std::string outputTensorFiles;

    size_t subgraphId = 0;

    const std::string backendsMessage = std::string("The preferred order of devices to run layers on by default. ")
                                        + std::string("Possible choices: ")
                                        + armnn::BackendRegistryInstance().GetBackendIdsAsString();

    po::options_description desc("Options");
    try
    {
        desc.add_options()
            ("model-format,f", po::value(&modelFormat),
             "armnn-binary, caffe-binary, caffe-text, tflite-binary, onnx-binary, onnx-text, tensorflow-binary or "
             "tensorflow-text.")
            ("model-path,m", po::value(&modelPath), "Path to model file, e.g. .armnn, .caffemodel, .prototxt, "
             ".tflite, .onnx")
            ("compute,c", po::value<std::vector<armnn::BackendId>>()->multitoken(),
             backendsMessage.c_str())
            ("dynamic-backends-path,b", po::value(&dynamicBackendsPath),
             "Path where to load any available dynamic backend from. "
             "If left empty (the default), dynamic backends will not be used.")
            ("input-name,i", po::value(&inputNames),
             "Identifier of the input tensors in the network separated by comma.")
            ("subgraph-number,n", po::value<size_t>(&subgraphId)->default_value(0), "Id of the subgraph to be "
             "executed. Defaults to 0.")
            ("input-tensor-shape,s", po::value(&inputTensorShapes),
             "The shape of the input tensors in the network as a flat array of integers separated by comma. "
             "Several shapes can be passed by separating them with a colon (:). "
             "This parameter is optional, depending on the network.")
            ("input-tensor-data,d", po::value(&inputTensorDataFilePaths)->default_value(""),
             "Path to files containing the input data as a flat array separated by whitespace. "
             "Several paths can be passed separating them by comma. If not specified, the network will be run with dummy "
             "data (useful for profiling).")
            ("input-type,y", po::value(&inputTypes), "The type of the input tensors in the network separated by comma. "
             "If unset, defaults to \"float\" for all defined inputs. "
             "Accepted values (float, int or qasymm8).")
            ("quantize-input,q", po::bool_switch()->default_value(false),
             "If this option is enabled, all float inputs will be quantized to qasymm8. "
             "If unset, default to not quantized. "
             "Accepted values (true or false)")
            ("output-type,z", po::value(&outputTypes), "The type of the output tensors in the network separated by comma. "
             "If unset, defaults to \"float\" for all defined outputs. "
             "Accepted values (float, int or qasymm8).")
            ("output-name,o", po::value(&outputNames),
             "Identifier of the output tensors in the network separated by comma.")
            ("write-outputs-to-file,w", po::value(&outputTensorFiles),
             "Comma-separated list of output file paths keyed with the binding-id of the output slot. "
             "If left empty (the default), the output tensors will not be written to a file.");
    }
    catch (const std::exception& e)
    {
        // Coverity points out that default_value(...) can throw a bad_lexical_cast,
        // and that desc.add_options() can throw boost::io::too_few_args.
        // They really won't in any of these cases.
        BOOST_ASSERT_MSG(false, "Caught unexpected exception");
        BOOST_LOG_TRIVIAL(fatal) << "Fatal internal error: " << e.what();
        return EXIT_FAILURE;
    }

    std::vector<const char*> clOptions;
    clOptions.reserve(csvRow.values.size());
    for (const std::string& value : csvRow.values)
    {
        clOptions.push_back(value.c_str());
    }

    po::variables_map vm;
    try
    {
        po::store(po::parse_command_line(static_cast<int>(clOptions.size()), clOptions.data(), desc), vm);

        po::notify(vm);

        CheckOptionDependencies(vm);
    }
    catch (const po::error& e)
    {
        std::cerr << e.what() << std::endl << std::endl;
        std::cerr << desc << std::endl;
        return EXIT_FAILURE;
    }

    // Get the value of the switch arguments.
    bool quantizeInput = vm["quantize-input"].as<bool>();

    // Get the preferred order of compute devices.
    std::vector<armnn::BackendId> computeDevices = vm["compute"].as<std::vector<armnn::BackendId>>();

    // Remove duplicates from the list of compute devices.
    RemoveDuplicateDevices(computeDevices);

    // Check that the specified compute devices are valid.
    std::string invalidBackends;
    if (!CheckRequestedBackendsAreValid(computeDevices, armnn::Optional<std::string&>(invalidBackends)))
    {
        BOOST_LOG_TRIVIAL(fatal) << "The list of preferred devices contains invalid backend IDs: "
                                 << invalidBackends;
        return EXIT_FAILURE;
    }

    return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
                   inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames, outputTensorFiles,
                   enableProfiling, enableFp16TurboMode, thresholdTime, printIntermediate, subgraphId,
                   enableLayerDetails, parseUnsupported);
}
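
// Illustrative sketch (hypothetical row): RunCsvTest treats each CSV row as an
// argv-style command line, so a row whose cells are
//
//     ExecuteNetwork, -f, tflite-binary, -m, model.tflite, -c, CpuRef, -i, input, -o, output
//
// is parsed against the options description above and forwarded to RunTest.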