blob: b5652df37fc1912fe0f44467ea9c7c45cbcf750e [file] [log] [blame]
Laurent Carlier749294b2020-06-01 09:03:17 +01001//
Sadik Armagana9c2ce12020-07-14 10:02:22 +01002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
Francis Murtaghbee4bc92019-06-18 12:30:37 +01003// SPDX-License-Identifier: MIT
4//
5#include <armnn/ArmNN.hpp>
6#include <armnn/TypesUtils.hpp>
Matthew Sloyan80c6b142020-09-08 12:00:32 +01007#include <armnn/utility/NumericCast.hpp>
alered01a7227ac2020-05-07 14:58:29 +01008#include <armnn/utility/Timer.hpp>
Francis Murtaghbee4bc92019-06-18 12:30:37 +01009
10#if defined(ARMNN_SERIALIZER)
11#include "armnnDeserializer/IDeserializer.hpp"
12#endif
13#if defined(ARMNN_CAFFE_PARSER)
14#include "armnnCaffeParser/ICaffeParser.hpp"
15#endif
16#if defined(ARMNN_TF_PARSER)
17#include "armnnTfParser/ITfParser.hpp"
18#endif
19#if defined(ARMNN_TF_LITE_PARSER)
20#include "armnnTfLiteParser/ITfLiteParser.hpp"
21#endif
22#if defined(ARMNN_ONNX_PARSER)
23#include "armnnOnnxParser/IOnnxParser.hpp"
24#endif
25#include "CsvReader.hpp"
26#include "../InferenceTest.hpp"
27
Francis Murtaghbee4bc92019-06-18 12:30:37 +010028#include <Profiling.hpp>
Aron Virginas-Tarc82c8732019-10-24 17:07:43 +010029#include <ResolveType.hpp>
Francis Murtaghbee4bc92019-06-18 12:30:37 +010030
Francis Murtaghbee4bc92019-06-18 12:30:37 +010031#include <boost/program_options.hpp>
32#include <boost/variant.hpp>
33
34#include <iostream>
35#include <fstream>
36#include <functional>
37#include <future>
38#include <algorithm>
39#include <iterator>
40
41namespace
42{
43
44// Configure boost::program_options for command-line parsing and validation.
45namespace po = boost::program_options;
46
47template<typename T, typename TParseElementFunc>
48std::vector<T> ParseArrayImpl(std::istream& stream, TParseElementFunc parseElementFunc, const char * chars = "\t ,:")
49{
50 std::vector<T> result;
51 // Processes line-by-line.
52 std::string line;
53 while (std::getline(stream, line))
54 {
David Monahana8837bf2020-04-16 10:01:56 +010055 std::vector<std::string> tokens = armnn::stringUtils::StringTokenizer(line, chars);
Francis Murtaghbee4bc92019-06-18 12:30:37 +010056 for (const std::string& token : tokens)
57 {
58 if (!token.empty()) // See https://stackoverflow.com/questions/10437406/
59 {
60 try
61 {
62 result.push_back(parseElementFunc(token));
63 }
64 catch (const std::exception&)
65 {
Derek Lamberti08446972019-11-26 16:38:31 +000066 ARMNN_LOG(error) << "'" << token << "' is not a valid number. It has been ignored.";
Francis Murtaghbee4bc92019-06-18 12:30:37 +010067 }
68 }
69 }
70 }
71
72 return result;
73}
74
75bool CheckOption(const po::variables_map& vm,
76 const char* option)
77{
78 // Check that the given option is valid.
79 if (option == nullptr)
80 {
81 return false;
82 }
83
84 // Check whether 'option' is provided.
85 return vm.find(option) != vm.end();
86}
87
88void CheckOptionDependency(const po::variables_map& vm,
89 const char* option,
90 const char* required)
91{
92 // Check that the given options are valid.
93 if (option == nullptr || required == nullptr)
94 {
95 throw po::error("Invalid option to check dependency for");
96 }
97
98 // Check that if 'option' is provided, 'required' is also provided.
99 if (CheckOption(vm, option) && !vm[option].defaulted())
100 {
101 if (CheckOption(vm, required) == 0 || vm[required].defaulted())
102 {
103 throw po::error(std::string("Option '") + option + "' requires option '" + required + "'.");
104 }
105 }
106}
107
108void CheckOptionDependencies(const po::variables_map& vm)
109{
110 CheckOptionDependency(vm, "model-path", "model-format");
111 CheckOptionDependency(vm, "model-path", "input-name");
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100112 CheckOptionDependency(vm, "model-path", "output-name");
113 CheckOptionDependency(vm, "input-tensor-shape", "model-path");
114}
115
// Primary template for parsing a data file into a typed vector; only the
// explicit specialisations below are ever defined/instantiated.
template<armnn::DataType NonQuantizedType>
auto ParseDataArray(std::istream & stream);

// Overload for quantized element types: values are read as float and quantized
// with the supplied scale/offset before being stored.
template<armnn::DataType QuantizedType>
auto ParseDataArray(std::istream& stream,
                    const float& quantizationScale,
                    const int32_t& quantizationOffset);
123
124template<>
125auto ParseDataArray<armnn::DataType::Float32>(std::istream & stream)
126{
127 return ParseArrayImpl<float>(stream, [](const std::string& s) { return std::stof(s); });
128}
129
130template<>
131auto ParseDataArray<armnn::DataType::Signed32>(std::istream & stream)
132{
133 return ParseArrayImpl<int>(stream, [](const std::string & s) { return std::stoi(s); });
134}
135
136template<>
Derek Lambertif90c56d2020-01-10 17:14:08 +0000137auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream)
Narumol Prangnawarat610256f2019-06-26 15:10:46 +0100138{
139 return ParseArrayImpl<uint8_t>(stream,
Matthew Sloyan80c6b142020-09-08 12:00:32 +0100140 [](const std::string& s) { return armnn::numeric_cast<uint8_t>(std::stoi(s)); });
Narumol Prangnawarat610256f2019-06-26 15:10:46 +0100141}
142
143template<>
Derek Lambertif90c56d2020-01-10 17:14:08 +0000144auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream,
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100145 const float& quantizationScale,
146 const int32_t& quantizationOffset)
147{
148 return ParseArrayImpl<uint8_t>(stream,
149 [&quantizationScale, &quantizationOffset](const std::string & s)
150 {
Matthew Sloyan80c6b142020-09-08 12:00:32 +0100151 return armnn::numeric_cast<uint8_t>(
Rob Hughes93667b12019-09-23 16:24:05 +0100152 armnn::Quantize<uint8_t>(std::stof(s),
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100153 quantizationScale,
154 quantizationOffset));
155 });
156}
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100157std::vector<unsigned int> ParseArray(std::istream& stream)
158{
159 return ParseArrayImpl<unsigned int>(stream,
Matthew Sloyan80c6b142020-09-08 12:00:32 +0100160 [](const std::string& s) { return armnn::numeric_cast<unsigned int>(std::stoi(s)); });
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100161}
162
163std::vector<std::string> ParseStringList(const std::string & inputString, const char * delimiter)
164{
165 std::stringstream stream(inputString);
David Monahana8837bf2020-04-16 10:01:56 +0100166 return ParseArrayImpl<std::string>(stream, [](const std::string& s) {
167 return armnn::stringUtils::StringTrimCopy(s); }, delimiter);
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100168}
169
170void RemoveDuplicateDevices(std::vector<armnn::BackendId>& computeDevices)
171{
172 // Mark the duplicate devices as 'Undefined'.
173 for (auto i = computeDevices.begin(); i != computeDevices.end(); ++i)
174 {
175 for (auto j = std::next(i); j != computeDevices.end(); ++j)
176 {
177 if (*j == *i)
178 {
179 *j = armnn::Compute::Undefined;
180 }
181 }
182 }
183
184 // Remove 'Undefined' devices.
185 computeDevices.erase(std::remove(computeDevices.begin(), computeDevices.end(), armnn::Compute::Undefined),
186 computeDevices.end());
187}
188
// Visitor applied to the TContainer variant holding one output tensor's data.
// Prints the values to stdout (prefixed by the binding name) and optionally
// writes them to a file.
struct TensorPrinter : public boost::static_visitor<>
{
    // binding:          output binding name, printed before the values.
    // info:             tensor info; only its quantization params are kept.
    // outputTensorFile: destination file path ("" disables file output).
    // dequantizeOutput: if true, uint8 outputs are dequantized before printing.
    TensorPrinter(const std::string& binding,
                  const armnn::TensorInfo& info,
                  const std::string& outputTensorFile,
                  bool dequantizeOutput)
        : m_OutputBinding(binding)
        , m_Scale(info.GetQuantizationScale())
        , m_Offset(info.GetQuantizationOffset())
        , m_OutputTensorFile(outputTensorFile)
        , m_DequantizeOutput(dequantizeOutput)
    {}

    // Float output: print values as-is, then write them to file (if enabled).
    void operator()(const std::vector<float>& values)
    {
        ForEachValue(values, [](float value)
        {
            printf("%f ", value);
        });
        WriteToFile(values);
    }

    // uint8 output: either dequantize using the stored scale/offset, or fall
    // back to printing the raw values via the int overload.
    void operator()(const std::vector<uint8_t>& values)
    {
        if(m_DequantizeOutput)
        {
            auto& scale = m_Scale;
            auto& offset = m_Offset;
            // Collect the dequantized values while printing so the file gets
            // the same (dequantized) data as stdout.
            std::vector<float> dequantizedValues;
            ForEachValue(values, [&scale, &offset, &dequantizedValues](uint8_t value)
            {
                auto dequantizedValue = armnn::Dequantize(value, scale, offset);
                printf("%f ", dequantizedValue);
                dequantizedValues.push_back(dequantizedValue);
            });
            WriteToFile(dequantizedValues);
        }
        else
        {
            // Widen to int so the generic int overload handles print + write.
            const std::vector<int> intValues(values.begin(), values.end());
            operator()(intValues);
        }
    }

    // Signed int output: print values as-is, then write them to file.
    void operator()(const std::vector<int>& values)
    {
        ForEachValue(values, [](int value)
        {
            printf("%d ", value);
        });
        WriteToFile(values);
    }

private:
    // Prints "<binding>: ", then delegate(value) for each element, then a
    // trailing newline.
    template<typename Container, typename Delegate>
    void ForEachValue(const Container& c, Delegate delegate)
    {
        std::cout << m_OutputBinding << ": ";
        for (const auto& value : c)
        {
            delegate(value);
        }
        printf("\n");
    }

    // Writes the values (space-separated, prefixed by the binding name) to
    // m_OutputTensorFile, truncating any existing content. No-op when no file
    // path was supplied; failure to open is logged but not fatal.
    template<typename T>
    void WriteToFile(const std::vector<T>& values)
    {
        if (!m_OutputTensorFile.empty())
        {
            std::ofstream outputTensorFile;
            outputTensorFile.open(m_OutputTensorFile, std::ofstream::out | std::ofstream::trunc);
            if (outputTensorFile.is_open())
            {
                outputTensorFile << m_OutputBinding << ": ";
                std::copy(values.begin(), values.end(), std::ostream_iterator<T>(outputTensorFile, " "));
            }
            else
            {
                ARMNN_LOG(info) << "Output Tensor File: " << m_OutputTensorFile << " could not be opened!";
            }
            // Safe even when open failed; close() on a non-open stream sets
            // failbit but has no other effect here.
            outputTensorFile.close();
        }
    }

    std::string m_OutputBinding;     // Binding name used as the print prefix.
    float m_Scale=0.0f;              // Quantization scale from the tensor info.
    int m_Offset=0;                  // Quantization offset from the tensor info.
    std::string m_OutputTensorFile;  // Destination path; "" disables writing.
    bool m_DequantizeOutput = false; // Whether uint8 outputs are dequantized.
};
280
281
Aron Virginas-Tarc82c8732019-10-24 17:07:43 +0100282
283template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
284std::vector<T> GenerateDummyTensorData(unsigned int numElements)
285{
286 return std::vector<T>(numElements, static_cast<T>(0));
287}
288
// Variant holding one tensor's worth of data in whichever element type the
// binding uses (float, int32 or uint8).
using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
// (scale, offset) pair describing a tensor's quantization.
using QuantizationParams = std::pair<float, int32_t>;
291
292void PopulateTensorWithData(TContainer& tensorData,
293 unsigned int numElements,
294 const std::string& dataTypeStr,
295 const armnn::Optional<QuantizationParams>& qParams,
296 const armnn::Optional<std::string>& dataFile)
297{
298 const bool readFromFile = dataFile.has_value() && !dataFile.value().empty();
299 const bool quantizeData = qParams.has_value();
300
301 std::ifstream inputTensorFile;
302 if (readFromFile)
303 {
304 inputTensorFile = std::ifstream(dataFile.value());
305 }
306
307 if (dataTypeStr.compare("float") == 0)
308 {
309 if (quantizeData)
310 {
311 const float qScale = qParams.value().first;
312 const int qOffset = qParams.value().second;
313
314 tensorData = readFromFile ?
Derek Lambertif90c56d2020-01-10 17:14:08 +0000315 ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile, qScale, qOffset) :
316 GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
Aron Virginas-Tarc82c8732019-10-24 17:07:43 +0100317 }
318 else
319 {
320 tensorData = readFromFile ?
321 ParseDataArray<armnn::DataType::Float32>(inputTensorFile) :
322 GenerateDummyTensorData<armnn::DataType::Float32>(numElements);
323 }
324 }
325 else if (dataTypeStr.compare("int") == 0)
326 {
327 tensorData = readFromFile ?
328 ParseDataArray<armnn::DataType::Signed32>(inputTensorFile) :
329 GenerateDummyTensorData<armnn::DataType::Signed32>(numElements);
330 }
331 else if (dataTypeStr.compare("qasymm8") == 0)
332 {
333 tensorData = readFromFile ?
Derek Lambertif90c56d2020-01-10 17:14:08 +0000334 ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile) :
335 GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
Aron Virginas-Tarc82c8732019-10-24 17:07:43 +0100336 }
337 else
338 {
339 std::string errorMessage = "Unsupported tensor data type " + dataTypeStr;
Derek Lamberti08446972019-11-26 16:38:31 +0000340 ARMNN_LOG(fatal) << errorMessage;
Aron Virginas-Tarc82c8732019-10-24 17:07:43 +0100341
342 inputTensorFile.close();
343 throw armnn::Exception(errorMessage);
344 }
345
346 inputTensorFile.close();
347}
348
349} // anonymous namespace
350
// NOTE(review): file-scope, externally-linked flag that is never read within
// this view — presumably referenced from another translation unit; confirm
// before removing.
bool generateTensorData = true;
352
// Aggregates every command-line-derived option needed to execute a network.
// Populated in RunTest() and consumed by MainImpl(), where most fields are
// copied into the InferenceModel's own Params.
struct ExecuteNetworkParams
{
    using TensorShapePtr = std::unique_ptr<armnn::TensorShape>;

    const char* m_ModelPath;                         // Path to the model file.
    bool m_IsModelBinary;                            // True for binary model formats, false for text.
    std::vector<armnn::BackendId> m_ComputeDevices;  // Backends to execute on, in preference order.
    std::string m_DynamicBackendsPath;               // Search path for dynamically loaded backends.
    std::vector<string> m_InputNames;                // Input binding names.
    std::vector<TensorShapePtr> m_InputTensorShapes; // Optional explicit shapes, parallel to m_InputNames.
    std::vector<string> m_InputTensorDataFilePaths;  // Input data files, parallel to m_InputNames.
    std::vector<string> m_InputTypes;                // Input element types ("float", "int", "qasymm8").
    bool m_QuantizeInput;                            // Quantize float input data to uint8.
    std::vector<string> m_OutputTypes;               // Output element types, parallel to m_OutputNames.
    std::vector<string> m_OutputNames;               // Output binding names.
    std::vector<string> m_OutputTensorFiles;         // Files to write outputs to (may be empty).
    bool m_DequantizeOutput;                         // Dequantize uint8 outputs before printing.
    bool m_EnableProfiling;                          // Enable profiling in the InferenceModel.
    bool m_EnableFp16TurboMode;                      // Forwarded to the model's Fp16 turbo-mode option.
    bool m_EnableBf16TurboMode;                      // Forwarded to the model's Bf16 turbo-mode option.
    double m_ThresholdTime;                          // Inference-time threshold in ms (0.0 = not set).
    bool m_PrintIntermediate;                        // Print intermediate layer outputs.
    size_t m_SubgraphId;                             // Id of the subgraph to execute.
    bool m_EnableLayerDetails = false;               // Visualize the post-optimization model.
    bool m_GenerateTensorData;                       // True when inputs are zero-filled dummies.
    bool m_ParseUnsupported = false;                 // Forwarded to the parser's ParseUnsupported option.
    bool m_InferOutputShape = false;                 // Forwarded to the model's InferOutputShape option.
    bool m_EnableFastMath = false;                   // Forwarded to the model's EnableFastMath option.
};
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100382
// Parses and loads the model with TParser, populates the input tensors from
// files or generated dummy data, runs 'iterations' inferences, and prints the
// outputs and timing information for each run.
//
// params:     all command-line-derived settings (see ExecuteNetworkParams).
// runtime:    optional shared IRuntime; when null the model creates its own.
// iterations: number of times to run inference on the same inputs.
// Returns EXIT_SUCCESS, or EXIT_FAILURE on error (armnn exceptions are caught
// and logged here rather than propagated).
template<typename TParser, typename TDataType>
int MainImpl(const ExecuteNetworkParams& params,
             const std::shared_ptr<armnn::IRuntime>& runtime = nullptr,
             size_t iterations = 1)
{
    using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;

    std::vector<TContainer> inputDataContainers;

    try
    {
        // Creates an InferenceModel, which will parse the model and load it into an IRuntime.
        typename InferenceModel<TParser, TDataType>::Params inferenceModelParams;
        inferenceModelParams.m_ModelPath = params.m_ModelPath;
        inferenceModelParams.m_IsModelBinary = params.m_IsModelBinary;
        inferenceModelParams.m_ComputeDevices = params.m_ComputeDevices;
        inferenceModelParams.m_DynamicBackendsPath = params.m_DynamicBackendsPath;
        inferenceModelParams.m_PrintIntermediateLayers = params.m_PrintIntermediate;
        inferenceModelParams.m_VisualizePostOptimizationModel = params.m_EnableLayerDetails;
        inferenceModelParams.m_ParseUnsupported = params.m_ParseUnsupported;
        inferenceModelParams.m_InferOutputShape = params.m_InferOutputShape;
        inferenceModelParams.m_EnableFastMath = params.m_EnableFastMath;

        for(const std::string& inputName: params.m_InputNames)
        {
            inferenceModelParams.m_InputBindings.push_back(inputName);
        }

        // Explicit input shapes are optional and may cover fewer inputs than
        // there are bindings.
        for(unsigned int i = 0; i < params.m_InputTensorShapes.size(); ++i)
        {
            inferenceModelParams.m_InputShapes.push_back(*params.m_InputTensorShapes[i]);
        }

        for(const std::string& outputName: params.m_OutputNames)
        {
            inferenceModelParams.m_OutputBindings.push_back(outputName);
        }

        inferenceModelParams.m_SubgraphId = params.m_SubgraphId;
        inferenceModelParams.m_EnableFp16TurboMode = params.m_EnableFp16TurboMode;
        inferenceModelParams.m_EnableBf16TurboMode = params.m_EnableBf16TurboMode;

        InferenceModel<TParser, TDataType> model(inferenceModelParams,
                                                 params.m_EnableProfiling,
                                                 params.m_DynamicBackendsPath,
                                                 runtime);

        // Build one data container per input binding, from file or dummy data.
        const size_t numInputs = inferenceModelParams.m_InputBindings.size();
        for(unsigned int i = 0; i < numInputs; ++i)
        {
            armnn::Optional<QuantizationParams> qParams = params.m_QuantizeInput ?
                armnn::MakeOptional<QuantizationParams>(model.GetInputQuantizationParams()) :
                armnn::EmptyOptional();

            // No data file means dummy data will be generated instead.
            armnn::Optional<std::string> dataFile = params.m_GenerateTensorData ?
                armnn::EmptyOptional() :
                armnn::MakeOptional<std::string>(params.m_InputTensorDataFilePaths[i]);

            unsigned int numElements = model.GetInputSize(i);
            if (params.m_InputTensorShapes.size() > i && params.m_InputTensorShapes[i])
            {
                // If the user has provided a tensor shape for the current input,
                // override numElements
                numElements = params.m_InputTensorShapes[i]->GetNumElements();
            }

            TContainer tensorData;
            PopulateTensorWithData(tensorData,
                                   numElements,
                                   params.m_InputTypes[i],
                                   qParams,
                                   dataFile);

            inputDataContainers.push_back(tensorData);
        }

        // Pre-size one output container per output binding, typed per the
        // user-supplied output type string.
        const size_t numOutputs = inferenceModelParams.m_OutputBindings.size();
        std::vector<TContainer> outputDataContainers;

        for (unsigned int i = 0; i < numOutputs; ++i)
        {
            if (params.m_OutputTypes[i].compare("float") == 0)
            {
                outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("int") == 0)
            {
                outputDataContainers.push_back(std::vector<int>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("qasymm8") == 0)
            {
                outputDataContainers.push_back(std::vector<uint8_t>(model.GetOutputSize(i)));
            }
            else
            {
                ARMNN_LOG(fatal) << "Unsupported tensor data type \"" << params.m_OutputTypes[i] << "\". ";
                return EXIT_FAILURE;
            }
        }

        // Run the same inputs 'iterations' times; the output containers are
        // reused (overwritten) on each run.
        for (size_t x = 0; x < iterations; x++)
        {
            // model.Run returns the inference time elapsed in EnqueueWorkload (in milliseconds)
            auto inference_duration = model.Run(inputDataContainers, outputDataContainers);

            if (params.m_GenerateTensorData)
            {
                ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful";
            }

            // Print output tensors
            const auto& infosOut = model.GetOutputBindingInfos();
            for (size_t i = 0; i < numOutputs; i++)
            {
                const armnn::TensorInfo& infoOut = infosOut[i].second;
                auto outputTensorFile = params.m_OutputTensorFiles.empty() ? "" : params.m_OutputTensorFiles[i];

                TensorPrinter printer(inferenceModelParams.m_OutputBindings[i],
                                      infoOut,
                                      outputTensorFile,
                                      params.m_DequantizeOutput);
                boost::apply_visitor(printer, outputDataContainers[i]);
            }

            ARMNN_LOG(info) << "\nInference time: " << std::setprecision(2)
                            << std::fixed << inference_duration.count() << " ms\n";

            // If thresholdTime == 0.0 (default), then it hasn't been supplied at command line
            if (params.m_ThresholdTime != 0.0)
            {
                ARMNN_LOG(info) << "Threshold time: " << std::setprecision(2)
                                << std::fixed << params.m_ThresholdTime << " ms";
                auto thresholdMinusInference = params.m_ThresholdTime - inference_duration.count();
                ARMNN_LOG(info) << "Threshold time - Inference time: " << std::setprecision(2)
                                << std::fixed << thresholdMinusInference << " ms" << "\n";

                // Exceeding the threshold is logged as fatal but does not stop
                // the remaining iterations or change the return code.
                if (thresholdMinusInference < 0)
                {
                    std::string errorMessage = "Elapsed inference time is greater than provided threshold time.";
                    ARMNN_LOG(fatal) << errorMessage;
                }
            }
        }
    }
    catch (const armnn::Exception& e)
    {
        ARMNN_LOG(fatal) << "Armnn Error: " << e.what();
        return EXIT_FAILURE;
    }

    return EXIT_SUCCESS;
}
535
536// This will run a test
537int RunTest(const std::string& format,
538 const std::string& inputTensorShapesStr,
Aron Virginas-Tarc82c8732019-10-24 17:07:43 +0100539 const vector<armnn::BackendId>& computeDevices,
Matteo Martincigh00dda4a2019-08-14 11:42:30 +0100540 const std::string& dynamicBackendsPath,
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100541 const std::string& path,
542 const std::string& inputNames,
543 const std::string& inputTensorDataFilePaths,
544 const std::string& inputTypes,
Narumol Prangnawarat610256f2019-06-26 15:10:46 +0100545 bool quantizeInput,
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100546 const std::string& outputTypes,
547 const std::string& outputNames,
Sadik Armagan77086282019-09-02 11:46:28 +0100548 const std::string& outputTensorFiles,
Georgios Pinitas50311ba2020-02-18 13:25:23 +0000549 bool dequantizeOuput,
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100550 bool enableProfiling,
551 bool enableFp16TurboMode,
Narumol Prangnawaratd8cc8112020-03-24 13:54:05 +0000552 bool enableBf16TurboMode,
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100553 const double& thresholdTime,
Matthew Jackson54658b92019-08-27 15:35:59 +0100554 bool printIntermediate,
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100555 const size_t subgraphId,
Andre Ghattas23ae2ea2019-08-07 12:18:38 +0100556 bool enableLayerDetails = false,
Derek Lamberti132563c2019-12-02 16:06:40 +0000557 bool parseUnsupported = false,
Sadik Armagana9c2ce12020-07-14 10:02:22 +0100558 bool inferOutputShape = false,
Sadik Armagana25886e2020-09-15 17:17:08 +0100559 bool enableFastMath = false,
alered01a7227ac2020-05-07 14:58:29 +0100560 const size_t iterations = 1,
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100561 const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
562{
David Monahana8837bf2020-04-16 10:01:56 +0100563 std::string modelFormat = armnn::stringUtils::StringTrimCopy(format);
564 std::string modelPath = armnn::stringUtils::StringTrimCopy(path);
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100565 std::vector<std::string> inputNamesVector = ParseStringList(inputNames, ",");
Francis Murtagh1555cbd2019-10-08 14:47:46 +0100566 std::vector<std::string> inputTensorShapesVector = ParseStringList(inputTensorShapesStr, ":");
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100567 std::vector<std::string> inputTensorDataFilePathsVector = ParseStringList(
568 inputTensorDataFilePaths, ",");
569 std::vector<std::string> outputNamesVector = ParseStringList(outputNames, ",");
570 std::vector<std::string> inputTypesVector = ParseStringList(inputTypes, ",");
571 std::vector<std::string> outputTypesVector = ParseStringList(outputTypes, ",");
Sadik Armagan77086282019-09-02 11:46:28 +0100572 std::vector<std::string> outputTensorFilesVector = ParseStringList(outputTensorFiles, ",");
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100573
574 // Parse model binary flag from the model-format string we got from the command-line
575 bool isModelBinary;
576 if (modelFormat.find("bin") != std::string::npos)
577 {
578 isModelBinary = true;
579 }
580 else if (modelFormat.find("txt") != std::string::npos || modelFormat.find("text") != std::string::npos)
581 {
582 isModelBinary = false;
583 }
584 else
585 {
Derek Lamberti08446972019-11-26 16:38:31 +0000586 ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat << "'. Please include 'binary' or 'text'";
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100587 return EXIT_FAILURE;
588 }
589
590 if ((inputTensorShapesVector.size() != 0) && (inputTensorShapesVector.size() != inputNamesVector.size()))
591 {
Derek Lamberti08446972019-11-26 16:38:31 +0000592 ARMNN_LOG(fatal) << "input-name and input-tensor-shape must have the same amount of elements.";
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100593 return EXIT_FAILURE;
594 }
595
596 if ((inputTensorDataFilePathsVector.size() != 0) &&
597 (inputTensorDataFilePathsVector.size() != inputNamesVector.size()))
598 {
Derek Lamberti08446972019-11-26 16:38:31 +0000599 ARMNN_LOG(fatal) << "input-name and input-tensor-data must have the same amount of elements.";
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100600 return EXIT_FAILURE;
601 }
602
Sadik Armagan77086282019-09-02 11:46:28 +0100603 if ((outputTensorFilesVector.size() != 0) &&
604 (outputTensorFilesVector.size() != outputNamesVector.size()))
605 {
Derek Lamberti08446972019-11-26 16:38:31 +0000606 ARMNN_LOG(fatal) << "output-name and write-outputs-to-file must have the same amount of elements.";
Sadik Armagan77086282019-09-02 11:46:28 +0100607 return EXIT_FAILURE;
608 }
609
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100610 if (inputTypesVector.size() == 0)
611 {
612 //Defaults the value of all inputs to "float"
613 inputTypesVector.assign(inputNamesVector.size(), "float");
614 }
Matteo Martincigh08b51862019-08-29 16:26:10 +0100615 else if ((inputTypesVector.size() != 0) && (inputTypesVector.size() != inputNamesVector.size()))
616 {
Derek Lamberti08446972019-11-26 16:38:31 +0000617 ARMNN_LOG(fatal) << "input-name and input-type must have the same amount of elements.";
Matteo Martincigh08b51862019-08-29 16:26:10 +0100618 return EXIT_FAILURE;
619 }
620
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100621 if (outputTypesVector.size() == 0)
622 {
623 //Defaults the value of all outputs to "float"
624 outputTypesVector.assign(outputNamesVector.size(), "float");
625 }
Matteo Martincigh08b51862019-08-29 16:26:10 +0100626 else if ((outputTypesVector.size() != 0) && (outputTypesVector.size() != outputNamesVector.size()))
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100627 {
Derek Lamberti08446972019-11-26 16:38:31 +0000628 ARMNN_LOG(fatal) << "output-name and output-type must have the same amount of elements.";
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100629 return EXIT_FAILURE;
630 }
631
632 // Parse input tensor shape from the string we got from the command-line.
633 std::vector<std::unique_ptr<armnn::TensorShape>> inputTensorShapes;
634
635 if (!inputTensorShapesVector.empty())
636 {
637 inputTensorShapes.reserve(inputTensorShapesVector.size());
638
639 for(const std::string& shape : inputTensorShapesVector)
640 {
641 std::stringstream ss(shape);
642 std::vector<unsigned int> dims = ParseArray(ss);
643
644 try
645 {
646 // Coverity fix: An exception of type armnn::InvalidArgumentException is thrown and never caught.
Rob Hughesbb46dde2020-05-20 15:27:37 +0100647 inputTensorShapes.push_back(
648 std::make_unique<armnn::TensorShape>(static_cast<unsigned int>(dims.size()), dims.data()));
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100649 }
650 catch (const armnn::InvalidArgumentException& e)
651 {
Derek Lamberti08446972019-11-26 16:38:31 +0000652 ARMNN_LOG(fatal) << "Cannot create tensor shape: " << e.what();
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100653 return EXIT_FAILURE;
654 }
655 }
656 }
657
658 // Check that threshold time is not less than zero
659 if (thresholdTime < 0)
660 {
Derek Lamberti08446972019-11-26 16:38:31 +0000661 ARMNN_LOG(fatal) << "Threshold time supplied as a command line argument is less than zero.";
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100662 return EXIT_FAILURE;
663 }
664
Aron Virginas-Tarc82c8732019-10-24 17:07:43 +0100665 ExecuteNetworkParams params;
666 params.m_ModelPath = modelPath.c_str();
667 params.m_IsModelBinary = isModelBinary;
668 params.m_ComputeDevices = computeDevices;
669 params.m_DynamicBackendsPath = dynamicBackendsPath;
670 params.m_InputNames = inputNamesVector;
671 params.m_InputTensorShapes = std::move(inputTensorShapes);
672 params.m_InputTensorDataFilePaths = inputTensorDataFilePathsVector;
673 params.m_InputTypes = inputTypesVector;
674 params.m_QuantizeInput = quantizeInput;
675 params.m_OutputTypes = outputTypesVector;
676 params.m_OutputNames = outputNamesVector;
677 params.m_OutputTensorFiles = outputTensorFilesVector;
Georgios Pinitas50311ba2020-02-18 13:25:23 +0000678 params.m_DequantizeOutput = dequantizeOuput;
Aron Virginas-Tarc82c8732019-10-24 17:07:43 +0100679 params.m_EnableProfiling = enableProfiling;
680 params.m_EnableFp16TurboMode = enableFp16TurboMode;
Narumol Prangnawaratd8cc8112020-03-24 13:54:05 +0000681 params.m_EnableBf16TurboMode = enableBf16TurboMode;
Aron Virginas-Tarc82c8732019-10-24 17:07:43 +0100682 params.m_ThresholdTime = thresholdTime;
683 params.m_PrintIntermediate = printIntermediate;
684 params.m_SubgraphId = subgraphId;
685 params.m_EnableLayerDetails = enableLayerDetails;
686 params.m_GenerateTensorData = inputTensorDataFilePathsVector.empty();
Derek Lamberti132563c2019-12-02 16:06:40 +0000687 params.m_ParseUnsupported = parseUnsupported;
Sadik Armagana9c2ce12020-07-14 10:02:22 +0100688 params.m_InferOutputShape = inferOutputShape;
Sadik Armagana25886e2020-09-15 17:17:08 +0100689 params.m_EnableFastMath = enableFastMath;
Aron Virginas-Tarc82c8732019-10-24 17:07:43 +0100690
691 // Warn if ExecuteNetwork will generate dummy input data
692 if (params.m_GenerateTensorData)
693 {
Derek Lamberti08446972019-11-26 16:38:31 +0000694 ARMNN_LOG(warning) << "No input files provided, input tensors will be filled with 0s.";
Aron Virginas-Tarc82c8732019-10-24 17:07:43 +0100695 }
696
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100697 // Forward to implementation based on the parser type
698 if (modelFormat.find("armnn") != std::string::npos)
699 {
700#if defined(ARMNN_SERIALIZER)
alered01a7227ac2020-05-07 14:58:29 +0100701 return MainImpl<armnnDeserializer::IDeserializer, float>(params, runtime, iterations);
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100702#else
Derek Lamberti08446972019-11-26 16:38:31 +0000703 ARMNN_LOG(fatal) << "Not built with serialization support.";
alered01a7227ac2020-05-07 14:58:29 +0100704 return EXIT_FAILURE;
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100705#endif
706 }
707 else if (modelFormat.find("caffe") != std::string::npos)
708 {
709#if defined(ARMNN_CAFFE_PARSER)
alered01a7227ac2020-05-07 14:58:29 +0100710 return MainImpl<armnnCaffeParser::ICaffeParser, float>(params, runtime, iterations);
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100711#else
Derek Lamberti08446972019-11-26 16:38:31 +0000712 ARMNN_LOG(fatal) << "Not built with Caffe parser support.";
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100713 return EXIT_FAILURE;
714#endif
715 }
716 else if (modelFormat.find("onnx") != std::string::npos)
alered01a7227ac2020-05-07 14:58:29 +0100717 {
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100718#if defined(ARMNN_ONNX_PARSER)
alered01a7227ac2020-05-07 14:58:29 +0100719 return MainImpl<armnnOnnxParser::IOnnxParser, float>(params, runtime, iterations);
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100720#else
Derek Lamberti08446972019-11-26 16:38:31 +0000721 ARMNN_LOG(fatal) << "Not built with Onnx parser support.";
alered01a7227ac2020-05-07 14:58:29 +0100722 return EXIT_FAILURE;
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100723#endif
724 }
725 else if (modelFormat.find("tensorflow") != std::string::npos)
726 {
727#if defined(ARMNN_TF_PARSER)
alered01a7227ac2020-05-07 14:58:29 +0100728 return MainImpl<armnnTfParser::ITfParser, float>(params, runtime, iterations);
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100729#else
Derek Lamberti08446972019-11-26 16:38:31 +0000730 ARMNN_LOG(fatal) << "Not built with Tensorflow parser support.";
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100731 return EXIT_FAILURE;
732#endif
733 }
734 else if(modelFormat.find("tflite") != std::string::npos)
735 {
736#if defined(ARMNN_TF_LITE_PARSER)
737 if (! isModelBinary)
738 {
alered01a7227ac2020-05-07 14:58:29 +0100739 ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat
740 << "'. Only 'binary' format supported for tflite files";
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100741 return EXIT_FAILURE;
742 }
alered01a7227ac2020-05-07 14:58:29 +0100743 return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(params, runtime, iterations);
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100744#else
alered01a7227ac2020-05-07 14:58:29 +0100745 ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat
746 << "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'";
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100747 return EXIT_FAILURE;
748#endif
749 }
750 else
751 {
alered01a7227ac2020-05-07 14:58:29 +0100752 ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat
753 << "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'";
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100754 return EXIT_FAILURE;
755 }
756}
757
758int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IRuntime>& runtime,
Narumol Prangnawaratd8cc8112020-03-24 13:54:05 +0000759 const bool enableProfiling, const bool enableFp16TurboMode, const bool enableBf16TurboMode,
760 const double& thresholdTime, const bool printIntermediate, bool enableLayerDetails = false,
Sadik Armagana25886e2020-09-15 17:17:08 +0100761 bool parseUnuspported = false, bool inferOutputShape = false, bool enableFastMath = false)
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100762{
Jan Eilers8eb25602020-03-09 12:13:48 +0000763 IgnoreUnused(runtime);
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100764 std::string modelFormat;
765 std::string modelPath;
766 std::string inputNames;
767 std::string inputTensorShapes;
768 std::string inputTensorDataFilePaths;
769 std::string outputNames;
770 std::string inputTypes;
771 std::string outputTypes;
Matteo Martincigh00dda4a2019-08-14 11:42:30 +0100772 std::string dynamicBackendsPath;
Sadik Armagan77086282019-09-02 11:46:28 +0100773 std::string outputTensorFiles;
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100774
775 size_t subgraphId = 0;
776
777 const std::string backendsMessage = std::string("The preferred order of devices to run layers on by default. ")
778 + std::string("Possible choices: ")
779 + armnn::BackendRegistryInstance().GetBackendIdsAsString();
780
781 po::options_description desc("Options");
782 try
783 {
784 desc.add_options()
785 ("model-format,f", po::value(&modelFormat),
786 "armnn-binary, caffe-binary, caffe-text, tflite-binary, onnx-binary, onnx-text, tensorflow-binary or "
787 "tensorflow-text.")
788 ("model-path,m", po::value(&modelPath), "Path to model file, e.g. .armnn, .caffemodel, .prototxt, "
789 ".tflite, .onnx")
790 ("compute,c", po::value<std::vector<armnn::BackendId>>()->multitoken(),
791 backendsMessage.c_str())
Matteo Martincigh00dda4a2019-08-14 11:42:30 +0100792 ("dynamic-backends-path,b", po::value(&dynamicBackendsPath),
793 "Path where to load any available dynamic backend from. "
794 "If left empty (the default), dynamic backends will not be used.")
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100795 ("input-name,i", po::value(&inputNames), "Identifier of the input tensors in the network separated by comma.")
796 ("subgraph-number,n", po::value<size_t>(&subgraphId)->default_value(0), "Id of the subgraph to be "
797 "executed. Defaults to 0.")
798 ("input-tensor-shape,s", po::value(&inputTensorShapes),
799 "The shape of the input tensors in the network as a flat array of integers separated by comma. "
800 "Several shapes can be passed separating them by semicolon. "
801 "This parameter is optional, depending on the network.")
Aron Virginas-Tarc82c8732019-10-24 17:07:43 +0100802 ("input-tensor-data,d", po::value(&inputTensorDataFilePaths)->default_value(""),
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100803 "Path to files containing the input data as a flat array separated by whitespace. "
Aron Virginas-Tarc82c8732019-10-24 17:07:43 +0100804 "Several paths can be passed separating them by comma. If not specified, the network will be run with dummy "
805 "data (useful for profiling).")
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100806 ("input-type,y",po::value(&inputTypes), "The type of the input tensors in the network separated by comma. "
807 "If unset, defaults to \"float\" for all defined inputs. "
808 "Accepted values (float, int or qasymm8).")
Narumol Prangnawarat610256f2019-06-26 15:10:46 +0100809 ("quantize-input,q",po::bool_switch()->default_value(false),
810 "If this option is enabled, all float inputs will be quantized to qasymm8. "
811 "If unset, default to not quantized. "
812 "Accepted values (true or false)")
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100813 ("output-type,z",po::value(&outputTypes), "The type of the output tensors in the network separated by comma. "
814 "If unset, defaults to \"float\" for all defined outputs. "
815 "Accepted values (float, int or qasymm8).")
816 ("output-name,o", po::value(&outputNames),
Sadik Armagan77086282019-09-02 11:46:28 +0100817 "Identifier of the output tensors in the network separated by comma.")
Georgios Pinitas50311ba2020-02-18 13:25:23 +0000818 ("dequantize-output,l",po::bool_switch()->default_value(false),
819 "If this option is enabled, all quantized outputs will be dequantized to float. "
820 "If unset, default to not get dequantized. "
821 "Accepted values (true or false)")
Sadik Armagan77086282019-09-02 11:46:28 +0100822 ("write-outputs-to-file,w", po::value(&outputTensorFiles),
823 "Comma-separated list of output file paths keyed with the binding-id of the output slot. "
824 "If left empty (the default), the output tensors will not be written to a file.");
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100825 }
826 catch (const std::exception& e)
827 {
828 // Coverity points out that default_value(...) can throw a bad_lexical_cast,
829 // and that desc.add_options() can throw boost::io::too_few_args.
830 // They really won't in any of these cases.
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100831 ARMNN_ASSERT_MSG(false, "Caught unexpected exception");
Derek Lamberti08446972019-11-26 16:38:31 +0000832 ARMNN_LOG(fatal) << "Fatal internal error: " << e.what();
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100833 return EXIT_FAILURE;
834 }
835
836 std::vector<const char*> clOptions;
837 clOptions.reserve(csvRow.values.size());
838 for (const std::string& value : csvRow.values)
839 {
840 clOptions.push_back(value.c_str());
841 }
842
843 po::variables_map vm;
844 try
845 {
846 po::store(po::parse_command_line(static_cast<int>(clOptions.size()), clOptions.data(), desc), vm);
847
848 po::notify(vm);
849
850 CheckOptionDependencies(vm);
851 }
852 catch (const po::error& e)
853 {
854 std::cerr << e.what() << std::endl << std::endl;
855 std::cerr << desc << std::endl;
856 return EXIT_FAILURE;
857 }
858
Narumol Prangnawarat610256f2019-06-26 15:10:46 +0100859 // Get the value of the switch arguments.
860 bool quantizeInput = vm["quantize-input"].as<bool>();
Georgios Pinitas50311ba2020-02-18 13:25:23 +0000861 bool dequantizeOutput = vm["dequantize-output"].as<bool>();
Narumol Prangnawarat610256f2019-06-26 15:10:46 +0100862
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100863 // Get the preferred order of compute devices.
864 std::vector<armnn::BackendId> computeDevices = vm["compute"].as<std::vector<armnn::BackendId>>();
865
866 // Remove duplicates from the list of compute devices.
867 RemoveDuplicateDevices(computeDevices);
868
869 // Check that the specified compute devices are valid.
870 std::string invalidBackends;
871 if (!CheckRequestedBackendsAreValid(computeDevices, armnn::Optional<std::string&>(invalidBackends)))
872 {
Derek Lamberti08446972019-11-26 16:38:31 +0000873 ARMNN_LOG(fatal) << "The list of preferred devices contains invalid backend IDs: "
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100874 << invalidBackends;
875 return EXIT_FAILURE;
876 }
877
Matteo Martincigh00dda4a2019-08-14 11:42:30 +0100878 return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
Sadik Armagan77086282019-09-02 11:46:28 +0100879 inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames, outputTensorFiles,
Narumol Prangnawaratd8cc8112020-03-24 13:54:05 +0000880 dequantizeOutput, enableProfiling, enableFp16TurboMode, enableBf16TurboMode,
Sadik Armagana9c2ce12020-07-14 10:02:22 +0100881 thresholdTime, printIntermediate, subgraphId, enableLayerDetails, parseUnuspported,
Sadik Armagana25886e2020-09-15 17:17:08 +0100882 inferOutputShape, enableFastMath);
Matteo Martincigh00dda4a2019-08-14 11:42:30 +0100883}
alered01a7227ac2020-05-07 14:58:29 +0100884
#if defined(ARMCOMPUTECL_ENABLED)
// Performs a CL tuning pass: creates a GpuAcc runtime configured with the given
// tuning file and level, executes the network once through RunTest so the tuned
// kernel parameters get recorded, and reports how long the tuning run took.
// Returns the result of RunTest.
int RunCLTuning(const std::string& tuningPath,
                const int tuningLevel,
                const std::string& modelFormat,
                const std::string& inputTensorShapes,
                const vector<armnn::BackendId>& computeDevices,
                const std::string& dynamicBackendsPath,
                const std::string& modelPath,
                const std::string& inputNames,
                const std::string& inputTensorDataFilePaths,
                const std::string& inputTypes,
                bool quantizeInput,
                const std::string& outputTypes,
                const std::string& outputNames,
                const std::string& outputTensorFiles,
                bool dequantizeOutput,
                bool enableProfiling,
                bool enableFp16TurboMode,
                bool enableBf16TurboMode,
                const double& thresholdTime,
                bool printIntermediate,
                const size_t subgraphId,
                bool enableLayerDetails = false,
                bool parseUnsupported = false,
                bool inferOutputShape = false,
                bool enableFastMath = false)
{
    // Build a runtime whose GpuAcc backend writes tuned parameters to tuningPath.
    armnn::BackendOptions gpuAccTuningOptions
    {
        "GpuAcc",
        {
            {"TuningLevel", tuningLevel},
            {"TuningFile", tuningPath.c_str()},
            {"KernelProfilingEnabled", enableProfiling}
        }
    };

    armnn::IRuntime::CreationOptions creationOptions;
    creationOptions.m_BackendOptions.emplace_back(gpuAccTuningOptions);
    std::shared_ptr<armnn::IRuntime> tuningRuntime(armnn::IRuntime::Create(creationOptions));

    // Time the tuning run so the cost of tuning is visible to the user.
    const auto tuningStartTime = armnn::GetTimeNow();
    ARMNN_LOG(info) << "Tuning run...\n";

    const int testResult = RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath,
                                   inputNames, inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes,
                                   outputNames, outputTensorFiles, dequantizeOutput, enableProfiling,
                                   enableFp16TurboMode, enableBf16TurboMode, thresholdTime, printIntermediate,
                                   subgraphId, enableLayerDetails, parseUnsupported, inferOutputShape,
                                   enableFastMath, 1, tuningRuntime);

    ARMNN_LOG(info) << "Tuning time: " << std::setprecision(2)
                    << std::fixed << armnn::GetTimeDuration(tuningStartTime).count() << " ms\n";

    return testResult;
}
#endif