blob: 31f37916b8ab481fbc0f47ef719944bb25b21212 [file] [log] [blame]
Laurent Carlier749294b2020-06-01 09:03:17 +01001//
Francis Murtaghbee4bc92019-06-18 12:30:37 +01002// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5#include <armnn/ArmNN.hpp>
6#include <armnn/TypesUtils.hpp>
alered01a7227ac2020-05-07 14:58:29 +01007#include <armnn/utility/Timer.hpp>
Francis Murtaghbee4bc92019-06-18 12:30:37 +01008
9#if defined(ARMNN_SERIALIZER)
10#include "armnnDeserializer/IDeserializer.hpp"
11#endif
12#if defined(ARMNN_CAFFE_PARSER)
13#include "armnnCaffeParser/ICaffeParser.hpp"
14#endif
15#if defined(ARMNN_TF_PARSER)
16#include "armnnTfParser/ITfParser.hpp"
17#endif
18#if defined(ARMNN_TF_LITE_PARSER)
19#include "armnnTfLiteParser/ITfLiteParser.hpp"
20#endif
21#if defined(ARMNN_ONNX_PARSER)
22#include "armnnOnnxParser/IOnnxParser.hpp"
23#endif
24#include "CsvReader.hpp"
25#include "../InferenceTest.hpp"
26
Francis Murtaghbee4bc92019-06-18 12:30:37 +010027#include <Profiling.hpp>
Aron Virginas-Tarc82c8732019-10-24 17:07:43 +010028#include <ResolveType.hpp>
Francis Murtaghbee4bc92019-06-18 12:30:37 +010029
Francis Murtaghbee4bc92019-06-18 12:30:37 +010030#include <boost/program_options.hpp>
31#include <boost/variant.hpp>
32
33#include <iostream>
34#include <fstream>
35#include <functional>
36#include <future>
37#include <algorithm>
38#include <iterator>
39
40namespace
41{
42
43// Configure boost::program_options for command-line parsing and validation.
44namespace po = boost::program_options;
45
46template<typename T, typename TParseElementFunc>
47std::vector<T> ParseArrayImpl(std::istream& stream, TParseElementFunc parseElementFunc, const char * chars = "\t ,:")
48{
49 std::vector<T> result;
50 // Processes line-by-line.
51 std::string line;
52 while (std::getline(stream, line))
53 {
David Monahana8837bf2020-04-16 10:01:56 +010054 std::vector<std::string> tokens = armnn::stringUtils::StringTokenizer(line, chars);
Francis Murtaghbee4bc92019-06-18 12:30:37 +010055 for (const std::string& token : tokens)
56 {
57 if (!token.empty()) // See https://stackoverflow.com/questions/10437406/
58 {
59 try
60 {
61 result.push_back(parseElementFunc(token));
62 }
63 catch (const std::exception&)
64 {
Derek Lamberti08446972019-11-26 16:38:31 +000065 ARMNN_LOG(error) << "'" << token << "' is not a valid number. It has been ignored.";
Francis Murtaghbee4bc92019-06-18 12:30:37 +010066 }
67 }
68 }
69 }
70
71 return result;
72}
73
74bool CheckOption(const po::variables_map& vm,
75 const char* option)
76{
77 // Check that the given option is valid.
78 if (option == nullptr)
79 {
80 return false;
81 }
82
83 // Check whether 'option' is provided.
84 return vm.find(option) != vm.end();
85}
86
87void CheckOptionDependency(const po::variables_map& vm,
88 const char* option,
89 const char* required)
90{
91 // Check that the given options are valid.
92 if (option == nullptr || required == nullptr)
93 {
94 throw po::error("Invalid option to check dependency for");
95 }
96
97 // Check that if 'option' is provided, 'required' is also provided.
98 if (CheckOption(vm, option) && !vm[option].defaulted())
99 {
100 if (CheckOption(vm, required) == 0 || vm[required].defaulted())
101 {
102 throw po::error(std::string("Option '") + option + "' requires option '" + required + "'.");
103 }
104 }
105}
106
107void CheckOptionDependencies(const po::variables_map& vm)
108{
109 CheckOptionDependency(vm, "model-path", "model-format");
110 CheckOptionDependency(vm, "model-path", "input-name");
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100111 CheckOptionDependency(vm, "model-path", "output-name");
112 CheckOptionDependency(vm, "input-tensor-shape", "model-path");
113}
114
// Primary template for parsing non-quantized data from a text stream;
// only the explicit specialisations below provide definitions.
template<armnn::DataType NonQuantizedType>
auto ParseDataArray(std::istream & stream);

// Overload for quantized data types: float text values are quantized
// with the given scale/offset while being parsed.
template<armnn::DataType QuantizedType>
auto ParseDataArray(std::istream& stream,
                    const float& quantizationScale,
                    const int32_t& quantizationOffset);
122
123template<>
124auto ParseDataArray<armnn::DataType::Float32>(std::istream & stream)
125{
126 return ParseArrayImpl<float>(stream, [](const std::string& s) { return std::stof(s); });
127}
128
129template<>
130auto ParseDataArray<armnn::DataType::Signed32>(std::istream & stream)
131{
132 return ParseArrayImpl<int>(stream, [](const std::string & s) { return std::stoi(s); });
133}
134
135template<>
Derek Lambertif90c56d2020-01-10 17:14:08 +0000136auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream)
Narumol Prangnawarat610256f2019-06-26 15:10:46 +0100137{
138 return ParseArrayImpl<uint8_t>(stream,
139 [](const std::string& s) { return boost::numeric_cast<uint8_t>(std::stoi(s)); });
140}
141
142template<>
Derek Lambertif90c56d2020-01-10 17:14:08 +0000143auto ParseDataArray<armnn::DataType::QAsymmU8>(std::istream& stream,
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100144 const float& quantizationScale,
145 const int32_t& quantizationOffset)
146{
147 return ParseArrayImpl<uint8_t>(stream,
148 [&quantizationScale, &quantizationOffset](const std::string & s)
149 {
150 return boost::numeric_cast<uint8_t>(
Rob Hughes93667b12019-09-23 16:24:05 +0100151 armnn::Quantize<uint8_t>(std::stof(s),
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100152 quantizationScale,
153 quantizationOffset));
154 });
155}
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100156std::vector<unsigned int> ParseArray(std::istream& stream)
157{
158 return ParseArrayImpl<unsigned int>(stream,
159 [](const std::string& s) { return boost::numeric_cast<unsigned int>(std::stoi(s)); });
160}
161
162std::vector<std::string> ParseStringList(const std::string & inputString, const char * delimiter)
163{
164 std::stringstream stream(inputString);
David Monahana8837bf2020-04-16 10:01:56 +0100165 return ParseArrayImpl<std::string>(stream, [](const std::string& s) {
166 return armnn::stringUtils::StringTrimCopy(s); }, delimiter);
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100167}
168
169void RemoveDuplicateDevices(std::vector<armnn::BackendId>& computeDevices)
170{
171 // Mark the duplicate devices as 'Undefined'.
172 for (auto i = computeDevices.begin(); i != computeDevices.end(); ++i)
173 {
174 for (auto j = std::next(i); j != computeDevices.end(); ++j)
175 {
176 if (*j == *i)
177 {
178 *j = armnn::Compute::Undefined;
179 }
180 }
181 }
182
183 // Remove 'Undefined' devices.
184 computeDevices.erase(std::remove(computeDevices.begin(), computeDevices.end(), armnn::Compute::Undefined),
185 computeDevices.end());
186}
187
// boost::variant visitor that prints one output tensor to stdout (prefixed by
// its binding name) and, when a file path was supplied, also writes the values
// to that file. For uint8 tensors it can optionally dequantize using the
// scale/offset taken from the output TensorInfo.
struct TensorPrinter : public boost::static_visitor<>
{
    // 'binding'          - output binding name printed before the values.
    // 'info'             - source of the quantization scale/offset.
    // 'outputTensorFile' - destination file path; empty means "don't write".
    // 'dequantizeOutput' - if true, uint8 values are converted back to float.
    TensorPrinter(const std::string& binding,
                  const armnn::TensorInfo& info,
                  const std::string& outputTensorFile,
                  bool dequantizeOutput)
        : m_OutputBinding(binding)
        , m_Scale(info.GetQuantizationScale())
        , m_Offset(info.GetQuantizationOffset())
        , m_OutputTensorFile(outputTensorFile)
        , m_DequantizeOutput(dequantizeOutput)
    {}

    // Float output: print and write as-is.
    void operator()(const std::vector<float>& values)
    {
        ForEachValue(values, [](float value)
        {
            printf("%f ", value);
        });
        WriteToFile(values);
    }

    // uint8 output: either dequantize to float (print/write floats) or
    // widen to int and delegate to the int overload.
    void operator()(const std::vector<uint8_t>& values)
    {
        if(m_DequantizeOutput)
        {
            auto& scale = m_Scale;
            auto& offset = m_Offset;
            std::vector<float> dequantizedValues;
            ForEachValue(values, [&scale, &offset, &dequantizedValues](uint8_t value)
            {
                auto dequantizedValue = armnn::Dequantize(value, scale, offset);
                printf("%f ", dequantizedValue);
                dequantizedValues.push_back(dequantizedValue);
            });
            WriteToFile(dequantizedValues);
        }
        else
        {
            const std::vector<int> intValues(values.begin(), values.end());
            operator()(intValues);
        }
    }

    // Int output: print and write as-is.
    void operator()(const std::vector<int>& values)
    {
        ForEachValue(values, [](int value)
        {
            printf("%d ", value);
        });
        WriteToFile(values);
    }

private:
    // Prints "<binding>: " then applies 'delegate' to each value,
    // finishing the line with a newline.
    template<typename Container, typename Delegate>
    void ForEachValue(const Container& c, Delegate delegate)
    {
        std::cout << m_OutputBinding << ": ";
        for (const auto& value : c)
        {
            delegate(value);
        }
        printf("\n");
    }

    // Writes "<binding>: v0 v1 ..." to m_OutputTensorFile (truncating any
    // existing content). A failure to open the file is logged, not fatal.
    template<typename T>
    void WriteToFile(const std::vector<T>& values)
    {
        if (!m_OutputTensorFile.empty())
        {
            std::ofstream outputTensorFile;
            outputTensorFile.open(m_OutputTensorFile, std::ofstream::out | std::ofstream::trunc);
            if (outputTensorFile.is_open())
            {
                outputTensorFile << m_OutputBinding << ": ";
                std::copy(values.begin(), values.end(), std::ostream_iterator<T>(outputTensorFile, " "));
            }
            else
            {
                ARMNN_LOG(info) << "Output Tensor File: " << m_OutputTensorFile << " could not be opened!";
            }
            // close() on a never-opened stream is harmless.
            outputTensorFile.close();
        }
    }

    std::string m_OutputBinding;     // output binding name used as a label
    float m_Scale=0.0f;              // quantization scale from TensorInfo
    int m_Offset=0;                  // quantization offset from TensorInfo
    std::string m_OutputTensorFile;  // empty => no file output
    bool m_DequantizeOutput = false; // dequantize uint8 outputs when true
};
279
280
Aron Virginas-Tarc82c8732019-10-24 17:07:43 +0100281
282template<armnn::DataType ArmnnType, typename T = armnn::ResolveType<ArmnnType>>
283std::vector<T> GenerateDummyTensorData(unsigned int numElements)
284{
285 return std::vector<T>(numElements, static_cast<T>(0));
286}
287
// One input/output tensor's worth of data, as parsed text or dummy values.
using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
// (scale, offset) pair used when quantizing float input on the fly.
using QuantizationParams = std::pair<float, int32_t>;

// Fills 'tensorData' with either values parsed from 'dataFile' or
// 'numElements' zeros when no file was given.
// 'dataTypeStr' selects the element type: "float", "int" or "qasymm8".
// When 'qParams' is set and the type is "float", the text values are
// quantized to uint8 while parsing.
// Throws armnn::Exception for an unsupported data type string.
void PopulateTensorWithData(TContainer& tensorData,
                            unsigned int numElements,
                            const std::string& dataTypeStr,
                            const armnn::Optional<QuantizationParams>& qParams,
                            const armnn::Optional<std::string>& dataFile)
{
    const bool readFromFile = dataFile.has_value() && !dataFile.value().empty();
    const bool quantizeData = qParams.has_value();

    std::ifstream inputTensorFile;
    if (readFromFile)
    {
        inputTensorFile = std::ifstream(dataFile.value());
    }

    if (dataTypeStr.compare("float") == 0)
    {
        if (quantizeData)
        {
            const float qScale  = qParams.value().first;
            const int   qOffset = qParams.value().second;

            // Float text + quantization requested => store as uint8.
            tensorData = readFromFile ?
                ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile, qScale, qOffset) :
                GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
        }
        else
        {
            tensorData = readFromFile ?
                ParseDataArray<armnn::DataType::Float32>(inputTensorFile) :
                GenerateDummyTensorData<armnn::DataType::Float32>(numElements);
        }
    }
    else if (dataTypeStr.compare("int") == 0)
    {
        tensorData = readFromFile ?
            ParseDataArray<armnn::DataType::Signed32>(inputTensorFile) :
            GenerateDummyTensorData<armnn::DataType::Signed32>(numElements);
    }
    else if (dataTypeStr.compare("qasymm8") == 0)
    {
        tensorData = readFromFile ?
            ParseDataArray<armnn::DataType::QAsymmU8>(inputTensorFile) :
            GenerateDummyTensorData<armnn::DataType::QAsymmU8>(numElements);
    }
    else
    {
        std::string errorMessage = "Unsupported tensor data type " + dataTypeStr;
        ARMNN_LOG(fatal) << errorMessage;

        inputTensorFile.close();
        throw armnn::Exception(errorMessage);
    }

    inputTensorFile.close();
}
347
348} // anonymous namespace
349
// Global with external linkage; not referenced in this file —
// presumably consumed by another translation unit. TODO confirm before removing.
bool generateTensorData = true;
351
352struct ExecuteNetworkParams
353{
354 using TensorShapePtr = std::unique_ptr<armnn::TensorShape>;
355
356 const char* m_ModelPath;
357 bool m_IsModelBinary;
358 std::vector<armnn::BackendId> m_ComputeDevices;
359 std::string m_DynamicBackendsPath;
360 std::vector<string> m_InputNames;
361 std::vector<TensorShapePtr> m_InputTensorShapes;
362 std::vector<string> m_InputTensorDataFilePaths;
363 std::vector<string> m_InputTypes;
364 bool m_QuantizeInput;
365 std::vector<string> m_OutputTypes;
366 std::vector<string> m_OutputNames;
367 std::vector<string> m_OutputTensorFiles;
Georgios Pinitas50311ba2020-02-18 13:25:23 +0000368 bool m_DequantizeOutput;
Aron Virginas-Tarc82c8732019-10-24 17:07:43 +0100369 bool m_EnableProfiling;
370 bool m_EnableFp16TurboMode;
Narumol Prangnawaratd8cc8112020-03-24 13:54:05 +0000371 bool m_EnableBf16TurboMode;
Aron Virginas-Tarc82c8732019-10-24 17:07:43 +0100372 double m_ThresholdTime;
373 bool m_PrintIntermediate;
374 size_t m_SubgraphId;
375 bool m_EnableLayerDetails = false;
376 bool m_GenerateTensorData;
Derek Lamberti132563c2019-12-02 16:06:40 +0000377 bool m_ParseUnsupported = false;
Aron Virginas-Tarc82c8732019-10-24 17:07:43 +0100378};
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100379
// Builds an InferenceModel from 'params' (parsing the model with TParser and
// loading it into 'runtime', or a default-created runtime when null), populates
// the input tensors, then runs inference 'iterations' times, printing each
// output tensor and the measured inference time.
// Returns EXIT_SUCCESS, or EXIT_FAILURE on an unsupported output type or an
// armnn::Exception.
template<typename TParser, typename TDataType>
int MainImpl(const ExecuteNetworkParams& params,
             const std::shared_ptr<armnn::IRuntime>& runtime = nullptr,
             size_t iterations = 1)
{
    using TContainer = boost::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;

    std::vector<TContainer> inputDataContainers;

    try
    {
        // Creates an InferenceModel, which will parse the model and load it into an IRuntime.
        typename InferenceModel<TParser, TDataType>::Params inferenceModelParams;
        inferenceModelParams.m_ModelPath = params.m_ModelPath;
        inferenceModelParams.m_IsModelBinary = params.m_IsModelBinary;
        inferenceModelParams.m_ComputeDevices = params.m_ComputeDevices;
        inferenceModelParams.m_DynamicBackendsPath = params.m_DynamicBackendsPath;
        inferenceModelParams.m_PrintIntermediateLayers = params.m_PrintIntermediate;
        inferenceModelParams.m_VisualizePostOptimizationModel = params.m_EnableLayerDetails;
        inferenceModelParams.m_ParseUnsupported = params.m_ParseUnsupported;

        for(const std::string& inputName: params.m_InputNames)
        {
            inferenceModelParams.m_InputBindings.push_back(inputName);
        }

        for(unsigned int i = 0; i < params.m_InputTensorShapes.size(); ++i)
        {
            inferenceModelParams.m_InputShapes.push_back(*params.m_InputTensorShapes[i]);
        }

        for(const std::string& outputName: params.m_OutputNames)
        {
            inferenceModelParams.m_OutputBindings.push_back(outputName);
        }

        inferenceModelParams.m_SubgraphId = params.m_SubgraphId;
        inferenceModelParams.m_EnableFp16TurboMode = params.m_EnableFp16TurboMode;
        inferenceModelParams.m_EnableBf16TurboMode = params.m_EnableBf16TurboMode;

        InferenceModel<TParser, TDataType> model(inferenceModelParams,
                                                 params.m_EnableProfiling,
                                                 params.m_DynamicBackendsPath,
                                                 runtime);

        // Populate one data container per input, either from file or with
        // dummy zeros, quantizing on the fly when requested.
        const size_t numInputs = inferenceModelParams.m_InputBindings.size();
        for(unsigned int i = 0; i < numInputs; ++i)
        {
            armnn::Optional<QuantizationParams> qParams = params.m_QuantizeInput ?
                armnn::MakeOptional<QuantizationParams>(model.GetInputQuantizationParams()) :
                armnn::EmptyOptional();

            armnn::Optional<std::string> dataFile = params.m_GenerateTensorData ?
                armnn::EmptyOptional() :
                armnn::MakeOptional<std::string>(params.m_InputTensorDataFilePaths[i]);

            unsigned int numElements = model.GetInputSize(i);
            if (params.m_InputTensorShapes.size() > i && params.m_InputTensorShapes[i])
            {
                // If the user has provided a tensor shape for the current input,
                // override numElements
                numElements = params.m_InputTensorShapes[i]->GetNumElements();
            }

            TContainer tensorData;
            PopulateTensorWithData(tensorData,
                                   numElements,
                                   params.m_InputTypes[i],
                                   qParams,
                                   dataFile);

            inputDataContainers.push_back(tensorData);
        }

        // Pre-size one output container per output with the declared type.
        const size_t numOutputs = inferenceModelParams.m_OutputBindings.size();
        std::vector<TContainer> outputDataContainers;

        for (unsigned int i = 0; i < numOutputs; ++i)
        {
            if (params.m_OutputTypes[i].compare("float") == 0)
            {
                outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("int") == 0)
            {
                outputDataContainers.push_back(std::vector<int>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("qasymm8") == 0)
            {
                outputDataContainers.push_back(std::vector<uint8_t>(model.GetOutputSize(i)));
            }
            else
            {
                ARMNN_LOG(fatal) << "Unsupported tensor data type \"" << params.m_OutputTypes[i] << "\". ";
                return EXIT_FAILURE;
            }
        }

        // Run the requested number of inference iterations; the same input
        // containers are reused and the output containers are overwritten.
        for (size_t x = 0; x < iterations; x++)
        {
            // model.Run returns the inference time elapsed in EnqueueWorkload (in milliseconds)
            auto inference_duration = model.Run(inputDataContainers, outputDataContainers);

            if (params.m_GenerateTensorData)
            {
                ARMNN_LOG(warning) << "The input data was generated, note that the output will not be useful";
            }

            // Print output tensors
            const auto& infosOut = model.GetOutputBindingInfos();
            for (size_t i = 0; i < numOutputs; i++)
            {
                const armnn::TensorInfo& infoOut = infosOut[i].second;
                auto outputTensorFile = params.m_OutputTensorFiles.empty() ? "" : params.m_OutputTensorFiles[i];

                TensorPrinter printer(inferenceModelParams.m_OutputBindings[i],
                                      infoOut,
                                      outputTensorFile,
                                      params.m_DequantizeOutput);
                boost::apply_visitor(printer, outputDataContainers[i]);
            }

            ARMNN_LOG(info) << "\nInference time: " << std::setprecision(2)
                            << std::fixed << inference_duration.count() << " ms\n";

            // If thresholdTime == 0.0 (default), then it hasn't been supplied at command line
            if (params.m_ThresholdTime != 0.0)
            {
                ARMNN_LOG(info) << "Threshold time: " << std::setprecision(2)
                                << std::fixed << params.m_ThresholdTime << " ms";
                auto thresholdMinusInference = params.m_ThresholdTime - inference_duration.count();
                ARMNN_LOG(info) << "Threshold time - Inference time: " << std::setprecision(2)
                                << std::fixed << thresholdMinusInference << " ms" << "\n";

                // Exceeding the threshold logs fatal but does not abort the run.
                if (thresholdMinusInference < 0)
                {
                    std::string errorMessage = "Elapsed inference time is greater than provided threshold time.";
                    ARMNN_LOG(fatal) << errorMessage;
                }
            }
        }
    }
    catch (const armnn::Exception& e)
    {
        ARMNN_LOG(fatal) << "Armnn Error: " << e.what();
        return EXIT_FAILURE;
    }

    return EXIT_SUCCESS;
}
530
531// This will run a test
532int RunTest(const std::string& format,
533 const std::string& inputTensorShapesStr,
Aron Virginas-Tarc82c8732019-10-24 17:07:43 +0100534 const vector<armnn::BackendId>& computeDevices,
Matteo Martincigh00dda4a2019-08-14 11:42:30 +0100535 const std::string& dynamicBackendsPath,
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100536 const std::string& path,
537 const std::string& inputNames,
538 const std::string& inputTensorDataFilePaths,
539 const std::string& inputTypes,
Narumol Prangnawarat610256f2019-06-26 15:10:46 +0100540 bool quantizeInput,
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100541 const std::string& outputTypes,
542 const std::string& outputNames,
Sadik Armagan77086282019-09-02 11:46:28 +0100543 const std::string& outputTensorFiles,
Georgios Pinitas50311ba2020-02-18 13:25:23 +0000544 bool dequantizeOuput,
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100545 bool enableProfiling,
546 bool enableFp16TurboMode,
Narumol Prangnawaratd8cc8112020-03-24 13:54:05 +0000547 bool enableBf16TurboMode,
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100548 const double& thresholdTime,
Matthew Jackson54658b92019-08-27 15:35:59 +0100549 bool printIntermediate,
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100550 const size_t subgraphId,
Andre Ghattas23ae2ea2019-08-07 12:18:38 +0100551 bool enableLayerDetails = false,
Derek Lamberti132563c2019-12-02 16:06:40 +0000552 bool parseUnsupported = false,
alered01a7227ac2020-05-07 14:58:29 +0100553 const size_t iterations = 1,
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100554 const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
555{
David Monahana8837bf2020-04-16 10:01:56 +0100556 std::string modelFormat = armnn::stringUtils::StringTrimCopy(format);
557 std::string modelPath = armnn::stringUtils::StringTrimCopy(path);
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100558 std::vector<std::string> inputNamesVector = ParseStringList(inputNames, ",");
Francis Murtagh1555cbd2019-10-08 14:47:46 +0100559 std::vector<std::string> inputTensorShapesVector = ParseStringList(inputTensorShapesStr, ":");
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100560 std::vector<std::string> inputTensorDataFilePathsVector = ParseStringList(
561 inputTensorDataFilePaths, ",");
562 std::vector<std::string> outputNamesVector = ParseStringList(outputNames, ",");
563 std::vector<std::string> inputTypesVector = ParseStringList(inputTypes, ",");
564 std::vector<std::string> outputTypesVector = ParseStringList(outputTypes, ",");
Sadik Armagan77086282019-09-02 11:46:28 +0100565 std::vector<std::string> outputTensorFilesVector = ParseStringList(outputTensorFiles, ",");
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100566
567 // Parse model binary flag from the model-format string we got from the command-line
568 bool isModelBinary;
569 if (modelFormat.find("bin") != std::string::npos)
570 {
571 isModelBinary = true;
572 }
573 else if (modelFormat.find("txt") != std::string::npos || modelFormat.find("text") != std::string::npos)
574 {
575 isModelBinary = false;
576 }
577 else
578 {
Derek Lamberti08446972019-11-26 16:38:31 +0000579 ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat << "'. Please include 'binary' or 'text'";
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100580 return EXIT_FAILURE;
581 }
582
583 if ((inputTensorShapesVector.size() != 0) && (inputTensorShapesVector.size() != inputNamesVector.size()))
584 {
Derek Lamberti08446972019-11-26 16:38:31 +0000585 ARMNN_LOG(fatal) << "input-name and input-tensor-shape must have the same amount of elements.";
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100586 return EXIT_FAILURE;
587 }
588
589 if ((inputTensorDataFilePathsVector.size() != 0) &&
590 (inputTensorDataFilePathsVector.size() != inputNamesVector.size()))
591 {
Derek Lamberti08446972019-11-26 16:38:31 +0000592 ARMNN_LOG(fatal) << "input-name and input-tensor-data must have the same amount of elements.";
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100593 return EXIT_FAILURE;
594 }
595
Sadik Armagan77086282019-09-02 11:46:28 +0100596 if ((outputTensorFilesVector.size() != 0) &&
597 (outputTensorFilesVector.size() != outputNamesVector.size()))
598 {
Derek Lamberti08446972019-11-26 16:38:31 +0000599 ARMNN_LOG(fatal) << "output-name and write-outputs-to-file must have the same amount of elements.";
Sadik Armagan77086282019-09-02 11:46:28 +0100600 return EXIT_FAILURE;
601 }
602
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100603 if (inputTypesVector.size() == 0)
604 {
605 //Defaults the value of all inputs to "float"
606 inputTypesVector.assign(inputNamesVector.size(), "float");
607 }
Matteo Martincigh08b51862019-08-29 16:26:10 +0100608 else if ((inputTypesVector.size() != 0) && (inputTypesVector.size() != inputNamesVector.size()))
609 {
Derek Lamberti08446972019-11-26 16:38:31 +0000610 ARMNN_LOG(fatal) << "input-name and input-type must have the same amount of elements.";
Matteo Martincigh08b51862019-08-29 16:26:10 +0100611 return EXIT_FAILURE;
612 }
613
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100614 if (outputTypesVector.size() == 0)
615 {
616 //Defaults the value of all outputs to "float"
617 outputTypesVector.assign(outputNamesVector.size(), "float");
618 }
Matteo Martincigh08b51862019-08-29 16:26:10 +0100619 else if ((outputTypesVector.size() != 0) && (outputTypesVector.size() != outputNamesVector.size()))
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100620 {
Derek Lamberti08446972019-11-26 16:38:31 +0000621 ARMNN_LOG(fatal) << "output-name and output-type must have the same amount of elements.";
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100622 return EXIT_FAILURE;
623 }
624
625 // Parse input tensor shape from the string we got from the command-line.
626 std::vector<std::unique_ptr<armnn::TensorShape>> inputTensorShapes;
627
628 if (!inputTensorShapesVector.empty())
629 {
630 inputTensorShapes.reserve(inputTensorShapesVector.size());
631
632 for(const std::string& shape : inputTensorShapesVector)
633 {
634 std::stringstream ss(shape);
635 std::vector<unsigned int> dims = ParseArray(ss);
636
637 try
638 {
639 // Coverity fix: An exception of type armnn::InvalidArgumentException is thrown and never caught.
Rob Hughesbb46dde2020-05-20 15:27:37 +0100640 inputTensorShapes.push_back(
641 std::make_unique<armnn::TensorShape>(static_cast<unsigned int>(dims.size()), dims.data()));
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100642 }
643 catch (const armnn::InvalidArgumentException& e)
644 {
Derek Lamberti08446972019-11-26 16:38:31 +0000645 ARMNN_LOG(fatal) << "Cannot create tensor shape: " << e.what();
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100646 return EXIT_FAILURE;
647 }
648 }
649 }
650
651 // Check that threshold time is not less than zero
652 if (thresholdTime < 0)
653 {
Derek Lamberti08446972019-11-26 16:38:31 +0000654 ARMNN_LOG(fatal) << "Threshold time supplied as a command line argument is less than zero.";
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100655 return EXIT_FAILURE;
656 }
657
Aron Virginas-Tarc82c8732019-10-24 17:07:43 +0100658 ExecuteNetworkParams params;
659 params.m_ModelPath = modelPath.c_str();
660 params.m_IsModelBinary = isModelBinary;
661 params.m_ComputeDevices = computeDevices;
662 params.m_DynamicBackendsPath = dynamicBackendsPath;
663 params.m_InputNames = inputNamesVector;
664 params.m_InputTensorShapes = std::move(inputTensorShapes);
665 params.m_InputTensorDataFilePaths = inputTensorDataFilePathsVector;
666 params.m_InputTypes = inputTypesVector;
667 params.m_QuantizeInput = quantizeInput;
668 params.m_OutputTypes = outputTypesVector;
669 params.m_OutputNames = outputNamesVector;
670 params.m_OutputTensorFiles = outputTensorFilesVector;
Georgios Pinitas50311ba2020-02-18 13:25:23 +0000671 params.m_DequantizeOutput = dequantizeOuput;
Aron Virginas-Tarc82c8732019-10-24 17:07:43 +0100672 params.m_EnableProfiling = enableProfiling;
673 params.m_EnableFp16TurboMode = enableFp16TurboMode;
Narumol Prangnawaratd8cc8112020-03-24 13:54:05 +0000674 params.m_EnableBf16TurboMode = enableBf16TurboMode;
Aron Virginas-Tarc82c8732019-10-24 17:07:43 +0100675 params.m_ThresholdTime = thresholdTime;
676 params.m_PrintIntermediate = printIntermediate;
677 params.m_SubgraphId = subgraphId;
678 params.m_EnableLayerDetails = enableLayerDetails;
679 params.m_GenerateTensorData = inputTensorDataFilePathsVector.empty();
Derek Lamberti132563c2019-12-02 16:06:40 +0000680 params.m_ParseUnsupported = parseUnsupported;
Aron Virginas-Tarc82c8732019-10-24 17:07:43 +0100681
682 // Warn if ExecuteNetwork will generate dummy input data
683 if (params.m_GenerateTensorData)
684 {
Derek Lamberti08446972019-11-26 16:38:31 +0000685 ARMNN_LOG(warning) << "No input files provided, input tensors will be filled with 0s.";
Aron Virginas-Tarc82c8732019-10-24 17:07:43 +0100686 }
687
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100688 // Forward to implementation based on the parser type
689 if (modelFormat.find("armnn") != std::string::npos)
690 {
691#if defined(ARMNN_SERIALIZER)
alered01a7227ac2020-05-07 14:58:29 +0100692 return MainImpl<armnnDeserializer::IDeserializer, float>(params, runtime, iterations);
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100693#else
Derek Lamberti08446972019-11-26 16:38:31 +0000694 ARMNN_LOG(fatal) << "Not built with serialization support.";
alered01a7227ac2020-05-07 14:58:29 +0100695 return EXIT_FAILURE;
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100696#endif
697 }
698 else if (modelFormat.find("caffe") != std::string::npos)
699 {
700#if defined(ARMNN_CAFFE_PARSER)
alered01a7227ac2020-05-07 14:58:29 +0100701 return MainImpl<armnnCaffeParser::ICaffeParser, float>(params, runtime, iterations);
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100702#else
Derek Lamberti08446972019-11-26 16:38:31 +0000703 ARMNN_LOG(fatal) << "Not built with Caffe parser support.";
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100704 return EXIT_FAILURE;
705#endif
706 }
707 else if (modelFormat.find("onnx") != std::string::npos)
alered01a7227ac2020-05-07 14:58:29 +0100708 {
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100709#if defined(ARMNN_ONNX_PARSER)
alered01a7227ac2020-05-07 14:58:29 +0100710 return MainImpl<armnnOnnxParser::IOnnxParser, float>(params, runtime, iterations);
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100711#else
Derek Lamberti08446972019-11-26 16:38:31 +0000712 ARMNN_LOG(fatal) << "Not built with Onnx parser support.";
alered01a7227ac2020-05-07 14:58:29 +0100713 return EXIT_FAILURE;
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100714#endif
715 }
716 else if (modelFormat.find("tensorflow") != std::string::npos)
717 {
718#if defined(ARMNN_TF_PARSER)
alered01a7227ac2020-05-07 14:58:29 +0100719 return MainImpl<armnnTfParser::ITfParser, float>(params, runtime, iterations);
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100720#else
Derek Lamberti08446972019-11-26 16:38:31 +0000721 ARMNN_LOG(fatal) << "Not built with Tensorflow parser support.";
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100722 return EXIT_FAILURE;
723#endif
724 }
725 else if(modelFormat.find("tflite") != std::string::npos)
726 {
727#if defined(ARMNN_TF_LITE_PARSER)
728 if (! isModelBinary)
729 {
alered01a7227ac2020-05-07 14:58:29 +0100730 ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat
731 << "'. Only 'binary' format supported for tflite files";
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100732 return EXIT_FAILURE;
733 }
alered01a7227ac2020-05-07 14:58:29 +0100734 return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(params, runtime, iterations);
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100735#else
alered01a7227ac2020-05-07 14:58:29 +0100736 ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat
737 << "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'";
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100738 return EXIT_FAILURE;
739#endif
740 }
741 else
742 {
alered01a7227ac2020-05-07 14:58:29 +0100743 ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat
744 << "'. Please include 'caffe', 'tensorflow', 'tflite' or 'onnx'";
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100745 return EXIT_FAILURE;
746 }
747}
748
749int RunCsvTest(const armnnUtils::CsvRow &csvRow, const std::shared_ptr<armnn::IRuntime>& runtime,
Narumol Prangnawaratd8cc8112020-03-24 13:54:05 +0000750 const bool enableProfiling, const bool enableFp16TurboMode, const bool enableBf16TurboMode,
751 const double& thresholdTime, const bool printIntermediate, bool enableLayerDetails = false,
752 bool parseUnuspported = false)
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100753{
Jan Eilers8eb25602020-03-09 12:13:48 +0000754 IgnoreUnused(runtime);
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100755 std::string modelFormat;
756 std::string modelPath;
757 std::string inputNames;
758 std::string inputTensorShapes;
759 std::string inputTensorDataFilePaths;
760 std::string outputNames;
761 std::string inputTypes;
762 std::string outputTypes;
Matteo Martincigh00dda4a2019-08-14 11:42:30 +0100763 std::string dynamicBackendsPath;
Sadik Armagan77086282019-09-02 11:46:28 +0100764 std::string outputTensorFiles;
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100765
766 size_t subgraphId = 0;
767
768 const std::string backendsMessage = std::string("The preferred order of devices to run layers on by default. ")
769 + std::string("Possible choices: ")
770 + armnn::BackendRegistryInstance().GetBackendIdsAsString();
771
772 po::options_description desc("Options");
773 try
774 {
775 desc.add_options()
776 ("model-format,f", po::value(&modelFormat),
777 "armnn-binary, caffe-binary, caffe-text, tflite-binary, onnx-binary, onnx-text, tensorflow-binary or "
778 "tensorflow-text.")
779 ("model-path,m", po::value(&modelPath), "Path to model file, e.g. .armnn, .caffemodel, .prototxt, "
780 ".tflite, .onnx")
781 ("compute,c", po::value<std::vector<armnn::BackendId>>()->multitoken(),
782 backendsMessage.c_str())
Matteo Martincigh00dda4a2019-08-14 11:42:30 +0100783 ("dynamic-backends-path,b", po::value(&dynamicBackendsPath),
784 "Path where to load any available dynamic backend from. "
785 "If left empty (the default), dynamic backends will not be used.")
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100786 ("input-name,i", po::value(&inputNames), "Identifier of the input tensors in the network separated by comma.")
787 ("subgraph-number,n", po::value<size_t>(&subgraphId)->default_value(0), "Id of the subgraph to be "
788 "executed. Defaults to 0.")
789 ("input-tensor-shape,s", po::value(&inputTensorShapes),
790 "The shape of the input tensors in the network as a flat array of integers separated by comma. "
791 "Several shapes can be passed separating them by semicolon. "
792 "This parameter is optional, depending on the network.")
Aron Virginas-Tarc82c8732019-10-24 17:07:43 +0100793 ("input-tensor-data,d", po::value(&inputTensorDataFilePaths)->default_value(""),
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100794 "Path to files containing the input data as a flat array separated by whitespace. "
Aron Virginas-Tarc82c8732019-10-24 17:07:43 +0100795 "Several paths can be passed separating them by comma. If not specified, the network will be run with dummy "
796 "data (useful for profiling).")
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100797 ("input-type,y",po::value(&inputTypes), "The type of the input tensors in the network separated by comma. "
798 "If unset, defaults to \"float\" for all defined inputs. "
799 "Accepted values (float, int or qasymm8).")
Narumol Prangnawarat610256f2019-06-26 15:10:46 +0100800 ("quantize-input,q",po::bool_switch()->default_value(false),
801 "If this option is enabled, all float inputs will be quantized to qasymm8. "
802 "If unset, default to not quantized. "
803 "Accepted values (true or false)")
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100804 ("output-type,z",po::value(&outputTypes), "The type of the output tensors in the network separated by comma. "
805 "If unset, defaults to \"float\" for all defined outputs. "
806 "Accepted values (float, int or qasymm8).")
807 ("output-name,o", po::value(&outputNames),
Sadik Armagan77086282019-09-02 11:46:28 +0100808 "Identifier of the output tensors in the network separated by comma.")
Georgios Pinitas50311ba2020-02-18 13:25:23 +0000809 ("dequantize-output,l",po::bool_switch()->default_value(false),
810 "If this option is enabled, all quantized outputs will be dequantized to float. "
811 "If unset, default to not get dequantized. "
812 "Accepted values (true or false)")
Sadik Armagan77086282019-09-02 11:46:28 +0100813 ("write-outputs-to-file,w", po::value(&outputTensorFiles),
814 "Comma-separated list of output file paths keyed with the binding-id of the output slot. "
815 "If left empty (the default), the output tensors will not be written to a file.");
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100816 }
817 catch (const std::exception& e)
818 {
819 // Coverity points out that default_value(...) can throw a bad_lexical_cast,
820 // and that desc.add_options() can throw boost::io::too_few_args.
821 // They really won't in any of these cases.
Narumol Prangnawaratac2770a2020-04-01 16:51:23 +0100822 ARMNN_ASSERT_MSG(false, "Caught unexpected exception");
Derek Lamberti08446972019-11-26 16:38:31 +0000823 ARMNN_LOG(fatal) << "Fatal internal error: " << e.what();
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100824 return EXIT_FAILURE;
825 }
826
827 std::vector<const char*> clOptions;
828 clOptions.reserve(csvRow.values.size());
829 for (const std::string& value : csvRow.values)
830 {
831 clOptions.push_back(value.c_str());
832 }
833
834 po::variables_map vm;
835 try
836 {
837 po::store(po::parse_command_line(static_cast<int>(clOptions.size()), clOptions.data(), desc), vm);
838
839 po::notify(vm);
840
841 CheckOptionDependencies(vm);
842 }
843 catch (const po::error& e)
844 {
845 std::cerr << e.what() << std::endl << std::endl;
846 std::cerr << desc << std::endl;
847 return EXIT_FAILURE;
848 }
849
Narumol Prangnawarat610256f2019-06-26 15:10:46 +0100850 // Get the value of the switch arguments.
851 bool quantizeInput = vm["quantize-input"].as<bool>();
Georgios Pinitas50311ba2020-02-18 13:25:23 +0000852 bool dequantizeOutput = vm["dequantize-output"].as<bool>();
Narumol Prangnawarat610256f2019-06-26 15:10:46 +0100853
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100854 // Get the preferred order of compute devices.
855 std::vector<armnn::BackendId> computeDevices = vm["compute"].as<std::vector<armnn::BackendId>>();
856
857 // Remove duplicates from the list of compute devices.
858 RemoveDuplicateDevices(computeDevices);
859
860 // Check that the specified compute devices are valid.
861 std::string invalidBackends;
862 if (!CheckRequestedBackendsAreValid(computeDevices, armnn::Optional<std::string&>(invalidBackends)))
863 {
Derek Lamberti08446972019-11-26 16:38:31 +0000864 ARMNN_LOG(fatal) << "The list of preferred devices contains invalid backend IDs: "
Francis Murtaghbee4bc92019-06-18 12:30:37 +0100865 << invalidBackends;
866 return EXIT_FAILURE;
867 }
868
Matteo Martincigh00dda4a2019-08-14 11:42:30 +0100869 return RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath, inputNames,
Sadik Armagan77086282019-09-02 11:46:28 +0100870 inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes, outputNames, outputTensorFiles,
Narumol Prangnawaratd8cc8112020-03-24 13:54:05 +0000871 dequantizeOutput, enableProfiling, enableFp16TurboMode, enableBf16TurboMode,
872 thresholdTime, printIntermediate, subgraphId, enableLayerDetails, parseUnuspported);
Matteo Martincigh00dda4a2019-08-14 11:42:30 +0100873}
alered01a7227ac2020-05-07 14:58:29 +0100874
#if defined(ARMCOMPUTECL_ENABLED)
// Performs an OpenCL tuning pass for the GpuAcc backend: creates a runtime
// whose GpuAcc backend options carry the requested tuning level and tuning
// file path, executes the model once so the tuned kernel parameters are
// generated, logs the elapsed tuning time, and returns RunTest's status code.
int RunCLTuning(const std::string& tuningPath,
                const int tuningLevel,
                const std::string& modelFormat,
                const std::string& inputTensorShapes,
                const vector<armnn::BackendId>& computeDevices,
                const std::string& dynamicBackendsPath,
                const std::string& modelPath,
                const std::string& inputNames,
                const std::string& inputTensorDataFilePaths,
                const std::string& inputTypes,
                bool quantizeInput,
                const std::string& outputTypes,
                const std::string& outputNames,
                const std::string& outputTensorFiles,
                bool dequantizeOutput,
                bool enableProfiling,
                bool enableFp16TurboMode,
                bool enableBf16TurboMode,
                const double& thresholdTime,
                bool printIntermediate,
                const size_t subgraphId,
                bool enableLayerDetails = false,
                bool parseUnsupported = false)
{
    // Configure the GpuAcc backend for tuning before the runtime is created.
    armnn::BackendOptions gpuAccTuningOptions
    {
        "GpuAcc",
        {
            {"TuningLevel", tuningLevel},
            {"TuningFile", tuningPath.c_str()},
            {"KernelProfilingEnabled", enableProfiling}
        }
    };

    armnn::IRuntime::CreationOptions creationOptions;
    creationOptions.m_BackendOptions.push_back(gpuAccTuningOptions);

    std::shared_ptr<armnn::IRuntime> runtime(armnn::IRuntime::Create(creationOptions));
    const auto tuningStartTime = armnn::GetTimeNow();

    ARMNN_LOG(info) << "Tuning run...\n";

    // A single iteration is enough for the tuner to record kernel parameters.
    const int status = RunTest(modelFormat, inputTensorShapes, computeDevices, dynamicBackendsPath, modelPath,
                               inputNames, inputTensorDataFilePaths, inputTypes, quantizeInput, outputTypes,
                               outputNames, outputTensorFiles, dequantizeOutput, enableProfiling,
                               enableFp16TurboMode, enableBf16TurboMode, thresholdTime, printIntermediate,
                               subgraphId, enableLayerDetails, parseUnsupported, 1, runtime);

    ARMNN_LOG(info) << "Tuning time: " << std::setprecision(2)
                    << std::fixed << armnn::GetTimeDuration(tuningStartTime).count() << " ms\n";

    return status;
}
#endif