//
// Copyright © 2020 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ExecuteNetworkParams.hpp"

#include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"
#include <InferenceModel.hpp>
#include <armnn/Logging.hpp>

#include <fmt/format.h>

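// Returns true if the model-format string names a binary model and false if it names a
// text model; throws InvalidArgumentException when neither 'binary' nor 'text'/'txt' is present.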
bool IsModelBinary(const std::string& modelFormat)
{
    // Parse model binary flag from the model-format string we got from the command-line
    if (modelFormat.find("binary") != std::string::npos)
    {
        return true;
    }
    else if (modelFormat.find("txt") != std::string::npos || modelFormat.find("text") != std::string::npos)
    {
        return false;
    }
    else
    {
        throw armnn::InvalidArgumentException(fmt::format("Unknown model format: '{}'. "
                                                          "Please include 'binary' or 'text'",
                                                          modelFormat));
    }
}

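// Checks that support for the requested model format was compiled into this build of
// ExecuteNetwork and throws InvalidArgumentException if the corresponding parser is missing.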
void CheckModelFormat(const std::string& modelFormat)
{
    // Forward to implementation based on the parser type
    if (modelFormat.find("armnn") != std::string::npos)
    {
#if defined(ARMNN_SERIALIZER)
#else
        throw armnn::InvalidArgumentException("Can't run model in armnn format without a "
                                              "build with serialization support.");
#endif
    }
    else if (modelFormat.find("onnx") != std::string::npos)
    {
#if defined(ARMNN_ONNX_PARSER)
#else
        throw armnn::InvalidArgumentException("Can't run model in onnx format without a "
                                              "build with Onnx parser support.");
#endif
    }
    else if (modelFormat.find("tensorflow") != std::string::npos)
    {
#if defined(ARMNN_TF_PARSER)
#else
        throw armnn::InvalidArgumentException("Can't run model in tensorflow format without a "
                                              "build with Tensorflow parser support.");
#endif
    }
    else if (modelFormat.find("tflite") != std::string::npos)
    {
#if defined(ARMNN_TF_LITE_PARSER)
        if (!IsModelBinary(modelFormat))
        {
            throw armnn::InvalidArgumentException(fmt::format("Unknown model format: '{}'. Only 'binary' "
                                                              "format supported for tflite files",
                                                              modelFormat));
        }
#elif defined(ARMNN_TFLITE_DELEGATE)
#else
        throw armnn::InvalidArgumentException("Can't run model in tflite format without a "
                                              "build with Tensorflow Lite parser support.");
#endif
    }
    else
    {
        throw armnn::InvalidArgumentException(fmt::format("Unknown model format: '{}'. "
                                                          "Please include 'tensorflow', 'tflite' or 'onnx'",
                                                          modelFormat));
    }
}

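// Validates the OpenCL tuning options: tuning level 0 reuses an existing tuning file at
// tuningPath, levels 1-3 generate a new tuning file, and any other level is rejected.
// A warning is printed if GpuAcc is not among the requested compute devices, since tuning
// data is only used or generated by that backend.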
void CheckClTuningParameter(const int& tuningLevel,
                            const std::string& tuningPath,
                            const std::vector<armnn::BackendId> computeDevices)
{
    if (!tuningPath.empty())
    {
        if (tuningLevel == 0)
        {
            ARMNN_LOG(info) << "Using cl tuning file: " << tuningPath << "\n";
            if (!ValidatePath(tuningPath, true))
            {
                throw armnn::InvalidArgumentException("The tuning path is not valid");
            }
        }
        else if ((1 <= tuningLevel) && (tuningLevel <= 3))
        {
            ARMNN_LOG(info) << "Starting execution to generate a cl tuning file: " << tuningPath << "\n"
                            << "Tuning level in use: " << tuningLevel << "\n";
        }
        else if ((tuningLevel < 0) || (tuningLevel > 3))
        {
            throw armnn::InvalidArgumentException(fmt::format("The tuning level {} is not valid.",
                                                              tuningLevel));
        }

        // Ensure that GpuAcc is enabled. Otherwise no tuning data is used or generated.
        // Only warn if it's not enabled.
        auto it = std::find(computeDevices.begin(), computeDevices.end(), "GpuAcc");
        if (it == computeDevices.end())
        {
            ARMNN_LOG(warning) << "To use Cl Tuning the compute device GpuAcc needs to be active.";
        }
    }
}

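// Cross-checks the command-line parameters collected by ExecuteNetwork and fills in defaults
// (for example, input and output types default to "float"). Most problems are reported with
// ARMNN_LOG(fatal); the throwExc flag below selects whether a caught std::string is rethrown
// as InvalidArgumentException or printed before exiting.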
void ExecuteNetworkParams::ValidateParams()
{
    // Set to true if it is preferred to throw an exception rather than use ARMNN_LOG
    bool throwExc = false;

    try
    {
        if (m_DynamicBackendsPath == "")
        {
            // Check compute devices are valid unless they are dynamically loaded at runtime
            std::string invalidBackends;
            if (!CheckRequestedBackendsAreValid(m_ComputeDevices, armnn::Optional<std::string&>(invalidBackends)))
            {
                ARMNN_LOG(fatal) << "The list of preferred devices contains invalid backend IDs: "
                                 << invalidBackends;
            }
        }

        CheckClTuningParameter(m_TuningLevel, m_TuningPath, m_ComputeDevices);

        if (m_EnableBf16TurboMode && m_EnableFp16TurboMode)
        {
            ARMNN_LOG(fatal) << "BFloat16 and Float16 turbo mode cannot be enabled at the same time.";
        }

        m_IsModelBinary = IsModelBinary(m_ModelFormat);

        CheckModelFormat(m_ModelFormat);

        // Check number of simultaneous iterations
        if (m_SimultaneousIterations < 1)
        {
            ARMNN_LOG(fatal) << "simultaneous-iterations cannot be less than 1.";
        }

        // Check input tensor shapes
        if ((m_InputTensorShapes.size() != 0) &&
            (m_InputTensorShapes.size() != m_InputNames.size()))
        {
            ARMNN_LOG(fatal) << "input-name and input-tensor-shape must have the same number of elements.";
        }

        if (m_InputTensorDataFilePaths.size() != 0)
        {
            if (!ValidatePaths(m_InputTensorDataFilePaths, true))
            {
                ARMNN_LOG(fatal) << "One or more input data file paths are not valid.";
            }

            if (!m_Concurrent && m_InputTensorDataFilePaths.size() != m_InputNames.size())
            {
                ARMNN_LOG(fatal) << "input-name and input-tensor-data must have the same number of elements.";
            }

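            // Each simultaneous iteration consumes one data file per input, so exactly
            // m_SimultaneousIterations * m_InputNames.size() file paths are expected.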
            if (m_InputTensorDataFilePaths.size() < m_SimultaneousIterations * m_InputNames.size())
            {
                ARMNN_LOG(fatal) << "There is not enough input data for " << m_SimultaneousIterations
                                 << " executions.";
            }
            if (m_InputTensorDataFilePaths.size() > m_SimultaneousIterations * m_InputNames.size())
            {
                ARMNN_LOG(fatal) << "There is more input data than required for " << m_SimultaneousIterations
                                 << " executions.";
            }
        }

        if ((m_OutputTensorFiles.size() != 0) &&
            (m_OutputTensorFiles.size() != m_OutputNames.size()))
        {
            ARMNN_LOG(fatal) << "output-name and write-outputs-to-file must have the same number of elements.";
        }

        if ((m_OutputTensorFiles.size() != 0)
            && m_OutputTensorFiles.size() != m_SimultaneousIterations * m_OutputNames.size())
        {
            ARMNN_LOG(fatal) << "There is not enough output data for " << m_SimultaneousIterations
                             << " executions.";
        }

        if (m_InputTypes.size() == 0)
        {
            // Default the type of all inputs to "float"
            m_InputTypes.assign(m_InputNames.size(), "float");
        }
        else if ((m_InputTypes.size() != 0) &&
                 (m_InputTypes.size() != m_InputNames.size()))
        {
            ARMNN_LOG(fatal) << "input-name and input-type must have the same number of elements.";
        }

        if (m_OutputTypes.size() == 0)
        {
            // Default the type of all outputs to "float"
            m_OutputTypes.assign(m_OutputNames.size(), "float");
        }
        else if ((m_OutputTypes.size() != 0) &&
                 (m_OutputTypes.size() != m_OutputNames.size()))
        {
            ARMNN_LOG(fatal) << "output-name and output-type must have the same number of elements.";
        }

        // Check that threshold time is not less than zero
        if (m_ThresholdTime < 0)
        {
            ARMNN_LOG(fatal) << "Threshold time supplied as a command line argument is less than zero.";
        }
    }
    catch (std::string& exc)
    {
        if (throwExc)
        {
            throw armnn::InvalidArgumentException(exc);
        }
        else
        {
            std::cout << exc;
            exit(EXIT_FAILURE);
        }
    }

    // Warn if ExecuteNetwork will generate dummy input data
    if (m_GenerateTensorData)
    {
        ARMNN_LOG(warning) << "No input files provided, input tensors will be filled with 0s.";
    }
}