//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"
#include "ExecuteNetworkProgramOptions.hpp"

#include <armnn/Logging.hpp>
#include <Filesystem.hpp>
#include <InferenceTest.hpp>

#if defined(ARMNN_SERIALIZER)
#include "armnnDeserializer/IDeserializer.hpp"
#endif
#if defined(ARMNN_CAFFE_PARSER)
#include "armnnCaffeParser/ICaffeParser.hpp"
#endif
#if defined(ARMNN_TF_PARSER)
#include "armnnTfParser/ITfParser.hpp"
#endif
#if defined(ARMNN_TF_LITE_PARSER)
#include "armnnTfLiteParser/ITfLiteParser.hpp"
#endif
#if defined(ARMNN_ONNX_PARSER)
#include "armnnOnnxParser/IOnnxParser.hpp"
#endif
#if defined(ARMNN_TFLITE_DELEGATE)
#include <armnn_delegate.hpp>
#include <DelegateOptions.hpp>

#include <tensorflow/lite/builtin_ops.h>
#include <tensorflow/lite/c/builtin_op_data.h>
#include <tensorflow/lite/c/common.h>
#include <tensorflow/lite/optional_debug_tools.h>
#include <tensorflow/lite/kernels/builtin_op_kernels.h>
#include <tensorflow/lite/interpreter.h>
#include <tensorflow/lite/kernels/register.h>
#endif

#include <future>

#if defined(ARMNN_TFLITE_DELEGATE)
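// Runs the model through the TfLite interpreter with the Arm NN delegate registered,
// so that supported operators execute on the requested Arm NN backends while the
// remainder fall back to the default TfLite kernels.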
int TfLiteDelegateMainImpl(const ExecuteNetworkParams& params,
                           const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
    using namespace tflite;

    std::unique_ptr<tflite::FlatBufferModel> model =
        tflite::FlatBufferModel::BuildFromFile(params.m_ModelPath.c_str());
    if (model == nullptr)
    {
        ARMNN_LOG(fatal) << "Failed to load model from file: " << params.m_ModelPath;
        return EXIT_FAILURE;
    }

    auto tfLiteInterpreter = std::make_unique<Interpreter>();
    tflite::ops::builtin::BuiltinOpResolver resolver;

    tflite::InterpreterBuilder builder(*model, resolver);
    if (builder(&tfLiteInterpreter) != kTfLiteOk ||
        tfLiteInterpreter->AllocateTensors() != kTfLiteOk)
    {
        ARMNN_LOG(fatal) << "Failed to build the TfLite interpreter or allocate its tensors.";
        return EXIT_FAILURE;
    }

    // Create the Armnn Delegate
    armnnDelegate::DelegateOptions delegateOptions(params.m_ComputeDevices);
    std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
        theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
                         armnnDelegate::TfLiteArmnnDelegateDelete);
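    // The custom deleter guarantees the delegate is released through
    // TfLiteArmnnDelegateDelete rather than a plain delete.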
    // Register the Arm NN delegate with the TfLite interpreter.
    int status = tfLiteInterpreter->ModifyGraphWithDelegate(std::move(theArmnnDelegate));

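    // Copy the user-supplied input names, then load (or generate) data for every input tensor.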
    std::vector<std::string> inputBindings;
    for (const std::string& inputName : params.m_InputNames)
    {
        inputBindings.push_back(inputName);
    }

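    // When tensor data is generated on the fly there is no file to read; otherwise only
    // the first data file path is used, for every input.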
    armnn::Optional<std::string> dataFile = params.m_GenerateTensorData
                                            ? armnn::EmptyOptional()
                                            : armnn::MakeOptional<std::string>(params.m_InputTensorDataFilePaths[0]);

    const size_t numInputs = inputBindings.size();

    for (unsigned int inputIndex = 0; inputIndex < numInputs; ++inputIndex)
    {
        int input = tfLiteInterpreter->inputs()[inputIndex];
        TfLiteIntArray* inputDims = tfLiteInterpreter->tensor(input)->dims;

        long inputSize = 1;
        for (unsigned int dim = 0; dim < static_cast<unsigned int>(inputDims->size); ++dim)
        {
            inputSize *= inputDims->data[dim];
        }

        if (params.m_InputTypes[inputIndex].compare("float") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<float>(input);
            TContainer tensorData;
            PopulateTensorWithData(tensorData,
                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                   params.m_InputTypes[inputIndex],
                                   armnn::EmptyOptional(),
                                   dataFile);

            mapbox::util::apply_visitor([&](auto&& value)
                                        {
                                            for (long i = 0; i < inputSize; ++i)
                                            {
                                                inputData[i] = value.data()[i];
                                            }
                                        },
                                        tensorData);
        }
        else if (params.m_InputTypes[inputIndex].compare("int") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<int32_t>(input);
            TContainer tensorData;
            PopulateTensorWithData(tensorData,
                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                   params.m_InputTypes[inputIndex],
                                   armnn::EmptyOptional(),
                                   dataFile);
            mapbox::util::apply_visitor([&](auto&& value)
                                        {
                                            for (long i = 0; i < inputSize; ++i)
                                            {
                                                inputData[i] = value.data()[i];
                                            }
                                        },
                                        tensorData);
        }
        else if (params.m_InputTypes[inputIndex].compare("qasymm8") == 0)
        {
            auto inputData = tfLiteInterpreter->typed_tensor<uint8_t>(input);
            TContainer tensorData;
            PopulateTensorWithData(tensorData,
                                   params.m_InputTensorShapes[inputIndex]->GetNumElements(),
                                   params.m_InputTypes[inputIndex],
                                   armnn::EmptyOptional(),
                                   dataFile);
            mapbox::util::apply_visitor([&](auto&& value)
                                        {
                                            for (long i = 0; i < inputSize; ++i)
                                            {
                                                inputData[i] = value.data()[i];
                                            }
                                        },
                                        tensorData);
        }
        else
        {
            ARMNN_LOG(fatal) << "Unsupported input tensor data type \"" << params.m_InputTypes[inputIndex] << "\".";
            return EXIT_FAILURE;
        }
    }

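    // Execute the requested number of iterations; each output tensor is printed after every run.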
    for (size_t x = 0; x < params.m_Iterations; x++)
    {
        // Run the inference
        if (tfLiteInterpreter->Invoke() != kTfLiteOk)
        {
            ARMNN_LOG(fatal) << "TfLite interpreter failed to invoke.";
            return EXIT_FAILURE;
        }

        // Print out the output
        for (unsigned int outputIndex = 0; outputIndex < params.m_OutputNames.size(); ++outputIndex)
        {
            auto tfLiteDelegateOutputId = tfLiteInterpreter->outputs()[outputIndex];
            TfLiteIntArray* outputDims = tfLiteInterpreter->tensor(tfLiteDelegateOutputId)->dims;

            long outputSize = 1;
            for (unsigned int dim = 0; dim < static_cast<unsigned int>(outputDims->size); ++dim)
            {
                outputSize *= outputDims->data[dim];
            }

            std::cout << params.m_OutputNames[outputIndex] << ": ";
            if (params.m_OutputTypes[outputIndex].compare("float") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<float>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == nullptr)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (long i = 0; i < outputSize; ++i)
                {
                    std::cout << tfLiteDelegateOutputData[i] << ", ";
                    if (i % 60 == 0)
                    {
                        std::cout << std::endl;
                    }
                }
            }
            else if (params.m_OutputTypes[outputIndex].compare("int") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<int32_t>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == nullptr)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (long i = 0; i < outputSize; ++i)
                {
                    std::cout << tfLiteDelegateOutputData[i] << ", ";
                    if (i % 60 == 0)
                    {
                        std::cout << std::endl;
                    }
                }
            }
            else if (params.m_OutputTypes[outputIndex].compare("qasymm8") == 0)
            {
                auto tfLiteDelegateOutputData = tfLiteInterpreter->typed_tensor<uint8_t>(tfLiteDelegateOutputId);
                if (tfLiteDelegateOutputData == nullptr)
                {
                    ARMNN_LOG(fatal) << "Output tensor is null, output type: "
                                        "\"" << params.m_OutputTypes[outputIndex] << "\" may be incorrect.";
                    return EXIT_FAILURE;
                }

                for (long i = 0; i < outputSize; ++i)
                {
                    std::cout << unsigned(tfLiteDelegateOutputData[i]) << ", ";
                    if (i % 60 == 0)
                    {
                        std::cout << std::endl;
                    }
                }
            }
            else
            {
                ARMNN_LOG(fatal) << "Unsupported output tensor data type \"" << params.m_OutputTypes[outputIndex]
                                 << "\". The output type can be specified with the -z argument.";
                return EXIT_FAILURE;
            }
            std::cout << std::endl;
        }
    }

    return status;
}
#endif
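// Parses the model file with the given TParser, loads the resulting network into an
// IRuntime via InferenceModel, and runs inference for the requested number of iterations.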
template<typename TParser, typename TDataType>
int MainImpl(const ExecuteNetworkParams& params,
             const std::shared_ptr<armnn::IRuntime>& runtime = nullptr)
{
    using TContainer = mapbox::util::variant<std::vector<float>, std::vector<int>, std::vector<unsigned char>>;
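    // TContainer holds one tensor's worth of data, in whichever element type that tensor uses.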

    std::vector<TContainer> inputDataContainers;

    try
    {
        // Creates an InferenceModel, which will parse the model and load it into an IRuntime.
        typename InferenceModel<TParser, TDataType>::Params inferenceModelParams;
        inferenceModelParams.m_ModelPath = params.m_ModelPath;
        inferenceModelParams.m_IsModelBinary = params.m_IsModelBinary;
        inferenceModelParams.m_ComputeDevices = params.m_ComputeDevices;
        inferenceModelParams.m_DynamicBackendsPath = params.m_DynamicBackendsPath;
        inferenceModelParams.m_PrintIntermediateLayers = params.m_PrintIntermediate;
        inferenceModelParams.m_VisualizePostOptimizationModel = params.m_EnableLayerDetails;
        inferenceModelParams.m_ParseUnsupported = params.m_ParseUnsupported;
        inferenceModelParams.m_InferOutputShape = params.m_InferOutputShape;
        inferenceModelParams.m_EnableFastMath = params.m_EnableFastMath;

        for (const std::string& inputName : params.m_InputNames)
        {
            inferenceModelParams.m_InputBindings.push_back(inputName);
        }

        for (unsigned int i = 0; i < params.m_InputTensorShapes.size(); ++i)
        {
            inferenceModelParams.m_InputShapes.push_back(*params.m_InputTensorShapes[i]);
        }

        for (const std::string& outputName : params.m_OutputNames)
        {
            inferenceModelParams.m_OutputBindings.push_back(outputName);
        }

        inferenceModelParams.m_SubgraphId = params.m_SubgraphId;
        inferenceModelParams.m_EnableFp16TurboMode = params.m_EnableFp16TurboMode;
        inferenceModelParams.m_EnableBf16TurboMode = params.m_EnableBf16TurboMode;

        InferenceModel<TParser, TDataType> model(inferenceModelParams,
                                                 params.m_EnableProfiling,
                                                 params.m_DynamicBackendsPath,
                                                 runtime);

        const size_t numInputs = inferenceModelParams.m_InputBindings.size();
        for (unsigned int i = 0; i < numInputs; ++i)
        {
            armnn::Optional<QuantizationParams> qParams = params.m_QuantizeInput ?
                armnn::MakeOptional<QuantizationParams>(model.GetInputQuantizationParams()) :
                armnn::EmptyOptional();

            armnn::Optional<std::string> dataFile = params.m_GenerateTensorData ?
                armnn::EmptyOptional() :
                armnn::MakeOptional<std::string>(params.m_InputTensorDataFilePaths[i]);

            unsigned int numElements = model.GetInputSize(i);
            if (params.m_InputTensorShapes.size() > i && params.m_InputTensorShapes[i])
            {
                // If the user has provided a tensor shape for the current input,
                // override numElements.
                numElements = params.m_InputTensorShapes[i]->GetNumElements();
            }

            TContainer tensorData;
            PopulateTensorWithData(tensorData,
                                   numElements,
                                   params.m_InputTypes[i],
                                   qParams,
                                   dataFile);

            inputDataContainers.push_back(tensorData);
        }

        const size_t numOutputs = inferenceModelParams.m_OutputBindings.size();
        std::vector<TContainer> outputDataContainers;

        for (unsigned int i = 0; i < numOutputs; ++i)
        {
            if (params.m_OutputTypes[i].compare("float") == 0)
            {
                outputDataContainers.push_back(std::vector<float>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("int") == 0)
            {
                outputDataContainers.push_back(std::vector<int>(model.GetOutputSize(i)));
            }
            else if (params.m_OutputTypes[i].compare("qasymm8") == 0)
            {
                outputDataContainers.push_back(std::vector<uint8_t>(model.GetOutputSize(i)));
            }
            else
            {
                ARMNN_LOG(fatal) << "Unsupported output tensor data type \"" << params.m_OutputTypes[i] << "\".";
                return EXIT_FAILURE;
            }
        }

        for (size_t x = 0; x < params.m_Iterations; x++)
        {
            // model.Run returns the inference time elapsed in EnqueueWorkload (in milliseconds).
            auto inference_duration = model.Run(inputDataContainers, outputDataContainers);

            if (params.m_GenerateTensorData)
            {
                ARMNN_LOG(warning) << "The input data was generated; note that the output will not be useful.";
            }

            // Print output tensors
            const auto& infosOut = model.GetOutputBindingInfos();
            for (size_t i = 0; i < numOutputs; i++)
            {
                const armnn::TensorInfo& infoOut = infosOut[i].second;
                auto outputTensorFile = params.m_OutputTensorFiles.empty() ? "" : params.m_OutputTensorFiles[i];

                TensorPrinter printer(inferenceModelParams.m_OutputBindings[i],
                                      infoOut,
                                      outputTensorFile,
                                      params.m_DequantizeOutput);
                mapbox::util::apply_visitor(printer, outputDataContainers[i]);
            }

            ARMNN_LOG(info) << "\nInference time: " << std::setprecision(2)
                            << std::fixed << inference_duration.count() << " ms\n";

            // If thresholdTime == 0.0 (default), then it hasn't been supplied at the command line.
            if (params.m_ThresholdTime != 0.0)
            {
                ARMNN_LOG(info) << "Threshold time: " << std::setprecision(2)
                                << std::fixed << params.m_ThresholdTime << " ms";
                auto thresholdMinusInference = params.m_ThresholdTime - inference_duration.count();
                ARMNN_LOG(info) << "Threshold time - Inference time: " << std::setprecision(2)
                                << std::fixed << thresholdMinusInference << " ms" << "\n";

                if (thresholdMinusInference < 0)
                {
                    std::string errorMessage = "Elapsed inference time is greater than provided threshold time.";
                    ARMNN_LOG(fatal) << errorMessage;
                }
            }
        }
    }
    catch (const armnn::Exception& e)
    {
        ARMNN_LOG(fatal) << "Armnn Error: " << e.what();
        return EXIT_FAILURE;
    }

    return EXIT_SUCCESS;
}
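// Example invocation (illustrative only; ExecuteNetworkProgramOptions.hpp defines the
// authoritative set of command line options):
//
//     ExecuteNetwork -f tflite-binary -m model.tflite -i input -o output -c CpuAcc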
// MAIN
int main(int argc, const char* argv[])
{
    // Configures logging for both the Arm NN library and this test program.
#ifdef NDEBUG
    armnn::LogSeverity level = armnn::LogSeverity::Info;
#else
    armnn::LogSeverity level = armnn::LogSeverity::Debug;
#endif
    armnn::ConfigureLogging(true, true, level);

    // Get ExecuteNetwork parameters and runtime options from the command line.
    ProgramOptions programOptions(argc, argv);

    // Create the runtime.
    std::shared_ptr<armnn::IRuntime> runtime(armnn::IRuntime::Create(programOptions.m_RuntimeOptions));

    std::string modelFormat = programOptions.m_ExNetParams.m_ModelFormat;

    // Forward to the implementation that matches the parser type.
    if (modelFormat.find("armnn") != std::string::npos)
    {
    #if defined(ARMNN_SERIALIZER)
        return MainImpl<armnnDeserializer::IDeserializer, float>(programOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with serialization support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("caffe") != std::string::npos)
    {
    #if defined(ARMNN_CAFFE_PARSER)
        return MainImpl<armnnCaffeParser::ICaffeParser, float>(programOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with Caffe parser support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("onnx") != std::string::npos)
    {
    #if defined(ARMNN_ONNX_PARSER)
        return MainImpl<armnnOnnxParser::IOnnxParser, float>(programOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with ONNX parser support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("tensorflow") != std::string::npos)
    {
    #if defined(ARMNN_TF_PARSER)
        return MainImpl<armnnTfParser::ITfParser, float>(programOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with TensorFlow parser support.";
        return EXIT_FAILURE;
    #endif
    }
    else if (modelFormat.find("tflite") != std::string::npos)
    {
        if (programOptions.m_ExNetParams.m_EnableDelegate)
        {
        #if defined(ARMNN_TFLITE_DELEGATE)
            return TfLiteDelegateMainImpl(programOptions.m_ExNetParams, runtime);
        #else
            ARMNN_LOG(fatal) << "Not built with Arm NN TfLite delegate support.";
            return EXIT_FAILURE;
        #endif
        }
    #if defined(ARMNN_TF_LITE_PARSER)
        return MainImpl<armnnTfLiteParser::ITfLiteParser, float>(programOptions.m_ExNetParams, runtime);
    #else
        ARMNN_LOG(fatal) << "Not built with TfLite parser support.";
        return EXIT_FAILURE;
    #endif
    }
    else
    {
        ARMNN_LOG(fatal) << "Unknown model format: '" << modelFormat
                         << "'. Please include 'armnn', 'caffe', 'tensorflow', 'tflite' or 'onnx'";
        return EXIT_FAILURE;
    }
}