//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ExecuteNetworkParams.hpp"

#include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"
#include <armnn/Logging.hpp>

#include <fmt/format.h>
#include <armnnUtils/Filesystem.hpp>

#include <algorithm> // for std::find

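// Validates the OpenCL tuning parameters: when a tuning file is given, tuning level 0 reads an
// existing file (the path must exist) while levels 1-3 generate a new one. A warning is logged if
// GpuAcc is not among the requested compute devices, since tuning only affects the GpuAcc backend.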
void CheckClTuningParameter(const int& tuningLevel,
                            const std::string& tuningPath,
                            const std::vector<armnn::BackendId> computeDevices)
{
    if (!tuningPath.empty())
    {
        if (tuningLevel == 0)
        {
            ARMNN_LOG(info) << "Using CL tuning file: " << tuningPath << "\n";
            if (!ValidatePath(tuningPath, true))
            {
                throw armnn::InvalidArgumentException("The tuning path is not valid");
            }
        }
        else if ((1 <= tuningLevel) && (tuningLevel <= 3))
        {
            ARMNN_LOG(info) << "Starting execution to generate a CL tuning file: " << tuningPath << "\n"
                            << "Tuning level in use: " << tuningLevel << "\n";
        }
        else if ((tuningLevel < 0) || (tuningLevel > 3))
        {
            throw armnn::InvalidArgumentException(fmt::format("The tuning level {} is not valid.",
                                                              tuningLevel));
        }

        // Ensure that GpuAcc is enabled. Otherwise no tuning data is used or generated.
        // Only warn if it is not enabled.
        auto it = std::find(computeDevices.begin(), computeDevices.end(), "GpuAcc");
        if (it == computeDevices.end())
        {
            ARMNN_LOG(warning) << "To use CL tuning the compute device GpuAcc needs to be active.";
        }
    }
}

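// Sanity-checks the command line parameters before execution: backend IDs (unless backends are
// loaded dynamically at runtime), CL tuning options, the BF16/fast-math combination, input tensor
// shapes and data file paths, the threshold time and mutually exclusive shape options. Most
// failures raise an armnn::InvalidArgumentException; an invalid backend list is reported via
// ARMNN_LOG(fatal).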
void ExecuteNetworkParams::ValidateParams()
{
    if (m_DynamicBackendsPath.empty())
    {
        // Check that the compute devices are valid unless they are dynamically loaded at runtime
        std::string invalidBackends;
        if (!CheckRequestedBackendsAreValid(m_ComputeDevices, armnn::Optional<std::string&>(invalidBackends)))
        {
            ARMNN_LOG(fatal) << "The list of preferred devices contains invalid backend IDs: "
                             << invalidBackends;
        }
    }
    CheckClTuningParameter(m_TuningLevel, m_TuningPath, m_ComputeDevices);

    if (m_EnableBf16TurboMode && !m_EnableFastMath)
    {
        throw armnn::InvalidArgumentException("To use BF16 please also use --enable-fast-math.");
    }

    // Check input tensor shapes
    if (!m_InputTensorShapes.empty() &&
        (m_InputTensorShapes.size() != m_InputNames.size()))
    {
        throw armnn::InvalidArgumentException("input-name and input-tensor-shape must have "
                                              "the same number of elements.");
    }

    if (!m_InputTensorDataFilePaths.empty())
    {
        if (!ValidatePaths(m_InputTensorDataFilePaths, true))
        {
            throw armnn::InvalidArgumentException("One or more input data file paths are not valid.");
        }

        if (m_InputTensorDataFilePaths.size() < m_InputNames.size())
        {
            throw armnn::InvalidArgumentException(
                fmt::format("According to the number of input names provided, the network has {} "
                            "inputs, but only {} input-tensor-data file paths were provided. Each "
                            "input of the model is expected to be stored in its own file.",
                            m_InputNames.size(),
                            m_InputTensorDataFilePaths.size()));
        }
    }

    // Check that threshold time is not less than zero
    if (m_ThresholdTime < 0)
    {
        throw armnn::InvalidArgumentException("Threshold time supplied as a command line argument is less than zero.");
    }

    // Warn if ExecuteNetwork will generate dummy input data
    if (m_GenerateTensorData)
    {
        ARMNN_LOG(warning) << "No input files provided, input tensors will be filled with 0s.";
    }

    if (m_AllowExpandedDims && m_InferOutputShape)
    {
        throw armnn::InvalidArgumentException("infer-output-shape and allow-expanded-dims cannot be used together.");
    }
}

#if defined(ARMNN_TFLITE_DELEGATE)
/**
 * A utility method that populates a DelegateOptions object from this ExecuteNetworkParams.
 *
 * @return a populated armnnDelegate::DelegateOptions object.
 */
armnnDelegate::DelegateOptions ExecuteNetworkParams::ToDelegateOptions() const
{
    armnnDelegate::DelegateOptions delegateOptions(m_ComputeDevices);
    delegateOptions.SetDynamicBackendsPath(m_DynamicBackendsPath);
    delegateOptions.SetGpuProfilingState(m_EnableProfiling);

    armnn::OptimizerOptions options;
    options.m_ReduceFp32ToFp16 = m_EnableFp16TurboMode;
    options.m_Debug = m_PrintIntermediate;
    options.m_DebugToFile = m_PrintIntermediateOutputsToFile;
    options.m_ProfilingEnabled = m_EnableProfiling;
    delegateOptions.SetInternalProfilingParams(m_EnableProfiling, armnn::ProfilingDetailsMethod::DetailsWithEvents);
    options.m_shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly;
    if (m_InferOutputShape)
    {
        options.m_shapeInferenceMethod = armnn::ShapeInferenceMethod::InferAndValidate;
    }

    armnn::BackendOptions gpuAcc("GpuAcc",
                                 {
                                     { "FastMathEnabled", m_EnableFastMath },
                                     { "SaveCachedNetwork", m_SaveCachedNetwork },
                                     { "CachedNetworkFilePath", m_CachedNetworkFilePath },
                                     { "TuningLevel", m_TuningLevel },
                                     { "TuningFile", m_TuningPath.c_str() },
                                     { "KernelProfilingEnabled", m_EnableProfiling },
                                     { "MLGOTuningFilePath", m_MLGOTuningFilePath }
                                 });

    armnn::BackendOptions cpuAcc("CpuAcc",
                                 {
                                     { "FastMathEnabled", m_EnableFastMath },
                                     { "NumberOfThreads", m_NumberOfThreads }
                                 });
    options.m_ModelOptions.push_back(gpuAcc);
    options.m_ModelOptions.push_back(cpuAcc);

    if (m_InferOutputShape)
    {
        armnn::BackendOptions networkOption("ShapeInferenceMethod",
                                            {
                                                { "InferAndValidate", true }
                                            });
        options.m_ModelOptions.push_back(networkOption);
    }
    if (m_AllowExpandedDims)
    {
        armnn::BackendOptions networkOption("AllowExpandedDims",
                                            {
                                                { "AllowExpandedDims", true }
                                            });
        options.m_ModelOptions.push_back(networkOption);
    }
    delegateOptions.SetOptimizerOptions(options);

    return delegateOptions;
}

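// Illustrative usage sketch (not part of ExecuteNetwork itself): the returned options are normally
// handed to the Arm NN TFLite delegate, following the delegate quick-start pattern. 'params' and
// 'interpreter' below are placeholder names for an already-populated ExecuteNetworkParams and a
// tflite::Interpreter created elsewhere.
//
//     armnnDelegate::DelegateOptions delegateOptions = params.ToDelegateOptions();
//     std::unique_ptr<TfLiteDelegate, decltype(&armnnDelegate::TfLiteArmnnDelegateDelete)>
//         theArmnnDelegate(armnnDelegate::TfLiteArmnnDelegateCreate(delegateOptions),
//                          armnnDelegate::TfLiteArmnnDelegateDelete);
//     interpreter->ModifyGraphWithDelegate(theArmnnDelegate.get());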
#endif