//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "ExecuteNetworkParams.hpp"

#include "NetworkExecutionUtils/NetworkExecutionUtils.hpp"
#include <armnn/Logging.hpp>

#include <fmt/format.h>
#include <armnnUtils/Filesystem.hpp>

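// Validates the OpenCL tuning parameters: tuning level 0 reads an existing tuning file,
// levels 1-3 generate a tuning file at the given path, and any other level is rejected.
// A warning is emitted if GpuAcc is not among the requested compute devices, since tuning
// only applies to that backend.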
void CheckClTuningParameter(const int& tuningLevel,
                            const std::string& tuningPath,
                            const std::vector<armnn::BackendId> computeDevices)
{
    if (!tuningPath.empty())
    {
        if (tuningLevel == 0)
        {
            ARMNN_LOG(info) << "Using cl tuning file: " << tuningPath << "\n";
            if (!ValidatePath(tuningPath, true))
            {
                throw armnn::InvalidArgumentException("The tuning path is not valid");
            }
        }
        else if ((1 <= tuningLevel) && (tuningLevel <= 3))
        {
            ARMNN_LOG(info) << "Starting execution to generate a cl tuning file: " << tuningPath << "\n"
                            << "Tuning level in use: " << tuningLevel << "\n";
        }
        else if ((tuningLevel < 0) || (tuningLevel > 3))
        {
            throw armnn::InvalidArgumentException(fmt::format("The tuning level {} is not valid.",
                                                              tuningLevel));
        }

        // Ensure that GpuAcc is enabled; otherwise no tuning data is used or generated.
        // Only warn if it is not enabled.
        auto it = std::find(computeDevices.begin(), computeDevices.end(), "GpuAcc");
        if (it == computeDevices.end())
        {
            ARMNN_LOG(warning) << "To use CL tuning, the compute device GpuAcc needs to be active.";
        }
    }
}

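// Performs consistency checks on the parsed command-line parameters and throws
// armnn::InvalidArgumentException when an invalid value or combination is found.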
void ExecuteNetworkParams::ValidateParams()
{
    if (m_DynamicBackendsPath == "")
    {
        // Check compute devices are valid unless they are dynamically loaded at runtime
        std::string invalidBackends;
        if (!CheckRequestedBackendsAreValid(m_ComputeDevices, armnn::Optional<std::string&>(invalidBackends)))
        {
            ARMNN_LOG(fatal) << "The list of preferred devices contains invalid backend IDs: "
                             << invalidBackends;
        }
    }
    CheckClTuningParameter(m_TuningLevel, m_TuningPath, m_ComputeDevices);

    if (m_EnableBf16TurboMode && m_EnableFp16TurboMode)
    {
        throw armnn::InvalidArgumentException("BFloat16 and Float16 turbo mode cannot be "
                                              "enabled at the same time.");
    }

    // Check input tensor shapes
    if ((m_InputTensorShapes.size() != 0) &&
        (m_InputTensorShapes.size() != m_InputNames.size()))
    {
        throw armnn::InvalidArgumentException("input-name and input-tensor-shape must have "
                                              "the same number of elements.");
    }

    if (m_InputTensorDataFilePaths.size() != 0)
    {
        if (!ValidatePaths(m_InputTensorDataFilePaths, true))
        {
            throw armnn::InvalidArgumentException("One or more input data file paths are not valid.");
        }

        if (m_InputTensorDataFilePaths.size() < m_InputNames.size())
        {
            throw armnn::InvalidArgumentException(
                fmt::format("According to the number of input names provided, the network has {} "
                            "inputs, but only {} input-tensor-data file paths were given. Each input of the "
                            "model is expected to be stored in its own file.",
                            m_InputNames.size(),
                            m_InputTensorDataFilePaths.size()));
        }
    }

    // Check that threshold time is not less than zero
    if (m_ThresholdTime < 0)
    {
        throw armnn::InvalidArgumentException("Threshold time supplied as a command line argument is less than zero.");
    }

    // Warn if ExecuteNetwork will generate dummy input data
    if (m_GenerateTensorData)
    {
        ARMNN_LOG(warning) << "No input files provided, input tensors will be filled with 0s.";
    }

    if (m_AllowExpandedDims && m_InferOutputShape)
    {
        throw armnn::InvalidArgumentException("infer-output-shape and allow-expanded-dims cannot be used together.");
    }
}

#if defined(ARMNN_TFLITE_DELEGATE)
/**
 * A utility method that populates a DelegateOptions object from this ExecuteNetworkParams.
 *
 * @return a populated armnnDelegate::DelegateOptions object.
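 *
 * Example (a minimal sketch; assumes the publicly assignable members declared in
 * ExecuteNetworkParams.hpp, and that the resulting options are later used to create
 * an Arm NN TfLite delegate):
 * @code
 *     ExecuteNetworkParams params;
 *     params.m_ComputeDevices = { "GpuAcc", "CpuAcc" };
 *     params.ValidateParams();
 *     armnnDelegate::DelegateOptions delegateOptions = params.ToDelegateOptions();
 * @endcode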
 */
armnnDelegate::DelegateOptions ExecuteNetworkParams::ToDelegateOptions() const
{
    armnnDelegate::DelegateOptions delegateOptions(m_ComputeDevices);
    delegateOptions.SetDynamicBackendsPath(m_DynamicBackendsPath);
    delegateOptions.SetGpuProfilingState(m_EnableProfiling);

    armnn::OptimizerOptions options;
    options.m_ReduceFp32ToFp16 = m_EnableFp16TurboMode;
    options.m_ReduceFp32ToBf16 = m_EnableBf16TurboMode;
    options.m_Debug = m_PrintIntermediate;
    options.m_ProfilingEnabled = m_EnableProfiling;
    delegateOptions.SetInternalProfilingParams(m_EnableProfiling, armnn::ProfilingDetailsMethod::DetailsWithEvents);
    options.m_shapeInferenceMethod = armnn::ShapeInferenceMethod::ValidateOnly;
    if (m_InferOutputShape)
    {
        options.m_shapeInferenceMethod = armnn::ShapeInferenceMethod::InferAndValidate;
    }

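    // Backend-specific options for GpuAcc and CpuAcc are forwarded through the model options.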
    armnn::BackendOptions gpuAcc("GpuAcc",
                                 {
                                     { "FastMathEnabled", m_EnableFastMath },
                                     { "SaveCachedNetwork", m_SaveCachedNetwork },
                                     { "CachedNetworkFilePath", m_CachedNetworkFilePath },
                                     { "TuningLevel", m_TuningLevel },
                                     { "TuningFile", m_TuningPath.c_str() },
                                     { "KernelProfilingEnabled", m_EnableProfiling },
                                     { "MLGOTuningFilePath", m_MLGOTuningFilePath }
                                 });

    armnn::BackendOptions cpuAcc("CpuAcc",
                                 {
                                     { "FastMathEnabled", m_EnableFastMath },
                                     { "NumberOfThreads", m_NumberOfThreads }
                                 });
    options.m_ModelOptions.push_back(gpuAcc);
    options.m_ModelOptions.push_back(cpuAcc);

    if (m_InferOutputShape)
    {
        armnn::BackendOptions networkOption("ShapeInferenceMethod",
                                            {
                                                { "InferAndValidate", true }
                                            });
        options.m_ModelOptions.push_back(networkOption);
    }
    if (m_AllowExpandedDims)
    {
        armnn::BackendOptions networkOption("AllowExpandedDims",
                                            {
                                                { "AllowExpandedDims", true }
                                            });
        options.m_ModelOptions.push_back(networkOption);
    }
    delegateOptions.SetOptimizerOptions(options);

    return delegateOptions;
}

#endif