//
// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
//

#include "ArmnnDriverImpl.hpp"
#include "ModelToINetworkConverter.hpp"
#include "ArmnnPreparedModel.hpp"
#include "SystemPropertiesUtils.hpp"

#if defined(ARMNN_ANDROID_P)
// The headers of the ML framework have changed between Android O and Android P.
// The validation functions have been moved into their own header, ValidateHal.h.
#include <ValidateHal.h>
#endif

#include <log/log.h>

using namespace std;
using namespace android;
using namespace android::nn;
using namespace android::hardware;

namespace
{

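// System property keys that allow the performance numbers reported in getCapabilities()
// to be tuned per device without rebuilding the driver.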
const char *g_Float32PerformanceExecTimeName = "ArmNN.float32Performance.execTime";
const char *g_Float32PerformancePowerUsageName = "ArmNN.float32Performance.powerUsage";
const char *g_Quantized8PerformanceExecTimeName = "ArmNN.quantized8Performance.execTime";
const char *g_Quantized8PerformancePowerUsageName = "ArmNN.quantized8Performance.powerUsage";

void NotifyCallbackAndCheck(const sp<IPreparedModelCallback>& callback,
                            ErrorStatus errorStatus,
                            const sp<IPreparedModel>& preparedModelPtr)
{
    Return<void> returned = callback->notify(errorStatus, preparedModelPtr);
    // This check is required; if the callback fails and the error is not checked, it will bring down the service.
    if (!returned.isOk())
    {
        ALOGE("V1_0::ArmnnDriverImpl::prepareModel: hidl callback failed to return properly: %s",
              returned.description().c_str());
    }
}

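// Helper that logs the failure, notifies the callback of the error, and returns the same
// error status, so callers can simply write 'return FailPrepareModel(...)'.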
Return<ErrorStatus> FailPrepareModel(ErrorStatus error,
                                     const string& message,
                                     const sp<IPreparedModelCallback>& callback)
{
    ALOGW("V1_0::ArmnnDriverImpl::prepareModel: %s", message.c_str());
    NotifyCallbackAndCheck(callback, error, nullptr);
    return error;
}

} // namespace

namespace armnn_driver
{
namespace V1_0
{

Return<void> ArmnnDriverImpl::getCapabilities(
        const armnn::IRuntimePtr& runtime,
        neuralnetworks::V1_0::IDevice::getCapabilities_cb cb)
{
    ALOGV("V1_0::ArmnnDriverImpl::getCapabilities()");

    neuralnetworks::V1_0::Capabilities capabilities;
    if (runtime)
    {
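        // Performance values follow NNAPI semantics: they are ratios relative to a reference
        // CPU implementation, so lower is better. They default to 0.1 here, but can be
        // overridden via the system properties defined above.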
        capabilities.float32Performance.execTime =
            ParseSystemProperty(g_Float32PerformanceExecTimeName, .1f);

        capabilities.float32Performance.powerUsage =
            ParseSystemProperty(g_Float32PerformancePowerUsageName, .1f);

        capabilities.quantized8Performance.execTime =
            ParseSystemProperty(g_Quantized8PerformanceExecTimeName, .1f);

        capabilities.quantized8Performance.powerUsage =
            ParseSystemProperty(g_Quantized8PerformancePowerUsageName, .1f);

        cb(ErrorStatus::NONE, capabilities);
    }
    else
    {
        capabilities.float32Performance.execTime = 0;
        capabilities.float32Performance.powerUsage = 0;
        capabilities.quantized8Performance.execTime = 0;
        capabilities.quantized8Performance.powerUsage = 0;

        cb(ErrorStatus::DEVICE_UNAVAILABLE, capabilities);
    }

    return Void();
}

Return<void> ArmnnDriverImpl::getSupportedOperations(
        const armnn::IRuntimePtr& runtime,
        const DriverOptions& options,
        const neuralnetworks::V1_0::Model& model,
        neuralnetworks::V1_0::IDevice::getSupportedOperations_cb cb)
{
    ALOGV("V1_0::ArmnnDriverImpl::getSupportedOperations()");

    vector<bool> result;

    if (!runtime)
    {
        cb(ErrorStatus::DEVICE_UNAVAILABLE, result);
        return Void();
    }

    // Run general model validation; if this fails, there is no point in analysing the model further.
    if (!android::nn::validateModel(model))
    {
        cb(ErrorStatus::INVALID_ARGUMENT, result);
        return Void();
    }

    // Attempt to convert the model to an ArmNN input network (INetwork).
    armnn_driver::ModelToINetworkConverter<HalVersion_1_0> modelConverter(options.GetComputeDevice(),
        model, options.GetForcedUnsupportedOperations());

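    // UnsupportedFeature is not a fatal failure here: it only means that some operations could
    // not be converted, and those are reported as unsupported in the per-operation results below.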
    if (modelConverter.GetConversionResult() != ConversionResult::Success
        && modelConverter.GetConversionResult() != ConversionResult::UnsupportedFeature)
    {
        cb(ErrorStatus::GENERAL_FAILURE, result);
        return Void();
    }

    // Check whether each operation was converted successfully, and copy the flags
    // into the result (vector<bool>) that is returned to Android.
    result.reserve(model.operations.size());
    for (uint32_t operationIdx = 0; operationIdx < model.operations.size(); operationIdx++)
    {
        bool operationSupported = modelConverter.IsOperationSupported(operationIdx);
        result.push_back(operationSupported);
    }

    cb(ErrorStatus::NONE, result);
    return Void();
}

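// Prepares a model for execution: converts the HAL model to an armnn::INetwork, optimizes it
// for the selected compute device, loads it into the runtime, and warms it up with a dummy
// inference before notifying the callback.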
Return<ErrorStatus> ArmnnDriverImpl::prepareModel(
        const armnn::IRuntimePtr& runtime,
        const armnn::IGpuAccTunedParametersPtr& clTunedParameters,
        const DriverOptions& options,
        const neuralnetworks::V1_0::Model& model,
        const sp<IPreparedModelCallback>& cb,
        bool float32ToFloat16)
{
    ALOGV("V1_0::ArmnnDriverImpl::prepareModel()");

    if (cb.get() == nullptr)
    {
        ALOGW("V1_0::ArmnnDriverImpl::prepareModel: Invalid callback passed to prepareModel");
        return ErrorStatus::INVALID_ARGUMENT;
    }

    if (!runtime)
    {
        return FailPrepareModel(ErrorStatus::DEVICE_UNAVAILABLE,
                                "V1_0::ArmnnDriverImpl::prepareModel: Device unavailable", cb);
    }

    if (!android::nn::validateModel(model))
    {
        return FailPrepareModel(ErrorStatus::INVALID_ARGUMENT,
                                "V1_0::ArmnnDriverImpl::prepareModel: Invalid model passed as input", cb);
    }

    // Deliberately ignore any unsupported operations requested by the options -
    // at this point we're being asked to prepare a model that we've already declared support for
    // and the operation indices may be different to those in getSupportedOperations anyway.
    set<unsigned int> unsupportedOperations;
    armnn_driver::ModelToINetworkConverter<HalVersion_1_0> modelConverter(options.GetComputeDevice(),
        model, unsupportedOperations);

    if (modelConverter.GetConversionResult() != ConversionResult::Success)
    {
        FailPrepareModel(ErrorStatus::GENERAL_FAILURE, "ModelToINetworkConverter failed", cb);
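        // The error has already been delivered through the callback; returning NONE here
        // indicates only that the prepareModel call itself was accepted.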
        return ErrorStatus::NONE;
    }

    // Optimize the network, optionally reducing FP32 computations to FP16.
    armnn::IOptimizedNetworkPtr optNet(nullptr, nullptr);
    armnn::OptimizerOptions optOptions;
    optOptions.m_ReduceFp32ToFp16 = float32ToFloat16;

    try
    {
        optNet = armnn::Optimize(*modelConverter.GetINetwork(),
                                 {options.GetComputeDevice()},
                                 runtime->GetDeviceSpec(),
                                 optOptions);
    }
    catch (armnn::Exception& e)
    {
        stringstream message;
        message << "armnn::Exception (" << e.what() << ") caught from optimize.";
        FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
        return ErrorStatus::NONE;
    }

    // Check that the optimized network is valid.
    if (!optNet)
    {
        FailPrepareModel(ErrorStatus::GENERAL_FAILURE,
                         "V1_0::ArmnnDriverImpl::prepareModel: Invalid optimized network", cb);
        return ErrorStatus::NONE;
    }

    // Export the optimized network graph to a dot file if an output dump directory
    // has been specified in the driver's arguments.
    ExportNetworkGraphToDotFile(*optNet,
                                options.GetRequestInputsAndOutputsDumpDir(),
                                model);

    // Load the optimized network into the runtime.
    armnn::NetworkId netId = 0;
    try
    {
        if (runtime->LoadNetwork(netId, move(optNet)) != armnn::Status::Success)
        {
            return FailPrepareModel(ErrorStatus::GENERAL_FAILURE,
                                    "V1_0::ArmnnDriverImpl::prepareModel: Network could not be loaded", cb);
        }
    }
    catch (armnn::Exception& e)
    {
        stringstream message;
        message << "armnn::Exception (" << e.what() << ") caught from LoadNetwork.";
        FailPrepareModel(ErrorStatus::GENERAL_FAILURE, message.str(), cb);
        return ErrorStatus::NONE;
    }

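    // Wrap the loaded network in an ArmnnPreparedModel, which services the execution
    // requests made against this model.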
    unique_ptr<ArmnnPreparedModel> preparedModel(new ArmnnPreparedModel(
        netId,
        runtime.get(),
        model,
        options.GetRequestInputsAndOutputsDumpDir(),
        options.IsGpuProfilingEnabled()
    ));

    // Run a single 'dummy' inference of the model. This means that CL kernels will get compiled
    // (and tuned if tuning is enabled) before the first 'real' inference, which removes that
    // overhead from the first user-visible execution.
    preparedModel->ExecuteWithDummyInputs();

    if (clTunedParameters &&
        options.GetClTunedParametersMode() == armnn::IGpuAccTunedParameters::Mode::UpdateTunedParameters)
    {
        // Now that we've done one inference the CL kernel parameters will have been tuned, so save the updated file.
        try
        {
            clTunedParameters->Save(options.GetClTunedParametersFile().c_str());
        }
        catch (const armnn::Exception& error)
        {
            ALOGE("V1_0::ArmnnDriverImpl: Failed to save CL tuned parameters file '%s': %s",
                  options.GetClTunedParametersFile().c_str(), error.what());
        }
    }

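    // Hand the prepared model over to the callback; ownership passes from the unique_ptr to
    // the strong pointer, which manages the object's lifetime from this point on.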
    NotifyCallbackAndCheck(cb, ErrorStatus::NONE, preparedModel.release());

    return ErrorStatus::NONE;
}

Return<DeviceStatus> ArmnnDriverImpl::getStatus()
{
    ALOGV("V1_0::ArmnnDriverImpl::getStatus()");

    return DeviceStatus::AVAILABLE;
}

} // namespace V1_0
} // namespace armnn_driver