//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#define LOG_TAG "ArmnnDriver"

#include "ArmnnPreparedModel_1_2.hpp"
#include "Utils.hpp"

#include <boost/format.hpp>
#include <log/log.h>
#include <OperationsUtils.h>
#include <ExecutionBurstServer.h>
#include <ValidateHal.h>

#include <cassert>
#include <cinttypes>

using namespace android;
using namespace android::hardware;

namespace {

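// Sentinel returned when timing was not measured; the NN HAL defines UINT64_MAX as
// "no timing information available".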
static const Timing g_NoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
using namespace armnn_driver;
using TimePoint = std::chrono::steady_clock::time_point;

TimePoint Now()
{
    return std::chrono::steady_clock::now();
}

unsigned long MicrosecondsDuration(TimePoint endPoint, TimePoint startPoint)
{
    return static_cast<unsigned long>(std::chrono::duration_cast<std::chrono::microseconds>(
                                      endPoint - startPoint).count());
}

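// Reports the outcome of an asynchronous execution to the runtime's callback and logs
// (rather than propagates) any transport failure, so a dead client cannot crash the service.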
void NotifyCallbackAndCheck(const ::android::sp<V1_0::IExecutionCallback>& callback, ErrorStatus errorStatus,
                            std::string callingFunction)
{
    Return<void> returned = callback->notify(errorStatus);
    // This check is required; if the callback fails and the failure is not caught here,
    // it will bring down the service.
    if (!returned.isOk())
    {
        ALOGE("ArmnnDriver::%s: hidl callback failed to return properly: %s",
              callingFunction.c_str(), returned.description().c_str());
    }
}

void NotifyCallbackAndCheck(const ::android::sp<V1_2::IExecutionCallback>& callback, ErrorStatus errorStatus,
                            std::string callingFunction)
{
    Return<void> returned = callback->notify(errorStatus);
    // This check is required; if the callback fails and the failure is not caught here,
    // it will bring down the service.
    if (!returned.isOk())
    {
        ALOGE("ArmnnDriver::%s: hidl callback failed to return properly: %s",
              callingFunction.c_str(), returned.description().c_str());
    }
}

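// Checks that any dimensions supplied with a request argument match the tensor info the
// network was prepared with. An empty dimensions vector means the argument defers to the
// model's operand shape and is accepted as-is.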
bool ValidateRequestArgument(const RequestArgument& requestArg, const armnn::TensorInfo& tensorInfo)
{
    if (requestArg.dimensions.size() != 0)
    {
        if (requestArg.dimensions.size() != tensorInfo.GetNumDimensions())
        {
            ALOGE("Mismatched dimensions (request argument: %zu, expected: %u)",
                  requestArg.dimensions.size(), tensorInfo.GetNumDimensions());
            return false;
        }

        for (unsigned int d = 0; d < tensorInfo.GetNumDimensions(); ++d)
        {
            if (requestArg.dimensions[d] != tensorInfo.GetShape()[d])
            {
                ALOGE("Mismatched size for dimension %u (request argument: %u, expected %u)",
                      d, requestArg.dimensions[d], tensorInfo.GetShape()[d]);
                return false;
            }
        }
    }

    return true;
}

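// Builds an armnn::Tensor that aliases the request's shared memory for this argument.
// Returns an empty Tensor (null memory area) on validation failure, which callers treat
// as a GENERAL_FAILURE.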
armnn::Tensor GetTensorForRequestArgument(const RequestArgument& requestArg,
                                          const armnn::TensorInfo& tensorInfo,
                                          const std::vector<::android::nn::RunTimePoolInfo>& requestPools)
{
    if (!ValidateRequestArgument(requestArg, tensorInfo))
    {
        return armnn::Tensor();
    }

    return armnn::Tensor(tensorInfo, GetMemoryFromPool(requestArg.location, requestPools));
}

inline std::string BuildTensorName(const char* tensorNamePrefix, std::size_t index)
{
    return tensorNamePrefix + std::to_string(index);
}

} // anonymous namespace

namespace armnn_driver
{

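// A single request thread is shared by every instance of this prepared model type;
// Execute() posts work to it and returns to the caller immediately.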
template<typename HalVersion>
RequestThread<ArmnnPreparedModel_1_2, HalVersion> ArmnnPreparedModel_1_2<HalVersion>::m_RequestThread;

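// Dumps each tensor in the collection to the configured directory, one file per request,
// for offline debugging. No-op unless a dump directory was supplied at construction.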
template<typename HalVersion>
template<typename TensorBindingCollection>
void ArmnnPreparedModel_1_2<HalVersion>::DumpTensorsIfRequired(char const* tensorNamePrefix,
                                                               const TensorBindingCollection& tensorBindings)
{
    if (!m_RequestInputsAndOutputsDumpDir.empty())
    {
        const std::string requestName = boost::str(boost::format("%1%_%2%.dump") % m_NetworkId % m_RequestCount);
        for (std::size_t i = 0u; i < tensorBindings.size(); ++i)
        {
            DumpTensor(m_RequestInputsAndOutputsDumpDir,
                       requestName,
                       BuildTensorName(tensorNamePrefix, i),
                       tensorBindings[i].second);
        }
    }
}

template<typename HalVersion>
ArmnnPreparedModel_1_2<HalVersion>::ArmnnPreparedModel_1_2(armnn::NetworkId networkId,
                                                           armnn::IRuntime* runtime,
                                                           const V1_2::Model& model,
                                                           const std::string& requestInputsAndOutputsDumpDir,
                                                           const bool gpuProfilingEnabled)
    : m_NetworkId(networkId)
    , m_Runtime(runtime)
    , m_Model(model)
    , m_RequestCount(0)
    , m_RequestInputsAndOutputsDumpDir(requestInputsAndOutputsDumpDir)
    , m_GpuProfilingEnabled(gpuProfilingEnabled)
{
    // Enable profiling if required.
    m_Runtime->GetProfiler(m_NetworkId)->EnableProfiling(m_GpuProfilingEnabled);
}

template<typename HalVersion>
ArmnnPreparedModel_1_2<HalVersion>::~ArmnnPreparedModel_1_2()
{
    // Get a hold of the profiler used by this model.
    std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkId);

    // Unload the network associated with this model.
    m_Runtime->UnloadNetwork(m_NetworkId);

    // Dump the profiling info to a file if required.
    DumpJsonProfilingIfRequired(m_GpuProfilingEnabled, m_RequestInputsAndOutputsDumpDir, m_NetworkId, profiler.get());
}

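// Asynchronous entry points for HAL 1.0 and 1.2 clients; both delegate to the shared
// Execute() helper below. Note that the 1.2 path currently ignores the MeasureTiming
// argument on this asynchronous route.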
template<typename HalVersion>
Return<ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::execute(const Request& request,
                                                                const ::android::sp<V1_0::IExecutionCallback>& callback)
{
    return Execute<V1_0::IExecutionCallback>(request, callback);
}

template<typename HalVersion>
Return<ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::execute_1_2(const Request& request,
                                                                    MeasureTiming,
                                                                    const sp<V1_2::IExecutionCallback>& callback)
{
    return Execute<V1_2::IExecutionCallback>(request, callback);
}

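// Synchronous execution: validates the request, maps its memory pools, runs the workload
// inline on the calling thread, and reports driver/device timings when MeasureTiming::YES
// is requested.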
template<typename HalVersion>
Return<void> ArmnnPreparedModel_1_2<HalVersion>::executeSynchronously(const Request& request,
                                                                      MeasureTiming measureTiming,
                                                                      executeSynchronously_cb cb)
{
    ALOGV("ArmnnPreparedModel_1_2::executeSynchronously(): %s", GetModelSummary(m_Model).c_str());
    m_RequestCount++;

    if (cb == nullptr)
    {
        ALOGE("ArmnnPreparedModel_1_2::executeSynchronously invalid callback passed");
        return Void();
    }

    TimePoint driverStart, driverEnd, deviceStart, deviceEnd;

    if (measureTiming == MeasureTiming::YES)
    {
        driverStart = Now();
    }

    if (!android::nn::validateRequest(request, m_Model))
    {
        ALOGE("ArmnnPreparedModel_1_2::executeSynchronously request is invalid for this model");
        cb(ErrorStatus::INVALID_ARGUMENT, {}, g_NoTiming);
        return Void();
    }

    // allocate the tensors on the heap, as they are passed to the request thread
    auto pInputTensors = std::make_shared<armnn::InputTensors>();
    auto pOutputTensors = std::make_shared<armnn::OutputTensors>();

    // map the memory pool into shared pointers
    // use a shared memory pools vector on the heap, as it is passed to the request thread
    auto pMemPools = std::make_shared<std::vector<android::nn::RunTimePoolInfo>>();

    if (!setRunTimePoolInfosFromHidlMemories(pMemPools.get(), request.pools))
    {
        cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
        return Void();
    }

    // add the inputs and outputs with their data
    try
    {
        pInputTensors->reserve(request.inputs.size());
        for (unsigned int i = 0; i < request.inputs.size(); i++)
        {
            const auto& inputArg = request.inputs[i];

            const armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
            const armnn::Tensor inputTensor = GetTensorForRequestArgument(inputArg, inputTensorInfo, *pMemPools);

            if (inputTensor.GetMemoryArea() == nullptr)
            {
                ALOGE("Cannot execute request. Error converting request input %u to tensor", i);
                cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
                return Void();
            }

            pInputTensors->emplace_back(i, inputTensor);
        }

        pOutputTensors->reserve(request.outputs.size());
        for (unsigned int i = 0; i < request.outputs.size(); i++)
        {
            const auto& outputArg = request.outputs[i];

            const armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
            const armnn::Tensor outputTensor = GetTensorForRequestArgument(outputArg, outputTensorInfo, *pMemPools);

            if (outputTensor.GetMemoryArea() == nullptr)
            {
                ALOGE("Cannot execute request. Error converting request output %u to tensor", i);
                cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
                return Void();
            }

            pOutputTensors->emplace_back(i, outputTensor);
        }
    }
    catch (armnn::Exception& e)
    {
        ALOGW("armnn::Exception caught while preparing for EnqueueWorkload: %s", e.what());
        cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
        return Void();
    }

    ALOGV("ArmnnPreparedModel_1_2::executeSynchronously() before Execution");

    DumpTensorsIfRequired("Input", *pInputTensors);

    // run it
    try
    {
        if (measureTiming == MeasureTiming::YES)
        {
            deviceStart = Now();
        }

        armnn::Status status = m_Runtime->EnqueueWorkload(m_NetworkId, *pInputTensors, *pOutputTensors);

        if (measureTiming == MeasureTiming::YES)
        {
            deviceEnd = Now();
        }

        if (status != armnn::Status::Success)
        {
            ALOGW("EnqueueWorkload failed");
            cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
            return Void();
        }
    }
    catch (armnn::Exception& e)
    {
        ALOGW("armnn::Exception caught from EnqueueWorkload: %s", e.what());
        cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
        return Void();
    }

    DumpTensorsIfRequired("Output", *pOutputTensors);

    // Commit output buffers.
    // Note that we update *all* pools, even if they aren't actually used as outputs -
    // this is simpler and is what the CpuExecutor does.
    for (android::nn::RunTimePoolInfo& pool : *pMemPools)
    {
        pool.update();
    }

    ALOGV("ArmnnPreparedModel_1_2::executeSynchronously() after Execution");

    if (measureTiming == MeasureTiming::YES)
    {
        driverEnd = Now();
        Timing timing;
        timing.timeOnDevice = MicrosecondsDuration(deviceEnd, deviceStart);
        timing.timeInDriver = MicrosecondsDuration(driverEnd, driverStart);
        ALOGV("ArmnnPreparedModel_1_2::executeSynchronously timing Device = %" PRIu64 " Driver = %" PRIu64,
              timing.timeOnDevice, timing.timeInDriver);
        cb(ErrorStatus::NONE, {}, timing);
    }
    else
    {
        cb(ErrorStatus::NONE, {}, g_NoTiming);
    }

    return Void();
}

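// Adapter that lets the NNAPI ExecutionBurstServer drive this prepared model over a fast
// message queue. Burst requests refer to memory pools by slot number, so the hidl_memory
// cached for each slot is used to rebuild a full Request, which is then forwarded to
// executeSynchronously().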
template<typename HalVersion>
class ArmnnBurstExecutorWithCache : public ExecutionBurstServer::IBurstExecutorWithCache
{
public:
    ArmnnBurstExecutorWithCache(ArmnnPreparedModel_1_2<HalVersion>* preparedModel)
        : m_PreparedModel(preparedModel)
    {}

    bool isCacheEntryPresent(int slot) const override
    {
        const auto it = m_MemoryCache.find(slot);
        return (it != m_MemoryCache.end()) && it->second.valid();
    }

    void addCacheEntry(const hidl_memory& memory, int slot) override
    {
        m_MemoryCache[slot] = memory;
    }

    void removeCacheEntry(int slot) override
    {
        m_MemoryCache.erase(slot);
    }

    std::tuple<ErrorStatus, hidl_vec<OutputShape>, Timing> execute(
            const Request& request, const std::vector<int>& slots,
            MeasureTiming measure) override
    {
        ALOGV("ArmnnPreparedModel_1_2::BurstExecutorWithCache::execute");
        TimePoint driverStart, driverEnd, deviceStart, deviceEnd;

        if (measure == MeasureTiming::YES)
        {
            driverStart = Now();
        }

        // The request refers to its memory pools by position, where slots[i] is the cache
        // slot backing pool i, so fill pools by index rather than by slot number (indexing
        // by slot number could write past the end of the vector).
        hidl_vec<hidl_memory> pools(slots.size());
        for (size_t i = 0u; i < slots.size(); ++i)
        {
            const int slot = slots[i];
            if (!isCacheEntryPresent(slot))
            {
                ALOGE("ArmnnPreparedModel_1_2::BurstExecutorWithCache::no cache entry present");
                return std::tuple<ErrorStatus, hidl_vec<OutputShape>, Timing>(ErrorStatus::INVALID_ARGUMENT,
                                                                              {},
                                                                              g_NoTiming);
            }
            pools[i] = m_MemoryCache[slot];
        }

        Request fullRequest = request;
        fullRequest.pools = std::move(pools);

        // Setup callback
        ErrorStatus returnedStatus = ErrorStatus::GENERAL_FAILURE;
        hidl_vec<OutputShape> returnedOutputShapes;
        Timing returnedTiming;

        auto cb = [&returnedStatus, &returnedOutputShapes, &returnedTiming](ErrorStatus status,
                                                                            const hidl_vec<OutputShape>& outputShapes,
                                                                            const Timing& timing)
        {
            returnedStatus = status;
            returnedOutputShapes = outputShapes;
            returnedTiming = timing;
        };

        // Execute
        ALOGV("ArmnnPreparedModel_1_2::BurstExecutorWithCache executing");
        Return<void> ret = m_PreparedModel->executeSynchronously(fullRequest, measure, cb);

        if (!ret.isOk() || returnedStatus != ErrorStatus::NONE)
        {
            ALOGE("ArmnnPreparedModel_1_2::BurstExecutorWithCache::error executing");
            return std::tuple<ErrorStatus, hidl_vec<OutputShape>, Timing>(returnedStatus, {}, returnedTiming);
        }

        return std::tuple<ErrorStatus, hidl_vec<OutputShape>, Timing>(returnedStatus,
                                                                      std::move(returnedOutputShapes),
                                                                      returnedTiming);
    }

private:
    ArmnnPreparedModel_1_2<HalVersion>* m_PreparedModel;
    std::map<int, hidl_memory> m_MemoryCache;
};

template<typename HalVersion>
Return<void> ArmnnPreparedModel_1_2<HalVersion>::configureExecutionBurst(
        const sp<V1_2::IBurstCallback>& callback,
        const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
        const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
        V1_2::IPreparedModel::configureExecutionBurst_cb cb)
{
    ALOGV("ArmnnPreparedModel_1_2::configureExecutionBurst");
    const std::shared_ptr<ArmnnBurstExecutorWithCache<HalVersion>> executorWithCache =
            std::make_shared<ArmnnBurstExecutorWithCache<HalVersion>>(this);
    const sp<V1_2::IBurstContext> burst = ExecutionBurstServer::create(
            callback, requestChannel, resultChannel, executorWithCache);

    if (burst == nullptr)
    {
        cb(ErrorStatus::GENERAL_FAILURE, {});
    }
    else
    {
        cb(ErrorStatus::NONE, burst);
    }
    return Void();
}

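// Runs on the request thread: executes the inference that Execute() queued and reports
// the outcome through the client's callback.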
template<typename HalVersion>
void ArmnnPreparedModel_1_2<HalVersion>::ExecuteGraph(
        std::shared_ptr<std::vector<::android::nn::RunTimePoolInfo>>& pMemPools,
        std::shared_ptr<armnn::InputTensors>& pInputTensors,
        std::shared_ptr<armnn::OutputTensors>& pOutputTensors,
        const ::android::sp<V1_0::IExecutionCallback>& callback)
{
    ALOGV("ArmnnPreparedModel_1_2::ExecuteGraph(...)");

    DumpTensorsIfRequired("Input", *pInputTensors);

    // run it
    try
    {
        armnn::Status status = m_Runtime->EnqueueWorkload(m_NetworkId, *pInputTensors, *pOutputTensors);
        if (status != armnn::Status::Success)
        {
            ALOGW("EnqueueWorkload failed");
            NotifyCallbackAndCheck(callback, ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel_1_2::ExecuteGraph");
            return;
        }
    }
    catch (armnn::Exception& e)
    {
        ALOGW("armnn::Exception caught from EnqueueWorkload: %s", e.what());
        NotifyCallbackAndCheck(callback, ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel_1_2::ExecuteGraph");
        return;
    }

    DumpTensorsIfRequired("Output", *pOutputTensors);

    // Commit output buffers.
    // Note that we update *all* pools, even if they aren't actually used as outputs -
    // this is simpler and is what the CpuExecutor does.
    for (android::nn::RunTimePoolInfo& pool : *pMemPools)
    {
        pool.update();
    }

    NotifyCallbackAndCheck(callback, ErrorStatus::NONE, "ExecuteGraph");
}

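// Runs the network once with freshly allocated (and therefore zero-initialised) buffers,
// so the driver can verify at preparation time that the network actually executes on the
// chosen backend and surface failures early.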
template<typename HalVersion>
bool ArmnnPreparedModel_1_2<HalVersion>::ExecuteWithDummyInputs()
{
    std::vector<std::vector<char>> storage;
    armnn::InputTensors inputTensors;
    for (unsigned int i = 0; i < m_Model.inputIndexes.size(); i++)
    {
        const armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
        storage.emplace_back(inputTensorInfo.GetNumBytes());
        const armnn::ConstTensor inputTensor(inputTensorInfo, storage.back().data());

        inputTensors.emplace_back(i, inputTensor);
    }

    armnn::OutputTensors outputTensors;
    for (unsigned int i = 0; i < m_Model.outputIndexes.size(); i++)
    {
        const armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
        storage.emplace_back(outputTensorInfo.GetNumBytes());
        const armnn::Tensor outputTensor(outputTensorInfo, storage.back().data());

        outputTensors.emplace_back(i, outputTensor);
    }

    try
    {
        armnn::Status status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
        if (status != armnn::Status::Success)
        {
            ALOGW("ExecuteWithDummyInputs: EnqueueWorkload failed");
            return false;
        }
    }
    catch (armnn::Exception& e)
    {
        ALOGW("ExecuteWithDummyInputs: armnn::Exception caught from EnqueueWorkload: %s", e.what());
        return false;
    }
    return true;
}

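// Shared asynchronous implementation: validates the request, maps its memory pools and
// tensors, then posts the work to the request thread. Results are delivered later via the
// supplied callback; errors found before posting are reported immediately.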
template<typename HalVersion>
template<typename ExecutionCallback>
Return<ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::Execute(const Request& request,
                                                                const sp<ExecutionCallback>& callback)
{
    ALOGV("ArmnnPreparedModel_1_2::execute(): %s", GetModelSummary(m_Model).c_str());
    m_RequestCount++;

    if (callback.get() == nullptr)
    {
        ALOGE("ArmnnPreparedModel_1_2::execute invalid callback passed");
        return ErrorStatus::INVALID_ARGUMENT;
    }

    if (!android::nn::validateRequest(request, m_Model))
    {
        NotifyCallbackAndCheck(callback, ErrorStatus::INVALID_ARGUMENT, "ArmnnPreparedModel_1_2::execute");
        return ErrorStatus::INVALID_ARGUMENT;
    }

    if (!m_RequestInputsAndOutputsDumpDir.empty())
    {
        ALOGD("Dumping inputs and outputs for request %" PRIuPTR, reinterpret_cast<std::uintptr_t>(callback.get()));
    }

    // allocate the tensors on the heap, as they are passed to the request thread
    auto pInputTensors = std::make_shared<armnn::InputTensors>();
    auto pOutputTensors = std::make_shared<armnn::OutputTensors>();

    // map the memory pool into shared pointers
    // use a shared memory pools vector on the heap, as it is passed to the request thread
    auto pMemPools = std::make_shared<std::vector<android::nn::RunTimePoolInfo>>();

    if (!setRunTimePoolInfosFromHidlMemories(pMemPools.get(), request.pools))
    {
        NotifyCallbackAndCheck(callback, ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel_1_2::execute");
        return ErrorStatus::GENERAL_FAILURE;
    }

    // add the inputs and outputs with their data
    try
    {
        pInputTensors->reserve(request.inputs.size());
        for (unsigned int i = 0; i < request.inputs.size(); i++)
        {
            const auto& inputArg = request.inputs[i];

            const armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
            const armnn::Tensor inputTensor = GetTensorForRequestArgument(inputArg, inputTensorInfo, *pMemPools);

            if (inputTensor.GetMemoryArea() == nullptr)
            {
                ALOGE("Cannot execute request. Error converting request input %u to tensor", i);
                NotifyCallbackAndCheck(callback, ErrorStatus::GENERAL_FAILURE,
                                       "ArmnnPreparedModel_1_2::execute");
                return ErrorStatus::GENERAL_FAILURE;
            }

            pInputTensors->emplace_back(i, inputTensor);
        }

        pOutputTensors->reserve(request.outputs.size());
        for (unsigned int i = 0; i < request.outputs.size(); i++)
        {
            const auto& outputArg = request.outputs[i];

            const armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
            const armnn::Tensor outputTensor = GetTensorForRequestArgument(outputArg, outputTensorInfo, *pMemPools);

            if (outputTensor.GetMemoryArea() == nullptr)
            {
                ALOGE("Cannot execute request. Error converting request output %u to tensor", i);
                NotifyCallbackAndCheck(callback, ErrorStatus::GENERAL_FAILURE,
                                       "ArmnnPreparedModel_1_2::execute");
                return ErrorStatus::GENERAL_FAILURE;
            }

            pOutputTensors->emplace_back(i, outputTensor);
        }
    }
    catch (armnn::Exception& e)
    {
        ALOGW("armnn::Exception caught while preparing for EnqueueWorkload: %s", e.what());
        NotifyCallbackAndCheck(callback, ErrorStatus::GENERAL_FAILURE, "ArmnnPreparedModel_1_2::execute");
        return ErrorStatus::GENERAL_FAILURE;
    }

    ALOGV("ArmnnPreparedModel_1_2::execute(...) before PostMsg");
    // post the request for asynchronous execution
    m_RequestThread.PostMsg(this, pMemPools, pInputTensors, pOutputTensors, callback);
    ALOGV("ArmnnPreparedModel_1_2::execute(...) after PostMsg");

    return ErrorStatus::NONE;
}


#ifdef ARMNN_ANDROID_NN_V1_2
template class ArmnnPreparedModel_1_2<hal_1_2::HalPolicy>;
#endif

} // namespace armnn_driver