//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#define LOG_TAG "ArmnnDriver"

#include "ArmnnPreparedModel_1_2.hpp"
#include "Utils.hpp"

#include <boost/format.hpp>
#include <log/log.h>
#include <OperationsUtils.h>
#include <ExecutionBurstServer.h>
#include <ValidateHal.h>

#include <cassert>
#include <cinttypes>

using namespace android;
using namespace android::hardware;

namespace {

static const Timing g_NoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
using namespace armnn_driver;
using TimePoint = std::chrono::steady_clock::time_point;

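// Timing helpers: Now() samples the steady clock, and MicrosecondsDuration() converts the
// interval between two time points into whole microseconds for the NNAPI Timing fields.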
TimePoint Now()
{
    return std::chrono::steady_clock::now();
}

unsigned long MicrosecondsDuration(TimePoint endPoint, TimePoint startPoint)
{
    return static_cast<unsigned long>(std::chrono::duration_cast<std::chrono::microseconds>(
                                      endPoint - startPoint).count());
}

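// Two NotifyCallbackAndCheck overloads: the V1_0 execution callback can only report an error
// status, so the output shapes and timing arguments are accepted but ignored, while the V1_2
// overload forwards them via notify_1_2. Both log, rather than abort, if the HIDL call fails.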
void NotifyCallbackAndCheck(const ::android::sp<V1_0::IExecutionCallback>& callback,
                            ErrorStatus errorStatus,
                            std::vector<OutputShape>,
                            const Timing,
                            std::string callingFunction)
{
    Return<void> returned = callback->notify(errorStatus);
    // This check is required; if the callback fails and is not checked, it will bring down the service
    if (!returned.isOk())
    {
        ALOGE("ArmnnDriver::%s: hidl callback failed to return properly: %s",
              callingFunction.c_str(), returned.description().c_str());
    }
}

void NotifyCallbackAndCheck(const ::android::sp<V1_2::IExecutionCallback>& callback,
                            ErrorStatus errorStatus,
                            std::vector<OutputShape> outputShapes,
                            const Timing timing,
                            std::string callingFunction)
{
    Return<void> returned = callback->notify_1_2(errorStatus, outputShapes, timing);
    // This check is required; if the callback fails and is not checked, it will bring down the service
    if (!returned.isOk())
    {
        ALOGE("ArmnnDriver::%s: hidl callback failed to return properly: %s",
              callingFunction.c_str(), returned.description().c_str());
    }
}

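// Checks that any dimensions supplied in the request argument match the dimensions the loaded
// network expects for the corresponding tensor.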
bool ValidateRequestArgument(const RequestArgument& requestArg, const armnn::TensorInfo& tensorInfo)
{
    if (requestArg.dimensions.size() != 0)
    {
        if (requestArg.dimensions.size() != tensorInfo.GetNumDimensions())
        {
            ALOGE("Mismatched dimensions (request argument: %zu, expected: %u)",
                  requestArg.dimensions.size(), tensorInfo.GetNumDimensions());
            return false;
        }

        for (unsigned int d = 0; d < tensorInfo.GetNumDimensions(); ++d)
        {
            if (requestArg.dimensions[d] != tensorInfo.GetShape()[d])
            {
                ALOGE("Mismatched size for dimension %u (request argument: %u, expected %u)",
                      d, requestArg.dimensions[d], tensorInfo.GetShape()[d]);
                return false;
            }
        }
    }

    return true;
}

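// Builds an armnn::Tensor that aliases the request memory pool backing the given request
// argument; returns an empty Tensor if validation against the expected TensorInfo fails.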
armnn::Tensor GetTensorForRequestArgument(const RequestArgument& requestArg,
                                          const armnn::TensorInfo& tensorInfo,
                                          const std::vector<::android::nn::RunTimePoolInfo>& requestPools)
{
    if (!ValidateRequestArgument(requestArg, tensorInfo))
    {
        return armnn::Tensor();
    }

    return armnn::Tensor(tensorInfo, GetMemoryFromPool(requestArg.location, requestPools));
}

inline std::string BuildTensorName(const char* tensorNamePrefix, std::size_t index)
{
    return tensorNamePrefix + std::to_string(index);
}

} // anonymous namespace

using namespace android::hardware;

namespace armnn_driver
{

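// Definition of the static request thread shared by all ArmnnPreparedModel_1_2 instances of a
// given HAL version; it services the asynchronous execution requests posted by Execute().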
template<typename HalVersion>
RequestThread<ArmnnPreparedModel_1_2, HalVersion, ArmnnCallback_1_2>
        ArmnnPreparedModel_1_2<HalVersion>::m_RequestThread;

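// Dumps the given input or output tensor bindings to a per-request file, but only when a dump
// directory has been configured.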
template<typename HalVersion>
template<typename TensorBindingCollection>
void ArmnnPreparedModel_1_2<HalVersion>::DumpTensorsIfRequired(char const* tensorNamePrefix,
                                                               const TensorBindingCollection& tensorBindings)
{
    if (!m_RequestInputsAndOutputsDumpDir.empty())
    {
        const std::string requestName = boost::str(boost::format("%1%_%2%.dump") % m_NetworkId % m_RequestCount);
        for (std::size_t i = 0u; i < tensorBindings.size(); ++i)
        {
            DumpTensor(m_RequestInputsAndOutputsDumpDir,
                       requestName,
                       BuildTensorName(tensorNamePrefix, i),
                       tensorBindings[i].second);
        }
    }
}

template<typename HalVersion>
ArmnnPreparedModel_1_2<HalVersion>::ArmnnPreparedModel_1_2(armnn::NetworkId networkId,
                                                           armnn::IRuntime* runtime,
                                                           const V1_2::Model& model,
                                                           const std::string& requestInputsAndOutputsDumpDir,
                                                           const bool gpuProfilingEnabled)
    : m_NetworkId(networkId)
    , m_Runtime(runtime)
    , m_Model(model)
    , m_RequestCount(0)
    , m_RequestInputsAndOutputsDumpDir(requestInputsAndOutputsDumpDir)
    , m_GpuProfilingEnabled(gpuProfilingEnabled)
{
    // Enable profiling if required.
    m_Runtime->GetProfiler(m_NetworkId)->EnableProfiling(m_GpuProfilingEnabled);
}

template<typename HalVersion>
ArmnnPreparedModel_1_2<HalVersion>::~ArmnnPreparedModel_1_2()
{
    // Get a hold of the profiler used by this model.
    std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkId);

    // Unload the network associated with this model.
    m_Runtime->UnloadNetwork(m_NetworkId);

    // Dump the profiling info to a file if required.
    DumpJsonProfilingIfRequired(m_GpuProfilingEnabled, m_RequestInputsAndOutputsDumpDir, m_NetworkId, profiler.get());
}

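// execute() (V1_0) and execute_1_2() (V1_2) both wrap the supplied HIDL callback in a lambda and
// hand the request to the common asynchronous Execute() path; the V1_0 entry point never
// requests timing measurement.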
template<typename HalVersion>
Return <ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::execute(const Request& request,
        const ::android::sp<V1_0::IExecutionCallback>& callback)
{
    if (callback.get() == nullptr)
    {
        ALOGE("ArmnnPreparedModel_1_2::execute invalid callback passed");
        return ErrorStatus::INVALID_ARGUMENT;
    }

    auto cb = [callback](ErrorStatus errorStatus,
                         std::vector<OutputShape> outputShapes,
                         const Timing& timing,
                         std::string callingFunction)
    {
        NotifyCallbackAndCheck(callback, errorStatus, outputShapes, timing, callingFunction);
    };

    return Execute(request, MeasureTiming::NO, cb);
}

template<typename HalVersion>
Return <ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::execute_1_2(const Request& request,
                                                                     MeasureTiming measureTiming,
                                                                     const sp<V1_2::IExecutionCallback>& callback)
{
    if (callback.get() == nullptr)
    {
        ALOGE("ArmnnPreparedModel_1_2::execute_1_2 invalid callback passed");
        return ErrorStatus::INVALID_ARGUMENT;
    }

    auto cb = [callback](ErrorStatus errorStatus,
                         std::vector<OutputShape> outputShapes,
                         const Timing& timing,
                         std::string callingFunction)
    {
        NotifyCallbackAndCheck(callback, errorStatus, outputShapes, timing, callingFunction);
    };

    return Execute(request, measureTiming, cb);
}

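// Synchronous execution entry point (V1_2): validates the request, maps the request memory
// pools, builds ArmNN input/output tensors that alias those pools, runs EnqueueWorkload on the
// calling thread and reports the output shapes and (optionally) timing through the callback.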
template<typename HalVersion>
Return<void> ArmnnPreparedModel_1_2<HalVersion>::executeSynchronously(const Request& request,
                                                                      MeasureTiming measureTiming,
                                                                      executeSynchronously_cb cb)
{
    ALOGV("ArmnnPreparedModel_1_2::executeSynchronously(): %s", GetModelSummary(m_Model).c_str());
    m_RequestCount++;

    if (cb == nullptr)
    {
        ALOGE("ArmnnPreparedModel_1_2::executeSynchronously invalid callback passed");
        return Void();
    }

    TimePoint driverStart, driverEnd, deviceStart, deviceEnd;

    if (measureTiming == MeasureTiming::YES)
    {
        driverStart = Now();
    }

    if (!android::nn::validateRequest(request, m_Model))
    {
        ALOGE("ArmnnPreparedModel_1_2::executeSynchronously request failed validation against the model");
        cb(ErrorStatus::INVALID_ARGUMENT, {}, g_NoTiming);
        return Void();
    }

    // allocate the tensors on the heap, as they are passed to the request thread
    auto pInputTensors = std::make_shared<armnn::InputTensors>();
    auto pOutputTensors = std::make_shared<armnn::OutputTensors>();

    // map the memory pool into shared pointers
    // use a shared memory pools vector on the heap, as it is passed to the request thread
    auto pMemPools = std::make_shared<std::vector<android::nn::RunTimePoolInfo>>();

    if (!setRunTimePoolInfosFromHidlMemories(pMemPools.get(), request.pools))
    {
        cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
        return Void();
    }
    std::vector<OutputShape> outputShapes(request.outputs.size());

    try
    {
        pInputTensors->reserve(request.inputs.size());
        for (unsigned int i = 0; i < request.inputs.size(); i++)
        {
            const auto& inputArg = request.inputs[i];

            const armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
            const armnn::Tensor inputTensor = GetTensorForRequestArgument(inputArg, inputTensorInfo, *pMemPools);

            if (inputTensor.GetMemoryArea() == nullptr)
            {
                ALOGE("Cannot execute request. Error converting request input %u to tensor", i);
                cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
                return Void();
            }

            pInputTensors->emplace_back(i, inputTensor);
        }
        pOutputTensors->reserve(request.outputs.size());

        for (unsigned int i = 0; i < request.outputs.size(); i++)
        {
            const auto& outputArg = request.outputs[i];

            const armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
            const armnn::Tensor outputTensor = GetTensorForRequestArgument(outputArg, outputTensorInfo, *pMemPools);

            if (outputTensor.GetMemoryArea() == nullptr)
            {
                ALOGE("Cannot execute request. Error converting request output %u to tensor", i);
                cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
                return Void();
            }
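            // Record the shape this output will have and whether the client-supplied buffer is
            // large enough to hold it; an undersized buffer aborts the execution below.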
            const size_t outputSize = outputTensorInfo.GetNumBytes();
            const size_t bufferSize = pMemPools->at(outputArg.location.poolIndex).getHidlMemory().size();

            hidl_vec<uint32_t> dimensions;

            armnn::TensorShape tensorShape = outputTensorInfo.GetShape();
            const unsigned int numDims = tensorShape.GetNumDimensions();
            dimensions.resize(numDims);

            for (unsigned int outputIdx = 0u; outputIdx < numDims; ++outputIdx)
            {
                dimensions[outputIdx] = tensorShape[outputIdx];
            }
            outputShapes[i].dimensions = dimensions;
            outputShapes[i].isSufficient = bufferSize >= outputSize;

            if (bufferSize < outputSize)
            {
                ALOGW("ArmnnPreparedModel_1_2::executeSynchronously failed: insufficient output buffer size");
                cb(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE, outputShapes, g_NoTiming);
                return Void();
            }

            pOutputTensors->emplace_back(i, outputTensor);
        }
    }
    catch (std::exception& e)
    {
        ALOGW("Exception caught while preparing for EnqueueWorkload: %s", e.what());
        cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
        return Void();
    }
    ALOGV("ArmnnPreparedModel_1_2::executeSynchronously() before Execution");

    DumpTensorsIfRequired("Input", *pInputTensors);
    // run it
    try
    {
        if (measureTiming == MeasureTiming::YES)
        {
            deviceStart = Now();
        }

        armnn::Status status = m_Runtime->EnqueueWorkload(m_NetworkId, *pInputTensors, *pOutputTensors);

        if (measureTiming == MeasureTiming::YES)
        {
            deviceEnd = Now();
        }

        if (status != armnn::Status::Success)
        {
            ALOGW("EnqueueWorkload failed");
            cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
            return Void();
        }
    }
    catch (std::exception& e)
    {
        ALOGW("Exception caught from EnqueueWorkload: %s", e.what());
        cb(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming);
        return Void();
    }

    DumpTensorsIfRequired("Output", *pOutputTensors);

    // Commit output buffers.
    // Note that we update *all* pools, even if they aren't actually used as outputs -
    // this is simpler and is what the CpuExecutor does.
    for (android::nn::RunTimePoolInfo& pool : *pMemPools)
    {
        pool.update();
    }
    ALOGV("ArmnnPreparedModel_1_2::executeSynchronously() after Execution");

    if (measureTiming == MeasureTiming::YES)
    {
        driverEnd = Now();
        Timing timing;
        timing.timeOnDevice = MicrosecondsDuration(deviceEnd, deviceStart);
        timing.timeInDriver = MicrosecondsDuration(driverEnd, driverStart);
        ALOGV("ArmnnPreparedModel_1_2::executeSynchronously timing Device = %" PRIu64 " Driver = %" PRIu64,
              timing.timeOnDevice, timing.timeInDriver);
        cb(ErrorStatus::NONE, outputShapes, timing);
    }
    else
    {
        cb(ErrorStatus::NONE, outputShapes, g_NoTiming);
    }
    return Void();
}

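// Burst execution support: ExecutionBurstServer calls back into this executor, which caches the
// hidl_memory associated with each burst slot so repeated executions do not need to resend their
// pools, and forwards every burst request to executeSynchronously() on the owning prepared model.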
class ArmnnBurstExecutorWithCache : public ExecutionBurstServer::IBurstExecutorWithCache {
public:
    ArmnnBurstExecutorWithCache(IPreparedModel* preparedModel)
        : m_PreparedModel(preparedModel)
    {}

    bool isCacheEntryPresent(int32_t slot) const override
    {
        const auto it = m_MemoryCache.find(slot);
        return (it != m_MemoryCache.end()) && it->second.valid();
    }

    void addCacheEntry(const hidl_memory& memory, int32_t slot) override
    {
        m_MemoryCache[slot] = memory;
    }

    void removeCacheEntry(int32_t slot) override
    {
        m_MemoryCache.erase(slot);
    }

    std::tuple<ErrorStatus, hidl_vec<OutputShape>, Timing> execute(
            const Request& request, const std::vector<int32_t>& slots,
            MeasureTiming measure) override
    {
        ALOGV("ArmnnPreparedModel_1_2::BurstExecutorWithCache::execute");
        hidl_vec<hidl_memory> pools(slots.size());

        std::transform(slots.begin(), slots.end(), pools.begin(), [this](int32_t slot)
        {
            return m_MemoryCache[slot];
        });

        Request fullRequest = request;
        fullRequest.pools = std::move(pools);

        // Setup Callback
        ErrorStatus returnedStatus = ErrorStatus::GENERAL_FAILURE;
        hidl_vec<OutputShape> returnedOutputShapes;
        Timing returnedTiming;
        auto cb = [&returnedStatus, &returnedOutputShapes, &returnedTiming](ErrorStatus status,
                                                                            const hidl_vec<OutputShape>& outputShapes,
                                                                            const Timing& timing)
        {
            returnedStatus = status;
            returnedOutputShapes = outputShapes;
            returnedTiming = timing;
        };

        // Execute
        ALOGV("ArmnnPreparedModel_1_2::BurstExecutorWithCache executing");
        const Return<void> ret = m_PreparedModel->executeSynchronously(fullRequest, measure, cb);

        if (!ret.isOk() || returnedStatus != ErrorStatus::NONE)
        {
            ALOGE("ArmnnPreparedModel_1_2::BurstExecutorWithCache::error executing");
        }
        return std::make_tuple(returnedStatus, std::move(returnedOutputShapes), returnedTiming);
    }

private:
    IPreparedModel* const m_PreparedModel;
    std::map<int, hidl_memory> m_MemoryCache;
};

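// Creates the FMQ-based burst context for this prepared model by wiring the request and result
// channels to an ArmnnBurstExecutorWithCache instance via ExecutionBurstServer::create().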
template<typename HalVersion>
Return<void> ArmnnPreparedModel_1_2<HalVersion>::configureExecutionBurst(
        const sp<V1_2::IBurstCallback>& callback,
        const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
        const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
        V1_2::IPreparedModel::configureExecutionBurst_cb cb)
{
    ALOGV("ArmnnPreparedModel_1_2::configureExecutionBurst");
    const std::shared_ptr<ArmnnBurstExecutorWithCache> executorWithCache =
            std::make_shared<ArmnnBurstExecutorWithCache>(this);
    const sp<V1_2::IBurstContext> burst = ExecutionBurstServer::create(callback,
                                                                       requestChannel,
                                                                       resultChannel,
                                                                       executorWithCache);

    if (burst == nullptr)
    {
        cb(ErrorStatus::GENERAL_FAILURE, {});
    }
    else
    {
        cb(ErrorStatus::NONE, burst);
    }
    return Void();
}

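// Invoked for each message posted to the request thread: runs the network for one asynchronous
// request and reports output shapes and (optionally) timing through the stored ArmnnCallback_1_2.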
template<typename HalVersion>
void ArmnnPreparedModel_1_2<HalVersion>::ExecuteGraph(
        std::shared_ptr<std::vector<::android::nn::RunTimePoolInfo>>& pMemPools,
        std::shared_ptr<armnn::InputTensors>& pInputTensors,
        std::shared_ptr<armnn::OutputTensors>& pOutputTensors,
        ArmnnCallback_1_2 cb)
{
    ALOGV("ArmnnPreparedModel_1_2::ExecuteGraph(...)");

    TimePoint driverEnd, deviceStart, deviceEnd;

    DumpTensorsIfRequired("Input", *pInputTensors);

    std::vector<std::pair<int, armnn::Tensor> > outputTensors = *pOutputTensors.get();
    std::vector<OutputShape> outputShapes(outputTensors.size());

    for (unsigned int i = 0; i < outputTensors.size(); i++)
    {
        std::pair<int, armnn::Tensor> outputTensorPair = outputTensors[i];
        const armnn::Tensor outputTensor = outputTensorPair.second;
        const armnn::TensorInfo outputTensorInfo = outputTensor.GetInfo();

        hidl_vec<uint32_t> dimensions;

        armnn::TensorShape tensorShape = outputTensorInfo.GetShape();
        const unsigned int numDims = tensorShape.GetNumDimensions();
        dimensions.resize(numDims);

        for (unsigned int outputIdx = 0u; outputIdx < numDims; ++outputIdx)
        {
            dimensions[outputIdx] = tensorShape[outputIdx];
        }
        outputShapes[i].dimensions = dimensions;
        outputShapes[i].isSufficient = true;
    }

    // run it
    try
    {
        if (cb.measureTiming == MeasureTiming::YES)
        {
            deviceStart = Now();
        }

        armnn::Status status = m_Runtime->EnqueueWorkload(m_NetworkId, *pInputTensors, *pOutputTensors);

        if (cb.measureTiming == MeasureTiming::YES)
        {
            deviceEnd = Now();
        }
        if (status != armnn::Status::Success)
        {
            ALOGW("EnqueueWorkload failed");
            cb.callback(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming,
                        "ArmnnPreparedModel_1_2::ExecuteGraph");
            return;
        }
    }
    catch (std::exception& e)
    {
        ALOGW("Exception caught from EnqueueWorkload: %s", e.what());
        cb.callback(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming,
                    "ArmnnPreparedModel_1_2::ExecuteGraph");
        return;
    }

    DumpTensorsIfRequired("Output", *pOutputTensors);

    // Commit output buffers.
    // Note that we update *all* pools, even if they aren't actually used as outputs -
    // this is simpler and is what the CpuExecutor does.
    for (android::nn::RunTimePoolInfo& pool : *pMemPools)
    {
        pool.update();
    }

    if (cb.measureTiming == MeasureTiming::YES)
    {
        driverEnd = Now();
        Timing timing;
        timing.timeOnDevice = MicrosecondsDuration(deviceEnd, deviceStart);
        timing.timeInDriver = MicrosecondsDuration(driverEnd, cb.driverStart);
        cb.callback(ErrorStatus::NONE, outputShapes, timing, "ExecuteGraph");
    }
    else
    {
        cb.callback(ErrorStatus::NONE, outputShapes, g_NoTiming, "ExecuteGraph");
    }
}

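// Runs the network once with zero-initialised dummy input and output buffers so that callers can
// verify that the loaded network is actually executable on the selected backend.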
template<typename HalVersion>
bool ArmnnPreparedModel_1_2<HalVersion>::ExecuteWithDummyInputs()
{
    std::vector<std::vector<char>> storage;
    armnn::InputTensors inputTensors;
    for (unsigned int i = 0; i < m_Model.inputIndexes.size(); i++)
    {
        const armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
        storage.emplace_back(inputTensorInfo.GetNumBytes());
        const armnn::ConstTensor inputTensor(inputTensorInfo, storage.back().data());

        inputTensors.emplace_back(i, inputTensor);
    }

    armnn::OutputTensors outputTensors;
    for (unsigned int i = 0; i < m_Model.outputIndexes.size(); i++)
    {
        const armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
        storage.emplace_back(outputTensorInfo.GetNumBytes());
        const armnn::Tensor outputTensor(outputTensorInfo, storage.back().data());

        outputTensors.emplace_back(i, outputTensor);
    }

    try
    {
        armnn::Status status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors);
        if (status != armnn::Status::Success)
        {
            ALOGW("ExecuteWithDummyInputs: EnqueueWorkload failed");
            return false;
        }
    }
    catch (std::exception& e)
    {
        ALOGW("ExecuteWithDummyInputs: Exception caught from EnqueueWorkload: %s", e.what());
        return false;
    }
    return true;
}

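// Common asynchronous execution path shared by execute() and execute_1_2(): validates the
// request, maps the memory pools, builds the input/output tensors and their reported output
// shapes, then posts the work to the request thread, which later invokes ExecuteGraph().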
template<typename HalVersion>
Return <ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::Execute(const Request& request,
                                                                 MeasureTiming measureTiming,
                                                                 armnnExecuteCallback_1_2 callback)
{
    TimePoint driverStart;

    if (measureTiming == MeasureTiming::YES)
    {
        driverStart = Now();
    }

    ALOGV("ArmnnPreparedModel_1_2::execute(): %s", GetModelSummary(m_Model).c_str());
    m_RequestCount++;

    if (!android::nn::validateRequest(request, m_Model))
    {
        callback(ErrorStatus::INVALID_ARGUMENT, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
        return ErrorStatus::INVALID_ARGUMENT;
    }

    if (!m_RequestInputsAndOutputsDumpDir.empty())
    {
        ALOGD("Dumping inputs and outputs for request %" PRIuPTR, reinterpret_cast<std::uintptr_t>(&callback));
    }

    // allocate the tensors on the heap, as they are passed to the request thread
    auto pInputTensors = std::make_shared<armnn::InputTensors>();
    auto pOutputTensors = std::make_shared<armnn::OutputTensors>();

    // map the memory pool into shared pointers
    // use a shared memory pools vector on the heap, as it is passed to the request thread
    auto pMemPools = std::make_shared<std::vector<android::nn::RunTimePoolInfo>>();

    if (!setRunTimePoolInfosFromHidlMemories(pMemPools.get(), request.pools))
    {
        callback(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
        return ErrorStatus::GENERAL_FAILURE;
    }

    // add the inputs and outputs with their data
    try
    {
        pInputTensors->reserve(request.inputs.size());
        for (unsigned int i = 0; i < request.inputs.size(); i++)
        {
            const auto& inputArg = request.inputs[i];

            const armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
            const armnn::Tensor inputTensor = GetTensorForRequestArgument(inputArg, inputTensorInfo, *pMemPools);

            if (inputTensor.GetMemoryArea() == nullptr)
            {
                ALOGE("Cannot execute request. Error converting request input %u to tensor", i);
                callback(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
                return ErrorStatus::GENERAL_FAILURE;
            }

            pInputTensors->emplace_back(i, inputTensor);
        }

        pOutputTensors->reserve(request.outputs.size());
        std::vector<OutputShape> outputShapes(request.outputs.size());

        for (unsigned int i = 0; i < request.outputs.size(); i++)
        {
            const auto& outputArg = request.outputs[i];

            const armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
            const armnn::Tensor outputTensor = GetTensorForRequestArgument(outputArg, outputTensorInfo, *pMemPools);
            if (outputTensor.GetMemoryArea() == nullptr)
            {
                ALOGE("Cannot execute request. Error converting request output %u to tensor", i);
                callback(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
                return ErrorStatus::GENERAL_FAILURE;
            }

            const size_t outputSize = outputTensorInfo.GetNumBytes();
            const size_t bufferSize = pMemPools->at(outputArg.location.poolIndex).getHidlMemory().size();
            pOutputTensors->emplace_back(i, outputTensor);

            hidl_vec<uint32_t> dimensions;

            armnn::TensorShape tensorShape = outputTensorInfo.GetShape();
            const unsigned int numDims = tensorShape.GetNumDimensions();
            dimensions.resize(numDims);

            for (unsigned int outputIdx = 0u; outputIdx < numDims; ++outputIdx)
            {
                dimensions[outputIdx] = tensorShape[outputIdx];
            }
            outputShapes[i].dimensions = dimensions;
            outputShapes[i].isSufficient = bufferSize >= outputSize;

            if (bufferSize < outputSize)
            {
                ALOGW("ArmnnPreparedModel_1_2::Execute failed: insufficient output buffer size");
                callback(ErrorStatus::OUTPUT_INSUFFICIENT_SIZE,
                         outputShapes,
                         g_NoTiming,
                         "ArmnnPreparedModel_1_2::Execute");
                return ErrorStatus::NONE;
            }
        }
    }
    catch (std::exception& e)
    {
        ALOGW("Exception caught while preparing for EnqueueWorkload: %s", e.what());
        callback(ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
        return ErrorStatus::GENERAL_FAILURE;
    }

    ALOGV("ArmnnPreparedModel_1_2::execute(...) before PostMsg");
    // post the request for asynchronous execution
    ArmnnCallback_1_2 armnnCb;
    armnnCb.callback = callback;
    armnnCb.measureTiming = measureTiming;
    armnnCb.driverStart = driverStart;
    m_RequestThread.PostMsg(this, pMemPools, pInputTensors, pOutputTensors, armnnCb);
    ALOGV("ArmnnPreparedModel_1_2::execute(...) after PostMsg");
    return ErrorStatus::NONE;
}

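// Explicit template instantiation: the member definitions live in this translation unit, so the
// class must be instantiated here for the HAL 1.2 policy when building the 1.2 driver.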
#ifdef ARMNN_ANDROID_NN_V1_2
template class ArmnnPreparedModel_1_2<hal_1_2::HalPolicy>;
#endif

} // namespace armnn_driver