blob: c54ee354db1b822ee13d94f5714740ee934799ab [file] [log] [blame]
Mike Kellyb5fdf382019-06-11 16:35:25 +01001//
Mike Kellye2d611e2021-10-14 12:35:58 +01002// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
Mike Kellyb5fdf382019-06-11 16:35:25 +01003// SPDX-License-Identifier: MIT
4//
5
6#define LOG_TAG "ArmnnDriver"
7
8#include "ArmnnPreparedModel_1_2.hpp"
Finn Williamsd8fb5402021-05-19 20:52:00 +01009
Mike Kellyb5fdf382019-06-11 16:35:25 +010010#include "Utils.hpp"
11
Narumol Prangnawaratd1a947f2022-02-07 13:12:24 +000012#include <armnn/Types.hpp>
13
Mike Kellyb5fdf382019-06-11 16:35:25 +010014#include <log/log.h>
15#include <OperationsUtils.h>
16#include <ExecutionBurstServer.h>
17#include <ValidateHal.h>
18
Colm Donelan0fc16c62022-03-16 11:54:13 +000019#include <chrono>
Mike Kellyb5fdf382019-06-11 16:35:25 +010020#include <cinttypes>
21
Sadik Armagan188675f2021-02-12 17:16:42 +000022#ifdef ARMNN_ANDROID_S
23#include <LegacyUtils.h>
24#endif
25
Mike Kellyb5fdf382019-06-11 16:35:25 +010026using namespace android;
27using namespace android::hardware;
28
namespace {

// Sentinel timing value reported when timing was not measured (per the NNAPI
// HAL contract, UINT64_MAX means "no timing information available").
static const V1_2::Timing g_NoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
using namespace armnn_driver;
// Monotonic clock is used for all driver/device latency measurements so the
// reported durations cannot jump with wall-clock adjustments.
using TimePoint = std::chrono::steady_clock::time_point;

// Returns the current monotonic time.
TimePoint Now()
{
    return std::chrono::steady_clock::now();
}

// Returns the elapsed time between two time points in whole microseconds,
// truncated into an unsigned long (the unit the V1_2::Timing fields expect).
unsigned long MicrosecondsDuration(TimePoint endPoint, TimePoint startPoint)
{
    return static_cast<unsigned long>(std::chrono::duration_cast<std::chrono::microseconds>(
                                      endPoint - startPoint).count());
}
Mike Kellyb5fdf382019-06-11 16:35:25 +010045
Mike Kelly65c42dc2019-07-22 14:06:00 +010046void NotifyCallbackAndCheck(const ::android::sp<V1_0::IExecutionCallback>& callback,
Kevin Mayec1e5b82020-02-26 17:00:39 +000047 V1_0::ErrorStatus errorStatus,
Sadik Armagan188675f2021-02-12 17:16:42 +000048 std::vector<V1_2::OutputShape>,
49 const V1_2::Timing,
Mike Kellyb5fdf382019-06-11 16:35:25 +010050 std::string callingFunction)
51{
52 Return<void> returned = callback->notify(errorStatus);
53 // This check is required, if the callback fails and it isn't checked it will bring down the service
54 if (!returned.isOk())
55 {
56 ALOGE("ArmnnDriver::%s: hidl callback failed to return properly: %s",
57 callingFunction.c_str(), returned.description().c_str());
58 }
59}
60
Mike Kelly65c42dc2019-07-22 14:06:00 +010061void NotifyCallbackAndCheck(const ::android::sp<V1_2::IExecutionCallback>& callback,
Kevin Mayec1e5b82020-02-26 17:00:39 +000062 V1_0::ErrorStatus errorStatus,
Sadik Armagan188675f2021-02-12 17:16:42 +000063 std::vector<V1_2::OutputShape> outputShapes,
64 const V1_2::Timing timing,
Mike Kellyb5fdf382019-06-11 16:35:25 +010065 std::string callingFunction)
66{
Mike Kelly65c42dc2019-07-22 14:06:00 +010067 Return<void> returned = callback->notify_1_2(errorStatus, outputShapes, timing);
Mike Kellyb5fdf382019-06-11 16:35:25 +010068 // This check is required, if the callback fails and it isn't checked it will bring down the service
69 if (!returned.isOk())
70 {
71 ALOGE("ArmnnDriver::%s: hidl callback failed to return properly: %s",
72 callingFunction.c_str(), returned.description().c_str());
73 }
74}
75
Sadik Armagan188675f2021-02-12 17:16:42 +000076bool ValidateRequestArgument(const V1_0::RequestArgument& requestArg, const armnn::TensorInfo& tensorInfo)
Mike Kellyb5fdf382019-06-11 16:35:25 +010077{
78 if (requestArg.dimensions.size() != 0)
79 {
80 if (requestArg.dimensions.size() != tensorInfo.GetNumDimensions())
81 {
82 ALOGE("Mismatched dimensions (request argument: %zu, expected: %u)",
83 requestArg.dimensions.size(), tensorInfo.GetNumDimensions());
84 return false;
85 }
86
87 for (unsigned int d = 0; d < tensorInfo.GetNumDimensions(); ++d)
88 {
Finn Williamsa4983ce2020-07-23 12:55:12 +010089 if (requestArg.dimensions[d] != 0 && requestArg.dimensions[d] != tensorInfo.GetShape()[d])
Mike Kellyb5fdf382019-06-11 16:35:25 +010090 {
91 ALOGE("Mismatched size for dimension %d (request argument: %u, expected %u)",
92 d, requestArg.dimensions[d], tensorInfo.GetShape()[d]);
93 return false;
94 }
95 }
96 }
97
98 return true;
99}
100
Sadik Armagan188675f2021-02-12 17:16:42 +0000101armnn::Tensor GetTensorForRequestArgument(const V1_0::RequestArgument& requestArg,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100102 const armnn::TensorInfo& tensorInfo,
103 const std::vector<::android::nn::RunTimePoolInfo>& requestPools)
104{
105 if (!ValidateRequestArgument(requestArg, tensorInfo))
106 {
107 return armnn::Tensor();
108 }
109
110 return armnn::Tensor(tensorInfo, GetMemoryFromPool(requestArg.location, requestPools));
111}
112
// Builds a name for a dumped tensor by appending its index to a prefix,
// e.g. ("Input", 2) -> "Input2".
inline std::string BuildTensorName(const char* tensorNamePrefix, std::size_t index)
{
    std::string name(tensorNamePrefix);
    name += std::to_string(index);
    return name;
}
117
118} // anonymous namespace
119
120using namespace android::hardware;
121
122namespace armnn_driver
123{
124
// Definition of the per-HAL-version request thread shared by all prepared models:
// execute() posts work to it so HIDL binder threads are not blocked by inference.
template<typename HalVersion>
RequestThread<ArmnnPreparedModel_1_2, HalVersion, CallbackContext_1_2>
    ArmnnPreparedModel_1_2<HalVersion>::m_RequestThread;

// Definition of the shared thread pool used for asynchronous model execution.
// Lazily created by the first prepared model constructed with
// asyncModelExecutionEnabled; later models load their mem handles into it.
template<typename HalVersion>
std::unique_ptr<armnn::Threadpool> ArmnnPreparedModel_1_2<HalVersion>::m_Threadpool(nullptr);
131
// Dumps each tensor in the binding collection to a file when a dump directory
// was configured for this model; a no-op otherwise.
// The dump file name encodes the network id and the running request count so
// successive requests do not overwrite each other.
template<typename HalVersion>
template<typename TensorBindingCollection>
void ArmnnPreparedModel_1_2<HalVersion>::DumpTensorsIfRequired(char const* tensorNamePrefix,
                                                               const TensorBindingCollection& tensorBindings)
{
    if (!m_RequestInputsAndOutputsDumpDir.empty())
    {
        const std::string requestName = std::to_string(m_NetworkId) + "_" + std::to_string(m_RequestCount) + ".dump";
        for (std::size_t i = 0u; i < tensorBindings.size(); ++i)
        {
            DumpTensor(m_RequestInputsAndOutputsDumpDir,
                       requestName,
                       BuildTensorName(tensorNamePrefix, i),
                       tensorBindings[i].second);
        }
    }
}
149
// Constructs a prepared model for a network already loaded into the runtime.
// 'model' is retained for request validation and logging. When asynchronous
// execution is enabled, one working-memory handle per thread is created and
// either a new shared Threadpool is built (first model) or the handles are
// loaded into the existing one.
template<typename HalVersion>
ArmnnPreparedModel_1_2<HalVersion>::ArmnnPreparedModel_1_2(armnn::NetworkId networkId,
                                                           armnn::IRuntime* runtime,
                                                           const V1_2::Model& model,
                                                           const std::string& requestInputsAndOutputsDumpDir,
                                                           const bool gpuProfilingEnabled,
                                                           const bool asyncModelExecutionEnabled,
                                                           const unsigned int numberOfThreads,
                                                           const bool importEnabled,
                                                           const bool exportEnabled)
    : m_NetworkId(networkId)
    , m_Runtime(runtime)
    , m_Model(model)
    , m_RequestCount(0)
    , m_RequestInputsAndOutputsDumpDir(requestInputsAndOutputsDumpDir)
    , m_GpuProfilingEnabled(gpuProfilingEnabled)
    , m_AsyncModelExecutionEnabled(asyncModelExecutionEnabled)
    , m_EnableImport(importEnabled)
    , m_EnableExport(exportEnabled)
    , m_PreparedFromCache(false)
{
    // Enable profiling if required.
    m_Runtime->GetProfiler(m_NetworkId)->EnableProfiling(m_GpuProfilingEnabled);

    if (m_AsyncModelExecutionEnabled)
    {
        std::vector<std::shared_ptr<armnn::IWorkingMemHandle>> memHandles;
        for (unsigned int i=0; i < numberOfThreads; ++i)
        {
            memHandles.emplace_back(m_Runtime->CreateWorkingMemHandle(networkId));
        }

        // The thread pool is a class-static shared across all prepared models;
        // only the first async model actually constructs it.
        if (!m_Threadpool)
        {
            m_Threadpool = std::make_unique<armnn::Threadpool>(numberOfThreads, runtime, memHandles);
        }
        else
        {
            m_Threadpool->LoadMemHandles(memHandles);
        }

        // Keep one handle for the synchronous Execute() path as well.
        m_WorkingMemHandle = memHandles.back();
    }
}
194
// Constructs a prepared model restored from the compilation cache. Unlike the
// other constructor there is no V1_2::Model available, so m_Model is left
// default-constructed and request validation against it is skipped when
// m_PreparedFromCache is true. Async thread-pool setup mirrors the other
// constructor.
template<typename HalVersion>
ArmnnPreparedModel_1_2<HalVersion>::ArmnnPreparedModel_1_2(armnn::NetworkId networkId,
                                                           armnn::IRuntime* runtime,
                                                           const std::string& requestInputsAndOutputsDumpDir,
                                                           const bool gpuProfilingEnabled,
                                                           const bool asyncModelExecutionEnabled,
                                                           const unsigned int numberOfThreads,
                                                           const bool importEnabled,
                                                           const bool exportEnabled,
                                                           const bool preparedFromCache)
    : m_NetworkId(networkId)
    , m_Runtime(runtime)
    , m_RequestCount(0)
    , m_RequestInputsAndOutputsDumpDir(requestInputsAndOutputsDumpDir)
    , m_GpuProfilingEnabled(gpuProfilingEnabled)
    , m_AsyncModelExecutionEnabled(asyncModelExecutionEnabled)
    , m_EnableImport(importEnabled)
    , m_EnableExport(exportEnabled)
    , m_PreparedFromCache(preparedFromCache)
{
    // Enable profiling if required.
    m_Runtime->GetProfiler(m_NetworkId)->EnableProfiling(m_GpuProfilingEnabled);

    if (m_AsyncModelExecutionEnabled)
    {
        std::vector<std::shared_ptr<armnn::IWorkingMemHandle>> memHandles;
        for (unsigned int i=0; i < numberOfThreads; ++i)
        {
            memHandles.emplace_back(m_Runtime->CreateWorkingMemHandle(networkId));
        }

        // The thread pool is a class-static shared across all prepared models;
        // only the first async model actually constructs it.
        if (!m_Threadpool)
        {
            m_Threadpool = std::make_unique<armnn::Threadpool>(numberOfThreads, runtime, memHandles);
        }
        else
        {
            m_Threadpool->LoadMemHandles(memHandles);
        }

        // Keep one handle for the synchronous Execute() path as well.
        m_WorkingMemHandle = memHandles.back();
    }
}
238
// Tears down the prepared model: dumps GPU profiling data (if enabled), unloads
// the network from the runtime, and releases this network's working-memory
// handles from the shared thread pool when async execution was in use.
template<typename HalVersion>
ArmnnPreparedModel_1_2<HalVersion>::~ArmnnPreparedModel_1_2()
{
    // Get a hold of the profiler used by this model.
    std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkId);
    if (profiler && m_GpuProfilingEnabled)
    {
        // Dump the profiling info to a file if required.
        DumpJsonProfilingIfRequired(m_GpuProfilingEnabled, m_RequestInputsAndOutputsDumpDir, m_NetworkId,
                                    profiler.get());
    }

    // Unload the network associated with this model.
    m_Runtime->UnloadNetwork(m_NetworkId);

    // Unload the network memhandles from the threadpool
    // (m_Threadpool is guaranteed non-null here: it is created in the
    // constructor whenever m_AsyncModelExecutionEnabled is set).
    if (m_AsyncModelExecutionEnabled)
    {
        m_Threadpool->UnloadMemHandles(m_NetworkId);
    }
}
260
261template<typename HalVersion>
Kevin Mayec1e5b82020-02-26 17:00:39 +0000262Return <V1_0::ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::execute(const V1_0::Request& request,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100263 const ::android::sp<V1_0::IExecutionCallback>& callback)
264{
Mike Kelly65c42dc2019-07-22 14:06:00 +0100265 if (callback.get() == nullptr)
266 {
267 ALOGE("ArmnnPreparedModel_1_2::execute invalid callback passed");
Kevin Mayec1e5b82020-02-26 17:00:39 +0000268 return V1_0::ErrorStatus::INVALID_ARGUMENT;
Mike Kelly65c42dc2019-07-22 14:06:00 +0100269 }
270
Kevin Mayec1e5b82020-02-26 17:00:39 +0000271 auto cb = [callback](V1_0::ErrorStatus errorStatus,
Sadik Armagan188675f2021-02-12 17:16:42 +0000272 std::vector<V1_2::OutputShape> outputShapes,
273 const V1_2::Timing& timing,
Mike Kelly65c42dc2019-07-22 14:06:00 +0100274 std::string callingFunction)
275 {
276 NotifyCallbackAndCheck(callback, errorStatus, outputShapes, timing, callingFunction);
277 };
278
Sadik Armagan188675f2021-02-12 17:16:42 +0000279 return Execute(request, V1_2::MeasureTiming::NO, cb);
Mike Kellyb5fdf382019-06-11 16:35:25 +0100280}
281
282template<typename HalVersion>
Kevin Mayec1e5b82020-02-26 17:00:39 +0000283Return <V1_0::ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::execute_1_2(
284 const V1_0::Request& request,
Sadik Armagan188675f2021-02-12 17:16:42 +0000285 V1_2::MeasureTiming measureTiming,
Kevin Mayec1e5b82020-02-26 17:00:39 +0000286 const sp<V1_2::IExecutionCallback>& callback)
Mike Kellyb5fdf382019-06-11 16:35:25 +0100287{
Mike Kelly65c42dc2019-07-22 14:06:00 +0100288 if (callback.get() == nullptr)
289 {
290 ALOGE("ArmnnPreparedModel_1_2::execute_1_2 invalid callback passed");
Kevin Mayec1e5b82020-02-26 17:00:39 +0000291 return V1_0::ErrorStatus::INVALID_ARGUMENT;
Mike Kelly65c42dc2019-07-22 14:06:00 +0100292 }
293
Kevin Mayec1e5b82020-02-26 17:00:39 +0000294 auto cb = [callback](V1_0::ErrorStatus errorStatus,
Sadik Armagan188675f2021-02-12 17:16:42 +0000295 std::vector<V1_2::OutputShape> outputShapes,
296 const V1_2::Timing& timing,
Mike Kelly65c42dc2019-07-22 14:06:00 +0100297 std::string callingFunction)
298 {
299 NotifyCallbackAndCheck(callback, errorStatus, outputShapes, timing, callingFunction);
300 };
301
302 return Execute(request, measureTiming, cb);
Mike Kellyb5fdf382019-06-11 16:35:25 +0100303}
304
// Binds every input of the request to an armnn tensor backed by the mapped
// request memory pools, appending (index, tensor) pairs to 'inputs'.
// Returns GENERAL_FAILURE as soon as any input cannot be resolved, NONE on success.
template<typename HalVersion>
Return<V1_0::ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::PrepareMemoryForInputs(
    armnn::InputTensors& inputs,
    const V1_0::Request& request,
    const std::vector<android::nn::RunTimePoolInfo>& memPools)
{
    inputs.reserve(request.inputs.size());
    for (unsigned int i = 0; i < request.inputs.size(); i++)
    {
        const auto& inputArg = request.inputs[i];

        armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
        // inputs (of type InputTensors) is composed of a vector of ConstTensors.
        // Therefore, set all TensorInfo isConstant parameters of input Tensors to true.
        inputTensorInfo.SetConstant();
        const armnn::Tensor inputTensor = GetTensorForRequestArgument(inputArg, inputTensorInfo, memPools);

        // A null memory area signals that validation or pool lookup failed.
        if (inputTensor.GetMemoryArea() == nullptr)
        {
            ALOGE("Cannot execute request. Error converting request input %u to tensor", i);
            return V1_0::ErrorStatus::GENERAL_FAILURE;
        }

        inputs.emplace_back(i, inputTensor);
    }

    return V1_0::ErrorStatus::NONE;
}
333
// Binds every output of the request to an armnn tensor backed by the mapped
// request memory pools and records the expected shape of each output in
// 'outputShapes'. Checks both the argument's declared length and the backing
// pool's size against the tensor's byte size, returning
// OUTPUT_INSUFFICIENT_SIZE when the caller's buffer is too small and
// GENERAL_FAILURE when an output cannot be resolved at all.
template<typename HalVersion>
Return<V1_0::ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::PrepareMemoryForOutputs(
    armnn::OutputTensors& outputs,
    std::vector<V1_2::OutputShape> &outputShapes,
    const V1_0::Request& request,
    const std::vector<android::nn::RunTimePoolInfo>& memPools)
{
    outputs.reserve(request.outputs.size());
    for (unsigned int i = 0; i < request.outputs.size(); i++)
    {
        const auto& outputArg = request.outputs[i];

        const armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
        const armnn::Tensor outputTensor = GetTensorForRequestArgument(outputArg, outputTensorInfo, memPools);
        if (outputTensor.GetMemoryArea() == nullptr)
        {
            ALOGE("Cannot execute request. Error converting request output %u to tensor", i);
            return V1_0::ErrorStatus::GENERAL_FAILURE;
        }

        const size_t outputSize = outputTensorInfo.GetNumBytes();

        // The argument's own declared length must hold the full tensor.
        if (outputArg.location.length < outputSize)
        {
            ALOGW("ArmnnPreparedModel_1_2::Execute failed: outputArg.location.length < outputSize");
            return V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
        }

        // The backing pool must be large enough too; the pool-size accessor
        // differs between the HIDL (pre-S) and canonical (Android S) memory types.
#if !defined(ARMNN_ANDROID_S)
        const size_t bufferSize = memPools.at(outputArg.location.poolIndex).getHidlMemory().size();
        if (bufferSize < outputSize)
        {
            ALOGW("ArmnnPreparedModel_1_2::Execute failed: bufferSize < outputSize");
            return V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
        }
#else
        const size_t bufferSize = memPools.at(outputArg.location.poolIndex).getSize();
        if (bufferSize < outputSize)
        {
            ALOGW("ArmnnPreparedModel_1_2::Execute failed bufferSize (%s) < outputSize (%s)",
                  std::to_string(bufferSize).c_str(), std::to_string(outputSize).c_str());
            outputShapes[i].isSufficient = false;
            return V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
        }
#endif
        outputs.emplace_back(i, outputTensor);
        outputShapes[i] = ComputeShape(outputTensorInfo);
    }

    return V1_0::ErrorStatus::NONE;
}
385
// Maps the request's memory pools and prepares both input and output tensor
// bindings. On any failure the supplied callback is invoked with the error
// before the status is returned, so the HIDL client is always notified exactly
// once. Returns NONE when inputs, outputs and pools are all ready.
template<typename HalVersion>
Return<V1_0::ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::PrepareMemoryForIO(
    armnn::InputTensors& inputs,
    armnn::OutputTensors& outputs,
    std::vector<android::nn::RunTimePoolInfo>& memPools,
    const V1_0::Request& request,
    CallbackAsync_1_2 callback)
{
    // Pool mapping differs between the HIDL (pre-S) and canonical (Android S) memory types.
#if !defined(ARMNN_ANDROID_S)
    if (!setRunTimePoolInfosFromHidlMemories(&memPools, request.pools))
#else
    if (!setRunTimePoolInfosFromCanonicalMemories(&memPools, uncheckedConvert(request.pools)))
#endif
    {
        callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
        return V1_0::ErrorStatus::GENERAL_FAILURE;
    }
    // add the inputs and outputs with their data
    try
    {
        if (PrepareMemoryForInputs(inputs, request, memPools) != V1_0::ErrorStatus::NONE)
        {
            callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
            return V1_0::ErrorStatus::GENERAL_FAILURE;
        }

        std::vector<V1_2::OutputShape> outputShapes(request.outputs.size());

        auto errorStatus = PrepareMemoryForOutputs(outputs, outputShapes, request, memPools);
        if (errorStatus != V1_0::ErrorStatus::NONE)
        {
            // Forward the (possibly partial) output shapes so the client can
            // resize its buffers on OUTPUT_INSUFFICIENT_SIZE.
            callback(errorStatus,
                     outputShapes,
                     g_NoTiming,
                     "ArmnnPreparedModel_1_2::Execute");
            return errorStatus;
        }
    }
    catch (armnn::Exception& e)
    {
        ALOGW("armnn::Exception caught while preparing for EnqueueWorkload: %s", e.what());
        callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
        return V1_0::ErrorStatus::GENERAL_FAILURE;
    }
    catch (std::exception& e)
    {
        ALOGE("std::exception caught while preparing for EnqueueWorkload: %s", e.what());
        callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
        return V1_0::ErrorStatus::GENERAL_FAILURE;
    }

    return V1_0::ErrorStatus::NONE;
}
439
// HIDL entry point for IPreparedModel@1.2::executeSynchronously. Validates the
// request, prepares tensor bindings from the request pools, then runs the graph
// on the calling thread; results (status, output shapes, timing) are delivered
// through the synchronous HIDL result callback 'cb' before this method returns.
template<typename HalVersion>
Return<void> ArmnnPreparedModel_1_2<HalVersion>::executeSynchronously(const V1_0::Request& request,
                                                                      V1_2::MeasureTiming measureTiming,
                                                                      executeSynchronously_cb cb)
{
    // No V1_2::Model is available when restored from cache, so skip the summary log.
    if (!m_PreparedFromCache)
    {
        ALOGV("ArmnnPreparedModel_1_2::executeSynchronously(): %s", GetModelSummary(m_Model).c_str());
    }
    m_RequestCount++;

    if (cb == nullptr)
    {
        ALOGE("ArmnnPreparedModel_1_2::executeSynchronously invalid callback passed");
        return Void();
    }

    TimePoint driverStart;

    // Record driver-entry time as early as possible when timing was requested.
    if (measureTiming == V1_2::MeasureTiming::YES)
    {
        driverStart = Now();
    }

    // Request validation needs the model, so it is skipped for cached preparations.
    if (!m_PreparedFromCache && !android::nn::validateRequest(request, m_Model))
    {
        ALOGE("ArmnnPreparedModel_1_2::executeSynchronously invalid request model");
        cb(V1_0::ErrorStatus::INVALID_ARGUMENT, {}, g_NoTiming);
        return Void();
    }

    // Adapt the synchronous HIDL result callback to the internal callback shape
    // (the calling-function string is unused here).
    auto cbWrapper = [cb](V1_0::ErrorStatus errorStatus,
                          std::vector<V1_2::OutputShape> outputShapes,
                          const V1_2::Timing& timing,
                          std::string)
    {
        cb(errorStatus, outputShapes, timing);
    };

    // map the memory pool into shared pointers
    // use a shared memory pools vector on the heap, as it is passed to the request thread
    auto memPools = std::make_shared<std::vector<android::nn::RunTimePoolInfo>>();

    // allocate the tensors on the heap, as they are passed to the request thread
    auto inputs = std::make_shared<armnn::InputTensors>();
    auto outputs = std::make_shared<armnn::OutputTensors>();

    // On failure PrepareMemoryForIO has already invoked cbWrapper with the error.
    auto prepareStatus = PrepareMemoryForIO(*inputs, *outputs, *memPools, request, cbWrapper);
    if (prepareStatus != V1_0::ErrorStatus::NONE)
    {
        return Void();
    }

    ALOGV("ArmnnPreparedModel_1_2::executeSynchronously() before Execution");

    CallbackContext_1_2 cbCtx;
    cbCtx.callback = cbWrapper;
    cbCtx.ctx.measureTimings = measureTiming;
    cbCtx.ctx.driverStart = driverStart;
    ExecuteGraph(memPools, *inputs, *outputs, cbCtx);

    return Void();
}
503
// Runs the loaded network on the prepared input/output tensors and reports the
// result through cb.callback. Used by both the synchronous and asynchronous
// execution paths. Returns true on success, false after any failure (the
// callback has already been invoked with GENERAL_FAILURE in that case).
// When timing was requested, device time covers only the runtime call and
// driver time runs from cb.ctx.driverStart to completion.
template<typename HalVersion>
template<typename CallbackContext>
bool ArmnnPreparedModel_1_2<HalVersion>::ExecuteGraph(
    std::shared_ptr<std::vector<::android::nn::RunTimePoolInfo>>& pMemPools,
    armnn::InputTensors& inputTensors,
    armnn::OutputTensors& outputTensors,
    CallbackContext cb)
{
    ALOGV("ArmnnPreparedModel_1_2::ExecuteGraph(...)");

    TimePoint driverEnd, deviceStart, deviceEnd;
    // Capture the graph execution start time.
    std::chrono::time_point<std::chrono::system_clock> graphExecutionStart = std::chrono::system_clock::now();

    DumpTensorsIfRequired("Input", inputTensors);

    // Pre-compute the output shapes to report back; the network outputs have
    // fixed shapes known from their TensorInfo.
    std::vector<V1_2::OutputShape> outputShapes(outputTensors.size());
    for (unsigned int i = 0; i < outputTensors.size(); i++)
    {
        std::pair<int, armnn::Tensor> outputTensorPair = outputTensors[i];
        const armnn::Tensor outputTensor = outputTensorPair.second;
        const armnn::TensorInfo outputTensorInfo = outputTensor.GetInfo();

        outputShapes[i] = ComputeShape(outputTensorInfo);
    }

    // run it
    try
    {
        if (cb.ctx.measureTimings == V1_2::MeasureTiming::YES)
        {
            deviceStart = Now();
        }

        armnn::Status status;
        if (m_AsyncModelExecutionEnabled)
        {
            // Async-enabled models execute through their working-memory handle.
            ALOGW("ArmnnPreparedModel_1_2::ExecuteGraph m_AsyncModelExecutionEnabled true");
            status = m_Runtime->Execute(*m_WorkingMemHandle, inputTensors, outputTensors);
        }
        else
        {
            ALOGW("ArmnnPreparedModel_1_2::ExecuteGraph m_AsyncModelExecutionEnabled false");

            // Create a vector of Input and Output Ids which can be imported. An empty vector means all will be copied.
            std::vector<armnn::ImportedInputId> importedInputIds;
            if (m_EnableImport)
            {
                importedInputIds = m_Runtime->ImportInputs(m_NetworkId, inputTensors, armnn::MemorySource::Malloc);
            }
            std::vector<armnn::ImportedOutputId> importedOutputIds;
            if (m_EnableExport)
            {
                importedOutputIds = m_Runtime->ImportOutputs(m_NetworkId, outputTensors, armnn::MemorySource::Malloc);
            }
            status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors,
                                                importedInputIds, importedOutputIds);
        }

        if (cb.ctx.measureTimings == V1_2::MeasureTiming::YES)
        {
            deviceEnd = Now();
        }
        if (status != armnn::Status::Success)
        {
            ALOGW("EnqueueWorkload failed");
            cb.callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming,
                        "ArmnnPreparedModel_1_2::ExecuteGraph");
            return false;
        }
    }
    catch (armnn::Exception& e)
    {
        ALOGW("armnn:Exception caught from EnqueueWorkload: %s", e.what());
        cb.callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::ExecuteGraph");
        return false;
    }
    catch (std::exception& e)
    {
        ALOGE("std::exception caught from EnqueueWorkload: %s", e.what());
        cb.callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::ExecuteGraph");
        return false;
    }

    // Flush any CPU-side writes back to the shared request pools so the client sees the outputs.
    CommitPools(*pMemPools);

    DumpTensorsIfRequired("Output", outputTensors);

    if (cb.ctx.measureTimings == V1_2::MeasureTiming::YES)
    {
        driverEnd = Now();
        V1_2::Timing timing;
        timing.timeOnDevice = MicrosecondsDuration(deviceEnd, deviceStart);
        timing.timeInDriver = MicrosecondsDuration(driverEnd, cb.ctx.driverStart);
        ALOGV("ArmnnPreparedModel_1_2::execute timing - Device = %lu Driver = %lu",
              static_cast<unsigned long>(timing.timeOnDevice), static_cast<unsigned long>(timing.timeInDriver));
        cb.callback(V1_0::ErrorStatus::NONE, outputShapes, timing, "ArmnnPreparedModel_1_2::ExecuteGraph");
    } else {
        cb.callback(V1_0::ErrorStatus::NONE, outputShapes, g_NoTiming, "ArmnnPreparedModel_1_2::ExecuteGraph");
    }

    // Log the total time in this call. This is a good number to compare to that printed out by
    // RuntimeImpl::EnqueueWorkload. The difference should be the execution overhead of the driver.
    // NOTE(review): %lld assumes the microseconds rep is long long - on some ABIs
    // int64_t is long; consider a static_cast<long long> here. TODO confirm.
    ALOGI("ArmnnPreparedModel_1_2::ExecuteGraph Execution time = %lld µs",
          std::chrono::duration_cast<std::chrono::microseconds>
          (std::chrono::system_clock::now() - graphExecutionStart).count());
    return true;
}
612
// Runs the network once with zero-initialised dummy inputs and throw-away
// output buffers, using a no-op completion callback. Used to exercise the graph
// (e.g. to warm up / verify a prepared model) without a client request.
// Returns the ExecuteGraph result: true on success.
// Note: 'storage' is a vector of vector<char>; reallocation of the outer vector
// moves the inner buffers, which keeps their heap data pointers stable, so the
// tensors' data pointers remain valid.
template<typename HalVersion>
bool ArmnnPreparedModel_1_2<HalVersion>::ExecuteWithDummyInputs(unsigned int numInputs, unsigned int numOutputs)
{
    std::vector<std::vector<char>> storage;
    armnn::InputTensors inputTensors;
    for (unsigned int i = 0; i < numInputs; i++)
    {
        armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
        // pInputTensors (of type InputTensors) is composed of a vector of ConstTensors.
        // Therefore, set all TensorInfo isConstant parameters of input Tensors to true.
        inputTensorInfo.SetConstant();

        storage.emplace_back(inputTensorInfo.GetNumBytes());
        const armnn::ConstTensor inputTensor(inputTensorInfo, storage.back().data());

        inputTensors.emplace_back(i, inputTensor);
    }

    armnn::OutputTensors outputTensors;
    for (unsigned int i = 0; i < numOutputs; i++)
    {
        const armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
        storage.emplace_back(outputTensorInfo.GetNumBytes());
        const armnn::Tensor outputTensor(outputTensorInfo, storage.back().data());

        outputTensors.emplace_back(i, outputTensor);
    }

    // Results are discarded; only the execution status matters here.
    auto nullCallback = [](V1_0::ErrorStatus, std::vector<V1_2::OutputShape>, const V1_2::Timing&, std::string) {};
    CallbackContext_1_2 callbackContext;
    callbackContext.callback = nullCallback;
    callbackContext.ctx.measureTimings = V1_2::MeasureTiming::NO;
    auto memPools = std::make_shared<std::vector<::android::nn::RunTimePoolInfo>>();
    return ExecuteGraph(memPools,
                        inputTensors,
                        outputTensors,
                        callbackContext);
}
651
// Common asynchronous execution path shared by execute() and execute_1_2().
// Validates the request, prepares heap-allocated tensor bindings (they outlive
// this call), then hands the work either to the shared thread pool (async
// models) or to the static request thread. The completion callback receives
// the status, output shapes and (optional) timing; this method itself only
// reports whether the request was accepted.
template<typename HalVersion>
Return <V1_0::ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::Execute(const V1_0::Request& request,
                                                                       V1_2::MeasureTiming measureTiming,
                                                                       CallbackAsync_1_2 callback)
{
    ExecutionContext_1_2 ctx;
    if (measureTiming == V1_2::MeasureTiming::YES)
    {
        ctx.measureTimings = measureTiming;
        ctx.driverStart = Now();
    }

    // No V1_2::Model is available when restored from cache, so skip the summary log.
    if (!m_PreparedFromCache)
    {
        ALOGV("ArmnnPreparedModel_1_2::execute(): %s", GetModelSummary(m_Model).c_str());
    }
    m_RequestCount++;

    // Request validation needs the model, so it is skipped for cached preparations.
    if (!m_PreparedFromCache && !android::nn::validateRequest(request, m_Model))
    {
        callback(V1_0::ErrorStatus::INVALID_ARGUMENT, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
        return V1_0::ErrorStatus::INVALID_ARGUMENT;
    }

    if (!m_RequestInputsAndOutputsDumpDir.empty())
    {
        ALOGD("Dumping inputs and outputs for request %" PRIuPTR, reinterpret_cast<std::uintptr_t>(&callback));
    }

    // map the memory pool into shared pointers
    // use a shared memory pools vector on the heap, as it is passed to the request thread
    auto memPools = std::make_shared<std::vector<android::nn::RunTimePoolInfo>>();

    // allocate the tensors on the heap, as they are passed to the request thread
    auto inputTensors = std::make_shared<armnn::InputTensors>();
    auto outputTensors = std::make_shared<armnn::OutputTensors>();

    // PrepareMemoryForIO invokes 'callback' itself on failure, so
    // OUTPUT_INSUFFICIENT_SIZE is reported as NONE here: the HAL contract is
    // that the error travels via the callback, not the launch status.
    auto prepareStatus = PrepareMemoryForIO(*inputTensors, *outputTensors, *memPools, request, callback);
    switch(prepareStatus)
    {
        case V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
            return V1_0::ErrorStatus::NONE;
        case V1_0::ErrorStatus::GENERAL_FAILURE:
            return V1_0::ErrorStatus::GENERAL_FAILURE;
        default:
        {}
    }


    // post the request for asynchronous execution
    CallbackContext_1_2 cb;
    cb.callback = callback;
    cb.ctx = ctx;

    if (m_AsyncModelExecutionEnabled)
    {
        ALOGV("ArmnnPreparedModel_1_2::execute(...) before ScheduleGraphForExecution");
        ScheduleGraphForExecution(memPools, inputTensors, outputTensors, cb);
        ALOGV("ArmnnPreparedModel_1_2::execute(...) after ScheduleGraphForExecution");
        return V1_0::ErrorStatus::NONE;
    }

    ALOGV("ArmnnPreparedModel_1_2::execute(...) before PostMsg");
    m_RequestThread.PostMsg(this, memPools, inputTensors, outputTensors, cb);
    ALOGV("ArmnnPreparedModel_1_2::execute(...) after PostMsg");
    return V1_0::ErrorStatus::NONE;
}
719
Mike Kellyb5fdf382019-06-11 16:35:25 +0100720template<typename HalVersion>
721Return<void> ArmnnPreparedModel_1_2<HalVersion>::configureExecutionBurst(
Derek Lamberti4de83c52020-03-17 13:40:18 +0000722 const sp<V1_2::IBurstCallback>& callback,
723 const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
724 const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
725 V1_2::IPreparedModel::configureExecutionBurst_cb cb)
Mike Kellyb5fdf382019-06-11 16:35:25 +0100726{
727 ALOGV("ArmnnPreparedModel_1_2::configureExecutionBurst");
Mike Kelly65c42dc2019-07-22 14:06:00 +0100728 const sp<V1_2::IBurstContext> burst = ExecutionBurstServer::create(callback,
729 requestChannel,
730 resultChannel,
Kevin May42477c12020-03-26 13:34:14 +0000731 this);
Mike Kellyb5fdf382019-06-11 16:35:25 +0100732
Mike Kelly44381512019-07-08 17:37:35 +0100733 if (burst == nullptr)
734 {
Kevin Mayec1e5b82020-02-26 17:00:39 +0000735 cb(V1_0::ErrorStatus::GENERAL_FAILURE, {});
Mike Kelly44381512019-07-08 17:37:35 +0100736 }
737 else
738 {
Kevin Mayec1e5b82020-02-26 17:00:39 +0000739 cb(V1_0::ErrorStatus::NONE, burst);
Mike Kellyb5fdf382019-06-11 16:35:25 +0100740 }
741 return Void();
742}
743
Finn Williamsd8fb5402021-05-19 20:52:00 +0100744/// Schedule the graph prepared from the request for execution
745template<typename HalVersion>
746template<typename CallbackContext>
747void ArmnnPreparedModel_1_2<HalVersion>::ScheduleGraphForExecution(
748 std::shared_ptr<std::vector<::android::nn::RunTimePoolInfo>>& pMemPools,
749 std::shared_ptr<armnn::InputTensors>& inputTensors,
750 std::shared_ptr<armnn::OutputTensors>& outputTensors,
751 CallbackContext callbackContext)
752{
753 ALOGV("ArmnnPreparedModel_1_2::ScheduleGraphForExecution(...)");
754
755 DumpTensorsIfRequired("Input", *inputTensors);
756
757 unsigned int outputTensorSize = outputTensors.get()->size();
758 std::vector<V1_2::OutputShape> outputShapes(outputTensorSize);
759 for (unsigned int i = 0; i < outputTensorSize; i++)
760 {
761 std::pair<int, armnn::Tensor> outputTensorPair = outputTensors.get()->at(i);
762 const armnn::Tensor outputTensor = outputTensorPair.second;
763 const armnn::TensorInfo outputTensorInfo = outputTensor.GetInfo();
764
765 outputShapes[i] = ComputeShape(outputTensorInfo);
766 }
767
768 auto tpCb = std::make_shared<
769 ArmnnThreadPoolCallback_1_2<CallbackContext_1_2>>(this,
770 pMemPools,
771 outputShapes,
772 inputTensors,
773 outputTensors,
774 callbackContext);
775
Finn Williamsca3a3e02021-06-11 15:04:02 +0100776 m_Threadpool->Schedule(m_NetworkId,
777 *tpCb->m_InputTensors,
778 *tpCb->m_OutputTensors,
779 armnn::QosExecPriority::Medium,
780 tpCb);
Finn Williamsd8fb5402021-05-19 20:52:00 +0100781 ALOGV("ArmnnPreparedModel_1_2::ScheduleGraphForExecution end");
782}
783
784template<typename HalVersion>
785template <typename CallbackContext>
786void ArmnnPreparedModel_1_2<HalVersion>::ArmnnThreadPoolCallback_1_2<CallbackContext>::Notify(
787 armnn::Status status, armnn::InferenceTimingPair timeTaken)
788{
789 ALOGV("ArmnnPreparedModel_1_2::ArmnnThreadPoolCallback_1_2 Notify");
790
791 TimePoint driverEnd;
792
793 CommitPools(*m_MemPools);
794
795 m_Model->DumpTensorsIfRequired("Output", *m_OutputTensors);
796
797 if (status != armnn::Status::Success)
798 {
799 ALOGW("ArmnnThreadPoolCallback::Notify EnqueueWorkload failed");
800 m_CallbackContext.callback(
801 V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel::ExecuteGraph");
802 return;
803 }
804
805 if (m_CallbackContext.ctx.measureTimings == V1_2::MeasureTiming::YES)
806 {
807 driverEnd = std::chrono::steady_clock::now();
808 V1_2::Timing timing;
809 timing.timeOnDevice = MicrosecondsDuration(timeTaken.second, timeTaken.first);
810 timing.timeInDriver = MicrosecondsDuration(driverEnd, m_CallbackContext.ctx.driverStart);
Zingo Andersen7c561492022-01-25 11:09:41 +0100811 ALOGV("ArmnnPreparedModel_1_2::execute timing - Device = %lu Driver = %lu",
812 static_cast<unsigned long>(timing.timeOnDevice), static_cast<unsigned long>(timing.timeInDriver));
Finn Williamsd8fb5402021-05-19 20:52:00 +0100813 m_CallbackContext.callback(
814 V1_0::ErrorStatus::NONE, m_OutputShapes, timing, "ArmnnPreparedModel_1_2::ExecuteGraph");
815 } else {
816 m_CallbackContext.callback(
817 V1_0::ErrorStatus::NONE, m_OutputShapes, g_NoTiming, "ArmnnPreparedModel_1_2::ExecuteGraph");
818 }
819 return;
820}
821
#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
// Explicit instantiations: this translation unit emits the hal_1_2 policy
// specialisation of the prepared model and its templated execution helpers
// (ExecuteGraph / ScheduleGraphForExecution with CallbackContext_1_2), so
// other translation units can link against them without seeing the bodies.
template class ArmnnPreparedModel_1_2<hal_1_2::HalPolicy>;
template bool ArmnnPreparedModel_1_2<hal_1_2::HalPolicy>::ExecuteGraph<CallbackContext_1_2>(
        std::shared_ptr<std::vector<::android::nn::RunTimePoolInfo>>& pMemPools,
        armnn::InputTensors& pInputTensors,
        armnn::OutputTensors& pOutputTensors,
        CallbackContext_1_2 cb);

template void ArmnnPreparedModel_1_2<hal_1_2::HalPolicy>::ScheduleGraphForExecution<CallbackContext_1_2>(
        std::shared_ptr<std::vector<::android::nn::RunTimePoolInfo>>& pMemPools,
        std::shared_ptr<armnn::InputTensors>& inputTensors,
        std::shared_ptr<armnn::OutputTensors>& outputTensors,
        CallbackContext_1_2 callbackContext);
#endif
836
837} // namespace armnn_driver