//
// Copyright © 2017 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//

#define LOG_TAG "ArmnnDriver"

#include "ArmnnPreparedModel_1_2.hpp"

#include "Utils.hpp"

#include <armnn/Types.hpp>

#include <log/log.h>
#include <OperationsUtils.h>
#include <ExecutionBurstServer.h>
#include <ValidateHal.h>

#include <cinttypes>

#ifdef ARMNN_ANDROID_S
#include <LegacyUtils.h>
#endif

using namespace android;
using namespace android::hardware;

namespace {

static const V1_2::Timing g_NoTiming = {.timeOnDevice = UINT64_MAX, .timeInDriver = UINT64_MAX};
using namespace armnn_driver;
using TimePoint = std::chrono::steady_clock::time_point;

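// Timing helpers used to implement the V1_2 MeasureTiming option.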
TimePoint Now()
{
    return std::chrono::steady_clock::now();
}

unsigned long MicrosecondsDuration(TimePoint endPoint, TimePoint startPoint)
{
    return static_cast<unsigned long>(std::chrono::duration_cast<std::chrono::microseconds>(
                                      endPoint - startPoint).count());
}

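// Notify the client's execution callback and log if the HIDL transaction fails.
// The V1_0 overload ignores the output shapes and timing because
// V1_0::IExecutionCallback::notify cannot return them.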
void NotifyCallbackAndCheck(const ::android::sp<V1_0::IExecutionCallback>& callback,
                            V1_0::ErrorStatus errorStatus,
                            std::vector<V1_2::OutputShape>,
                            const V1_2::Timing,
                            std::string callingFunction)
{
    Return<void> returned = callback->notify(errorStatus);
    // This check is required; if the callback fails and isn't checked, it will bring down the service.
    if (!returned.isOk())
    {
        ALOGE("ArmnnDriver::%s: hidl callback failed to return properly: %s",
              callingFunction.c_str(), returned.description().c_str());
    }
}

void NotifyCallbackAndCheck(const ::android::sp<V1_2::IExecutionCallback>& callback,
                            V1_0::ErrorStatus errorStatus,
                            std::vector<V1_2::OutputShape> outputShapes,
                            const V1_2::Timing timing,
                            std::string callingFunction)
{
    Return<void> returned = callback->notify_1_2(errorStatus, outputShapes, timing);
    // This check is required; if the callback fails and isn't checked, it will bring down the service.
    if (!returned.isOk())
    {
        ALOGE("ArmnnDriver::%s: hidl callback failed to return properly: %s",
              callingFunction.c_str(), returned.description().c_str());
    }
}

bool ValidateRequestArgument(const V1_0::RequestArgument& requestArg, const armnn::TensorInfo& tensorInfo)
{
    if (requestArg.dimensions.size() != 0)
    {
        if (requestArg.dimensions.size() != tensorInfo.GetNumDimensions())
        {
            ALOGE("Mismatched dimensions (request argument: %zu, expected: %u)",
                  requestArg.dimensions.size(), tensorInfo.GetNumDimensions());
            return false;
        }

        for (unsigned int d = 0; d < tensorInfo.GetNumDimensions(); ++d)
        {
            if (requestArg.dimensions[d] != 0 && requestArg.dimensions[d] != tensorInfo.GetShape()[d])
            {
                ALOGE("Mismatched size for dimension %u (request argument: %u, expected %u)",
                      d, requestArg.dimensions[d], tensorInfo.GetShape()[d]);
                return false;
            }
        }
    }

    return true;
}

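// Returns a Tensor backed by the request's memory pools, or an empty Tensor
// (null memory area) when validation fails; callers treat the latter as an error.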
armnn::Tensor GetTensorForRequestArgument(const V1_0::RequestArgument& requestArg,
                                          const armnn::TensorInfo& tensorInfo,
                                          const std::vector<::android::nn::RunTimePoolInfo>& requestPools)
{
    if (!ValidateRequestArgument(requestArg, tensorInfo))
    {
        return armnn::Tensor();
    }

    return armnn::Tensor(tensorInfo, GetMemoryFromPool(requestArg.location, requestPools));
}

inline std::string BuildTensorName(const char* tensorNamePrefix, std::size_t index)
{
    return tensorNamePrefix + std::to_string(index);
}

} // anonymous namespace

using namespace android::hardware;

namespace armnn_driver
{

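// The request thread and threadpool are static members, so they are shared by
// every prepared model instantiated for this HAL version.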
template<typename HalVersion>
RequestThread<ArmnnPreparedModel_1_2, HalVersion, CallbackContext_1_2>
    ArmnnPreparedModel_1_2<HalVersion>::m_RequestThread;

template<typename HalVersion>
std::unique_ptr<armnn::Threadpool> ArmnnPreparedModel_1_2<HalVersion>::m_Threadpool(nullptr);

template<typename HalVersion>
template<typename TensorBindingCollection>
void ArmnnPreparedModel_1_2<HalVersion>::DumpTensorsIfRequired(char const* tensorNamePrefix,
                                                               const TensorBindingCollection& tensorBindings)
{
    if (!m_RequestInputsAndOutputsDumpDir.empty())
    {
        const std::string requestName = std::to_string(m_NetworkId) + "_" + std::to_string(m_RequestCount) + ".dump";
        for (std::size_t i = 0u; i < tensorBindings.size(); ++i)
        {
            DumpTensor(m_RequestInputsAndOutputsDumpDir,
                       requestName,
                       BuildTensorName(tensorNamePrefix, i),
                       tensorBindings[i].second);
        }
    }
}

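// Constructor used when the network is prepared from a V1_2::Model. Requests
// are validated against m_Model before execution.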
template<typename HalVersion>
ArmnnPreparedModel_1_2<HalVersion>::ArmnnPreparedModel_1_2(armnn::NetworkId networkId,
                                                           armnn::IRuntime* runtime,
                                                           const V1_2::Model& model,
                                                           const std::string& requestInputsAndOutputsDumpDir,
                                                           const bool gpuProfilingEnabled,
                                                           const bool asyncModelExecutionEnabled,
                                                           const unsigned int numberOfThreads,
                                                           const bool importEnabled,
                                                           const bool exportEnabled)
    : m_NetworkId(networkId)
    , m_Runtime(runtime)
    , m_Model(model)
    , m_RequestCount(0)
    , m_RequestInputsAndOutputsDumpDir(requestInputsAndOutputsDumpDir)
    , m_GpuProfilingEnabled(gpuProfilingEnabled)
    , m_AsyncModelExecutionEnabled(asyncModelExecutionEnabled)
    , m_EnableImport(importEnabled)
    , m_EnableExport(exportEnabled)
    , m_PreparedFromCache(false)
{
    // Enable profiling if required.
    m_Runtime->GetProfiler(m_NetworkId)->EnableProfiling(m_GpuProfilingEnabled);

    if (m_AsyncModelExecutionEnabled)
    {
        std::vector<std::shared_ptr<armnn::IWorkingMemHandle>> memHandles;
        for (unsigned int i = 0; i < numberOfThreads; ++i)
        {
            memHandles.emplace_back(m_Runtime->CreateWorkingMemHandle(networkId));
        }

        if (!m_Threadpool)
        {
            m_Threadpool = std::make_unique<armnn::Threadpool>(numberOfThreads, runtime, memHandles);
        }
        else
        {
            m_Threadpool->LoadMemHandles(memHandles);
        }

        m_WorkingMemHandle = memHandles.back();
    }
}

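// Constructor used when the network is prepared from cache. No V1_2::Model is
// available, so request validation against the model is skipped.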
template<typename HalVersion>
ArmnnPreparedModel_1_2<HalVersion>::ArmnnPreparedModel_1_2(armnn::NetworkId networkId,
                                                           armnn::IRuntime* runtime,
                                                           const std::string& requestInputsAndOutputsDumpDir,
                                                           const bool gpuProfilingEnabled,
                                                           const bool asyncModelExecutionEnabled,
                                                           const unsigned int numberOfThreads,
                                                           const bool importEnabled,
                                                           const bool exportEnabled,
                                                           const bool preparedFromCache)
    : m_NetworkId(networkId)
    , m_Runtime(runtime)
    , m_RequestCount(0)
    , m_RequestInputsAndOutputsDumpDir(requestInputsAndOutputsDumpDir)
    , m_GpuProfilingEnabled(gpuProfilingEnabled)
    , m_AsyncModelExecutionEnabled(asyncModelExecutionEnabled)
    , m_EnableImport(importEnabled)
    , m_EnableExport(exportEnabled)
    , m_PreparedFromCache(preparedFromCache)
{
    // Enable profiling if required.
    m_Runtime->GetProfiler(m_NetworkId)->EnableProfiling(m_GpuProfilingEnabled);

    if (m_AsyncModelExecutionEnabled)
    {
        std::vector<std::shared_ptr<armnn::IWorkingMemHandle>> memHandles;
        for (unsigned int i = 0; i < numberOfThreads; ++i)
        {
            memHandles.emplace_back(m_Runtime->CreateWorkingMemHandle(networkId));
        }

        if (!m_Threadpool)
        {
            m_Threadpool = std::make_unique<armnn::Threadpool>(numberOfThreads, runtime, memHandles);
        }
        else
        {
            m_Threadpool->LoadMemHandles(memHandles);
        }

        m_WorkingMemHandle = memHandles.back();
    }
}

template<typename HalVersion>
ArmnnPreparedModel_1_2<HalVersion>::~ArmnnPreparedModel_1_2()
{
    // Get a hold of the profiler used by this model.
    std::shared_ptr<armnn::IProfiler> profiler = m_Runtime->GetProfiler(m_NetworkId);
    if (profiler && m_GpuProfilingEnabled)
    {
        // Dump the profiling info to a file if required.
        DumpJsonProfilingIfRequired(m_GpuProfilingEnabled, m_RequestInputsAndOutputsDumpDir, m_NetworkId,
                                    profiler.get());
    }

    // Unload the network associated with this model.
    m_Runtime->UnloadNetwork(m_NetworkId);

    // Unload the network's memory handles from the threadpool.
    if (m_AsyncModelExecutionEnabled)
    {
        m_Threadpool->UnloadMemHandles(m_NetworkId);
    }
}

template<typename HalVersion>
Return<V1_0::ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::execute(const V1_0::Request& request,
        const ::android::sp<V1_0::IExecutionCallback>& callback)
{
    if (callback.get() == nullptr)
    {
        ALOGE("ArmnnPreparedModel_1_2::execute invalid callback passed");
        return V1_0::ErrorStatus::INVALID_ARGUMENT;
    }

    auto cb = [callback](V1_0::ErrorStatus errorStatus,
                         std::vector<V1_2::OutputShape> outputShapes,
                         const V1_2::Timing& timing,
                         std::string callingFunction)
    {
        NotifyCallbackAndCheck(callback, errorStatus, outputShapes, timing, callingFunction);
    };

    return Execute(request, V1_2::MeasureTiming::NO, cb);
}

template<typename HalVersion>
Return<V1_0::ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::execute_1_2(
        const V1_0::Request& request,
        V1_2::MeasureTiming measureTiming,
        const sp<V1_2::IExecutionCallback>& callback)
{
    if (callback.get() == nullptr)
    {
        ALOGE("ArmnnPreparedModel_1_2::execute_1_2 invalid callback passed");
        return V1_0::ErrorStatus::INVALID_ARGUMENT;
    }

    auto cb = [callback](V1_0::ErrorStatus errorStatus,
                         std::vector<V1_2::OutputShape> outputShapes,
                         const V1_2::Timing& timing,
                         std::string callingFunction)
    {
        NotifyCallbackAndCheck(callback, errorStatus, outputShapes, timing, callingFunction);
    };

    return Execute(request, measureTiming, cb);
}

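// Wrap each request input in an armnn tensor backed by the request's memory pools.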
template<typename HalVersion>
Return<V1_0::ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::PrepareMemoryForInputs(
    armnn::InputTensors& inputs,
    const V1_0::Request& request,
    const std::vector<android::nn::RunTimePoolInfo>& memPools)
{
    inputs.reserve(request.inputs.size());
    for (unsigned int i = 0; i < request.inputs.size(); i++)
    {
        const auto& inputArg = request.inputs[i];

        armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
        // inputs (of type InputTensors) is composed of a vector of ConstTensors.
        // Therefore, set all TensorInfo isConstant parameters of input Tensors to true.
        inputTensorInfo.SetConstant();
        const armnn::Tensor inputTensor = GetTensorForRequestArgument(inputArg, inputTensorInfo, memPools);

        if (inputTensor.GetMemoryArea() == nullptr)
        {
            ALOGE("Cannot execute request. Error converting request input %u to tensor", i);
            return V1_0::ErrorStatus::GENERAL_FAILURE;
        }

        inputs.emplace_back(i, inputTensor);
    }

    return V1_0::ErrorStatus::NONE;
}

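// Wrap each request output in an armnn tensor, recording its shape and checking
// that the client-provided buffer is large enough to hold it.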
template<typename HalVersion>
Return<V1_0::ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::PrepareMemoryForOutputs(
    armnn::OutputTensors& outputs,
    std::vector<V1_2::OutputShape>& outputShapes,
    const V1_0::Request& request,
    const std::vector<android::nn::RunTimePoolInfo>& memPools)
{
    outputs.reserve(request.outputs.size());
    for (unsigned int i = 0; i < request.outputs.size(); i++)
    {
        const auto& outputArg = request.outputs[i];

        const armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
        const armnn::Tensor outputTensor = GetTensorForRequestArgument(outputArg, outputTensorInfo, memPools);
        if (outputTensor.GetMemoryArea() == nullptr)
        {
            ALOGE("Cannot execute request. Error converting request output %u to tensor", i);
            return V1_0::ErrorStatus::GENERAL_FAILURE;
        }

        const size_t outputSize = outputTensorInfo.GetNumBytes();

        if (outputArg.location.length < outputSize)
        {
            ALOGW("ArmnnPreparedModel_1_2::Execute failed: outputArg.location.length < outputSize");
            return V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
        }

#if !defined(ARMNN_ANDROID_S)
        const size_t bufferSize = memPools.at(outputArg.location.poolIndex).getHidlMemory().size();
        if (bufferSize < outputSize)
        {
            ALOGW("ArmnnPreparedModel_1_2::Execute failed: bufferSize < outputSize");
            return V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
        }
#else
        const size_t bufferSize = memPools.at(outputArg.location.poolIndex).getSize();
        if (bufferSize < outputSize)
        {
            ALOGW("ArmnnPreparedModel_1_2::Execute failed bufferSize (%s) < outputSize (%s)",
                  std::to_string(bufferSize).c_str(), std::to_string(outputSize).c_str());
            outputShapes[i].isSufficient = false;
            return V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE;
        }
#endif
        outputs.emplace_back(i, outputTensor);
        outputShapes[i] = ComputeShape(outputTensorInfo);
    }

    return V1_0::ErrorStatus::NONE;
}

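// Map the request's memory pools and populate the input and output tensors.
// On failure the callback is invoked here and a non-NONE status is returned.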
template<typename HalVersion>
Return<V1_0::ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::PrepareMemoryForIO(
    armnn::InputTensors& inputs,
    armnn::OutputTensors& outputs,
    std::vector<android::nn::RunTimePoolInfo>& memPools,
    const V1_0::Request& request,
    CallbackAsync_1_2 callback)
{
#if !defined(ARMNN_ANDROID_S)
    if (!setRunTimePoolInfosFromHidlMemories(&memPools, request.pools))
#else
    if (!setRunTimePoolInfosFromCanonicalMemories(&memPools, uncheckedConvert(request.pools)))
#endif
    {
        callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
        return V1_0::ErrorStatus::GENERAL_FAILURE;
    }
    // add the inputs and outputs with their data
    try
    {
        if (PrepareMemoryForInputs(inputs, request, memPools) != V1_0::ErrorStatus::NONE)
        {
            callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
            return V1_0::ErrorStatus::GENERAL_FAILURE;
        }

        std::vector<V1_2::OutputShape> outputShapes(request.outputs.size());

        auto errorStatus = PrepareMemoryForOutputs(outputs, outputShapes, request, memPools);
        if (errorStatus != V1_0::ErrorStatus::NONE)
        {
            callback(errorStatus,
                     outputShapes,
                     g_NoTiming,
                     "ArmnnPreparedModel_1_2::Execute");
            return errorStatus;
        }
    }
    catch (armnn::Exception& e)
    {
        ALOGW("armnn::Exception caught while preparing for EnqueueWorkload: %s", e.what());
        callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
        return V1_0::ErrorStatus::GENERAL_FAILURE;
    }
    catch (std::exception& e)
    {
        ALOGE("std::exception caught while preparing for EnqueueWorkload: %s", e.what());
        callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
        return V1_0::ErrorStatus::GENERAL_FAILURE;
    }

    return V1_0::ErrorStatus::NONE;
}

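// Synchronous execution path: memory is prepared and the graph is run on the
// caller's thread, with results reported through the executeSynchronously_cb.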
template<typename HalVersion>
Return<void> ArmnnPreparedModel_1_2<HalVersion>::executeSynchronously(const V1_0::Request& request,
                                                                      V1_2::MeasureTiming measureTiming,
                                                                      executeSynchronously_cb cb)
{
    if (!m_PreparedFromCache)
    {
        ALOGV("ArmnnPreparedModel_1_2::executeSynchronously(): %s", GetModelSummary(m_Model).c_str());
    }
    m_RequestCount++;

    if (cb == nullptr)
    {
        ALOGE("ArmnnPreparedModel_1_2::executeSynchronously invalid callback passed");
        return Void();
    }

    TimePoint driverStart;

    if (measureTiming == V1_2::MeasureTiming::YES)
    {
        driverStart = Now();
    }

    if (!m_PreparedFromCache && !android::nn::validateRequest(request, m_Model))
    {
        ALOGE("ArmnnPreparedModel_1_2::executeSynchronously invalid request model");
        cb(V1_0::ErrorStatus::INVALID_ARGUMENT, {}, g_NoTiming);
        return Void();
    }

    auto cbWrapper = [cb](V1_0::ErrorStatus errorStatus,
                          std::vector<V1_2::OutputShape> outputShapes,
                          const V1_2::Timing& timing,
                          std::string)
    {
        cb(errorStatus, outputShapes, timing);
    };

    // map the memory pool into shared pointers
    // use a shared memory pools vector on the heap, as it is passed to the request thread
    auto memPools = std::make_shared<std::vector<android::nn::RunTimePoolInfo>>();

    // allocate the tensors on the heap, as they are passed to the request thread
    auto inputs = std::make_shared<armnn::InputTensors>();
    auto outputs = std::make_shared<armnn::OutputTensors>();

    auto prepareStatus = PrepareMemoryForIO(*inputs, *outputs, *memPools, request, cbWrapper);
    if (prepareStatus != V1_0::ErrorStatus::NONE)
    {
        return Void();
    }

    ALOGV("ArmnnPreparedModel_1_2::executeSynchronously() before Execution");

    CallbackContext_1_2 cbCtx;
    cbCtx.callback = cbWrapper;
    cbCtx.ctx.measureTimings = measureTiming;
    cbCtx.ctx.driverStart = driverStart;
    ExecuteGraph(memPools, *inputs, *outputs, cbCtx);

    return Void();
}

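// Run the loaded network: via the shared working-memory handle when asynchronous
// execution is enabled, otherwise via EnqueueWorkload (optionally importing or
// exporting buffers). Returns false after notifying the callback on failure.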
template<typename HalVersion>
template<typename CallbackContext>
bool ArmnnPreparedModel_1_2<HalVersion>::ExecuteGraph(
    std::shared_ptr<std::vector<::android::nn::RunTimePoolInfo>>& pMemPools,
    armnn::InputTensors& inputTensors,
    armnn::OutputTensors& outputTensors,
    CallbackContext cb)
{
    ALOGV("ArmnnPreparedModel_1_2::ExecuteGraph(...)");

    TimePoint driverEnd, deviceStart, deviceEnd;

    DumpTensorsIfRequired("Input", inputTensors);

    std::vector<V1_2::OutputShape> outputShapes(outputTensors.size());
    for (unsigned int i = 0; i < outputTensors.size(); i++)
    {
        std::pair<int, armnn::Tensor> outputTensorPair = outputTensors[i];
        const armnn::Tensor outputTensor = outputTensorPair.second;
        const armnn::TensorInfo outputTensorInfo = outputTensor.GetInfo();

        outputShapes[i] = ComputeShape(outputTensorInfo);
    }

    // run it
    try
    {
        if (cb.ctx.measureTimings == V1_2::MeasureTiming::YES)
        {
            deviceStart = Now();
        }

        armnn::Status status;
        if (m_AsyncModelExecutionEnabled)
        {
            ALOGW("ArmnnPreparedModel_1_2::ExecuteGraph m_AsyncModelExecutionEnabled true");
            status = m_Runtime->Execute(*m_WorkingMemHandle, inputTensors, outputTensors);
        }
        else
        {
            ALOGW("ArmnnPreparedModel_1_2::ExecuteGraph m_AsyncModelExecutionEnabled false");

            // Create a vector of Input and Output Ids which can be imported. An empty vector means all will be copied.
            std::vector<armnn::ImportedInputId> importedInputIds;
            if (m_EnableImport)
            {
                importedInputIds = m_Runtime->ImportInputs(m_NetworkId, inputTensors, armnn::MemorySource::Malloc);
            }
            std::vector<armnn::ImportedOutputId> importedOutputIds;
            if (m_EnableExport)
            {
                importedOutputIds = m_Runtime->ImportOutputs(m_NetworkId, outputTensors, armnn::MemorySource::Malloc);
            }
            status = m_Runtime->EnqueueWorkload(m_NetworkId, inputTensors, outputTensors,
                                                importedInputIds, importedOutputIds);
        }

        if (cb.ctx.measureTimings == V1_2::MeasureTiming::YES)
        {
            deviceEnd = Now();
        }
        if (status != armnn::Status::Success)
        {
            ALOGW("EnqueueWorkload failed");
            cb.callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming,
                        "ArmnnPreparedModel_1_2::ExecuteGraph");
            return false;
        }
    }
    catch (armnn::Exception& e)
    {
        ALOGW("armnn::Exception caught from EnqueueWorkload: %s", e.what());
        cb.callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::ExecuteGraph");
        return false;
    }
    catch (std::exception& e)
    {
        ALOGE("std::exception caught from EnqueueWorkload: %s", e.what());
        cb.callback(V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel_1_2::ExecuteGraph");
        return false;
    }

    CommitPools(*pMemPools);

    DumpTensorsIfRequired("Output", outputTensors);

    if (cb.ctx.measureTimings == V1_2::MeasureTiming::YES)
    {
        driverEnd = Now();
        V1_2::Timing timing;
        timing.timeOnDevice = MicrosecondsDuration(deviceEnd, deviceStart);
        timing.timeInDriver = MicrosecondsDuration(driverEnd, cb.ctx.driverStart);
        ALOGV("ArmnnPreparedModel_1_2::execute timing - Device = %lu Driver = %lu",
              static_cast<unsigned long>(timing.timeOnDevice), static_cast<unsigned long>(timing.timeInDriver));
        cb.callback(V1_0::ErrorStatus::NONE, outputShapes, timing, "ArmnnPreparedModel_1_2::ExecuteGraph");
    }
    else
    {
        cb.callback(V1_0::ErrorStatus::NONE, outputShapes, g_NoTiming, "ArmnnPreparedModel_1_2::ExecuteGraph");
    }

    return true;
}

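// Run a single inference with zero-initialised dummy buffers, e.g. to verify
// that a freshly prepared (or cached) network can execute.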
template<typename HalVersion>
bool ArmnnPreparedModel_1_2<HalVersion>::ExecuteWithDummyInputs(unsigned int numInputs, unsigned int numOutputs)
{
    std::vector<std::vector<char>> storage;
    armnn::InputTensors inputTensors;
    for (unsigned int i = 0; i < numInputs; i++)
    {
        armnn::TensorInfo inputTensorInfo = m_Runtime->GetInputTensorInfo(m_NetworkId, i);
        // inputTensors (of type InputTensors) is composed of a vector of ConstTensors.
        // Therefore, set all TensorInfo isConstant parameters of input Tensors to true.
        inputTensorInfo.SetConstant();

        storage.emplace_back(inputTensorInfo.GetNumBytes());
        const armnn::ConstTensor inputTensor(inputTensorInfo, storage.back().data());

        inputTensors.emplace_back(i, inputTensor);
    }

    armnn::OutputTensors outputTensors;
    for (unsigned int i = 0; i < numOutputs; i++)
    {
        const armnn::TensorInfo outputTensorInfo = m_Runtime->GetOutputTensorInfo(m_NetworkId, i);
        storage.emplace_back(outputTensorInfo.GetNumBytes());
        const armnn::Tensor outputTensor(outputTensorInfo, storage.back().data());

        outputTensors.emplace_back(i, outputTensor);
    }

    auto nullCallback = [](V1_0::ErrorStatus, std::vector<V1_2::OutputShape>, const V1_2::Timing&, std::string) {};
    CallbackContext_1_2 callbackContext;
    callbackContext.callback = nullCallback;
    callbackContext.ctx.measureTimings = V1_2::MeasureTiming::NO;
    auto memPools = std::make_shared<std::vector<::android::nn::RunTimePoolInfo>>();
    return ExecuteGraph(memPools,
                        inputTensors,
                        outputTensors,
                        callbackContext);
}

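// Common asynchronous entry point shared by execute and execute_1_2: validate
// the request, prepare memory, then either schedule the graph on the threadpool
// or post it to the request thread.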
template<typename HalVersion>
Return<V1_0::ErrorStatus> ArmnnPreparedModel_1_2<HalVersion>::Execute(const V1_0::Request& request,
                                                                      V1_2::MeasureTiming measureTiming,
                                                                      CallbackAsync_1_2 callback)
{
    ExecutionContext_1_2 ctx;
    if (measureTiming == V1_2::MeasureTiming::YES)
    {
        ctx.measureTimings = measureTiming;
        ctx.driverStart = Now();
    }

    if (!m_PreparedFromCache)
    {
        ALOGV("ArmnnPreparedModel_1_2::execute(): %s", GetModelSummary(m_Model).c_str());
    }
    m_RequestCount++;

    if (!m_PreparedFromCache && !android::nn::validateRequest(request, m_Model))
    {
        callback(V1_0::ErrorStatus::INVALID_ARGUMENT, {}, g_NoTiming, "ArmnnPreparedModel_1_2::execute");
        return V1_0::ErrorStatus::INVALID_ARGUMENT;
    }

    if (!m_RequestInputsAndOutputsDumpDir.empty())
    {
        ALOGD("Dumping inputs and outputs for request %" PRIuPTR, reinterpret_cast<std::uintptr_t>(&callback));
    }

    // map the memory pool into shared pointers
    // use a shared memory pools vector on the heap, as it is passed to the request thread
    auto memPools = std::make_shared<std::vector<android::nn::RunTimePoolInfo>>();

    // allocate the tensors on the heap, as they are passed to the request thread
    auto inputTensors = std::make_shared<armnn::InputTensors>();
    auto outputTensors = std::make_shared<armnn::OutputTensors>();

    auto prepareStatus = PrepareMemoryForIO(*inputTensors, *outputTensors, *memPools, request, callback);
    switch(prepareStatus)
    {
        case V1_0::ErrorStatus::OUTPUT_INSUFFICIENT_SIZE:
            return V1_0::ErrorStatus::NONE;
        case V1_0::ErrorStatus::GENERAL_FAILURE:
            return V1_0::ErrorStatus::GENERAL_FAILURE;
        default:
        {}
    }

    // post the request for asynchronous execution
    CallbackContext_1_2 cb;
    cb.callback = callback;
    cb.ctx = ctx;

    if (m_AsyncModelExecutionEnabled)
    {
        ALOGV("ArmnnPreparedModel_1_2::execute(...) before ScheduleGraphForExecution");
        ScheduleGraphForExecution(memPools, inputTensors, outputTensors, cb);
        ALOGV("ArmnnPreparedModel_1_2::execute(...) after ScheduleGraphForExecution");
        return V1_0::ErrorStatus::NONE;
    }

    ALOGV("ArmnnPreparedModel_1_2::execute(...) before PostMsg");
    m_RequestThread.PostMsg(this, memPools, inputTensors, outputTensors, cb);
    ALOGV("ArmnnPreparedModel_1_2::execute(...) after PostMsg");
    return V1_0::ErrorStatus::NONE;
}

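// Create a burst context so the client can replay executions over fast message
// queues rather than individual HIDL calls.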
template<typename HalVersion>
Return<void> ArmnnPreparedModel_1_2<HalVersion>::configureExecutionBurst(
    const sp<V1_2::IBurstCallback>& callback,
    const MQDescriptorSync<V1_2::FmqRequestDatum>& requestChannel,
    const MQDescriptorSync<V1_2::FmqResultDatum>& resultChannel,
    V1_2::IPreparedModel::configureExecutionBurst_cb cb)
{
    ALOGV("ArmnnPreparedModel_1_2::configureExecutionBurst");
    const sp<V1_2::IBurstContext> burst = ExecutionBurstServer::create(callback,
                                                                       requestChannel,
                                                                       resultChannel,
                                                                       this);

    if (burst == nullptr)
    {
        cb(V1_0::ErrorStatus::GENERAL_FAILURE, {});
    }
    else
    {
        cb(V1_0::ErrorStatus::NONE, burst);
    }
    return Void();
}

/// Schedule the graph prepared from the request for execution
template<typename HalVersion>
template<typename CallbackContext>
void ArmnnPreparedModel_1_2<HalVersion>::ScheduleGraphForExecution(
    std::shared_ptr<std::vector<::android::nn::RunTimePoolInfo>>& pMemPools,
    std::shared_ptr<armnn::InputTensors>& inputTensors,
    std::shared_ptr<armnn::OutputTensors>& outputTensors,
    CallbackContext callbackContext)
{
    ALOGV("ArmnnPreparedModel_1_2::ScheduleGraphForExecution(...)");

    DumpTensorsIfRequired("Input", *inputTensors);

    unsigned int outputTensorSize = outputTensors.get()->size();
    std::vector<V1_2::OutputShape> outputShapes(outputTensorSize);
    for (unsigned int i = 0; i < outputTensorSize; i++)
    {
        std::pair<int, armnn::Tensor> outputTensorPair = outputTensors.get()->at(i);
        const armnn::Tensor outputTensor = outputTensorPair.second;
        const armnn::TensorInfo outputTensorInfo = outputTensor.GetInfo();

        outputShapes[i] = ComputeShape(outputTensorInfo);
    }

    auto tpCb = std::make_shared<
        ArmnnThreadPoolCallback_1_2<CallbackContext_1_2>>(this,
                                                          pMemPools,
                                                          outputShapes,
                                                          inputTensors,
                                                          outputTensors,
                                                          callbackContext);

    m_Threadpool->Schedule(m_NetworkId,
                           *tpCb->m_InputTensors,
                           *tpCb->m_OutputTensors,
                           armnn::QosExecPriority::Medium,
                           tpCb);
    ALOGV("ArmnnPreparedModel_1_2::ScheduleGraphForExecution end");
}

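// Invoked by the threadpool when an asynchronous execution completes: commit
// the memory pools, then forward the status and timing to the client callback.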
template<typename HalVersion>
template <typename CallbackContext>
void ArmnnPreparedModel_1_2<HalVersion>::ArmnnThreadPoolCallback_1_2<CallbackContext>::Notify(
    armnn::Status status, armnn::InferenceTimingPair timeTaken)
{
    ALOGV("ArmnnPreparedModel_1_2::ArmnnThreadPoolCallback_1_2 Notify");

    TimePoint driverEnd;

    CommitPools(*m_MemPools);

    m_Model->DumpTensorsIfRequired("Output", *m_OutputTensors);

    if (status != armnn::Status::Success)
    {
        ALOGW("ArmnnThreadPoolCallback::Notify EnqueueWorkload failed");
        m_CallbackContext.callback(
                V1_0::ErrorStatus::GENERAL_FAILURE, {}, g_NoTiming, "ArmnnPreparedModel::ExecuteGraph");
        return;
    }

    if (m_CallbackContext.ctx.measureTimings == V1_2::MeasureTiming::YES)
    {
        driverEnd = std::chrono::steady_clock::now();
        V1_2::Timing timing;
        timing.timeOnDevice = MicrosecondsDuration(timeTaken.second, timeTaken.first);
        timing.timeInDriver = MicrosecondsDuration(driverEnd, m_CallbackContext.ctx.driverStart);
        ALOGV("ArmnnPreparedModel_1_2::execute timing - Device = %lu Driver = %lu",
              static_cast<unsigned long>(timing.timeOnDevice), static_cast<unsigned long>(timing.timeInDriver));
        m_CallbackContext.callback(
                V1_0::ErrorStatus::NONE, m_OutputShapes, timing, "ArmnnPreparedModel_1_2::ExecuteGraph");
    }
    else
    {
        m_CallbackContext.callback(
                V1_0::ErrorStatus::NONE, m_OutputShapes, g_NoTiming, "ArmnnPreparedModel_1_2::ExecuteGraph");
    }
    return;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)
template class ArmnnPreparedModel_1_2<hal_1_2::HalPolicy>;
template bool ArmnnPreparedModel_1_2<hal_1_2::HalPolicy>::ExecuteGraph<CallbackContext_1_2>(
    std::shared_ptr<std::vector<::android::nn::RunTimePoolInfo>>& pMemPools,
    armnn::InputTensors& pInputTensors,
    armnn::OutputTensors& pOutputTensors,
    CallbackContext_1_2 cb);

template void ArmnnPreparedModel_1_2<hal_1_2::HalPolicy>::ScheduleGraphForExecution<CallbackContext_1_2>(
    std::shared_ptr<std::vector<::android::nn::RunTimePoolInfo>>& pMemPools,
    std::shared_ptr<armnn::InputTensors>& inputTensors,
    std::shared_ptr<armnn::OutputTensors>& outputTensors,
    CallbackContext_1_2 callbackContext);
#endif

} // namespace armnn_driver