blob: 8a2812ad19cf005dbd0cba09259d2e9907367b5b [file] [log] [blame]
telsoa015307bc12018-03-09 13:51:08 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beck93e48982018-09-05 13:05:09 +01003// SPDX-License-Identifier: MIT
telsoa015307bc12018-03-09 13:51:08 +00004//
5
6#define LOG_TAG "ArmnnDriver"
7
8#include "Utils.hpp"
Jim Flynnf2e175c2019-12-12 15:11:30 +00009#include "Half.hpp"
telsoa015307bc12018-03-09 13:51:08 +000010
Matteo Martincigh00d6ed12019-11-28 17:13:24 +000011#include <armnnUtils/Permute.hpp>
12
Derek Lambertid00ad912020-01-22 15:55:16 +000013#include <armnn/Utils.hpp>
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +010014#include <armnn/utility/Assert.hpp>
Derek Lambertid00ad912020-01-22 15:55:16 +000015
#include <cassert>
#include <cerrno>
#include <cinttypes>
#include <cstdio>
#include <cstring>
#include <sstream>
#include <time.h>
22
23
telsoa015307bc12018-03-09 13:51:08 +000024
25using namespace android;
telsoa01ce3e84a2018-08-31 09:31:35 +010026using namespace android::hardware;
telsoa015307bc12018-03-09 13:51:08 +000027using namespace android::hidl::memory::V1_0;
28
29namespace armnn_driver
30{
31const armnn::PermutationVector g_DontPermute{};
32
33namespace
34{
35
telsoa015307bc12018-03-09 13:51:08 +000036void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorShape& inTensorShape, const void* input,
Matteo Martincighbf19d2a2019-11-29 11:46:50 +000037 void* output, size_t dataTypeSize, const armnn::PermutationVector& mappings)
telsoa015307bc12018-03-09 13:51:08 +000038{
Matteo Martincighbf19d2a2019-11-29 11:46:50 +000039 assert(inTensorShape.GetNumDimensions() == 4U);
telsoa015307bc12018-03-09 13:51:08 +000040
Matteo Martincighbf19d2a2019-11-29 11:46:50 +000041 armnnUtils::Permute(armnnUtils::Permuted(inTensorShape, mappings), mappings, input, output, dataTypeSize);
telsoa015307bc12018-03-09 13:51:08 +000042}
43
44} // anonymous namespace
45
46void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorInfo& tensor, const void* input, void* output,
47 const armnn::PermutationVector& mappings)
48{
49 assert(tensor.GetNumDimensions() == 4U);
50
Matteo Martincighbf19d2a2019-11-29 11:46:50 +000051 armnn::DataType dataType = tensor.GetDataType();
52 switch (dataType)
telsoa015307bc12018-03-09 13:51:08 +000053 {
Mike Kelly3c673942019-07-25 09:26:06 +010054 case armnn::DataType::Float16:
telsoa015307bc12018-03-09 13:51:08 +000055 case armnn::DataType::Float32:
Derek Lamberti1a38cda2020-01-10 17:28:20 +000056 case armnn::DataType::QAsymmU8:
Derek Lambertid00ad912020-01-22 15:55:16 +000057 case armnn::DataType::QSymmS8:
Sadik Armagan1153d1e2020-04-01 15:09:39 +010058 case armnn::DataType::QAsymmS8:
Matteo Martincighbf19d2a2019-11-29 11:46:50 +000059 SwizzleAndroidNn4dTensorToArmNn(tensor.GetShape(), input, output, armnn::GetDataTypeSize(dataType), mappings);
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +000060 break;
telsoa015307bc12018-03-09 13:51:08 +000061 default:
62 ALOGW("Unknown armnn::DataType for swizzling");
63 assert(0);
64 }
65}
66
67void* GetMemoryFromPool(DataLocation location, const std::vector<android::nn::RunTimePoolInfo>& memPools)
68{
69 // find the location within the pool
70 assert(location.poolIndex < memPools.size());
71
surmeh01deb3bdb2018-07-05 12:06:04 +010072 const android::nn::RunTimePoolInfo& memPool = memPools[location.poolIndex];
73
surmeh01deb3bdb2018-07-05 12:06:04 +010074 uint8_t* memPoolBuffer = memPool.getBuffer();
surmeh01deb3bdb2018-07-05 12:06:04 +010075
76 uint8_t* memory = memPoolBuffer + location.offset;
telsoa015307bc12018-03-09 13:51:08 +000077
78 return memory;
79}
80
Matthew Bentham912b3622019-05-03 15:49:14 +010081armnn::TensorInfo GetTensorInfoForOperand(const V1_0::Operand& operand)
telsoa015307bc12018-03-09 13:51:08 +000082{
83 armnn::DataType type;
84
85 switch (operand.type)
86 {
Matthew Bentham912b3622019-05-03 15:49:14 +010087 case V1_0::OperandType::TENSOR_FLOAT32:
telsoa015307bc12018-03-09 13:51:08 +000088 type = armnn::DataType::Float32;
89 break;
Matthew Bentham912b3622019-05-03 15:49:14 +010090 case V1_0::OperandType::TENSOR_QUANT8_ASYMM:
Derek Lamberti1a38cda2020-01-10 17:28:20 +000091 type = armnn::DataType::QAsymmU8;
telsoa015307bc12018-03-09 13:51:08 +000092 break;
Matthew Bentham912b3622019-05-03 15:49:14 +010093 case V1_0::OperandType::TENSOR_INT32:
telsoa015307bc12018-03-09 13:51:08 +000094 type = armnn::DataType::Signed32;
95 break;
96 default:
Mike Kellyb5fdf382019-06-11 16:35:25 +010097 throw UnsupportedOperand<V1_0::OperandType>(operand.type);
telsoa015307bc12018-03-09 13:51:08 +000098 }
99
100 armnn::TensorInfo ret(operand.dimensions.size(), operand.dimensions.data(), type);
101
102 ret.SetQuantizationScale(operand.scale);
103 ret.SetQuantizationOffset(operand.zeroPoint);
104
105 return ret;
106}
107
Kevin May42477c12020-03-26 13:34:14 +0000108#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)// Using ::android::hardware::neuralnetworks::V1_2
Mike Kellyb5fdf382019-06-11 16:35:25 +0100109
/// Converts an NN HAL V1_2 operand into an armnn::TensorInfo, handling both
/// per-tensor and per-channel (per-axis) quantization parameters.
/// @throws UnsupportedOperand<V1_2::OperandType> if the operand type has no
///         Arm NN equivalent.
armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand)
{
    using namespace armnn;
    bool perChannel = false;

    // Map the HAL operand type onto the corresponding Arm NN data type.
    DataType type;
    switch (operand.type)
    {
        case V1_2::OperandType::TENSOR_BOOL8:
            type = armnn::DataType::Boolean;
            break;
        case V1_2::OperandType::TENSOR_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case V1_2::OperandType::TENSOR_FLOAT16:
            type = armnn::DataType::Float16;
            break;
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
            type = armnn::DataType::QAsymmU8;
            break;
        case V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
            // Per-channel quantization: remember it and fall through to pick
            // the same element type as the per-tensor symmetric case.
            perChannel=true;
            ARMNN_FALLTHROUGH;
        case V1_2::OperandType::TENSOR_QUANT8_SYMM:
            type = armnn::DataType::QSymmS8;
            break;
        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
            type = armnn::DataType::QSymmS16;
            break;
        case V1_2::OperandType::TENSOR_INT32:
            type = armnn::DataType::Signed32;
            break;
        default:
            throw UnsupportedOperand<V1_2::OperandType>(operand.type);
    }

    TensorInfo ret(operand.dimensions.size(), operand.dimensions.data(), type);
    if (perChannel)
    {
        // ExtraParams is expected to be of type channelQuant
        ARMNN_ASSERT(operand.extraParams.getDiscriminator() ==
                     V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant);

        auto perAxisQuantParams = operand.extraParams.channelQuant();

        // One scale per slice along 'channelDim'; no zero point is set for
        // symmetric per-channel quantization.
        ret.SetQuantizationScales(perAxisQuantParams.scales);
        ret.SetQuantizationDim(MakeOptional<unsigned int>(perAxisQuantParams.channelDim));
    }
    else
    {
        ret.SetQuantizationScale(operand.scale);
        ret.SetQuantizationOffset(operand.zeroPoint);
    }

    return ret;
}
166
167#endif
168
Kevin May42477c12020-03-26 13:34:14 +0000169#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3
170
/// Converts an NN HAL V1_3 operand into an armnn::TensorInfo. Handles
/// per-tensor quantization, per-channel (per-axis) quantization, and scalar
/// (zero-dimensional) INT32 operands.
/// @throws UnsupportedOperand<V1_3::OperandType> if the operand type has no
///         Arm NN equivalent.
armnn::TensorInfo GetTensorInfoForOperand(const V1_3::Operand& operand)
{
    using namespace armnn;
    bool perChannel = false;
    bool isScalar = false;

    // Map the HAL operand type onto the corresponding Arm NN data type.
    DataType type;
    switch (operand.type)
    {
        case V1_3::OperandType::TENSOR_BOOL8:
            type = armnn::DataType::Boolean;
            break;
        case V1_3::OperandType::TENSOR_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case V1_3::OperandType::TENSOR_FLOAT16:
            type = armnn::DataType::Float16;
            break;
        case V1_3::OperandType::TENSOR_QUANT8_ASYMM:
            type = armnn::DataType::QAsymmU8;
            break;
        case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
            // Per-channel quantization: remember it and fall through to pick
            // the same element type as the per-tensor symmetric case.
            perChannel=true;
            ARMNN_FALLTHROUGH;
        case V1_3::OperandType::TENSOR_QUANT8_SYMM:
            type = armnn::DataType::QSymmS8;
            break;
        case V1_3::OperandType::TENSOR_QUANT16_SYMM:
            type = armnn::DataType::QSymmS16;
            break;
        case V1_3::OperandType::TENSOR_INT32:
            type = armnn::DataType::Signed32;
            break;
        case V1_3::OperandType::INT32:
            // Plain (non-tensor) INT32 becomes a scalar TensorInfo below.
            type = armnn::DataType::Signed32;
            isScalar = true;
            break;
        case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
            type = armnn::DataType::QAsymmS8;
            break;
        default:
            throw UnsupportedOperand<V1_3::OperandType>(operand.type);
    }

    TensorInfo ret;
    if (isScalar)
    {
        // Scalars have no dimensions; use the dedicated Scalar dimensionality.
        ret = TensorInfo(TensorShape(armnn::Dimensionality::Scalar), type);
    }
    else
    {
        ret = TensorInfo(operand.dimensions.size(), operand.dimensions.data(), type);
    }

    if (perChannel)
    {
        // ExtraParams is expected to be of type channelQuant
        // NOTE(review): the V1_2 ExtraParams discriminator is used for a V1_3
        // operand here - presumably V1_3 reuses the V1_2 union type; confirm
        // against the V1_3 HAL type definitions.
        ARMNN_ASSERT(operand.extraParams.getDiscriminator() ==
                     V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant);

        auto perAxisQuantParams = operand.extraParams.channelQuant();

        // One scale per slice along 'channelDim'; no zero point is set for
        // symmetric per-channel quantization.
        ret.SetQuantizationScales(perAxisQuantParams.scales);
        ret.SetQuantizationDim(MakeOptional<unsigned int>(perAxisQuantParams.channelDim));
    }
    else
    {
        ret.SetQuantizationScale(operand.scale);
        ret.SetQuantizationOffset(operand.zeroPoint);
    }
    return ret;
}
243
244#endif
245
Matthew Bentham912b3622019-05-03 15:49:14 +0100246std::string GetOperandSummary(const V1_0::Operand& operand)
telsoa015307bc12018-03-09 13:51:08 +0000247{
248 return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " +
249 toString(operand.type);
250}
251
Kevin May42477c12020-03-26 13:34:14 +0000252#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3) // Using ::android::hardware::neuralnetworks::V1_2
Mike Kellyb5fdf382019-06-11 16:35:25 +0100253
254std::string GetOperandSummary(const V1_2::Operand& operand)
255{
256 return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " +
257 toString(operand.type);
258}
259
260#endif
261
Kevin May42477c12020-03-26 13:34:14 +0000262#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3
263
264std::string GetOperandSummary(const V1_3::Operand& operand)
265{
266 return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " +
267 toString(operand.type);
268}
269
270#endif
271
telsoa015307bc12018-03-09 13:51:08 +0000272using DumpElementFunction = void (*)(const armnn::ConstTensor& tensor,
273 unsigned int elementIndex,
274 std::ofstream& fileStream);
275
276namespace
277{
278template <typename ElementType, typename PrintableType = ElementType>
279void DumpTensorElement(const armnn::ConstTensor& tensor, unsigned int elementIndex, std::ofstream& fileStream)
280{
281 const ElementType* elements = reinterpret_cast<const ElementType*>(tensor.GetMemoryArea());
282 fileStream << static_cast<PrintableType>(elements[elementIndex]) << ",";
283}
284
285constexpr const char* MemoryLayoutString(const armnn::ConstTensor& tensor)
286{
287 const char* str = "";
288
289 switch (tensor.GetNumDimensions())
290 {
291 case 4: { str = "(BHWC) "; break; }
292 case 3: { str = "(HWC) "; break; }
293 case 2: { str = "(HW) "; break; }
294 default: { str = ""; break; }
295 }
296
297 return str;
298}
299} // namespace
300
301void DumpTensor(const std::string& dumpDir,
302 const std::string& requestName,
303 const std::string& tensorName,
304 const armnn::ConstTensor& tensor)
305{
306 // The dump directory must exist in advance.
307 const std::string fileName = boost::str(boost::format("%1%/%2%_%3%.dump") % dumpDir % requestName % tensorName);
308
309 std::ofstream fileStream;
310 fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);
311
312 if (!fileStream.good())
313 {
314 ALOGW("Could not open file %s for writing", fileName.c_str());
315 return;
316 }
317
318 DumpElementFunction dumpElementFunction = nullptr;
319
320 switch (tensor.GetDataType())
321 {
322 case armnn::DataType::Float32:
323 {
324 dumpElementFunction = &DumpTensorElement<float>;
325 break;
326 }
Derek Lamberti1a38cda2020-01-10 17:28:20 +0000327 case armnn::DataType::QAsymmU8:
telsoa015307bc12018-03-09 13:51:08 +0000328 {
329 dumpElementFunction = &DumpTensorElement<uint8_t, uint32_t>;
330 break;
331 }
332 case armnn::DataType::Signed32:
333 {
334 dumpElementFunction = &DumpTensorElement<int32_t>;
335 break;
336 }
Jim Flynnf2e175c2019-12-12 15:11:30 +0000337 case armnn::DataType::Float16:
338 {
339 dumpElementFunction = &DumpTensorElement<armnn::Half>;
340 break;
341 }
Teresa Charlinb248ec12020-04-30 11:06:34 +0100342 case armnn::DataType::QAsymmS8:
343 {
344 dumpElementFunction = &DumpTensorElement<int8_t, int32_t>;
345 break;
346 }
347 case armnn::DataType::Boolean:
348 {
349 dumpElementFunction = &DumpTensorElement<bool>;
350 break;
351 }
telsoa015307bc12018-03-09 13:51:08 +0000352 default:
353 {
354 dumpElementFunction = nullptr;
355 }
356 }
357
358 if (dumpElementFunction != nullptr)
359 {
360 const unsigned int numDimensions = tensor.GetNumDimensions();
361
362 const unsigned int batch = (numDimensions == 4) ? tensor.GetShape()[numDimensions - 4] : 1;
363
364 const unsigned int height = (numDimensions >= 3)
365 ? tensor.GetShape()[numDimensions - 3]
366 : (numDimensions >= 2) ? tensor.GetShape()[numDimensions - 2] : 1;
367
368 const unsigned int width = (numDimensions >= 3)
369 ? tensor.GetShape()[numDimensions - 2]
370 : (numDimensions >= 1) ? tensor.GetShape()[numDimensions - 1] : 0;
371
372 const unsigned int channels = (numDimensions >= 3) ? tensor.GetShape()[numDimensions - 1] : 1;
373
374 fileStream << "# Number of elements " << tensor.GetNumElements() << std::endl;
375 fileStream << "# Dimensions " << MemoryLayoutString(tensor);
376 fileStream << "[" << tensor.GetShape()[0];
377 for (unsigned int d = 1; d < numDimensions; d++)
378 {
379 fileStream << "," << tensor.GetShape()[d];
380 }
381 fileStream << "]" << std::endl;
382
383 for (unsigned int e = 0, b = 0; b < batch; ++b)
384 {
385 if (numDimensions >= 4)
386 {
387 fileStream << "# Batch " << b << std::endl;
388 }
389 for (unsigned int c = 0; c < channels; c++)
390 {
391 if (numDimensions >= 3)
392 {
393 fileStream << "# Channel " << c << std::endl;
394 }
395 for (unsigned int h = 0; h < height; h++)
396 {
397 for (unsigned int w = 0; w < width; w++, e += channels)
398 {
399 (*dumpElementFunction)(tensor, e, fileStream);
400 }
401 fileStream << std::endl;
402 }
403 e -= channels - 1;
404 if (c < channels)
405 {
406 e -= ((height * width) - 1) * channels;
407 }
408 }
409 fileStream << std::endl;
410 }
411 fileStream << std::endl;
412 }
413 else
414 {
415 fileStream << "Cannot dump tensor elements: Unsupported data type "
416 << static_cast<unsigned int>(tensor.GetDataType()) << std::endl;
417 }
418
419 if (!fileStream.good())
420 {
421 ALOGW("An error occurred when writing to file %s", fileName.c_str());
422 }
423}
424
telsoa01ce3e84a2018-08-31 09:31:35 +0100425void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
426 const std::string& dumpDir,
427 armnn::NetworkId networkId,
428 const armnn::IProfiler* profiler)
429{
430 // Check if profiling is required.
431 if (!gpuProfilingEnabled)
432 {
433 return;
434 }
435
436 // The dump directory must exist in advance.
437 if (dumpDir.empty())
438 {
439 return;
440 }
441
Narumol Prangnawarat4d07e5e2020-04-06 16:46:21 +0100442 ARMNN_ASSERT(profiler);
telsoa01ce3e84a2018-08-31 09:31:35 +0100443
444 // Set the name of the output profiling file.
445 const std::string fileName = boost::str(boost::format("%1%/%2%_%3%.json")
446 % dumpDir
447 % std::to_string(networkId)
448 % "profiling");
449
450 // Open the ouput file for writing.
451 std::ofstream fileStream;
452 fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);
453
454 if (!fileStream.good())
455 {
456 ALOGW("Could not open file %s for writing", fileName.c_str());
457 return;
458 }
459
460 // Write the profiling info to a JSON file.
461 profiler->Print(fileStream);
462}
463
Jim Flynn829ad302019-12-13 14:43:24 +0000464std::string ExportNetworkGraphToDotFile(const armnn::IOptimizedNetwork& optimizedNetwork,
465 const std::string& dumpDir)
466{
467 std::string fileName;
468 // The dump directory must exist in advance.
469 if (dumpDir.empty())
470 {
471 return fileName;
472 }
473
474 std::string timestamp = GetFileTimestamp();
475 if (timestamp.empty())
476 {
477 return fileName;
478 }
479
480 // Set the name of the output .dot file.
481 fileName = boost::str(boost::format("%1%/%2%_networkgraph.dot")
482 % dumpDir
483 % timestamp);
484
485 ALOGV("Exporting the optimized network graph to file: %s", fileName.c_str());
486
487 // Write the network graph to a dot file.
488 std::ofstream fileStream;
489 fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);
490
491 if (!fileStream.good())
492 {
493 ALOGW("Could not open file %s for writing", fileName.c_str());
494 return fileName;
495 }
496
497 if (optimizedNetwork.SerializeToDot(fileStream) != armnn::Status::Success)
498 {
499 ALOGW("An error occurred when writing to file %s", fileName.c_str());
500 }
501 return fileName;
502}
503
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100504bool IsDynamicTensor(const armnn::TensorInfo& outputInfo)
505{
506 // Dynamic tensors have at least one 0-sized dimension
507 return outputInfo.GetNumElements() == 0u;
508}
509
Jim Flynn829ad302019-12-13 14:43:24 +0000510std::string GetFileTimestamp()
511{
512 // used to get a timestamp to name diagnostic files (the ArmNN serialized graph
513 // and getSupportedOperations.txt files)
514 timespec ts;
515 int iRet = clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
516 std::stringstream ss;
517 if (iRet == 0)
518 {
519 ss << std::to_string(ts.tv_sec) << "_" << std::to_string(ts.tv_nsec);
520 }
521 else
522 {
523 ALOGW("clock_gettime failed with errno %s : %s", std::to_string(errno).c_str(), std::strerror(errno));
524 }
525 return ss.str();
526}
527
528void RenameGraphDotFile(const std::string& oldName, const std::string& dumpDir, const armnn::NetworkId networkId)
529{
530 if (dumpDir.empty())
531 {
532 return;
533 }
534 if (oldName.empty())
535 {
536 return;
537 }
538 const std::string newFileName = boost::str(boost::format("%1%/%2%_networkgraph.dot")
539 % dumpDir
540 % std::to_string(networkId));
541 int iRet = rename(oldName.c_str(), newFileName.c_str());
542 if (iRet != 0)
543 {
544 std::stringstream ss;
545 ss << "rename of [" << oldName << "] to [" << newFileName << "] failed with errno " << std::to_string(errno)
546 << " : " << std::strerror(errno);
547 ALOGW(ss.str().c_str());
548 }
549}
550
/// Flushes every run-time memory pool back to its backing shared memory so
/// that data written during execution becomes visible to the client.
///
/// @param memPools Pools used during execution; an empty vector is a no-op.
void CommitPools(std::vector<::android::nn::RunTimePoolInfo>& memPools)
{
    if (memPools.empty())
    {
        return;
    }
    // Commit output buffers.
    // Note that we update *all* pools, even if they aren't actually used as outputs -
    // this is simpler and is what the CpuExecutor does.
    for (auto& pool : memPools)
    {
        // Type android::nn::RunTimePoolInfo has changed between Android P & Q and Android R, where
        // update() has been removed and flush() added.
#if defined(ARMNN_ANDROID_R) // Use the new Android implementation.
        pool.flush();
#else
        pool.update();
#endif
    }
}
571
Jim Flynn829ad302019-12-13 14:43:24 +0000572
573
telsoa015307bc12018-03-09 13:51:08 +0000574} // namespace armnn_driver