//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#define LOG_TAG "ArmnnDriver"

#include "Utils.hpp"
#include "Half.hpp"

#include <armnnUtils/Permute.hpp>

#include <armnn/Utils.hpp>
#include <armnn/utility/Assert.hpp>

#include <algorithm>
#include <cassert>
#include <cerrno>
#include <cinttypes>
#include <cstdio>
#include <cstring>
#include <fstream>
#include <sstream>
#include <time.h>

using namespace android;
using namespace android::hardware;
using namespace android::hidl::memory::V1_0;

namespace armnn_driver
{
const armnn::PermutationVector g_DontPermute{};

namespace
{

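// Reorders the raw data of a 4D tensor according to 'mappings', writing the permuted
// elements (each 'dataTypeSize' bytes wide) to 'output'.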
void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorShape& inTensorShape, const void* input,
                                     void* output, size_t dataTypeSize, const armnn::PermutationVector& mappings)
{
    assert(inTensorShape.GetNumDimensions() == 4U);

    armnnUtils::Permute(armnnUtils::Permuted(inTensorShape, mappings), mappings, input, output, dataTypeSize);
}

} // anonymous namespace

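// Type-aware overload: looks up the element size for the tensor's data type and forwards to
// the shape-based helper above. Only 4D tensors of the listed data types are supported.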
void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorInfo& tensor, const void* input, void* output,
                                     const armnn::PermutationVector& mappings)
{
    assert(tensor.GetNumDimensions() == 4U);

    armnn::DataType dataType = tensor.GetDataType();
    switch (dataType)
    {
        case armnn::DataType::Float16:
        case armnn::DataType::Float32:
        case armnn::DataType::QAsymmU8:
        case armnn::DataType::QSymmS8:
        case armnn::DataType::QAsymmS8:
            SwizzleAndroidNn4dTensorToArmNn(tensor.GetShape(), input, output, armnn::GetDataTypeSize(dataType), mappings);
            break;
        default:
            ALOGW("Unknown armnn::DataType for swizzling");
            assert(0);
    }
}

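// Resolves a DataLocation (pool index + offset) to a raw pointer inside one of the
// request's runtime memory pools.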
void* GetMemoryFromPool(DataLocation location, const std::vector<android::nn::RunTimePoolInfo>& memPools)
{
    // find the location within the pool
    assert(location.poolIndex < memPools.size());

    const android::nn::RunTimePoolInfo& memPool = memPools[location.poolIndex];

    uint8_t* memPoolBuffer = memPool.getBuffer();

    uint8_t* memory = memPoolBuffer + location.offset;

    return memory;
}

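// Builds an armnn::TensorInfo from a HAL V1_0 operand: maps the operand type to an ArmNN
// data type, marks zero-sized dimensions as unspecified, and copies the quantization
// scale and zero point.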
armnn::TensorInfo GetTensorInfoForOperand(const V1_0::Operand& operand)
{
    using namespace armnn;
    DataType type;

    switch (operand.type)
    {
        case V1_0::OperandType::TENSOR_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case V1_0::OperandType::TENSOR_QUANT8_ASYMM:
            type = armnn::DataType::QAsymmU8;
            break;
        case V1_0::OperandType::TENSOR_INT32:
            type = armnn::DataType::Signed32;
            break;
        default:
            throw UnsupportedOperand<V1_0::OperandType>(operand.type);
    }

    TensorInfo ret;
    if (operand.dimensions.size() == 0)
    {
        TensorShape tensorShape(Dimensionality::NotSpecified);
        ret = TensorInfo(tensorShape, type);
    }
    else
    {
        bool dimensionsSpecificity[5] = { true, true, true, true, true };
        int count = 0;
        std::for_each(operand.dimensions.data(),
                      operand.dimensions.data() + operand.dimensions.size(),
                      [&](const unsigned int val)
                      {
                          if (val == 0)
                          {
                              dimensionsSpecificity[count] = false;
                          }
                          count++;
                      });

        TensorShape tensorShape(operand.dimensions.size(), operand.dimensions.data(), dimensionsSpecificity);
        ret = TensorInfo(tensorShape, type);
    }

    ret.SetQuantizationScale(operand.scale);
    ret.SetQuantizationOffset(operand.zeroPoint);

    return ret;
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3) // Using ::android::hardware::neuralnetworks::V1_2

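// V1_2 overload: additionally recognises Boolean, Float16, QSymmS16 and symmetric signed
// 8-bit types, and extracts per-channel quantization parameters for
// TENSOR_QUANT8_SYMM_PER_CHANNEL operands.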
armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand)
{
    using namespace armnn;
    bool perChannel = false;

    DataType type;
    switch (operand.type)
    {
        case V1_2::OperandType::TENSOR_BOOL8:
            type = armnn::DataType::Boolean;
            break;
        case V1_2::OperandType::TENSOR_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case V1_2::OperandType::TENSOR_FLOAT16:
            type = armnn::DataType::Float16;
            break;
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
            type = armnn::DataType::QAsymmU8;
            break;
        case V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
            perChannel = true;
            ARMNN_FALLTHROUGH;
        case V1_2::OperandType::TENSOR_QUANT8_SYMM:
            type = armnn::DataType::QSymmS8;
            break;
        case V1_2::OperandType::TENSOR_QUANT16_SYMM:
            type = armnn::DataType::QSymmS16;
            break;
        case V1_2::OperandType::TENSOR_INT32:
            type = armnn::DataType::Signed32;
            break;
        default:
            throw UnsupportedOperand<V1_2::OperandType>(operand.type);
    }

    TensorInfo ret;
    if (operand.dimensions.size() == 0)
    {
        TensorShape tensorShape(Dimensionality::NotSpecified);
        ret = TensorInfo(tensorShape, type);
    }
    else
    {
        bool dimensionsSpecificity[5] = { true, true, true, true, true };
        int count = 0;
        std::for_each(operand.dimensions.data(),
                      operand.dimensions.data() + operand.dimensions.size(),
                      [&](const unsigned int val)
                      {
                          if (val == 0)
                          {
                              dimensionsSpecificity[count] = false;
                          }
                          count++;
                      });

        TensorShape tensorShape(operand.dimensions.size(), operand.dimensions.data(), dimensionsSpecificity);
        ret = TensorInfo(tensorShape, type);
    }

    if (perChannel)
    {
        // ExtraParams is expected to be of type channelQuant
        ARMNN_ASSERT(operand.extraParams.getDiscriminator() ==
                     V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant);

        auto perAxisQuantParams = operand.extraParams.channelQuant();

        ret.SetQuantizationScales(perAxisQuantParams.scales);
        ret.SetQuantizationDim(MakeOptional<unsigned int>(perAxisQuantParams.channelDim));
    }
    else
    {
        ret.SetQuantizationScale(operand.scale);
        ret.SetQuantizationOffset(operand.zeroPoint);
    }

    return ret;
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3

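// V1_3 overload: additionally handles TENSOR_QUANT8_ASYMM_SIGNED tensors and scalar INT32
// operands, which are mapped to a TensorInfo with Scalar dimensionality.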
armnn::TensorInfo GetTensorInfoForOperand(const V1_3::Operand& operand)
{
    using namespace armnn;
    bool perChannel = false;
    bool isScalar = false;

    DataType type;
    switch (operand.type)
    {
        case V1_3::OperandType::TENSOR_BOOL8:
            type = armnn::DataType::Boolean;
            break;
        case V1_3::OperandType::TENSOR_FLOAT32:
            type = armnn::DataType::Float32;
            break;
        case V1_3::OperandType::TENSOR_FLOAT16:
            type = armnn::DataType::Float16;
            break;
        case V1_3::OperandType::TENSOR_QUANT8_ASYMM:
            type = armnn::DataType::QAsymmU8;
            break;
        case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
            perChannel = true;
            ARMNN_FALLTHROUGH;
        case V1_3::OperandType::TENSOR_QUANT8_SYMM:
            type = armnn::DataType::QSymmS8;
            break;
        case V1_3::OperandType::TENSOR_QUANT16_SYMM:
            type = armnn::DataType::QSymmS16;
            break;
        case V1_3::OperandType::TENSOR_INT32:
            type = armnn::DataType::Signed32;
            break;
        case V1_3::OperandType::INT32:
            type = armnn::DataType::Signed32;
            isScalar = true;
            break;
        case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
            type = armnn::DataType::QAsymmS8;
            break;
        default:
            throw UnsupportedOperand<V1_3::OperandType>(operand.type);
    }

    TensorInfo ret;
    if (isScalar)
    {
        ret = TensorInfo(TensorShape(armnn::Dimensionality::Scalar), type);
    }
    else
    {
        if (operand.dimensions.size() == 0)
        {
            TensorShape tensorShape(Dimensionality::NotSpecified);
            ret = TensorInfo(tensorShape, type);
        }
        else
        {
            bool dimensionsSpecificity[5] = { true, true, true, true, true };
            int count = 0;
            std::for_each(operand.dimensions.data(),
                          operand.dimensions.data() + operand.dimensions.size(),
                          [&](const unsigned int val)
                          {
                              if (val == 0)
                              {
                                  dimensionsSpecificity[count] = false;
                              }
                              count++;
                          });

            TensorShape tensorShape(operand.dimensions.size(), operand.dimensions.data(), dimensionsSpecificity);
            ret = TensorInfo(tensorShape, type);
        }
    }

    if (perChannel)
    {
        // ExtraParams is expected to be of type channelQuant
        ARMNN_ASSERT(operand.extraParams.getDiscriminator() ==
                     V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant);

        auto perAxisQuantParams = operand.extraParams.channelQuant();

        ret.SetQuantizationScales(perAxisQuantParams.scales);
        ret.SetQuantizationDim(MakeOptional<unsigned int>(perAxisQuantParams.channelDim));
    }
    else
    {
        ret.SetQuantizationScale(operand.scale);
        ret.SetQuantizationOffset(operand.zeroPoint);
    }
    return ret;
}

#endif

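// Returns a short "dimensions type" summary of an operand, used in log messages.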
std::string GetOperandSummary(const V1_0::Operand& operand)
{
    return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " +
           toString(operand.type);
}

#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3) // Using ::android::hardware::neuralnetworks::V1_2

std::string GetOperandSummary(const V1_2::Operand& operand)
{
    return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " +
           toString(operand.type);
}

#endif

#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3

std::string GetOperandSummary(const V1_3::Operand& operand)
{
    return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " +
           toString(operand.type);
}

#endif

using DumpElementFunction = void (*)(const armnn::ConstTensor& tensor,
                                     unsigned int elementIndex,
                                     std::ofstream& fileStream);

namespace
{
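// Writes the element at flat index 'elementIndex' to 'fileStream', cast to PrintableType so
// that narrow integer types are printed as numbers rather than characters.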
template <typename ElementType, typename PrintableType = ElementType>
void DumpTensorElement(const armnn::ConstTensor& tensor, unsigned int elementIndex, std::ofstream& fileStream)
{
    const ElementType* elements = reinterpret_cast<const ElementType*>(tensor.GetMemoryArea());
    fileStream << static_cast<PrintableType>(elements[elementIndex]) << ",";
}

constexpr const char* MemoryLayoutString(const armnn::ConstTensor& tensor)
{
    const char* str = "";

    switch (tensor.GetNumDimensions())
    {
        case 4: { str = "(BHWC) "; break; }
        case 3: { str = "(HWC) "; break; }
        case 2: { str = "(HW) "; break; }
        default: { str = ""; break; }
    }

    return str;
}
} // namespace

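// Dumps the contents of 'tensor' as text to <dumpDir>/<requestName>_<tensorName>.dump,
// interpreting up to four dimensions as batch, height, width and channels.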
void DumpTensor(const std::string& dumpDir,
                const std::string& requestName,
                const std::string& tensorName,
                const armnn::ConstTensor& tensor)
{
    // The dump directory must exist in advance.
    const std::string fileName = boost::str(boost::format("%1%/%2%_%3%.dump") % dumpDir % requestName % tensorName);

    std::ofstream fileStream;
    fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);

    if (!fileStream.good())
    {
        ALOGW("Could not open file %s for writing", fileName.c_str());
        return;
    }

    DumpElementFunction dumpElementFunction = nullptr;

    switch (tensor.GetDataType())
    {
        case armnn::DataType::Float32:
        {
            dumpElementFunction = &DumpTensorElement<float>;
            break;
        }
        case armnn::DataType::QAsymmU8:
        {
            dumpElementFunction = &DumpTensorElement<uint8_t, uint32_t>;
            break;
        }
        case armnn::DataType::Signed32:
        {
            dumpElementFunction = &DumpTensorElement<int32_t>;
            break;
        }
        case armnn::DataType::Float16:
        {
            dumpElementFunction = &DumpTensorElement<armnn::Half>;
            break;
        }
        case armnn::DataType::QAsymmS8:
        {
            dumpElementFunction = &DumpTensorElement<int8_t, int32_t>;
            break;
        }
        case armnn::DataType::Boolean:
        {
            dumpElementFunction = &DumpTensorElement<bool>;
            break;
        }
        default:
        {
            dumpElementFunction = nullptr;
        }
    }

    if (dumpElementFunction != nullptr)
    {
        const unsigned int numDimensions = tensor.GetNumDimensions();

        const unsigned int batch = (numDimensions == 4) ? tensor.GetShape()[numDimensions - 4] : 1;

        const unsigned int height = (numDimensions >= 3)
                                    ? tensor.GetShape()[numDimensions - 3]
                                    : (numDimensions >= 2) ? tensor.GetShape()[numDimensions - 2] : 1;

        const unsigned int width = (numDimensions >= 3)
                                   ? tensor.GetShape()[numDimensions - 2]
                                   : (numDimensions >= 1) ? tensor.GetShape()[numDimensions - 1] : 0;

        const unsigned int channels = (numDimensions >= 3) ? tensor.GetShape()[numDimensions - 1] : 1;

        fileStream << "# Number of elements " << tensor.GetNumElements() << std::endl;
        fileStream << "# Dimensions " << MemoryLayoutString(tensor);
        fileStream << "[" << tensor.GetShape()[0];
        for (unsigned int d = 1; d < numDimensions; d++)
        {
            fileStream << "," << tensor.GetShape()[d];
        }
        fileStream << "]" << std::endl;

        for (unsigned int e = 0, b = 0; b < batch; ++b)
        {
            if (numDimensions >= 4)
            {
                fileStream << "# Batch " << b << std::endl;
            }
            for (unsigned int c = 0; c < channels; c++)
            {
                if (numDimensions >= 3)
                {
                    fileStream << "# Channel " << c << std::endl;
                }
                for (unsigned int h = 0; h < height; h++)
                {
                    for (unsigned int w = 0; w < width; w++, e += channels)
                    {
                        (*dumpElementFunction)(tensor, e, fileStream);
                    }
                    fileStream << std::endl;
                }
                // Rewind 'e' to the first element of the next channel within this batch.
                // On the last channel, skip the rewind so that 'e' lands at the start of the
                // next batch (the original always-true 'c < channels' check broke multi-batch dumps).
                e -= channels - 1;
                if (c < channels - 1)
                {
                    e -= ((height * width) - 1) * channels;
                }
            }
            fileStream << std::endl;
        }
        fileStream << std::endl;
    }
    else
    {
        fileStream << "Cannot dump tensor elements: Unsupported data type "
                   << static_cast<unsigned int>(tensor.GetDataType()) << std::endl;
    }

    if (!fileStream.good())
    {
        ALOGW("An error occurred when writing to file %s", fileName.c_str());
    }
}

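// Writes the collected profiling events for 'networkId' to <dumpDir>/<networkId>_profiling.json,
// but only when GPU profiling is enabled and a dump directory has been configured.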
void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
                                 const std::string& dumpDir,
                                 armnn::NetworkId networkId,
                                 const armnn::IProfiler* profiler)
{
    // Check if profiling is required.
    if (!gpuProfilingEnabled)
    {
        return;
    }

    // The dump directory must exist in advance.
    if (dumpDir.empty())
    {
        return;
    }

    ARMNN_ASSERT(profiler);

    // Set the name of the output profiling file.
    const std::string fileName = boost::str(boost::format("%1%/%2%_%3%.json")
                                            % dumpDir
                                            % std::to_string(networkId)
                                            % "profiling");

    // Open the output file for writing.
    std::ofstream fileStream;
    fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);

    if (!fileStream.good())
    {
        ALOGW("Could not open file %s for writing", fileName.c_str());
        return;
    }

    // Write the profiling info to a JSON file.
    profiler->Print(fileStream);
}

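// Serializes the optimized network to <dumpDir>/<timestamp>_networkgraph.dot and returns the
// chosen file name; returns an empty string if no dump directory or timestamp is available.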
std::string ExportNetworkGraphToDotFile(const armnn::IOptimizedNetwork& optimizedNetwork,
                                        const std::string& dumpDir)
{
    std::string fileName;
    // The dump directory must exist in advance.
    if (dumpDir.empty())
    {
        return fileName;
    }

    std::string timestamp = GetFileTimestamp();
    if (timestamp.empty())
    {
        return fileName;
    }

    // Set the name of the output .dot file.
    fileName = boost::str(boost::format("%1%/%2%_networkgraph.dot")
                          % dumpDir
                          % timestamp);

    ALOGV("Exporting the optimized network graph to file: %s", fileName.c_str());

    // Write the network graph to a dot file.
    std::ofstream fileStream;
    fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);

    if (!fileStream.good())
    {
        ALOGW("Could not open file %s for writing", fileName.c_str());
        return fileName;
    }

    if (optimizedNetwork.SerializeToDot(fileStream) != armnn::Status::Success)
    {
        ALOGW("An error occurred when writing to file %s", fileName.c_str());
    }
    return fileName;
}

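// A tensor is treated as dynamic if its dimensionality is not specified, it has no dimensions
// at all (default-constructed TensorShape), or any individual dimension is unspecified.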
bool IsDynamicTensor(const armnn::TensorInfo& tensorInfo)
{
    if (tensorInfo.GetShape().GetDimensionality() == armnn::Dimensionality::NotSpecified)
    {
        return true;
    }
    // Account for the usage of the TensorShape empty constructor
    if (tensorInfo.GetNumDimensions() == 0)
    {
        return true;
    }
    return !tensorInfo.GetShape().AreAllDimensionsSpecified();
}

bool AreDynamicTensorsSupported()
{
#if defined(ARMNN_ANDROID_NN_V1_3)
    return true;
#else
    return false;
#endif
}

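// Returns a "<seconds>_<nanoseconds>" timestamp, or an empty string if clock_gettime fails.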
std::string GetFileTimestamp()
{
    // used to get a timestamp to name diagnostic files (the ArmNN serialized graph
    // and getSupportedOperations.txt files)
    timespec ts;
    int iRet = clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
    std::stringstream ss;
    if (iRet == 0)
    {
        ss << std::to_string(ts.tv_sec) << "_" << std::to_string(ts.tv_nsec);
    }
    else
    {
        ALOGW("clock_gettime failed with errno %s : %s", std::to_string(errno).c_str(), std::strerror(errno));
    }
    return ss.str();
}

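// Renames the timestamped .dot graph file to <dumpDir>/<networkId>_networkgraph.dot once the
// network id that the graph belongs to is known.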
void RenameGraphDotFile(const std::string& oldName, const std::string& dumpDir, const armnn::NetworkId networkId)
{
    if (dumpDir.empty())
    {
        return;
    }
    if (oldName.empty())
    {
        return;
    }
    const std::string newFileName = boost::str(boost::format("%1%/%2%_networkgraph.dot")
                                               % dumpDir
                                               % std::to_string(networkId));
    int iRet = rename(oldName.c_str(), newFileName.c_str());
    if (iRet != 0)
    {
        std::stringstream ss;
        ss << "rename of [" << oldName << "] to [" << newFileName << "] failed with errno " << std::to_string(errno)
           << " : " << std::strerror(errno);
        // Pass the message as an argument rather than as the format string.
        ALOGW("%s", ss.str().c_str());
    }
}

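// Commits all runtime memory pools after execution so that outputs written by the driver
// become visible to the client.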
void CommitPools(std::vector<::android::nn::RunTimePoolInfo>& memPools)
{
    if (memPools.empty())
    {
        return;
    }
    // Commit output buffers.
    // Note that we update *all* pools, even if they aren't actually used as outputs -
    // this is simpler and is what the CpuExecutor does.
    for (auto& pool : memPools)
    {
        // Type android::nn::RunTimePoolInfo has changed between Android P & Q and Android R, where
        // update() has been removed and flush() added.
#if defined(ARMNN_ANDROID_R) // Use the new Android implementation.
        pool.flush();
#else
        pool.update();
#endif
    }
}
} // namespace armnn_driver