blob: aeee800bc6da8e26429846b2ebef3449b5a7181b [file] [log] [blame]
telsoa015307bc12018-03-09 13:51:08 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beck93e48982018-09-05 13:05:09 +01003// SPDX-License-Identifier: MIT
telsoa015307bc12018-03-09 13:51:08 +00004//
5
6#define LOG_TAG "ArmnnDriver"
7
8#include "Utils.hpp"
Jim Flynnf2e175c2019-12-12 15:11:30 +00009#include "Half.hpp"
telsoa015307bc12018-03-09 13:51:08 +000010
Matteo Martincigh00d6ed12019-11-28 17:13:24 +000011#include <armnnUtils/Permute.hpp>
12
Derek Lambertid00ad912020-01-22 15:55:16 +000013#include <armnn/Utils.hpp>
14
#include <cassert>
#include <cerrno>
#include <cinttypes>
#include <cstdio>
#include <cstring>
#include <sstream>

#include <time.h>
21
22
telsoa015307bc12018-03-09 13:51:08 +000023
24using namespace android;
telsoa01ce3e84a2018-08-31 09:31:35 +010025using namespace android::hardware;
telsoa015307bc12018-03-09 13:51:08 +000026using namespace android::hidl::memory::V1_0;
27
28namespace armnn_driver
29{
// Empty permutation vector: passed to the swizzling helpers when the tensor
// data should be copied without any dimension reordering.
const armnn::PermutationVector g_DontPermute{};
31
32namespace
33{
34
telsoa015307bc12018-03-09 13:51:08 +000035void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorShape& inTensorShape, const void* input,
Matteo Martincighbf19d2a2019-11-29 11:46:50 +000036 void* output, size_t dataTypeSize, const armnn::PermutationVector& mappings)
telsoa015307bc12018-03-09 13:51:08 +000037{
Matteo Martincighbf19d2a2019-11-29 11:46:50 +000038 assert(inTensorShape.GetNumDimensions() == 4U);
telsoa015307bc12018-03-09 13:51:08 +000039
Matteo Martincighbf19d2a2019-11-29 11:46:50 +000040 armnnUtils::Permute(armnnUtils::Permuted(inTensorShape, mappings), mappings, input, output, dataTypeSize);
telsoa015307bc12018-03-09 13:51:08 +000041}
42
43} // anonymous namespace
44
45void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorInfo& tensor, const void* input, void* output,
46 const armnn::PermutationVector& mappings)
47{
48 assert(tensor.GetNumDimensions() == 4U);
49
Matteo Martincighbf19d2a2019-11-29 11:46:50 +000050 armnn::DataType dataType = tensor.GetDataType();
51 switch (dataType)
telsoa015307bc12018-03-09 13:51:08 +000052 {
Mike Kelly3c673942019-07-25 09:26:06 +010053 case armnn::DataType::Float16:
telsoa015307bc12018-03-09 13:51:08 +000054 case armnn::DataType::Float32:
Derek Lamberti1a38cda2020-01-10 17:28:20 +000055 case armnn::DataType::QAsymmU8:
Derek Lambertid00ad912020-01-22 15:55:16 +000056 case armnn::DataType::QSymmS8:
Matteo Martincighbf19d2a2019-11-29 11:46:50 +000057 SwizzleAndroidNn4dTensorToArmNn(tensor.GetShape(), input, output, armnn::GetDataTypeSize(dataType), mappings);
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +000058 break;
telsoa015307bc12018-03-09 13:51:08 +000059 default:
60 ALOGW("Unknown armnn::DataType for swizzling");
61 assert(0);
62 }
63}
64
65void* GetMemoryFromPool(DataLocation location, const std::vector<android::nn::RunTimePoolInfo>& memPools)
66{
67 // find the location within the pool
68 assert(location.poolIndex < memPools.size());
69
surmeh01deb3bdb2018-07-05 12:06:04 +010070 const android::nn::RunTimePoolInfo& memPool = memPools[location.poolIndex];
71
surmeh01deb3bdb2018-07-05 12:06:04 +010072 uint8_t* memPoolBuffer = memPool.getBuffer();
surmeh01deb3bdb2018-07-05 12:06:04 +010073
74 uint8_t* memory = memPoolBuffer + location.offset;
telsoa015307bc12018-03-09 13:51:08 +000075
76 return memory;
77}
78
Matthew Bentham912b3622019-05-03 15:49:14 +010079armnn::TensorInfo GetTensorInfoForOperand(const V1_0::Operand& operand)
telsoa015307bc12018-03-09 13:51:08 +000080{
81 armnn::DataType type;
82
83 switch (operand.type)
84 {
Matthew Bentham912b3622019-05-03 15:49:14 +010085 case V1_0::OperandType::TENSOR_FLOAT32:
telsoa015307bc12018-03-09 13:51:08 +000086 type = armnn::DataType::Float32;
87 break;
Matthew Bentham912b3622019-05-03 15:49:14 +010088 case V1_0::OperandType::TENSOR_QUANT8_ASYMM:
Derek Lamberti1a38cda2020-01-10 17:28:20 +000089 type = armnn::DataType::QAsymmU8;
telsoa015307bc12018-03-09 13:51:08 +000090 break;
Matthew Bentham912b3622019-05-03 15:49:14 +010091 case V1_0::OperandType::TENSOR_INT32:
telsoa015307bc12018-03-09 13:51:08 +000092 type = armnn::DataType::Signed32;
93 break;
94 default:
Mike Kellyb5fdf382019-06-11 16:35:25 +010095 throw UnsupportedOperand<V1_0::OperandType>(operand.type);
telsoa015307bc12018-03-09 13:51:08 +000096 }
97
98 armnn::TensorInfo ret(operand.dimensions.size(), operand.dimensions.data(), type);
99
100 ret.SetQuantizationScale(operand.scale);
101 ret.SetQuantizationOffset(operand.zeroPoint);
102
103 return ret;
104}
105
Kevin May42477c12020-03-26 13:34:14 +0000106#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3)// Using ::android::hardware::neuralnetworks::V1_2
Mike Kellyb5fdf382019-06-11 16:35:25 +0100107
108armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand)
109{
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000110 using namespace armnn;
Derek Lambertid00ad912020-01-22 15:55:16 +0000111 bool perChannel = false;
Mike Kellyb5fdf382019-06-11 16:35:25 +0100112
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000113 DataType type;
Mike Kellyb5fdf382019-06-11 16:35:25 +0100114 switch (operand.type)
115 {
Sadik Armagan793a70c2020-03-19 13:54:04 +0000116 case V1_2::OperandType::TENSOR_BOOL8:
117 type = armnn::DataType::Boolean;
118 break;
Mike Kellyb5fdf382019-06-11 16:35:25 +0100119 case V1_2::OperandType::TENSOR_FLOAT32:
120 type = armnn::DataType::Float32;
121 break;
Mike Kelly3c673942019-07-25 09:26:06 +0100122 case V1_2::OperandType::TENSOR_FLOAT16:
123 type = armnn::DataType::Float16;
124 break;
Mike Kellyb5fdf382019-06-11 16:35:25 +0100125 case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
Derek Lamberti1a38cda2020-01-10 17:28:20 +0000126 type = armnn::DataType::QAsymmU8;
Mike Kellyb5fdf382019-06-11 16:35:25 +0100127 break;
Derek Lambertid00ad912020-01-22 15:55:16 +0000128 case V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
129 perChannel=true;
130 ARMNN_FALLTHROUGH;
Mike Kelly0e2e31b2019-11-19 09:16:00 +0000131 case V1_2::OperandType::TENSOR_QUANT8_SYMM:
FinnWilliamsArm624fe9f2019-12-06 17:12:42 +0000132 type = armnn::DataType::QSymmS8;
Mike Kelly0e2e31b2019-11-19 09:16:00 +0000133 break;
Mike Kellyb5fdf382019-06-11 16:35:25 +0100134 case V1_2::OperandType::TENSOR_QUANT16_SYMM:
Derek Lamberti1a38cda2020-01-10 17:28:20 +0000135 type = armnn::DataType::QSymmS16;
Mike Kellyb5fdf382019-06-11 16:35:25 +0100136 break;
137 case V1_2::OperandType::TENSOR_INT32:
138 type = armnn::DataType::Signed32;
139 break;
140 default:
141 throw UnsupportedOperand<V1_2::OperandType>(operand.type);
142 }
143
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000144 TensorInfo ret(operand.dimensions.size(), operand.dimensions.data(), type);
Derek Lambertid00ad912020-01-22 15:55:16 +0000145 if (perChannel)
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000146 {
147 // ExtraParams is expected to be of type channelQuant
148 BOOST_ASSERT(operand.extraParams.getDiscriminator() ==
149 V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant);
Mike Kellyb5fdf382019-06-11 16:35:25 +0100150
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000151 auto perAxisQuantParams = operand.extraParams.channelQuant();
152
153 ret.SetQuantizationScales(perAxisQuantParams.scales);
154 ret.SetQuantizationDim(MakeOptional<unsigned int>(perAxisQuantParams.channelDim));
155 }
156 else
157 {
158 ret.SetQuantizationScale(operand.scale);
159 ret.SetQuantizationOffset(operand.zeroPoint);
160 }
Mike Kellyb5fdf382019-06-11 16:35:25 +0100161
162 return ret;
163}
164
165#endif
166
Kevin May42477c12020-03-26 13:34:14 +0000167#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3
168
169armnn::TensorInfo GetTensorInfoForOperand(const V1_3::Operand& operand)
170{
171 using namespace armnn;
172 bool perChannel = false;
173
174 DataType type;
175 switch (operand.type)
176 {
Sadik Armagan51ba2c62020-03-31 15:36:25 +0100177 case V1_3::OperandType::TENSOR_BOOL8:
178 type = armnn::DataType::Boolean;
179 break;
Kevin May42477c12020-03-26 13:34:14 +0000180 case V1_3::OperandType::TENSOR_FLOAT32:
181 type = armnn::DataType::Float32;
182 break;
183 case V1_3::OperandType::TENSOR_FLOAT16:
184 type = armnn::DataType::Float16;
185 break;
186 case V1_3::OperandType::TENSOR_QUANT8_ASYMM:
187 type = armnn::DataType::QAsymmU8;
188 break;
189 case V1_3::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
190 perChannel=true;
191 ARMNN_FALLTHROUGH;
192 case V1_3::OperandType::TENSOR_QUANT8_SYMM:
193 type = armnn::DataType::QSymmS8;
194 break;
195 case V1_3::OperandType::TENSOR_QUANT16_SYMM:
196 type = armnn::DataType::QSymmS16;
197 break;
198 case V1_3::OperandType::TENSOR_INT32:
199 type = armnn::DataType::Signed32;
200 break;
201 case V1_3::OperandType::TENSOR_QUANT8_ASYMM_SIGNED:
202 type = armnn::DataType::QAsymmS8;
203 break;
204 default:
205 throw UnsupportedOperand<V1_3::OperandType>(operand.type);
206 }
207
208 TensorInfo ret(operand.dimensions.size(), operand.dimensions.data(), type);
209 if (perChannel)
210 {
211 // ExtraParams is expected to be of type channelQuant
212 BOOST_ASSERT(operand.extraParams.getDiscriminator() ==
213 V1_3::Operand::ExtraParams::hidl_discriminator::channelQuant);
214
215 auto perAxisQuantParams = operand.extraParams.channelQuant();
216
217 ret.SetQuantizationScales(perAxisQuantParams.scales);
218 ret.SetQuantizationDim(MakeOptional<unsigned int>(perAxisQuantParams.channelDim));
219 }
220 else
221 {
222 ret.SetQuantizationScale(operand.scale);
223 ret.SetQuantizationOffset(operand.zeroPoint);
224 }
225
226 return ret;
227}
228
229#endif
230
Matthew Bentham912b3622019-05-03 15:49:14 +0100231std::string GetOperandSummary(const V1_0::Operand& operand)
telsoa015307bc12018-03-09 13:51:08 +0000232{
233 return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " +
234 toString(operand.type);
235}
236
Kevin May42477c12020-03-26 13:34:14 +0000237#if defined(ARMNN_ANDROID_NN_V1_2) || defined(ARMNN_ANDROID_NN_V1_3) // Using ::android::hardware::neuralnetworks::V1_2
Mike Kellyb5fdf382019-06-11 16:35:25 +0100238
239std::string GetOperandSummary(const V1_2::Operand& operand)
240{
241 return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " +
242 toString(operand.type);
243}
244
245#endif
246
Kevin May42477c12020-03-26 13:34:14 +0000247#ifdef ARMNN_ANDROID_NN_V1_3 // Using ::android::hardware::neuralnetworks::V1_3
248
249std::string GetOperandSummary(const V1_3::Operand& operand)
250{
251 return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " +
252 toString(operand.type);
253}
254
255#endif
256
// Signature of the per-element printer used by DumpTensor: writes the element
// at elementIndex of the given tensor into fileStream.
using DumpElementFunction = void (*)(const armnn::ConstTensor& tensor,
                                     unsigned int elementIndex,
                                     std::ofstream& fileStream);
260
261namespace
262{
263template <typename ElementType, typename PrintableType = ElementType>
264void DumpTensorElement(const armnn::ConstTensor& tensor, unsigned int elementIndex, std::ofstream& fileStream)
265{
266 const ElementType* elements = reinterpret_cast<const ElementType*>(tensor.GetMemoryArea());
267 fileStream << static_cast<PrintableType>(elements[elementIndex]) << ",";
268}
269
270constexpr const char* MemoryLayoutString(const armnn::ConstTensor& tensor)
271{
272 const char* str = "";
273
274 switch (tensor.GetNumDimensions())
275 {
276 case 4: { str = "(BHWC) "; break; }
277 case 3: { str = "(HWC) "; break; }
278 case 2: { str = "(HW) "; break; }
279 default: { str = ""; break; }
280 }
281
282 return str;
283}
284} // namespace
285
286void DumpTensor(const std::string& dumpDir,
287 const std::string& requestName,
288 const std::string& tensorName,
289 const armnn::ConstTensor& tensor)
290{
291 // The dump directory must exist in advance.
292 const std::string fileName = boost::str(boost::format("%1%/%2%_%3%.dump") % dumpDir % requestName % tensorName);
293
294 std::ofstream fileStream;
295 fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);
296
297 if (!fileStream.good())
298 {
299 ALOGW("Could not open file %s for writing", fileName.c_str());
300 return;
301 }
302
303 DumpElementFunction dumpElementFunction = nullptr;
304
305 switch (tensor.GetDataType())
306 {
307 case armnn::DataType::Float32:
308 {
309 dumpElementFunction = &DumpTensorElement<float>;
310 break;
311 }
Derek Lamberti1a38cda2020-01-10 17:28:20 +0000312 case armnn::DataType::QAsymmU8:
telsoa015307bc12018-03-09 13:51:08 +0000313 {
314 dumpElementFunction = &DumpTensorElement<uint8_t, uint32_t>;
315 break;
316 }
317 case armnn::DataType::Signed32:
318 {
319 dumpElementFunction = &DumpTensorElement<int32_t>;
320 break;
321 }
Jim Flynnf2e175c2019-12-12 15:11:30 +0000322 case armnn::DataType::Float16:
323 {
324 dumpElementFunction = &DumpTensorElement<armnn::Half>;
325 break;
326 }
telsoa015307bc12018-03-09 13:51:08 +0000327 default:
328 {
329 dumpElementFunction = nullptr;
330 }
331 }
332
333 if (dumpElementFunction != nullptr)
334 {
335 const unsigned int numDimensions = tensor.GetNumDimensions();
336
337 const unsigned int batch = (numDimensions == 4) ? tensor.GetShape()[numDimensions - 4] : 1;
338
339 const unsigned int height = (numDimensions >= 3)
340 ? tensor.GetShape()[numDimensions - 3]
341 : (numDimensions >= 2) ? tensor.GetShape()[numDimensions - 2] : 1;
342
343 const unsigned int width = (numDimensions >= 3)
344 ? tensor.GetShape()[numDimensions - 2]
345 : (numDimensions >= 1) ? tensor.GetShape()[numDimensions - 1] : 0;
346
347 const unsigned int channels = (numDimensions >= 3) ? tensor.GetShape()[numDimensions - 1] : 1;
348
349 fileStream << "# Number of elements " << tensor.GetNumElements() << std::endl;
350 fileStream << "# Dimensions " << MemoryLayoutString(tensor);
351 fileStream << "[" << tensor.GetShape()[0];
352 for (unsigned int d = 1; d < numDimensions; d++)
353 {
354 fileStream << "," << tensor.GetShape()[d];
355 }
356 fileStream << "]" << std::endl;
357
358 for (unsigned int e = 0, b = 0; b < batch; ++b)
359 {
360 if (numDimensions >= 4)
361 {
362 fileStream << "# Batch " << b << std::endl;
363 }
364 for (unsigned int c = 0; c < channels; c++)
365 {
366 if (numDimensions >= 3)
367 {
368 fileStream << "# Channel " << c << std::endl;
369 }
370 for (unsigned int h = 0; h < height; h++)
371 {
372 for (unsigned int w = 0; w < width; w++, e += channels)
373 {
374 (*dumpElementFunction)(tensor, e, fileStream);
375 }
376 fileStream << std::endl;
377 }
378 e -= channels - 1;
379 if (c < channels)
380 {
381 e -= ((height * width) - 1) * channels;
382 }
383 }
384 fileStream << std::endl;
385 }
386 fileStream << std::endl;
387 }
388 else
389 {
390 fileStream << "Cannot dump tensor elements: Unsupported data type "
391 << static_cast<unsigned int>(tensor.GetDataType()) << std::endl;
392 }
393
394 if (!fileStream.good())
395 {
396 ALOGW("An error occurred when writing to file %s", fileName.c_str());
397 }
398}
399
telsoa01ce3e84a2018-08-31 09:31:35 +0100400void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
401 const std::string& dumpDir,
402 armnn::NetworkId networkId,
403 const armnn::IProfiler* profiler)
404{
405 // Check if profiling is required.
406 if (!gpuProfilingEnabled)
407 {
408 return;
409 }
410
411 // The dump directory must exist in advance.
412 if (dumpDir.empty())
413 {
414 return;
415 }
416
417 BOOST_ASSERT(profiler);
418
419 // Set the name of the output profiling file.
420 const std::string fileName = boost::str(boost::format("%1%/%2%_%3%.json")
421 % dumpDir
422 % std::to_string(networkId)
423 % "profiling");
424
425 // Open the ouput file for writing.
426 std::ofstream fileStream;
427 fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);
428
429 if (!fileStream.good())
430 {
431 ALOGW("Could not open file %s for writing", fileName.c_str());
432 return;
433 }
434
435 // Write the profiling info to a JSON file.
436 profiler->Print(fileStream);
437}
438
Jim Flynn829ad302019-12-13 14:43:24 +0000439std::string ExportNetworkGraphToDotFile(const armnn::IOptimizedNetwork& optimizedNetwork,
440 const std::string& dumpDir)
441{
442 std::string fileName;
443 // The dump directory must exist in advance.
444 if (dumpDir.empty())
445 {
446 return fileName;
447 }
448
449 std::string timestamp = GetFileTimestamp();
450 if (timestamp.empty())
451 {
452 return fileName;
453 }
454
455 // Set the name of the output .dot file.
456 fileName = boost::str(boost::format("%1%/%2%_networkgraph.dot")
457 % dumpDir
458 % timestamp);
459
460 ALOGV("Exporting the optimized network graph to file: %s", fileName.c_str());
461
462 // Write the network graph to a dot file.
463 std::ofstream fileStream;
464 fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);
465
466 if (!fileStream.good())
467 {
468 ALOGW("Could not open file %s for writing", fileName.c_str());
469 return fileName;
470 }
471
472 if (optimizedNetwork.SerializeToDot(fileStream) != armnn::Status::Success)
473 {
474 ALOGW("An error occurred when writing to file %s", fileName.c_str());
475 }
476 return fileName;
477}
478
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100479bool IsDynamicTensor(const armnn::TensorInfo& outputInfo)
480{
481 // Dynamic tensors have at least one 0-sized dimension
482 return outputInfo.GetNumElements() == 0u;
483}
484
Jim Flynn829ad302019-12-13 14:43:24 +0000485std::string GetFileTimestamp()
486{
487 // used to get a timestamp to name diagnostic files (the ArmNN serialized graph
488 // and getSupportedOperations.txt files)
489 timespec ts;
490 int iRet = clock_gettime(CLOCK_MONOTONIC_RAW, &ts);
491 std::stringstream ss;
492 if (iRet == 0)
493 {
494 ss << std::to_string(ts.tv_sec) << "_" << std::to_string(ts.tv_nsec);
495 }
496 else
497 {
498 ALOGW("clock_gettime failed with errno %s : %s", std::to_string(errno).c_str(), std::strerror(errno));
499 }
500 return ss.str();
501}
502
503void RenameGraphDotFile(const std::string& oldName, const std::string& dumpDir, const armnn::NetworkId networkId)
504{
505 if (dumpDir.empty())
506 {
507 return;
508 }
509 if (oldName.empty())
510 {
511 return;
512 }
513 const std::string newFileName = boost::str(boost::format("%1%/%2%_networkgraph.dot")
514 % dumpDir
515 % std::to_string(networkId));
516 int iRet = rename(oldName.c_str(), newFileName.c_str());
517 if (iRet != 0)
518 {
519 std::stringstream ss;
520 ss << "rename of [" << oldName << "] to [" << newFileName << "] failed with errno " << std::to_string(errno)
521 << " : " << std::strerror(errno);
522 ALOGW(ss.str().c_str());
523 }
524}
525
Kevin May42477c12020-03-26 13:34:14 +0000526void CommitPools(std::vector<::android::nn::RunTimePoolInfo>& memPools)
527{
528 if (memPools.empty())
529 {
530 return;
531 }
532 // Commit output buffers.
533 // Note that we update *all* pools, even if they aren't actually used as outputs -
534 // this is simpler and is what the CpuExecutor does.
535 for (auto& pool : memPools)
536 {
537 // Type android::nn::RunTimePoolInfo has changed between Android P & Q and Android R, where
538 // update() has been removed and flush() added.
539#if defined(ARMNN_ANDROID_R) // Use the new Android implementation.
540 pool.flush();
541#else
542 pool.update();
543#endif
544 }
545}
546
Jim Flynn829ad302019-12-13 14:43:24 +0000547
548
telsoa015307bc12018-03-09 13:51:08 +0000549} // namespace armnn_driver