telsoa01 | 5307bc1 | 2018-03-09 13:51:08 +0000 | [diff] [blame] | 1 | // |
| 2 | // Copyright © 2017 Arm Ltd. All rights reserved. |
David Beck | 93e4898 | 2018-09-05 13:05:09 +0100 | [diff] [blame] | 3 | // SPDX-License-Identifier: MIT |
telsoa01 | 5307bc1 | 2018-03-09 13:51:08 +0000 | [diff] [blame] | 4 | // |
| 5 | |
| 6 | #define LOG_TAG "ArmnnDriver" |
| 7 | |
#include "Utils.hpp"

#include <Half.hpp>
#include <Permute.hpp>

#include <cassert>
#include <cerrno>
#include <cinttypes>
#include <cstdio>
#include <cstring>
#include <sstream>
#include <time.h>
| 19 | |
| 20 | |
telsoa01 | 5307bc1 | 2018-03-09 13:51:08 +0000 | [diff] [blame] | 21 | |
| 22 | using namespace android; |
telsoa01 | ce3e84a | 2018-08-31 09:31:35 +0100 | [diff] [blame] | 23 | using namespace android::hardware; |
telsoa01 | 5307bc1 | 2018-03-09 13:51:08 +0000 | [diff] [blame] | 24 | using namespace android::hidl::memory::V1_0; |
| 25 | |
| 26 | namespace armnn_driver |
| 27 | { |
// Empty permutation vector: passed to the swizzling helpers when tensor data
// should be copied through without any dimension reordering.
const armnn::PermutationVector g_DontPermute{};
| 29 | |
| 30 | namespace |
| 31 | { |
| 32 | |
| 33 | template <typename T> |
| 34 | void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorShape& inTensorShape, const void* input, |
| 35 | void* output, const armnn::PermutationVector& mappings) |
| 36 | { |
| 37 | const auto inputData = static_cast<const T*>(input); |
| 38 | const auto outputData = static_cast<T*>(output); |
| 39 | |
Matteo Martincigh | 2c444fc | 2019-01-07 10:18:47 +0000 | [diff] [blame] | 40 | armnnUtils::Permute(armnnUtils::Permuted(inTensorShape, mappings), mappings, inputData, outputData, sizeof(T)); |
telsoa01 | 5307bc1 | 2018-03-09 13:51:08 +0000 | [diff] [blame] | 41 | } |
| 42 | |
| 43 | } // anonymous namespace |
| 44 | |
| 45 | void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorInfo& tensor, const void* input, void* output, |
| 46 | const armnn::PermutationVector& mappings) |
| 47 | { |
| 48 | assert(tensor.GetNumDimensions() == 4U); |
| 49 | |
| 50 | switch(tensor.GetDataType()) |
| 51 | { |
Mike Kelly | 3c67394 | 2019-07-25 09:26:06 +0100 | [diff] [blame] | 52 | case armnn::DataType::Float16: |
| 53 | SwizzleAndroidNn4dTensorToArmNn<armnn::Half>(tensor.GetShape(), input, output, mappings); |
| 54 | break; |
telsoa01 | 5307bc1 | 2018-03-09 13:51:08 +0000 | [diff] [blame] | 55 | case armnn::DataType::Float32: |
| 56 | SwizzleAndroidNn4dTensorToArmNn<float>(tensor.GetShape(), input, output, mappings); |
| 57 | break; |
| 58 | case armnn::DataType::QuantisedAsymm8: |
| 59 | SwizzleAndroidNn4dTensorToArmNn<uint8_t>(tensor.GetShape(), input, output, mappings); |
| 60 | break; |
Aron Virginas-Tar | 9f0693b | 2019-11-06 14:32:30 +0000 | [diff] [blame] | 61 | case armnn::DataType::QuantizedSymm8PerAxis: |
| 62 | SwizzleAndroidNn4dTensorToArmNn<int8_t>(tensor.GetShape(), input, output, mappings); |
| 63 | break; |
telsoa01 | 5307bc1 | 2018-03-09 13:51:08 +0000 | [diff] [blame] | 64 | default: |
| 65 | ALOGW("Unknown armnn::DataType for swizzling"); |
| 66 | assert(0); |
| 67 | } |
| 68 | } |
| 69 | |
| 70 | void* GetMemoryFromPool(DataLocation location, const std::vector<android::nn::RunTimePoolInfo>& memPools) |
| 71 | { |
| 72 | // find the location within the pool |
| 73 | assert(location.poolIndex < memPools.size()); |
| 74 | |
surmeh01 | deb3bdb | 2018-07-05 12:06:04 +0100 | [diff] [blame] | 75 | const android::nn::RunTimePoolInfo& memPool = memPools[location.poolIndex]; |
| 76 | |
| 77 | // Type android::nn::RunTimePoolInfo has changed between Android O and Android P, where |
| 78 | // "buffer" has been made private and must be accessed via the accessor method "getBuffer". |
Mike Kelly | b5fdf38 | 2019-06-11 16:35:25 +0100 | [diff] [blame] | 79 | #if defined(ARMNN_ANDROID_P) || defined(ARMNN_ANDROID_Q) // Use the new Android implementation. |
surmeh01 | deb3bdb | 2018-07-05 12:06:04 +0100 | [diff] [blame] | 80 | uint8_t* memPoolBuffer = memPool.getBuffer(); |
| 81 | #else // Fallback to the old Android O implementation. |
| 82 | uint8_t* memPoolBuffer = memPool.buffer; |
| 83 | #endif |
| 84 | |
| 85 | uint8_t* memory = memPoolBuffer + location.offset; |
telsoa01 | 5307bc1 | 2018-03-09 13:51:08 +0000 | [diff] [blame] | 86 | |
| 87 | return memory; |
| 88 | } |
| 89 | |
Matthew Bentham | 912b362 | 2019-05-03 15:49:14 +0100 | [diff] [blame] | 90 | armnn::TensorInfo GetTensorInfoForOperand(const V1_0::Operand& operand) |
telsoa01 | 5307bc1 | 2018-03-09 13:51:08 +0000 | [diff] [blame] | 91 | { |
| 92 | armnn::DataType type; |
| 93 | |
| 94 | switch (operand.type) |
| 95 | { |
Matthew Bentham | 912b362 | 2019-05-03 15:49:14 +0100 | [diff] [blame] | 96 | case V1_0::OperandType::TENSOR_FLOAT32: |
telsoa01 | 5307bc1 | 2018-03-09 13:51:08 +0000 | [diff] [blame] | 97 | type = armnn::DataType::Float32; |
| 98 | break; |
Matthew Bentham | 912b362 | 2019-05-03 15:49:14 +0100 | [diff] [blame] | 99 | case V1_0::OperandType::TENSOR_QUANT8_ASYMM: |
telsoa01 | 5307bc1 | 2018-03-09 13:51:08 +0000 | [diff] [blame] | 100 | type = armnn::DataType::QuantisedAsymm8; |
| 101 | break; |
Matthew Bentham | 912b362 | 2019-05-03 15:49:14 +0100 | [diff] [blame] | 102 | case V1_0::OperandType::TENSOR_INT32: |
telsoa01 | 5307bc1 | 2018-03-09 13:51:08 +0000 | [diff] [blame] | 103 | type = armnn::DataType::Signed32; |
| 104 | break; |
| 105 | default: |
Mike Kelly | b5fdf38 | 2019-06-11 16:35:25 +0100 | [diff] [blame] | 106 | throw UnsupportedOperand<V1_0::OperandType>(operand.type); |
telsoa01 | 5307bc1 | 2018-03-09 13:51:08 +0000 | [diff] [blame] | 107 | } |
| 108 | |
| 109 | armnn::TensorInfo ret(operand.dimensions.size(), operand.dimensions.data(), type); |
| 110 | |
| 111 | ret.SetQuantizationScale(operand.scale); |
| 112 | ret.SetQuantizationOffset(operand.zeroPoint); |
| 113 | |
| 114 | return ret; |
| 115 | } |
| 116 | |
Mike Kelly | b5fdf38 | 2019-06-11 16:35:25 +0100 | [diff] [blame] | 117 | #ifdef ARMNN_ANDROID_NN_V1_2 // Using ::android::hardware::neuralnetworks::V1_2 |
| 118 | |
| 119 | armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand) |
| 120 | { |
Aron Virginas-Tar | 9f0693b | 2019-11-06 14:32:30 +0000 | [diff] [blame] | 121 | using namespace armnn; |
Mike Kelly | b5fdf38 | 2019-06-11 16:35:25 +0100 | [diff] [blame] | 122 | |
Aron Virginas-Tar | 9f0693b | 2019-11-06 14:32:30 +0000 | [diff] [blame] | 123 | DataType type; |
Mike Kelly | b5fdf38 | 2019-06-11 16:35:25 +0100 | [diff] [blame] | 124 | switch (operand.type) |
| 125 | { |
| 126 | case V1_2::OperandType::TENSOR_FLOAT32: |
| 127 | type = armnn::DataType::Float32; |
| 128 | break; |
Mike Kelly | 3c67394 | 2019-07-25 09:26:06 +0100 | [diff] [blame] | 129 | case V1_2::OperandType::TENSOR_FLOAT16: |
| 130 | type = armnn::DataType::Float16; |
| 131 | break; |
Aron Virginas-Tar | 9f0693b | 2019-11-06 14:32:30 +0000 | [diff] [blame] | 132 | case V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL: |
| 133 | type = armnn::DataType::QuantizedSymm8PerAxis; |
| 134 | break; |
Mike Kelly | b5fdf38 | 2019-06-11 16:35:25 +0100 | [diff] [blame] | 135 | case V1_2::OperandType::TENSOR_QUANT8_ASYMM: |
| 136 | type = armnn::DataType::QuantisedAsymm8; |
| 137 | break; |
Mike Kelly | d7de165 | 2019-11-19 09:16:00 +0000 | [diff] [blame] | 138 | case V1_2::OperandType::TENSOR_QUANT8_SYMM: |
| 139 | type = armnn::DataType::QuantisedSymm8; |
| 140 | break; |
Mike Kelly | b5fdf38 | 2019-06-11 16:35:25 +0100 | [diff] [blame] | 141 | case V1_2::OperandType::TENSOR_QUANT16_SYMM: |
| 142 | type = armnn::DataType::QuantisedSymm16; |
| 143 | break; |
| 144 | case V1_2::OperandType::TENSOR_INT32: |
| 145 | type = armnn::DataType::Signed32; |
| 146 | break; |
| 147 | default: |
| 148 | throw UnsupportedOperand<V1_2::OperandType>(operand.type); |
| 149 | } |
| 150 | |
Aron Virginas-Tar | 9f0693b | 2019-11-06 14:32:30 +0000 | [diff] [blame] | 151 | TensorInfo ret(operand.dimensions.size(), operand.dimensions.data(), type); |
| 152 | if (type == DataType::QuantizedSymm8PerAxis) |
| 153 | { |
| 154 | // ExtraParams is expected to be of type channelQuant |
| 155 | BOOST_ASSERT(operand.extraParams.getDiscriminator() == |
| 156 | V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant); |
Mike Kelly | b5fdf38 | 2019-06-11 16:35:25 +0100 | [diff] [blame] | 157 | |
Aron Virginas-Tar | 9f0693b | 2019-11-06 14:32:30 +0000 | [diff] [blame] | 158 | auto perAxisQuantParams = operand.extraParams.channelQuant(); |
| 159 | |
| 160 | ret.SetQuantizationScales(perAxisQuantParams.scales); |
| 161 | ret.SetQuantizationDim(MakeOptional<unsigned int>(perAxisQuantParams.channelDim)); |
| 162 | } |
| 163 | else |
| 164 | { |
| 165 | ret.SetQuantizationScale(operand.scale); |
| 166 | ret.SetQuantizationOffset(operand.zeroPoint); |
| 167 | } |
Mike Kelly | b5fdf38 | 2019-06-11 16:35:25 +0100 | [diff] [blame] | 168 | |
| 169 | return ret; |
| 170 | } |
| 171 | |
| 172 | #endif |
| 173 | |
Matthew Bentham | 912b362 | 2019-05-03 15:49:14 +0100 | [diff] [blame] | 174 | std::string GetOperandSummary(const V1_0::Operand& operand) |
telsoa01 | 5307bc1 | 2018-03-09 13:51:08 +0000 | [diff] [blame] | 175 | { |
| 176 | return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " + |
| 177 | toString(operand.type); |
| 178 | } |
| 179 | |
Mike Kelly | b5fdf38 | 2019-06-11 16:35:25 +0100 | [diff] [blame] | 180 | #ifdef ARMNN_ANDROID_NN_V1_2 // Using ::android::hardware::neuralnetworks::V1_2 |
| 181 | |
| 182 | std::string GetOperandSummary(const V1_2::Operand& operand) |
| 183 | { |
| 184 | return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " + |
| 185 | toString(operand.type); |
| 186 | } |
| 187 | |
| 188 | #endif |
| 189 | |
// Signature of the per-element printers used by DumpTensor: writes the element
// at elementIndex of tensor to fileStream.
using DumpElementFunction = void (*)(const armnn::ConstTensor& tensor,
                                     unsigned int elementIndex,
                                     std::ofstream& fileStream);
| 193 | |
| 194 | namespace |
| 195 | { |
| 196 | template <typename ElementType, typename PrintableType = ElementType> |
| 197 | void DumpTensorElement(const armnn::ConstTensor& tensor, unsigned int elementIndex, std::ofstream& fileStream) |
| 198 | { |
| 199 | const ElementType* elements = reinterpret_cast<const ElementType*>(tensor.GetMemoryArea()); |
| 200 | fileStream << static_cast<PrintableType>(elements[elementIndex]) << ","; |
| 201 | } |
| 202 | |
| 203 | constexpr const char* MemoryLayoutString(const armnn::ConstTensor& tensor) |
| 204 | { |
| 205 | const char* str = ""; |
| 206 | |
| 207 | switch (tensor.GetNumDimensions()) |
| 208 | { |
| 209 | case 4: { str = "(BHWC) "; break; } |
| 210 | case 3: { str = "(HWC) "; break; } |
| 211 | case 2: { str = "(HW) "; break; } |
| 212 | default: { str = ""; break; } |
| 213 | } |
| 214 | |
| 215 | return str; |
| 216 | } |
| 217 | } // namespace |
| 218 | |
| 219 | void DumpTensor(const std::string& dumpDir, |
| 220 | const std::string& requestName, |
| 221 | const std::string& tensorName, |
| 222 | const armnn::ConstTensor& tensor) |
| 223 | { |
| 224 | // The dump directory must exist in advance. |
| 225 | const std::string fileName = boost::str(boost::format("%1%/%2%_%3%.dump") % dumpDir % requestName % tensorName); |
| 226 | |
| 227 | std::ofstream fileStream; |
| 228 | fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc); |
| 229 | |
| 230 | if (!fileStream.good()) |
| 231 | { |
| 232 | ALOGW("Could not open file %s for writing", fileName.c_str()); |
| 233 | return; |
| 234 | } |
| 235 | |
| 236 | DumpElementFunction dumpElementFunction = nullptr; |
| 237 | |
| 238 | switch (tensor.GetDataType()) |
| 239 | { |
| 240 | case armnn::DataType::Float32: |
| 241 | { |
| 242 | dumpElementFunction = &DumpTensorElement<float>; |
| 243 | break; |
| 244 | } |
| 245 | case armnn::DataType::QuantisedAsymm8: |
| 246 | { |
| 247 | dumpElementFunction = &DumpTensorElement<uint8_t, uint32_t>; |
| 248 | break; |
| 249 | } |
| 250 | case armnn::DataType::Signed32: |
| 251 | { |
| 252 | dumpElementFunction = &DumpTensorElement<int32_t>; |
| 253 | break; |
| 254 | } |
Jim Flynn | d269798 | 2019-12-16 11:50:29 +0000 | [diff] [blame] | 255 | case armnn::DataType::Float16: |
| 256 | { |
| 257 | dumpElementFunction = &DumpTensorElement<armnn::Half>; |
| 258 | break; |
| 259 | } |
telsoa01 | 5307bc1 | 2018-03-09 13:51:08 +0000 | [diff] [blame] | 260 | default: |
| 261 | { |
| 262 | dumpElementFunction = nullptr; |
| 263 | } |
| 264 | } |
| 265 | |
| 266 | if (dumpElementFunction != nullptr) |
| 267 | { |
| 268 | const unsigned int numDimensions = tensor.GetNumDimensions(); |
| 269 | |
| 270 | const unsigned int batch = (numDimensions == 4) ? tensor.GetShape()[numDimensions - 4] : 1; |
| 271 | |
| 272 | const unsigned int height = (numDimensions >= 3) |
| 273 | ? tensor.GetShape()[numDimensions - 3] |
| 274 | : (numDimensions >= 2) ? tensor.GetShape()[numDimensions - 2] : 1; |
| 275 | |
| 276 | const unsigned int width = (numDimensions >= 3) |
| 277 | ? tensor.GetShape()[numDimensions - 2] |
| 278 | : (numDimensions >= 1) ? tensor.GetShape()[numDimensions - 1] : 0; |
| 279 | |
| 280 | const unsigned int channels = (numDimensions >= 3) ? tensor.GetShape()[numDimensions - 1] : 1; |
| 281 | |
| 282 | fileStream << "# Number of elements " << tensor.GetNumElements() << std::endl; |
| 283 | fileStream << "# Dimensions " << MemoryLayoutString(tensor); |
| 284 | fileStream << "[" << tensor.GetShape()[0]; |
| 285 | for (unsigned int d = 1; d < numDimensions; d++) |
| 286 | { |
| 287 | fileStream << "," << tensor.GetShape()[d]; |
| 288 | } |
| 289 | fileStream << "]" << std::endl; |
| 290 | |
| 291 | for (unsigned int e = 0, b = 0; b < batch; ++b) |
| 292 | { |
| 293 | if (numDimensions >= 4) |
| 294 | { |
| 295 | fileStream << "# Batch " << b << std::endl; |
| 296 | } |
| 297 | for (unsigned int c = 0; c < channels; c++) |
| 298 | { |
| 299 | if (numDimensions >= 3) |
| 300 | { |
| 301 | fileStream << "# Channel " << c << std::endl; |
| 302 | } |
| 303 | for (unsigned int h = 0; h < height; h++) |
| 304 | { |
| 305 | for (unsigned int w = 0; w < width; w++, e += channels) |
| 306 | { |
| 307 | (*dumpElementFunction)(tensor, e, fileStream); |
| 308 | } |
| 309 | fileStream << std::endl; |
| 310 | } |
| 311 | e -= channels - 1; |
| 312 | if (c < channels) |
| 313 | { |
| 314 | e -= ((height * width) - 1) * channels; |
| 315 | } |
| 316 | } |
| 317 | fileStream << std::endl; |
| 318 | } |
| 319 | fileStream << std::endl; |
| 320 | } |
| 321 | else |
| 322 | { |
| 323 | fileStream << "Cannot dump tensor elements: Unsupported data type " |
| 324 | << static_cast<unsigned int>(tensor.GetDataType()) << std::endl; |
| 325 | } |
| 326 | |
| 327 | if (!fileStream.good()) |
| 328 | { |
| 329 | ALOGW("An error occurred when writing to file %s", fileName.c_str()); |
| 330 | } |
| 331 | } |
| 332 | |
telsoa01 | ce3e84a | 2018-08-31 09:31:35 +0100 | [diff] [blame] | 333 | void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled, |
| 334 | const std::string& dumpDir, |
| 335 | armnn::NetworkId networkId, |
| 336 | const armnn::IProfiler* profiler) |
| 337 | { |
| 338 | // Check if profiling is required. |
| 339 | if (!gpuProfilingEnabled) |
| 340 | { |
| 341 | return; |
| 342 | } |
| 343 | |
| 344 | // The dump directory must exist in advance. |
| 345 | if (dumpDir.empty()) |
| 346 | { |
| 347 | return; |
| 348 | } |
| 349 | |
| 350 | BOOST_ASSERT(profiler); |
| 351 | |
| 352 | // Set the name of the output profiling file. |
| 353 | const std::string fileName = boost::str(boost::format("%1%/%2%_%3%.json") |
| 354 | % dumpDir |
| 355 | % std::to_string(networkId) |
| 356 | % "profiling"); |
| 357 | |
| 358 | // Open the ouput file for writing. |
| 359 | std::ofstream fileStream; |
| 360 | fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc); |
| 361 | |
| 362 | if (!fileStream.good()) |
| 363 | { |
| 364 | ALOGW("Could not open file %s for writing", fileName.c_str()); |
| 365 | return; |
| 366 | } |
| 367 | |
| 368 | // Write the profiling info to a JSON file. |
| 369 | profiler->Print(fileStream); |
| 370 | } |
| 371 | |
Jim Flynn | d269798 | 2019-12-16 11:50:29 +0000 | [diff] [blame] | 372 | std::string ExportNetworkGraphToDotFile(const armnn::IOptimizedNetwork& optimizedNetwork, |
| 373 | const std::string& dumpDir) |
| 374 | { |
| 375 | std::string fileName; |
| 376 | // The dump directory must exist in advance. |
| 377 | if (dumpDir.empty()) |
| 378 | { |
| 379 | return fileName; |
| 380 | } |
| 381 | |
| 382 | std::string timestamp = GetFileTimestamp(); |
| 383 | if (timestamp.empty()) |
| 384 | { |
| 385 | return fileName; |
| 386 | } |
| 387 | |
| 388 | // Set the name of the output .dot file. |
| 389 | fileName = boost::str(boost::format("%1%/%2%_networkgraph.dot") |
| 390 | % dumpDir |
| 391 | % timestamp); |
| 392 | |
| 393 | ALOGV("Exporting the optimized network graph to file: %s", fileName.c_str()); |
| 394 | |
| 395 | // Write the network graph to a dot file. |
| 396 | std::ofstream fileStream; |
| 397 | fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc); |
| 398 | |
| 399 | if (!fileStream.good()) |
| 400 | { |
| 401 | ALOGW("Could not open file %s for writing", fileName.c_str()); |
| 402 | return fileName; |
| 403 | } |
| 404 | |
| 405 | if (optimizedNetwork.SerializeToDot(fileStream) != armnn::Status::Success) |
| 406 | { |
| 407 | ALOGW("An error occurred when writing to file %s", fileName.c_str()); |
| 408 | } |
| 409 | return fileName; |
| 410 | } |
| 411 | |
Aron Virginas-Tar | 573a8fa | 2019-07-23 14:01:37 +0100 | [diff] [blame] | 412 | bool IsDynamicTensor(const armnn::TensorInfo& outputInfo) |
| 413 | { |
| 414 | // Dynamic tensors have at least one 0-sized dimension |
| 415 | return outputInfo.GetNumElements() == 0u; |
| 416 | } |
| 417 | |
Jim Flynn | d269798 | 2019-12-16 11:50:29 +0000 | [diff] [blame] | 418 | std::string GetFileTimestamp() |
| 419 | { |
| 420 | // used to get a timestamp to name diagnostic files (the ArmNN serialized graph |
| 421 | // and getSupportedOperations.txt files) |
| 422 | timespec ts; |
| 423 | int iRet = clock_gettime(CLOCK_MONOTONIC_RAW, &ts); |
| 424 | std::stringstream ss; |
| 425 | if (iRet == 0) |
| 426 | { |
| 427 | ss << std::to_string(ts.tv_sec) << "_" << std::to_string(ts.tv_nsec); |
| 428 | } |
| 429 | else |
| 430 | { |
| 431 | ALOGW("clock_gettime failed with errno %s : %s", std::to_string(errno).c_str(), std::strerror(errno)); |
| 432 | } |
| 433 | return ss.str(); |
| 434 | } |
| 435 | |
| 436 | void RenameGraphDotFile(const std::string& oldName, const std::string& dumpDir, const armnn::NetworkId networkId) |
| 437 | { |
| 438 | if (dumpDir.empty()) |
| 439 | { |
| 440 | return; |
| 441 | } |
| 442 | if (oldName.empty()) |
| 443 | { |
| 444 | return; |
| 445 | } |
| 446 | const std::string newFileName = boost::str(boost::format("%1%/%2%_networkgraph.dot") |
| 447 | % dumpDir |
| 448 | % std::to_string(networkId)); |
| 449 | int iRet = rename(oldName.c_str(), newFileName.c_str()); |
| 450 | if (iRet != 0) |
| 451 | { |
| 452 | std::stringstream ss; |
| 453 | ss << "rename of [" << oldName << "] to [" << newFileName << "] failed with errno " << std::to_string(errno) |
| 454 | << " : " << std::strerror(errno); |
| 455 | ALOGW(ss.str().c_str()); |
| 456 | } |
| 457 | } |
| 458 | |
telsoa01 | 5307bc1 | 2018-03-09 13:51:08 +0000 | [diff] [blame] | 459 | } // namespace armnn_driver |