//
// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
//
| 5 | |
| 6 | #define LOG_TAG "ArmnnDriver" |
| 7 | |
| 8 | #include "Utils.hpp" |
| 9 | |
| 10 | #include <Permute.hpp> |
| 11 | |
| 12 | #include <boost/format.hpp> |
| 13 | #include <log/log.h> |
| 14 | |
| 15 | #include <cassert> |
| 16 | #include <cinttypes> |
| 17 | #include <fstream> |
| 18 | |
| 19 | using namespace android; |
| 20 | using namespace android::hidl::memory::V1_0; |
| 21 | |
| 22 | namespace armnn_driver |
| 23 | { |
// Empty permutation vector: pass this to the swizzle helpers to request "no permutation".
const armnn::PermutationVector g_DontPermute{};
| 25 | |
| 26 | namespace |
| 27 | { |
| 28 | |
| 29 | template <typename T> |
| 30 | void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorShape& inTensorShape, const void* input, |
| 31 | void* output, const armnn::PermutationVector& mappings) |
| 32 | { |
| 33 | const auto inputData = static_cast<const T*>(input); |
| 34 | const auto outputData = static_cast<T*>(output); |
| 35 | |
| 36 | armnnUtils::Permute(armnnUtils::Permuted(inTensorShape, mappings), mappings, inputData, outputData); |
| 37 | } |
| 38 | |
| 39 | } // anonymous namespace |
| 40 | |
| 41 | void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorInfo& tensor, const void* input, void* output, |
| 42 | const armnn::PermutationVector& mappings) |
| 43 | { |
| 44 | assert(tensor.GetNumDimensions() == 4U); |
| 45 | |
| 46 | switch(tensor.GetDataType()) |
| 47 | { |
| 48 | case armnn::DataType::Float32: |
| 49 | SwizzleAndroidNn4dTensorToArmNn<float>(tensor.GetShape(), input, output, mappings); |
| 50 | break; |
| 51 | case armnn::DataType::QuantisedAsymm8: |
| 52 | SwizzleAndroidNn4dTensorToArmNn<uint8_t>(tensor.GetShape(), input, output, mappings); |
| 53 | break; |
| 54 | default: |
| 55 | ALOGW("Unknown armnn::DataType for swizzling"); |
| 56 | assert(0); |
| 57 | } |
| 58 | } |
| 59 | |
| 60 | void* GetMemoryFromPool(DataLocation location, const std::vector<android::nn::RunTimePoolInfo>& memPools) |
| 61 | { |
| 62 | // find the location within the pool |
| 63 | assert(location.poolIndex < memPools.size()); |
| 64 | |
| 65 | uint8_t* memory = |
| 66 | static_cast<uint8_t*>(static_cast<void*>(memPools[location.poolIndex].buffer)) + location.offset; |
| 67 | |
| 68 | return memory; |
| 69 | } |
| 70 | |
| 71 | armnn::TensorInfo GetTensorInfoForOperand(const Operand& operand) |
| 72 | { |
| 73 | armnn::DataType type; |
| 74 | |
| 75 | switch (operand.type) |
| 76 | { |
| 77 | case OperandType::TENSOR_FLOAT32: |
| 78 | type = armnn::DataType::Float32; |
| 79 | break; |
| 80 | case OperandType::TENSOR_QUANT8_ASYMM: |
| 81 | type = armnn::DataType::QuantisedAsymm8; |
| 82 | break; |
| 83 | case OperandType::TENSOR_INT32: |
| 84 | type = armnn::DataType::Signed32; |
| 85 | break; |
| 86 | default: |
| 87 | throw UnsupportedOperand(operand.type); |
| 88 | } |
| 89 | |
| 90 | armnn::TensorInfo ret(operand.dimensions.size(), operand.dimensions.data(), type); |
| 91 | |
| 92 | ret.SetQuantizationScale(operand.scale); |
| 93 | ret.SetQuantizationOffset(operand.zeroPoint); |
| 94 | |
| 95 | return ret; |
| 96 | } |
| 97 | |
| 98 | std::string GetOperandSummary(const Operand& operand) |
| 99 | { |
| 100 | return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " + |
| 101 | toString(operand.type); |
| 102 | } |
| 103 | |
| 104 | std::string GetModelSummary(const Model& model) |
| 105 | { |
| 106 | std::stringstream result; |
| 107 | |
| 108 | result << model.inputIndexes.size() << " input(s), " << model.operations.size() << " operation(s), " << |
| 109 | model.outputIndexes.size() << " output(s), " << model.operands.size() << " operand(s)" << std::endl; |
| 110 | |
| 111 | result << "Inputs: "; |
| 112 | for (uint32_t i = 0; i < model.inputIndexes.size(); i++) |
| 113 | { |
| 114 | result << GetOperandSummary(model.operands[model.inputIndexes[i]]) << ", "; |
| 115 | } |
| 116 | result << std::endl; |
| 117 | |
| 118 | result << "Operations: "; |
| 119 | for (uint32_t i = 0; i < model.operations.size(); i++) |
| 120 | { |
| 121 | result << toString(model.operations[i].type).c_str() << ", "; |
| 122 | } |
| 123 | result << std::endl; |
| 124 | |
| 125 | result << "Outputs: "; |
| 126 | for (uint32_t i = 0; i < model.outputIndexes.size(); i++) |
| 127 | { |
| 128 | result << GetOperandSummary(model.operands[model.outputIndexes[i]]) << ", "; |
| 129 | } |
| 130 | result << std::endl; |
| 131 | |
| 132 | return result.str(); |
| 133 | } |
| 134 | |
// Signature of a helper that writes the element at 'elementIndex' of 'tensor' to 'fileStream'.
using DumpElementFunction = void (*)(const armnn::ConstTensor& tensor,
                                     unsigned int elementIndex,
                                     std::ofstream& fileStream);
| 138 | |
| 139 | namespace |
| 140 | { |
| 141 | template <typename ElementType, typename PrintableType = ElementType> |
| 142 | void DumpTensorElement(const armnn::ConstTensor& tensor, unsigned int elementIndex, std::ofstream& fileStream) |
| 143 | { |
| 144 | const ElementType* elements = reinterpret_cast<const ElementType*>(tensor.GetMemoryArea()); |
| 145 | fileStream << static_cast<PrintableType>(elements[elementIndex]) << ","; |
| 146 | } |
| 147 | |
| 148 | constexpr const char* MemoryLayoutString(const armnn::ConstTensor& tensor) |
| 149 | { |
| 150 | const char* str = ""; |
| 151 | |
| 152 | switch (tensor.GetNumDimensions()) |
| 153 | { |
| 154 | case 4: { str = "(BHWC) "; break; } |
| 155 | case 3: { str = "(HWC) "; break; } |
| 156 | case 2: { str = "(HW) "; break; } |
| 157 | default: { str = ""; break; } |
| 158 | } |
| 159 | |
| 160 | return str; |
| 161 | } |
| 162 | } // namespace |
| 163 | |
| 164 | void DumpTensor(const std::string& dumpDir, |
| 165 | const std::string& requestName, |
| 166 | const std::string& tensorName, |
| 167 | const armnn::ConstTensor& tensor) |
| 168 | { |
| 169 | // The dump directory must exist in advance. |
| 170 | const std::string fileName = boost::str(boost::format("%1%/%2%_%3%.dump") % dumpDir % requestName % tensorName); |
| 171 | |
| 172 | std::ofstream fileStream; |
| 173 | fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc); |
| 174 | |
| 175 | if (!fileStream.good()) |
| 176 | { |
| 177 | ALOGW("Could not open file %s for writing", fileName.c_str()); |
| 178 | return; |
| 179 | } |
| 180 | |
| 181 | DumpElementFunction dumpElementFunction = nullptr; |
| 182 | |
| 183 | switch (tensor.GetDataType()) |
| 184 | { |
| 185 | case armnn::DataType::Float32: |
| 186 | { |
| 187 | dumpElementFunction = &DumpTensorElement<float>; |
| 188 | break; |
| 189 | } |
| 190 | case armnn::DataType::QuantisedAsymm8: |
| 191 | { |
| 192 | dumpElementFunction = &DumpTensorElement<uint8_t, uint32_t>; |
| 193 | break; |
| 194 | } |
| 195 | case armnn::DataType::Signed32: |
| 196 | { |
| 197 | dumpElementFunction = &DumpTensorElement<int32_t>; |
| 198 | break; |
| 199 | } |
| 200 | default: |
| 201 | { |
| 202 | dumpElementFunction = nullptr; |
| 203 | } |
| 204 | } |
| 205 | |
| 206 | if (dumpElementFunction != nullptr) |
| 207 | { |
| 208 | const unsigned int numDimensions = tensor.GetNumDimensions(); |
| 209 | |
| 210 | const unsigned int batch = (numDimensions == 4) ? tensor.GetShape()[numDimensions - 4] : 1; |
| 211 | |
| 212 | const unsigned int height = (numDimensions >= 3) |
| 213 | ? tensor.GetShape()[numDimensions - 3] |
| 214 | : (numDimensions >= 2) ? tensor.GetShape()[numDimensions - 2] : 1; |
| 215 | |
| 216 | const unsigned int width = (numDimensions >= 3) |
| 217 | ? tensor.GetShape()[numDimensions - 2] |
| 218 | : (numDimensions >= 1) ? tensor.GetShape()[numDimensions - 1] : 0; |
| 219 | |
| 220 | const unsigned int channels = (numDimensions >= 3) ? tensor.GetShape()[numDimensions - 1] : 1; |
| 221 | |
| 222 | fileStream << "# Number of elements " << tensor.GetNumElements() << std::endl; |
| 223 | fileStream << "# Dimensions " << MemoryLayoutString(tensor); |
| 224 | fileStream << "[" << tensor.GetShape()[0]; |
| 225 | for (unsigned int d = 1; d < numDimensions; d++) |
| 226 | { |
| 227 | fileStream << "," << tensor.GetShape()[d]; |
| 228 | } |
| 229 | fileStream << "]" << std::endl; |
| 230 | |
| 231 | for (unsigned int e = 0, b = 0; b < batch; ++b) |
| 232 | { |
| 233 | if (numDimensions >= 4) |
| 234 | { |
| 235 | fileStream << "# Batch " << b << std::endl; |
| 236 | } |
| 237 | for (unsigned int c = 0; c < channels; c++) |
| 238 | { |
| 239 | if (numDimensions >= 3) |
| 240 | { |
| 241 | fileStream << "# Channel " << c << std::endl; |
| 242 | } |
| 243 | for (unsigned int h = 0; h < height; h++) |
| 244 | { |
| 245 | for (unsigned int w = 0; w < width; w++, e += channels) |
| 246 | { |
| 247 | (*dumpElementFunction)(tensor, e, fileStream); |
| 248 | } |
| 249 | fileStream << std::endl; |
| 250 | } |
| 251 | e -= channels - 1; |
| 252 | if (c < channels) |
| 253 | { |
| 254 | e -= ((height * width) - 1) * channels; |
| 255 | } |
| 256 | } |
| 257 | fileStream << std::endl; |
| 258 | } |
| 259 | fileStream << std::endl; |
| 260 | } |
| 261 | else |
| 262 | { |
| 263 | fileStream << "Cannot dump tensor elements: Unsupported data type " |
| 264 | << static_cast<unsigned int>(tensor.GetDataType()) << std::endl; |
| 265 | } |
| 266 | |
| 267 | if (!fileStream.good()) |
| 268 | { |
| 269 | ALOGW("An error occurred when writing to file %s", fileName.c_str()); |
| 270 | } |
| 271 | } |
| 272 | |
| 273 | } // namespace armnn_driver |