//
// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
//

#define LOG_TAG "ArmnnDriver"

#include "Utils.hpp"

#include <Permute.hpp>

#include <boost/assert.hpp>
#include <boost/format.hpp>
#include <log/log.h>

#include <cassert>
#include <cinttypes>
#include <fstream>
#include <iomanip>
#include <sstream>

using namespace android;
using namespace android::hardware;
using namespace android::hidl::memory::V1_0;

namespace armnn_driver
{
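// An empty permutation vector, used by callers to indicate that no swizzling of the
// tensor data is required.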
const armnn::PermutationVector g_DontPermute{};

namespace
{

template <typename T>
void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorShape& inTensorShape, const void* input,
                                     void* output, const armnn::PermutationVector& mappings)
{
    const auto inputData = static_cast<const T*>(input);
    const auto outputData = static_cast<T*>(output);

    armnnUtils::Permute(armnnUtils::Permuted(inTensorShape, mappings), mappings, inputData, outputData);
}

} // anonymous namespace

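// Swizzles a 4D tensor from the Android NN layout into the layout described by the given
// permutation vector, dispatching on the tensor's data type. Unsupported data types log a
// warning and assert.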
void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorInfo& tensor, const void* input, void* output,
                                     const armnn::PermutationVector& mappings)
{
    assert(tensor.GetNumDimensions() == 4U);

    switch(tensor.GetDataType())
    {
    case armnn::DataType::Float32:
        SwizzleAndroidNn4dTensorToArmNn<float>(tensor.GetShape(), input, output, mappings);
        break;
    case armnn::DataType::QuantisedAsymm8:
        SwizzleAndroidNn4dTensorToArmNn<uint8_t>(tensor.GetShape(), input, output, mappings);
        break;
    default:
        ALOGW("Unknown armnn::DataType for swizzling");
        assert(0);
    }
}

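// Returns a pointer to the data at the given location within the given set of run-time
// memory pools, handling the RunTimePoolInfo API difference between Android O and Android P.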
void* GetMemoryFromPool(DataLocation location, const std::vector<android::nn::RunTimePoolInfo>& memPools)
{
    // Find the location within the pool.
    assert(location.poolIndex < memPools.size());

    const android::nn::RunTimePoolInfo& memPool = memPools[location.poolIndex];

    // The type android::nn::RunTimePoolInfo has changed between Android O and Android P, where
    // "buffer" has been made private and must be accessed via the accessor method "getBuffer".
#if defined(ARMNN_ANDROID_P) // Use the new Android P implementation.
    uint8_t* memPoolBuffer = memPool.getBuffer();
#else // Fall back to the old Android O implementation.
    uint8_t* memPoolBuffer = memPool.buffer;
#endif

    uint8_t* memory = memPoolBuffer + location.offset;

    return memory;
}

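// Converts an Android NN Operand into an armnn::TensorInfo, translating the operand type
// and copying its dimensions and quantization parameters. Throws UnsupportedOperand for
// operand types that have no ArmNN equivalent.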
armnn::TensorInfo GetTensorInfoForOperand(const Operand& operand)
{
    armnn::DataType type;

    switch (operand.type)
    {
    case OperandType::TENSOR_FLOAT32:
        type = armnn::DataType::Float32;
        break;
    case OperandType::TENSOR_QUANT8_ASYMM:
        type = armnn::DataType::QuantisedAsymm8;
        break;
    case OperandType::TENSOR_INT32:
        type = armnn::DataType::Signed32;
        break;
    default:
        throw UnsupportedOperand(operand.type);
    }

    armnn::TensorInfo ret(operand.dimensions.size(), operand.dimensions.data(), type);

    ret.SetQuantizationScale(operand.scale);
    ret.SetQuantizationOffset(operand.zeroPoint);

    return ret;
}

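// Returns a short human-readable summary of an operand: its dimensions followed by its type.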
std::string GetOperandSummary(const Operand& operand)
{
    return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " +
        toString(operand.type);
}

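// Returns a multi-line human-readable summary of a model: the input/operation/output/operand
// counts, followed by summaries of each input and output operand and the list of operation types.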
std::string GetModelSummary(const neuralnetworks::V1_0::Model& model)
{
    std::stringstream result;

    result << model.inputIndexes.size() << " input(s), " << model.operations.size() << " operation(s), " <<
        model.outputIndexes.size() << " output(s), " << model.operands.size() << " operand(s)" << std::endl;

    result << "Inputs: ";
    for (uint32_t i = 0; i < model.inputIndexes.size(); i++)
    {
        result << GetOperandSummary(model.operands[model.inputIndexes[i]]) << ", ";
    }
    result << std::endl;

    result << "Operations: ";
    for (uint32_t i = 0; i < model.operations.size(); i++)
    {
        result << toString(model.operations[i].type).c_str() << ", ";
    }
    result << std::endl;

    result << "Outputs: ";
    for (uint32_t i = 0; i < model.outputIndexes.size(); i++)
    {
        result << GetOperandSummary(model.operands[model.outputIndexes[i]]) << ", ";
    }
    result << std::endl;

    return result.str();
}

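// Signature of the helper used to print a single tensor element to the dump file stream.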
using DumpElementFunction = void (*)(const armnn::ConstTensor& tensor,
                                     unsigned int elementIndex,
                                     std::ofstream& fileStream);

namespace
{
template <typename ElementType, typename PrintableType = ElementType>
void DumpTensorElement(const armnn::ConstTensor& tensor, unsigned int elementIndex, std::ofstream& fileStream)
{
    const ElementType* elements = reinterpret_cast<const ElementType*>(tensor.GetMemoryArea());
    fileStream << static_cast<PrintableType>(elements[elementIndex]) << ",";
}

constexpr const char* MemoryLayoutString(const armnn::ConstTensor& tensor)
{
    const char* str = "";

    switch (tensor.GetNumDimensions())
    {
        case 4: { str = "(BHWC) "; break; }
        case 3: { str = "(HWC) "; break; }
        case 2: { str = "(HW) "; break; }
        default: { str = ""; break; }
    }

    return str;
}
} // namespace

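// Writes the contents of a tensor to "<dumpDir>/<requestName>_<tensorName>.dump" as text.
// Elements are printed one H x W plane per channel, preceded by comment lines giving the
// element count and dimensions. Illustrative output for a 1x2x2x3 Float32 tensor whose
// elements hold their own NHWC memory-order indices 0..11:
//
//   # Number of elements 12
//   # Dimensions (BHWC) [1,2,2,3]
//   # Batch 0
//   # Channel 0
//   0,3,
//   6,9,
//   # Channel 1
//   1,4,
//   7,10,
//   # Channel 2
//   2,5,
//   8,11,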
void DumpTensor(const std::string& dumpDir,
                const std::string& requestName,
                const std::string& tensorName,
                const armnn::ConstTensor& tensor)
{
    // The dump directory must exist in advance.
    const std::string fileName = boost::str(boost::format("%1%/%2%_%3%.dump") % dumpDir % requestName % tensorName);

    std::ofstream fileStream;
    fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);

    if (!fileStream.good())
    {
        ALOGW("Could not open file %s for writing", fileName.c_str());
        return;
    }

    DumpElementFunction dumpElementFunction = nullptr;

    switch (tensor.GetDataType())
    {
        case armnn::DataType::Float32:
        {
            dumpElementFunction = &DumpTensorElement<float>;
            break;
        }
        case armnn::DataType::QuantisedAsymm8:
        {
            dumpElementFunction = &DumpTensorElement<uint8_t, uint32_t>;
            break;
        }
        case armnn::DataType::Signed32:
        {
            dumpElementFunction = &DumpTensorElement<int32_t>;
            break;
        }
        default:
        {
            dumpElementFunction = nullptr;
        }
    }

    if (dumpElementFunction != nullptr)
    {
        const unsigned int numDimensions = tensor.GetNumDimensions();

        const unsigned int batch = (numDimensions == 4) ? tensor.GetShape()[numDimensions - 4] : 1;

        const unsigned int height = (numDimensions >= 3)
                                    ? tensor.GetShape()[numDimensions - 3]
                                    : (numDimensions >= 2) ? tensor.GetShape()[numDimensions - 2] : 1;

        const unsigned int width = (numDimensions >= 3)
                                   ? tensor.GetShape()[numDimensions - 2]
                                   : (numDimensions >= 1) ? tensor.GetShape()[numDimensions - 1] : 0;

        const unsigned int channels = (numDimensions >= 3) ? tensor.GetShape()[numDimensions - 1] : 1;

        fileStream << "# Number of elements " << tensor.GetNumElements() << std::endl;
        fileStream << "# Dimensions " << MemoryLayoutString(tensor);
        fileStream << "[" << tensor.GetShape()[0];
        for (unsigned int d = 1; d < numDimensions; d++)
        {
            fileStream << "," << tensor.GetShape()[d];
        }
        fileStream << "]" << std::endl;

        for (unsigned int e = 0, b = 0; b < batch; ++b)
        {
            if (numDimensions >= 4)
            {
                fileStream << "# Batch " << b << std::endl;
            }
            for (unsigned int c = 0; c < channels; c++)
            {
                if (numDimensions >= 3)
                {
                    fileStream << "# Channel " << c << std::endl;
                }
                for (unsigned int h = 0; h < height; h++)
                {
                    for (unsigned int w = 0; w < width; w++, e += channels)
                    {
                        (*dumpElementFunction)(tensor, e, fileStream);
                    }
                    fileStream << std::endl;
                }
                // Rewind e from one past the end of this channel's plane to the first element
                // of the next channel, which is one past this channel's first element. After
                // the last channel, the first adjustment alone leaves e at the start of the
                // next batch.
                e -= channels - 1;
                if (c < channels - 1)
                {
                    e -= ((height * width) - 1) * channels;
                }
            }
            fileStream << std::endl;
        }
        fileStream << std::endl;
    }
    else
    {
        fileStream << "Cannot dump tensor elements: Unsupported data type "
                   << static_cast<unsigned int>(tensor.GetDataType()) << std::endl;
    }

    if (!fileStream.good())
    {
        ALOGW("An error occurred when writing to file %s", fileName.c_str());
    }
}

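// If GPU profiling is enabled, writes the collected profiling information for the given
// network to "<dumpDir>/<networkId>_profiling.json". Does nothing when profiling is
// disabled or no dump directory is set.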
void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
                                 const std::string& dumpDir,
                                 armnn::NetworkId networkId,
                                 const armnn::IProfiler* profiler)
{
    // Check if profiling is required.
    if (!gpuProfilingEnabled)
    {
        return;
    }

    // The dump directory must exist in advance.
    if (dumpDir.empty())
    {
        return;
    }

    BOOST_ASSERT(profiler);

    // Set the name of the output profiling file.
    const std::string fileName = boost::str(boost::format("%1%/%2%_%3%.json")
                                            % dumpDir
                                            % std::to_string(networkId)
                                            % "profiling");

    // Open the output file for writing.
    std::ofstream fileStream;
    fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);

    if (!fileStream.good())
    {
        ALOGW("Could not open file %s for writing", fileName.c_str());
        return;
    }

    // Write the profiling info to a JSON file.
    profiler->Print(fileStream);
}

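// Serializes the optimized network to GraphViz .dot format, writing it to
// "<dumpDir>/networkgraph_<model address in hex>.dot". Does nothing when no dump
// directory is set.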
void ExportNetworkGraphToDotFile(const armnn::IOptimizedNetwork& optimizedNetwork,
                                 const std::string& dumpDir,
                                 const neuralnetworks::V1_0::Model& model)
{
    // The dump directory must exist in advance.
    if (dumpDir.empty())
    {
        return;
    }

    // Convert the model's memory address to an uppercase hex string (at least one character wide).
    size_t modelAddress = uintptr_t(&model);
    std::stringstream ss;
    ss << std::uppercase << std::hex << std::setfill('0') << std::setw(1) << modelAddress;
    std::string modelAddressHexString = ss.str();

    // Set the name of the output .dot file.
    const std::string fileName = boost::str(boost::format("%1%/networkgraph_%2%.dot")
                                            % dumpDir
                                            % modelAddressHexString);

    ALOGV("Exporting the optimized network graph to file: %s", fileName.c_str());

    // Write the network graph to a dot file.
    std::ofstream fileStream;
    fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);

    if (!fileStream.good())
    {
        ALOGW("Could not open file %s for writing", fileName.c_str());
        return;
    }

    if (optimizedNetwork.SerializeToDot(fileStream) != armnn::Status::Success)
    {
        ALOGW("An error occurred when writing to file %s", fileName.c_str());
    }
}

} // namespace armnn_driver