//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
5
6#define LOG_TAG "ArmnnDriver"
7
8#include "Utils.hpp"
9
10#include <Permute.hpp>
11
telsoa015307bc12018-03-09 13:51:08 +000012#include <cassert>
13#include <cinttypes>
telsoa015307bc12018-03-09 13:51:08 +000014
15using namespace android;
telsoa01ce3e84a2018-08-31 09:31:35 +010016using namespace android::hardware;
telsoa015307bc12018-03-09 13:51:08 +000017using namespace android::hidl::memory::V1_0;
18
namespace armnn_driver
{
// An empty PermutationVector, exported for callers that need to request
// "no permutation" — presumably treated as the identity mapping by
// armnnUtils::Permute (usage not visible in this file; confirm in Permute.hpp).
const armnn::PermutationVector g_DontPermute{};
22
23namespace
24{
25
26template <typename T>
27void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorShape& inTensorShape, const void* input,
28 void* output, const armnn::PermutationVector& mappings)
29{
30 const auto inputData = static_cast<const T*>(input);
31 const auto outputData = static_cast<T*>(output);
32
33 armnnUtils::Permute(armnnUtils::Permuted(inTensorShape, mappings), mappings, inputData, outputData);
34}
35
36} // anonymous namespace
37
38void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorInfo& tensor, const void* input, void* output,
39 const armnn::PermutationVector& mappings)
40{
41 assert(tensor.GetNumDimensions() == 4U);
42
43 switch(tensor.GetDataType())
44 {
45 case armnn::DataType::Float32:
46 SwizzleAndroidNn4dTensorToArmNn<float>(tensor.GetShape(), input, output, mappings);
47 break;
48 case armnn::DataType::QuantisedAsymm8:
49 SwizzleAndroidNn4dTensorToArmNn<uint8_t>(tensor.GetShape(), input, output, mappings);
50 break;
51 default:
52 ALOGW("Unknown armnn::DataType for swizzling");
53 assert(0);
54 }
55}
56
57void* GetMemoryFromPool(DataLocation location, const std::vector<android::nn::RunTimePoolInfo>& memPools)
58{
59 // find the location within the pool
60 assert(location.poolIndex < memPools.size());
61
surmeh01deb3bdb2018-07-05 12:06:04 +010062 const android::nn::RunTimePoolInfo& memPool = memPools[location.poolIndex];
63
64 // Type android::nn::RunTimePoolInfo has changed between Android O and Android P, where
65 // "buffer" has been made private and must be accessed via the accessor method "getBuffer".
66#if defined(ARMNN_ANDROID_P) // Use the new Android P implementation.
67 uint8_t* memPoolBuffer = memPool.getBuffer();
68#else // Fallback to the old Android O implementation.
69 uint8_t* memPoolBuffer = memPool.buffer;
70#endif
71
72 uint8_t* memory = memPoolBuffer + location.offset;
telsoa015307bc12018-03-09 13:51:08 +000073
74 return memory;
75}
76
77armnn::TensorInfo GetTensorInfoForOperand(const Operand& operand)
78{
79 armnn::DataType type;
80
81 switch (operand.type)
82 {
83 case OperandType::TENSOR_FLOAT32:
84 type = armnn::DataType::Float32;
85 break;
86 case OperandType::TENSOR_QUANT8_ASYMM:
87 type = armnn::DataType::QuantisedAsymm8;
88 break;
89 case OperandType::TENSOR_INT32:
90 type = armnn::DataType::Signed32;
91 break;
92 default:
93 throw UnsupportedOperand(operand.type);
94 }
95
96 armnn::TensorInfo ret(operand.dimensions.size(), operand.dimensions.data(), type);
97
98 ret.SetQuantizationScale(operand.scale);
99 ret.SetQuantizationOffset(operand.zeroPoint);
100
101 return ret;
102}
103
104std::string GetOperandSummary(const Operand& operand)
105{
106 return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " +
107 toString(operand.type);
108}
109
// Signature of the per-element printer used by DumpTensor: writes the tensor
// element at elementIndex to the given dump file stream.
using DumpElementFunction = void (*)(const armnn::ConstTensor& tensor,
                                     unsigned int elementIndex,
                                     std::ofstream& fileStream);
113
114namespace
115{
116template <typename ElementType, typename PrintableType = ElementType>
117void DumpTensorElement(const armnn::ConstTensor& tensor, unsigned int elementIndex, std::ofstream& fileStream)
118{
119 const ElementType* elements = reinterpret_cast<const ElementType*>(tensor.GetMemoryArea());
120 fileStream << static_cast<PrintableType>(elements[elementIndex]) << ",";
121}
122
123constexpr const char* MemoryLayoutString(const armnn::ConstTensor& tensor)
124{
125 const char* str = "";
126
127 switch (tensor.GetNumDimensions())
128 {
129 case 4: { str = "(BHWC) "; break; }
130 case 3: { str = "(HWC) "; break; }
131 case 2: { str = "(HW) "; break; }
132 default: { str = ""; break; }
133 }
134
135 return str;
136}
137} // namespace
138
139void DumpTensor(const std::string& dumpDir,
140 const std::string& requestName,
141 const std::string& tensorName,
142 const armnn::ConstTensor& tensor)
143{
144 // The dump directory must exist in advance.
145 const std::string fileName = boost::str(boost::format("%1%/%2%_%3%.dump") % dumpDir % requestName % tensorName);
146
147 std::ofstream fileStream;
148 fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);
149
150 if (!fileStream.good())
151 {
152 ALOGW("Could not open file %s for writing", fileName.c_str());
153 return;
154 }
155
156 DumpElementFunction dumpElementFunction = nullptr;
157
158 switch (tensor.GetDataType())
159 {
160 case armnn::DataType::Float32:
161 {
162 dumpElementFunction = &DumpTensorElement<float>;
163 break;
164 }
165 case armnn::DataType::QuantisedAsymm8:
166 {
167 dumpElementFunction = &DumpTensorElement<uint8_t, uint32_t>;
168 break;
169 }
170 case armnn::DataType::Signed32:
171 {
172 dumpElementFunction = &DumpTensorElement<int32_t>;
173 break;
174 }
175 default:
176 {
177 dumpElementFunction = nullptr;
178 }
179 }
180
181 if (dumpElementFunction != nullptr)
182 {
183 const unsigned int numDimensions = tensor.GetNumDimensions();
184
185 const unsigned int batch = (numDimensions == 4) ? tensor.GetShape()[numDimensions - 4] : 1;
186
187 const unsigned int height = (numDimensions >= 3)
188 ? tensor.GetShape()[numDimensions - 3]
189 : (numDimensions >= 2) ? tensor.GetShape()[numDimensions - 2] : 1;
190
191 const unsigned int width = (numDimensions >= 3)
192 ? tensor.GetShape()[numDimensions - 2]
193 : (numDimensions >= 1) ? tensor.GetShape()[numDimensions - 1] : 0;
194
195 const unsigned int channels = (numDimensions >= 3) ? tensor.GetShape()[numDimensions - 1] : 1;
196
197 fileStream << "# Number of elements " << tensor.GetNumElements() << std::endl;
198 fileStream << "# Dimensions " << MemoryLayoutString(tensor);
199 fileStream << "[" << tensor.GetShape()[0];
200 for (unsigned int d = 1; d < numDimensions; d++)
201 {
202 fileStream << "," << tensor.GetShape()[d];
203 }
204 fileStream << "]" << std::endl;
205
206 for (unsigned int e = 0, b = 0; b < batch; ++b)
207 {
208 if (numDimensions >= 4)
209 {
210 fileStream << "# Batch " << b << std::endl;
211 }
212 for (unsigned int c = 0; c < channels; c++)
213 {
214 if (numDimensions >= 3)
215 {
216 fileStream << "# Channel " << c << std::endl;
217 }
218 for (unsigned int h = 0; h < height; h++)
219 {
220 for (unsigned int w = 0; w < width; w++, e += channels)
221 {
222 (*dumpElementFunction)(tensor, e, fileStream);
223 }
224 fileStream << std::endl;
225 }
226 e -= channels - 1;
227 if (c < channels)
228 {
229 e -= ((height * width) - 1) * channels;
230 }
231 }
232 fileStream << std::endl;
233 }
234 fileStream << std::endl;
235 }
236 else
237 {
238 fileStream << "Cannot dump tensor elements: Unsupported data type "
239 << static_cast<unsigned int>(tensor.GetDataType()) << std::endl;
240 }
241
242 if (!fileStream.good())
243 {
244 ALOGW("An error occurred when writing to file %s", fileName.c_str());
245 }
246}
247
telsoa01ce3e84a2018-08-31 09:31:35 +0100248void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
249 const std::string& dumpDir,
250 armnn::NetworkId networkId,
251 const armnn::IProfiler* profiler)
252{
253 // Check if profiling is required.
254 if (!gpuProfilingEnabled)
255 {
256 return;
257 }
258
259 // The dump directory must exist in advance.
260 if (dumpDir.empty())
261 {
262 return;
263 }
264
265 BOOST_ASSERT(profiler);
266
267 // Set the name of the output profiling file.
268 const std::string fileName = boost::str(boost::format("%1%/%2%_%3%.json")
269 % dumpDir
270 % std::to_string(networkId)
271 % "profiling");
272
273 // Open the ouput file for writing.
274 std::ofstream fileStream;
275 fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);
276
277 if (!fileStream.good())
278 {
279 ALOGW("Could not open file %s for writing", fileName.c_str());
280 return;
281 }
282
283 // Write the profiling info to a JSON file.
284 profiler->Print(fileStream);
285}
286
telsoa015307bc12018-03-09 13:51:08 +0000287} // namespace armnn_driver