blob: 2f9a4a345c618055524dc13d7b29b3a6657e63ba [file] [log] [blame]
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
5
6#define LOG_TAG "ArmnnDriver"
7
8#include "Utils.hpp"
9
Mike Kelly3c673942019-07-25 09:26:06 +010010#include <Half.hpp>
telsoa015307bc12018-03-09 13:51:08 +000011#include <Permute.hpp>
12
telsoa015307bc12018-03-09 13:51:08 +000013#include <cassert>
14#include <cinttypes>
telsoa015307bc12018-03-09 13:51:08 +000015
16using namespace android;
telsoa01ce3e84a2018-08-31 09:31:35 +010017using namespace android::hardware;
telsoa015307bc12018-03-09 13:51:08 +000018using namespace android::hidl::memory::V1_0;
19
20namespace armnn_driver
21{
// Identity permutation: passing this to the swizzle helpers copies the tensor without reordering.
const armnn::PermutationVector g_DontPermute{};
23
24namespace
25{
26
27template <typename T>
28void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorShape& inTensorShape, const void* input,
29 void* output, const armnn::PermutationVector& mappings)
30{
31 const auto inputData = static_cast<const T*>(input);
32 const auto outputData = static_cast<T*>(output);
33
Matteo Martincigh2c444fc2019-01-07 10:18:47 +000034 armnnUtils::Permute(armnnUtils::Permuted(inTensorShape, mappings), mappings, inputData, outputData, sizeof(T));
telsoa015307bc12018-03-09 13:51:08 +000035}
36
37} // anonymous namespace
38
39void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorInfo& tensor, const void* input, void* output,
40 const armnn::PermutationVector& mappings)
41{
42 assert(tensor.GetNumDimensions() == 4U);
43
44 switch(tensor.GetDataType())
45 {
Mike Kelly3c673942019-07-25 09:26:06 +010046 case armnn::DataType::Float16:
47 SwizzleAndroidNn4dTensorToArmNn<armnn::Half>(tensor.GetShape(), input, output, mappings);
48 break;
telsoa015307bc12018-03-09 13:51:08 +000049 case armnn::DataType::Float32:
50 SwizzleAndroidNn4dTensorToArmNn<float>(tensor.GetShape(), input, output, mappings);
51 break;
52 case armnn::DataType::QuantisedAsymm8:
53 SwizzleAndroidNn4dTensorToArmNn<uint8_t>(tensor.GetShape(), input, output, mappings);
54 break;
55 default:
56 ALOGW("Unknown armnn::DataType for swizzling");
57 assert(0);
58 }
59}
60
/// Resolves a DataLocation (pool index + byte offset) to a raw pointer inside the
/// corresponding request memory pool.
///
/// @param location Pool index and offset describing where the data lives.
/// @param memPools The request's runtime memory pools; location.poolIndex must be in range.
/// @return Pointer to the first byte of the referenced data.
void* GetMemoryFromPool(DataLocation location, const std::vector<android::nn::RunTimePoolInfo>& memPools)
{
    // find the location within the pool
    assert(location.poolIndex < memPools.size());

    const android::nn::RunTimePoolInfo& memPool = memPools[location.poolIndex];

    // Type android::nn::RunTimePoolInfo has changed between Android O and Android P, where
    // "buffer" has been made private and must be accessed via the accessor method "getBuffer".
#if defined(ARMNN_ANDROID_P) || defined(ARMNN_ANDROID_Q) // Use the new Android implementation.
    uint8_t* memPoolBuffer = memPool.getBuffer();
#else // Fallback to the old Android O implementation.
    uint8_t* memPoolBuffer = memPool.buffer;
#endif

    // The location's offset is relative to the start of its pool's buffer.
    uint8_t* memory = memPoolBuffer + location.offset;

    return memory;
}
80
Matthew Bentham912b3622019-05-03 15:49:14 +010081armnn::TensorInfo GetTensorInfoForOperand(const V1_0::Operand& operand)
telsoa015307bc12018-03-09 13:51:08 +000082{
83 armnn::DataType type;
84
85 switch (operand.type)
86 {
Matthew Bentham912b3622019-05-03 15:49:14 +010087 case V1_0::OperandType::TENSOR_FLOAT32:
telsoa015307bc12018-03-09 13:51:08 +000088 type = armnn::DataType::Float32;
89 break;
Matthew Bentham912b3622019-05-03 15:49:14 +010090 case V1_0::OperandType::TENSOR_QUANT8_ASYMM:
telsoa015307bc12018-03-09 13:51:08 +000091 type = armnn::DataType::QuantisedAsymm8;
92 break;
Matthew Bentham912b3622019-05-03 15:49:14 +010093 case V1_0::OperandType::TENSOR_INT32:
telsoa015307bc12018-03-09 13:51:08 +000094 type = armnn::DataType::Signed32;
95 break;
96 default:
Mike Kellyb5fdf382019-06-11 16:35:25 +010097 throw UnsupportedOperand<V1_0::OperandType>(operand.type);
telsoa015307bc12018-03-09 13:51:08 +000098 }
99
100 armnn::TensorInfo ret(operand.dimensions.size(), operand.dimensions.data(), type);
101
102 ret.SetQuantizationScale(operand.scale);
103 ret.SetQuantizationOffset(operand.zeroPoint);
104
105 return ret;
106}
107
#ifdef ARMNN_ANDROID_NN_V1_2 // Using ::android::hardware::neuralnetworks::V1_2

/// Converts an Android NN V1_2 Operand into the equivalent armnn::TensorInfo,
/// carrying over dimensions and quantization parameters. V1_2 adds float16 and
/// the symmetric quantized types on top of the V1_0 set.
///
/// @throws UnsupportedOperand<V1_2::OperandType> if the operand type has no ArmNN mapping.
armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand)
{
    armnn::DataType type;
    switch (operand.type)
    {
        case V1_2::OperandType::TENSOR_FLOAT32:      type = armnn::DataType::Float32;         break;
        case V1_2::OperandType::TENSOR_FLOAT16:      type = armnn::DataType::Float16;         break;
        case V1_2::OperandType::TENSOR_QUANT8_ASYMM: type = armnn::DataType::QuantisedAsymm8; break;
        case V1_2::OperandType::TENSOR_QUANT8_SYMM:  type = armnn::DataType::QuantisedSymm8;  break;
        case V1_2::OperandType::TENSOR_QUANT16_SYMM: type = armnn::DataType::QuantisedSymm16; break;
        case V1_2::OperandType::TENSOR_INT32:        type = armnn::DataType::Signed32;        break;
        default:
            throw UnsupportedOperand<V1_2::OperandType>(operand.type);
    }

    armnn::TensorInfo tensorInfo(operand.dimensions.size(), operand.dimensions.data(), type);
    tensorInfo.SetQuantizationScale(operand.scale);
    tensorInfo.SetQuantizationOffset(operand.zeroPoint);

    return tensorInfo;
}

#endif
147
Matthew Bentham912b3622019-05-03 15:49:14 +0100148std::string GetOperandSummary(const V1_0::Operand& operand)
telsoa015307bc12018-03-09 13:51:08 +0000149{
150 return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " +
151 toString(operand.type);
152}
153
#ifdef ARMNN_ANDROID_NN_V1_2 // Using ::android::hardware::neuralnetworks::V1_2

/// Returns a human-readable "<dimensions> <type>" summary of a V1_2 operand, for logging.
std::string GetOperandSummary(const V1_2::Operand& operand)
{
    std::string summary = android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size());
    summary += " ";
    summary += toString(operand.type);
    return summary;
}

#endif
163
// Signature of the per-element printers used by DumpTensor: writes the element at
// elementIndex (an index into the tensor's flat memory) to fileStream.
using DumpElementFunction = void (*)(const armnn::ConstTensor& tensor,
                                     unsigned int elementIndex,
                                     std::ofstream& fileStream);
167
168namespace
169{
170template <typename ElementType, typename PrintableType = ElementType>
171void DumpTensorElement(const armnn::ConstTensor& tensor, unsigned int elementIndex, std::ofstream& fileStream)
172{
173 const ElementType* elements = reinterpret_cast<const ElementType*>(tensor.GetMemoryArea());
174 fileStream << static_cast<PrintableType>(elements[elementIndex]) << ",";
175}
176
177constexpr const char* MemoryLayoutString(const armnn::ConstTensor& tensor)
178{
179 const char* str = "";
180
181 switch (tensor.GetNumDimensions())
182 {
183 case 4: { str = "(BHWC) "; break; }
184 case 3: { str = "(HWC) "; break; }
185 case 2: { str = "(HW) "; break; }
186 default: { str = ""; break; }
187 }
188
189 return str;
190}
191} // namespace
192
193void DumpTensor(const std::string& dumpDir,
194 const std::string& requestName,
195 const std::string& tensorName,
196 const armnn::ConstTensor& tensor)
197{
198 // The dump directory must exist in advance.
199 const std::string fileName = boost::str(boost::format("%1%/%2%_%3%.dump") % dumpDir % requestName % tensorName);
200
201 std::ofstream fileStream;
202 fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);
203
204 if (!fileStream.good())
205 {
206 ALOGW("Could not open file %s for writing", fileName.c_str());
207 return;
208 }
209
210 DumpElementFunction dumpElementFunction = nullptr;
211
212 switch (tensor.GetDataType())
213 {
214 case armnn::DataType::Float32:
215 {
216 dumpElementFunction = &DumpTensorElement<float>;
217 break;
218 }
219 case armnn::DataType::QuantisedAsymm8:
220 {
221 dumpElementFunction = &DumpTensorElement<uint8_t, uint32_t>;
222 break;
223 }
224 case armnn::DataType::Signed32:
225 {
226 dumpElementFunction = &DumpTensorElement<int32_t>;
227 break;
228 }
229 default:
230 {
231 dumpElementFunction = nullptr;
232 }
233 }
234
235 if (dumpElementFunction != nullptr)
236 {
237 const unsigned int numDimensions = tensor.GetNumDimensions();
238
239 const unsigned int batch = (numDimensions == 4) ? tensor.GetShape()[numDimensions - 4] : 1;
240
241 const unsigned int height = (numDimensions >= 3)
242 ? tensor.GetShape()[numDimensions - 3]
243 : (numDimensions >= 2) ? tensor.GetShape()[numDimensions - 2] : 1;
244
245 const unsigned int width = (numDimensions >= 3)
246 ? tensor.GetShape()[numDimensions - 2]
247 : (numDimensions >= 1) ? tensor.GetShape()[numDimensions - 1] : 0;
248
249 const unsigned int channels = (numDimensions >= 3) ? tensor.GetShape()[numDimensions - 1] : 1;
250
251 fileStream << "# Number of elements " << tensor.GetNumElements() << std::endl;
252 fileStream << "# Dimensions " << MemoryLayoutString(tensor);
253 fileStream << "[" << tensor.GetShape()[0];
254 for (unsigned int d = 1; d < numDimensions; d++)
255 {
256 fileStream << "," << tensor.GetShape()[d];
257 }
258 fileStream << "]" << std::endl;
259
260 for (unsigned int e = 0, b = 0; b < batch; ++b)
261 {
262 if (numDimensions >= 4)
263 {
264 fileStream << "# Batch " << b << std::endl;
265 }
266 for (unsigned int c = 0; c < channels; c++)
267 {
268 if (numDimensions >= 3)
269 {
270 fileStream << "# Channel " << c << std::endl;
271 }
272 for (unsigned int h = 0; h < height; h++)
273 {
274 for (unsigned int w = 0; w < width; w++, e += channels)
275 {
276 (*dumpElementFunction)(tensor, e, fileStream);
277 }
278 fileStream << std::endl;
279 }
280 e -= channels - 1;
281 if (c < channels)
282 {
283 e -= ((height * width) - 1) * channels;
284 }
285 }
286 fileStream << std::endl;
287 }
288 fileStream << std::endl;
289 }
290 else
291 {
292 fileStream << "Cannot dump tensor elements: Unsupported data type "
293 << static_cast<unsigned int>(tensor.GetDataType()) << std::endl;
294 }
295
296 if (!fileStream.good())
297 {
298 ALOGW("An error occurred when writing to file %s", fileName.c_str());
299 }
300}
301
telsoa01ce3e84a2018-08-31 09:31:35 +0100302void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
303 const std::string& dumpDir,
304 armnn::NetworkId networkId,
305 const armnn::IProfiler* profiler)
306{
307 // Check if profiling is required.
308 if (!gpuProfilingEnabled)
309 {
310 return;
311 }
312
313 // The dump directory must exist in advance.
314 if (dumpDir.empty())
315 {
316 return;
317 }
318
319 BOOST_ASSERT(profiler);
320
321 // Set the name of the output profiling file.
322 const std::string fileName = boost::str(boost::format("%1%/%2%_%3%.json")
323 % dumpDir
324 % std::to_string(networkId)
325 % "profiling");
326
327 // Open the ouput file for writing.
328 std::ofstream fileStream;
329 fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);
330
331 if (!fileStream.good())
332 {
333 ALOGW("Could not open file %s for writing", fileName.c_str());
334 return;
335 }
336
337 // Write the profiling info to a JSON file.
338 profiler->Print(fileStream);
339}
340
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100341bool IsDynamicTensor(const armnn::TensorInfo& outputInfo)
342{
343 // Dynamic tensors have at least one 0-sized dimension
344 return outputInfo.GetNumElements() == 0u;
345}
346
telsoa015307bc12018-03-09 13:51:08 +0000347} // namespace armnn_driver