blob: 6ee025e6c2fcad2b3db9226e7a3e532463ec2f3a [file] [log] [blame]
telsoa015307bc12018-03-09 13:51:08 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
David Beck93e48982018-09-05 13:05:09 +01003// SPDX-License-Identifier: MIT
telsoa015307bc12018-03-09 13:51:08 +00004//
5
6#define LOG_TAG "ArmnnDriver"
7
8#include "Utils.hpp"
Jim Flynnf2e175c2019-12-12 15:11:30 +00009#include "Half.hpp"
telsoa015307bc12018-03-09 13:51:08 +000010
Matteo Martincigh00d6ed12019-11-28 17:13:24 +000011#include <armnnUtils/Permute.hpp>
12
Derek Lambertid00ad912020-01-22 15:55:16 +000013#include <armnn/Utils.hpp>
14
telsoa015307bc12018-03-09 13:51:08 +000015#include <cassert>
16#include <cinttypes>
telsoa015307bc12018-03-09 13:51:08 +000017
18using namespace android;
telsoa01ce3e84a2018-08-31 09:31:35 +010019using namespace android::hardware;
telsoa015307bc12018-03-09 13:51:08 +000020using namespace android::hidl::memory::V1_0;
21
22namespace armnn_driver
23{
24const armnn::PermutationVector g_DontPermute{};
25
26namespace
27{
28
telsoa015307bc12018-03-09 13:51:08 +000029void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorShape& inTensorShape, const void* input,
Matteo Martincighbf19d2a2019-11-29 11:46:50 +000030 void* output, size_t dataTypeSize, const armnn::PermutationVector& mappings)
telsoa015307bc12018-03-09 13:51:08 +000031{
Matteo Martincighbf19d2a2019-11-29 11:46:50 +000032 assert(inTensorShape.GetNumDimensions() == 4U);
telsoa015307bc12018-03-09 13:51:08 +000033
Matteo Martincighbf19d2a2019-11-29 11:46:50 +000034 armnnUtils::Permute(armnnUtils::Permuted(inTensorShape, mappings), mappings, input, output, dataTypeSize);
telsoa015307bc12018-03-09 13:51:08 +000035}
36
37} // anonymous namespace
38
39void SwizzleAndroidNn4dTensorToArmNn(const armnn::TensorInfo& tensor, const void* input, void* output,
40 const armnn::PermutationVector& mappings)
41{
42 assert(tensor.GetNumDimensions() == 4U);
43
Matteo Martincighbf19d2a2019-11-29 11:46:50 +000044 armnn::DataType dataType = tensor.GetDataType();
45 switch (dataType)
telsoa015307bc12018-03-09 13:51:08 +000046 {
Mike Kelly3c673942019-07-25 09:26:06 +010047 case armnn::DataType::Float16:
telsoa015307bc12018-03-09 13:51:08 +000048 case armnn::DataType::Float32:
Derek Lamberti1a38cda2020-01-10 17:28:20 +000049 case armnn::DataType::QAsymmU8:
Derek Lambertid00ad912020-01-22 15:55:16 +000050 case armnn::DataType::QSymmS8:
Matteo Martincighbf19d2a2019-11-29 11:46:50 +000051 SwizzleAndroidNn4dTensorToArmNn(tensor.GetShape(), input, output, armnn::GetDataTypeSize(dataType), mappings);
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +000052 break;
telsoa015307bc12018-03-09 13:51:08 +000053 default:
54 ALOGW("Unknown armnn::DataType for swizzling");
55 assert(0);
56 }
57}
58
/// Resolves a DataLocation to a raw pointer inside one of the request's memory pools.
/// @param location  Pool index and byte offset as supplied by the NNAPI request.
/// @param memPools  The mapped runtime pools for the current request.
/// @return Pointer to the byte at 'location.offset' within the selected pool.
///         NOTE(review): neither the offset nor the buffer pointer is range/null checked here;
///         callers are assumed to pass validated locations — confirm against call sites.
void* GetMemoryFromPool(DataLocation location, const std::vector<android::nn::RunTimePoolInfo>& memPools)
{
    // find the location within the pool
    assert(location.poolIndex < memPools.size());

    const android::nn::RunTimePoolInfo& memPool = memPools[location.poolIndex];

    // Type android::nn::RunTimePoolInfo has changed between Android O and Android P, where
    // "buffer" has been made private and must be accessed via the accessor method "getBuffer".
#if defined(ARMNN_ANDROID_P) || defined(ARMNN_ANDROID_Q) // Use the new Android implementation.
    uint8_t* memPoolBuffer = memPool.getBuffer();
#else // Fallback to the old Android O implementation.
    uint8_t* memPoolBuffer = memPool.buffer;
#endif

    uint8_t* memory = memPoolBuffer + location.offset;

    return memory;
}
78
Matthew Bentham912b3622019-05-03 15:49:14 +010079armnn::TensorInfo GetTensorInfoForOperand(const V1_0::Operand& operand)
telsoa015307bc12018-03-09 13:51:08 +000080{
81 armnn::DataType type;
82
83 switch (operand.type)
84 {
Matthew Bentham912b3622019-05-03 15:49:14 +010085 case V1_0::OperandType::TENSOR_FLOAT32:
telsoa015307bc12018-03-09 13:51:08 +000086 type = armnn::DataType::Float32;
87 break;
Matthew Bentham912b3622019-05-03 15:49:14 +010088 case V1_0::OperandType::TENSOR_QUANT8_ASYMM:
Derek Lamberti1a38cda2020-01-10 17:28:20 +000089 type = armnn::DataType::QAsymmU8;
telsoa015307bc12018-03-09 13:51:08 +000090 break;
Matthew Bentham912b3622019-05-03 15:49:14 +010091 case V1_0::OperandType::TENSOR_INT32:
telsoa015307bc12018-03-09 13:51:08 +000092 type = armnn::DataType::Signed32;
93 break;
94 default:
Mike Kellyb5fdf382019-06-11 16:35:25 +010095 throw UnsupportedOperand<V1_0::OperandType>(operand.type);
telsoa015307bc12018-03-09 13:51:08 +000096 }
97
98 armnn::TensorInfo ret(operand.dimensions.size(), operand.dimensions.data(), type);
99
100 ret.SetQuantizationScale(operand.scale);
101 ret.SetQuantizationOffset(operand.zeroPoint);
102
103 return ret;
104}
105
Mike Kellyb5fdf382019-06-11 16:35:25 +0100106#ifdef ARMNN_ANDROID_NN_V1_2 // Using ::android::hardware::neuralnetworks::V1_2
107
108armnn::TensorInfo GetTensorInfoForOperand(const V1_2::Operand& operand)
109{
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000110 using namespace armnn;
Derek Lambertid00ad912020-01-22 15:55:16 +0000111 bool perChannel = false;
Mike Kellyb5fdf382019-06-11 16:35:25 +0100112
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000113 DataType type;
Mike Kellyb5fdf382019-06-11 16:35:25 +0100114 switch (operand.type)
115 {
116 case V1_2::OperandType::TENSOR_FLOAT32:
117 type = armnn::DataType::Float32;
118 break;
Mike Kelly3c673942019-07-25 09:26:06 +0100119 case V1_2::OperandType::TENSOR_FLOAT16:
120 type = armnn::DataType::Float16;
121 break;
Mike Kellyb5fdf382019-06-11 16:35:25 +0100122 case V1_2::OperandType::TENSOR_QUANT8_ASYMM:
Derek Lamberti1a38cda2020-01-10 17:28:20 +0000123 type = armnn::DataType::QAsymmU8;
Mike Kellyb5fdf382019-06-11 16:35:25 +0100124 break;
Derek Lambertid00ad912020-01-22 15:55:16 +0000125 case V1_2::OperandType::TENSOR_QUANT8_SYMM_PER_CHANNEL:
126 perChannel=true;
127 ARMNN_FALLTHROUGH;
Mike Kelly0e2e31b2019-11-19 09:16:00 +0000128 case V1_2::OperandType::TENSOR_QUANT8_SYMM:
FinnWilliamsArm624fe9f2019-12-06 17:12:42 +0000129 type = armnn::DataType::QSymmS8;
Mike Kelly0e2e31b2019-11-19 09:16:00 +0000130 break;
Mike Kellyb5fdf382019-06-11 16:35:25 +0100131 case V1_2::OperandType::TENSOR_QUANT16_SYMM:
Derek Lamberti1a38cda2020-01-10 17:28:20 +0000132 type = armnn::DataType::QSymmS16;
Mike Kellyb5fdf382019-06-11 16:35:25 +0100133 break;
134 case V1_2::OperandType::TENSOR_INT32:
135 type = armnn::DataType::Signed32;
136 break;
137 default:
138 throw UnsupportedOperand<V1_2::OperandType>(operand.type);
139 }
140
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000141 TensorInfo ret(operand.dimensions.size(), operand.dimensions.data(), type);
Derek Lambertid00ad912020-01-22 15:55:16 +0000142 if (perChannel)
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000143 {
144 // ExtraParams is expected to be of type channelQuant
145 BOOST_ASSERT(operand.extraParams.getDiscriminator() ==
146 V1_2::Operand::ExtraParams::hidl_discriminator::channelQuant);
Mike Kellyb5fdf382019-06-11 16:35:25 +0100147
Aron Virginas-Tar9f0693b2019-11-06 14:32:30 +0000148 auto perAxisQuantParams = operand.extraParams.channelQuant();
149
150 ret.SetQuantizationScales(perAxisQuantParams.scales);
151 ret.SetQuantizationDim(MakeOptional<unsigned int>(perAxisQuantParams.channelDim));
152 }
153 else
154 {
155 ret.SetQuantizationScale(operand.scale);
156 ret.SetQuantizationOffset(operand.zeroPoint);
157 }
Mike Kellyb5fdf382019-06-11 16:35:25 +0100158
159 return ret;
160}
161
162#endif
163
Matthew Bentham912b3622019-05-03 15:49:14 +0100164std::string GetOperandSummary(const V1_0::Operand& operand)
telsoa015307bc12018-03-09 13:51:08 +0000165{
166 return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " +
167 toString(operand.type);
168}
169
Mike Kellyb5fdf382019-06-11 16:35:25 +0100170#ifdef ARMNN_ANDROID_NN_V1_2 // Using ::android::hardware::neuralnetworks::V1_2
171
172std::string GetOperandSummary(const V1_2::Operand& operand)
173{
174 return android::hardware::details::arrayToString(operand.dimensions, operand.dimensions.size()) + " " +
175 toString(operand.type);
176}
177
178#endif
179
telsoa015307bc12018-03-09 13:51:08 +0000180using DumpElementFunction = void (*)(const armnn::ConstTensor& tensor,
181 unsigned int elementIndex,
182 std::ofstream& fileStream);
183
184namespace
185{
186template <typename ElementType, typename PrintableType = ElementType>
187void DumpTensorElement(const armnn::ConstTensor& tensor, unsigned int elementIndex, std::ofstream& fileStream)
188{
189 const ElementType* elements = reinterpret_cast<const ElementType*>(tensor.GetMemoryArea());
190 fileStream << static_cast<PrintableType>(elements[elementIndex]) << ",";
191}
192
193constexpr const char* MemoryLayoutString(const armnn::ConstTensor& tensor)
194{
195 const char* str = "";
196
197 switch (tensor.GetNumDimensions())
198 {
199 case 4: { str = "(BHWC) "; break; }
200 case 3: { str = "(HWC) "; break; }
201 case 2: { str = "(HW) "; break; }
202 default: { str = ""; break; }
203 }
204
205 return str;
206}
207} // namespace
208
209void DumpTensor(const std::string& dumpDir,
210 const std::string& requestName,
211 const std::string& tensorName,
212 const armnn::ConstTensor& tensor)
213{
214 // The dump directory must exist in advance.
215 const std::string fileName = boost::str(boost::format("%1%/%2%_%3%.dump") % dumpDir % requestName % tensorName);
216
217 std::ofstream fileStream;
218 fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);
219
220 if (!fileStream.good())
221 {
222 ALOGW("Could not open file %s for writing", fileName.c_str());
223 return;
224 }
225
226 DumpElementFunction dumpElementFunction = nullptr;
227
228 switch (tensor.GetDataType())
229 {
230 case armnn::DataType::Float32:
231 {
232 dumpElementFunction = &DumpTensorElement<float>;
233 break;
234 }
Derek Lamberti1a38cda2020-01-10 17:28:20 +0000235 case armnn::DataType::QAsymmU8:
telsoa015307bc12018-03-09 13:51:08 +0000236 {
237 dumpElementFunction = &DumpTensorElement<uint8_t, uint32_t>;
238 break;
239 }
240 case armnn::DataType::Signed32:
241 {
242 dumpElementFunction = &DumpTensorElement<int32_t>;
243 break;
244 }
Jim Flynnf2e175c2019-12-12 15:11:30 +0000245 case armnn::DataType::Float16:
246 {
247 dumpElementFunction = &DumpTensorElement<armnn::Half>;
248 break;
249 }
telsoa015307bc12018-03-09 13:51:08 +0000250 default:
251 {
252 dumpElementFunction = nullptr;
253 }
254 }
255
256 if (dumpElementFunction != nullptr)
257 {
258 const unsigned int numDimensions = tensor.GetNumDimensions();
259
260 const unsigned int batch = (numDimensions == 4) ? tensor.GetShape()[numDimensions - 4] : 1;
261
262 const unsigned int height = (numDimensions >= 3)
263 ? tensor.GetShape()[numDimensions - 3]
264 : (numDimensions >= 2) ? tensor.GetShape()[numDimensions - 2] : 1;
265
266 const unsigned int width = (numDimensions >= 3)
267 ? tensor.GetShape()[numDimensions - 2]
268 : (numDimensions >= 1) ? tensor.GetShape()[numDimensions - 1] : 0;
269
270 const unsigned int channels = (numDimensions >= 3) ? tensor.GetShape()[numDimensions - 1] : 1;
271
272 fileStream << "# Number of elements " << tensor.GetNumElements() << std::endl;
273 fileStream << "# Dimensions " << MemoryLayoutString(tensor);
274 fileStream << "[" << tensor.GetShape()[0];
275 for (unsigned int d = 1; d < numDimensions; d++)
276 {
277 fileStream << "," << tensor.GetShape()[d];
278 }
279 fileStream << "]" << std::endl;
280
281 for (unsigned int e = 0, b = 0; b < batch; ++b)
282 {
283 if (numDimensions >= 4)
284 {
285 fileStream << "# Batch " << b << std::endl;
286 }
287 for (unsigned int c = 0; c < channels; c++)
288 {
289 if (numDimensions >= 3)
290 {
291 fileStream << "# Channel " << c << std::endl;
292 }
293 for (unsigned int h = 0; h < height; h++)
294 {
295 for (unsigned int w = 0; w < width; w++, e += channels)
296 {
297 (*dumpElementFunction)(tensor, e, fileStream);
298 }
299 fileStream << std::endl;
300 }
301 e -= channels - 1;
302 if (c < channels)
303 {
304 e -= ((height * width) - 1) * channels;
305 }
306 }
307 fileStream << std::endl;
308 }
309 fileStream << std::endl;
310 }
311 else
312 {
313 fileStream << "Cannot dump tensor elements: Unsupported data type "
314 << static_cast<unsigned int>(tensor.GetDataType()) << std::endl;
315 }
316
317 if (!fileStream.good())
318 {
319 ALOGW("An error occurred when writing to file %s", fileName.c_str());
320 }
321}
322
telsoa01ce3e84a2018-08-31 09:31:35 +0100323void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
324 const std::string& dumpDir,
325 armnn::NetworkId networkId,
326 const armnn::IProfiler* profiler)
327{
328 // Check if profiling is required.
329 if (!gpuProfilingEnabled)
330 {
331 return;
332 }
333
334 // The dump directory must exist in advance.
335 if (dumpDir.empty())
336 {
337 return;
338 }
339
340 BOOST_ASSERT(profiler);
341
342 // Set the name of the output profiling file.
343 const std::string fileName = boost::str(boost::format("%1%/%2%_%3%.json")
344 % dumpDir
345 % std::to_string(networkId)
346 % "profiling");
347
348 // Open the ouput file for writing.
349 std::ofstream fileStream;
350 fileStream.open(fileName, std::ofstream::out | std::ofstream::trunc);
351
352 if (!fileStream.good())
353 {
354 ALOGW("Could not open file %s for writing", fileName.c_str());
355 return;
356 }
357
358 // Write the profiling info to a JSON file.
359 profiler->Print(fileStream);
360}
361
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100362bool IsDynamicTensor(const armnn::TensorInfo& outputInfo)
363{
364 // Dynamic tensors have at least one 0-sized dimension
365 return outputInfo.GetNumElements() == 0u;
366}
367
telsoa015307bc12018-03-09 13:51:08 +0000368} // namespace armnn_driver