blob: b94fd5e126d80082166c2f7742e3ea9a817c3115 [file] [log] [blame]
//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
5
#pragma once
#include <armnn/ArmNN.hpp>

#include <CpuExecutor.h>
#include <nnapi/OperandTypes.h>
#include <nnapi/Result.h>
#include <nnapi/Types.h>

#include <vector>
#include <string>
#include <fstream>
#include <iomanip>
19namespace armnn_driver
20{
21
22using namespace android::nn;
23
24extern const armnn::PermutationVector g_DontPermute;
25
/// Exception thrown when the driver encounters an operand whose type it
/// cannot handle. The offending type is stored in m_type so the catch site
/// can report exactly which operand type was rejected.
template <typename OperandType>
class UnsupportedOperand: public std::runtime_error
{
public:
    /// @param type The unsupported operand type, kept for diagnostics.
    /// explicit: an operand type should never silently convert into an exception.
    explicit UnsupportedOperand(const OperandType type)
        : std::runtime_error("Operand type is unsupported")
        , m_type(type)
    {}

    OperandType m_type;
};
37
/// Swizzles tensor data in @a input according to the dimension mappings,
/// writing the permuted result to @a output.
void SwizzleAndroidNn4dTensorToArmNn(armnn::TensorInfo& tensor,
                                     const void* input,
                                     void* output,
                                     const armnn::PermutationVector& mappings);

/// Returns a pointer to a specific location in a pool.
void* GetMemoryFromPool(DataLocation location,
                        const std::vector<android::nn::RunTimePoolInfo>& memPools);

/// Returns the memory referenced by a pointer-based request argument.
void* GetMemoryFromPointer(const Request::Argument& requestArg);

/// Builds an armnn::TensorInfo (shape, data type, quantization) from an NNAPI operand.
armnn::TensorInfo GetTensorInfoForOperand(const Operand& operand);

/// Returns a human-readable, single-operand description for logging/debugging.
std::string GetOperandSummary(const Operand& operand);

/// True if @a operandType is one of the quantized NNAPI operand types.
bool isQuantizedOperand(const OperandType& operandType);

/// Returns a human-readable summary of the whole model for logging/debugging.
std::string GetModelSummary(const Model& model);
57
/// Dumps the contents of @a tensor to a file under @a dumpDir, named after
/// @a requestName / @a tensorName.
template <typename TensorType>
void DumpTensor(const std::string& dumpDir,
                const std::string& requestName,
                const std::string& tensorName,
                const TensorType& tensor);

/// If @a gpuProfilingEnabled is set, writes the profiler's JSON output for
/// @a networkId to a file under @a dumpDir. No-op otherwise.
void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
                                 const std::string& dumpDir,
                                 armnn::NetworkId networkId,
                                 const armnn::IProfiler* profiler);

/// Writes the optimized network as a GraphViz .dot file under @a dumpDir;
/// returns the name of the file written.
std::string ExportNetworkGraphToDotFile(const armnn::IOptimizedNetwork& optimizedNetwork,
                                        const std::string& dumpDir);

/// Serializes @a network to @a dumpDir and, when @a dataCachingActive,
/// also into @a dataCacheData; returns the name of the file written.
std::string SerializeNetwork(const armnn::INetwork& network,
                             const std::string& dumpDir,
                             std::vector<uint8_t>& dataCacheData,
                             bool dataCachingActive = true);

/// Renames previously exported serialized/.dot files under @a dumpDir so
/// their names incorporate @a networkId.
void RenameExportedFiles(const std::string& existingSerializedFileName,
                         const std::string& existingDotFileName,
                         const std::string& dumpDir,
                         const armnn::NetworkId networkId);

/// Renames a single exported file under @a dumpDir using @a extension and
/// @a networkId to build the new name.
void RenameFile(const std::string& existingName,
                const std::string& extension,
                const std::string& dumpDir,
                const armnn::NetworkId networkId);

/// Checks if a tensor info represents a dynamic tensor
bool IsDynamicTensor(const armnn::TensorInfo& outputInfo);

/// Checks for ArmNN support of dynamic tensors.
bool AreDynamicTensorsSupported(void);

/// Returns a timestamp string suitable for building unique dump file names.
std::string GetFileTimestamp();
94
95inline OutputShape ComputeShape(const armnn::TensorInfo& info)
96{
97 OutputShape shape;
98
99 armnn::TensorShape tensorShape = info.GetShape();
100 // Android will expect scalars as a zero dimensional tensor
101 if(tensorShape.GetDimensionality() == armnn::Dimensionality::Scalar)
102 {
103 shape.dimensions = std::vector<uint32_t>{};
104 }
105 else
106 {
107 std::vector<uint32_t> dimensions;
108 const unsigned int numDims = tensorShape.GetNumDimensions();
109 dimensions.resize(numDims);
110 for (unsigned int outputIdx = 0u; outputIdx < numDims; ++outputIdx)
111 {
112 dimensions[outputIdx] = tensorShape[outputIdx];
113 }
114 shape.dimensions = dimensions;
115 }
116
117 shape.isSufficient = true;
118
119 return shape;
120}
121
122void CommitPools(std::vector<::android::nn::RunTimePoolInfo>& memPools);
123
124} // namespace armnn_driver