//
// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
// SPDX-License-Identifier: MIT
//
5
6#pragma once
7#include <armnn/ArmNN.hpp>
8
9#include <CpuExecutor.h>
10#include <nnapi/OperandTypes.h>
11#include <nnapi/Result.h>
12#include <nnapi/Types.h>
13
14#include <vector>
15#include <string>
16#include <fstream>
17#include <iomanip>
18
19namespace armnn_driver
20{
21
22using namespace android::nn;
23
24extern const armnn::PermutationVector g_DontPermute;
25
/// Exception thrown when a model contains an operand whose type this driver
/// cannot handle. The offending type is kept in m_type so callers catching
/// the exception can report exactly which operand type was rejected.
template <typename OperandType>
class UnsupportedOperand: public std::runtime_error
{
public:
    // explicit: an exception type should never be created by implicit
    // conversion from a bare operand-type value.
    explicit UnsupportedOperand(const OperandType type)
        : std::runtime_error("Operand type is unsupported")
        , m_type(type)
    {}

    /// The operand type that triggered the exception.
    OperandType m_type;
};
37
/// Swizzles tensor data in @a input according to the dimension mappings.
void SwizzleAndroidNn4dTensorToArmNn(armnn::TensorInfo& tensor,
                                     const void* input,
                                     void* output,
                                     const armnn::PermutationVector& mappings);

/// Returns a pointer to a specific location in a pool.
void* GetMemoryFromPool(DataLocation location,
                        const std::vector<android::nn::RunTimePoolInfo>& memPools);

/// Returns a pointer to the memory referenced by a request argument.
void* GetMemoryFromPointer(const Request::Argument& requestArg);

/// Builds an armnn::TensorInfo describing the given Android NN operand.
armnn::TensorInfo GetTensorInfoForOperand(const Operand& operand);

/// Returns a human-readable, one-line summary of @a operand.
std::string GetOperandSummary(const Operand& operand);

/// Returns true if @a operandType denotes a quantized operand type.
// NOTE(review): name is lowerCamel unlike the surrounding PascalCase
// functions; renaming would break callers, so it is left as-is.
bool isQuantizedOperand(const OperandType& operandType);

/// Returns a human-readable summary of a whole model.
std::string GetModelSummary(const Model& model);

/// Dumps the contents of @a tensor to a file under @a dumpDir (debug aid);
/// @a requestName and @a tensorName presumably feed the file name - see the
/// implementation for the exact naming scheme.
void DumpTensor(const std::string& dumpDir,
                const std::string& requestName,
                const std::string& tensorName,
                const armnn::ConstTensor& tensor);

/// Writes the network's JSON profiling output under @a dumpDir, but only
/// when @a gpuProfilingEnabled is set.
void DumpJsonProfilingIfRequired(bool gpuProfilingEnabled,
                                 const std::string& dumpDir,
                                 armnn::NetworkId networkId,
                                 const armnn::IProfiler* profiler);

/// Exports @a optimizedNetwork as a GraphViz dot graph under @a dumpDir.
/// The returned string is presumably the path of the generated file -
/// confirm against the implementation.
std::string ExportNetworkGraphToDotFile(const armnn::IOptimizedNetwork& optimizedNetwork,
                                        const std::string& dumpDir);

/// Serializes @a network; serialized bytes go to @a dataCacheData when
/// @a dataCachingActive is true. NOTE(review): the interaction between
/// @a dumpDir output and the data cache lives in the implementation -
/// verify there before relying on it.
std::string SerializeNetwork(const armnn::INetwork& network,
                             const std::string& dumpDir,
                             std::vector<uint8_t>& dataCacheData,
                             bool dataCachingActive = true);

/// Renames the previously exported serialized-network and dot files in
/// @a dumpDir, incorporating @a networkId into the new names.
void RenameExportedFiles(const std::string& existingSerializedFileName,
                         const std::string& existingDotFileName,
                         const std::string& dumpDir,
                         const armnn::NetworkId networkId);

/// Renames a single file in @a dumpDir to a name derived from @a networkId
/// and the given @a extension.
void RenameFile(const std::string& existingName,
                const std::string& extension,
                const std::string& dumpDir,
                const armnn::NetworkId networkId);

/// Checks if a tensor info represents a dynamic tensor.
bool IsDynamicTensor(const armnn::TensorInfo& outputInfo);

/// Checks for ArmNN support of dynamic tensors.
bool AreDynamicTensorsSupported(void);

/// Returns a timestamp string suitable for embedding in dump file names.
std::string GetFileTimestamp();
93
94inline OutputShape ComputeShape(const armnn::TensorInfo& info)
95{
96 OutputShape shape;
97
98 armnn::TensorShape tensorShape = info.GetShape();
99 // Android will expect scalars as a zero dimensional tensor
100 if(tensorShape.GetDimensionality() == armnn::Dimensionality::Scalar)
101 {
102 shape.dimensions = std::vector<uint32_t>{};
103 }
104 else
105 {
106 std::vector<uint32_t> dimensions;
107 const unsigned int numDims = tensorShape.GetNumDimensions();
108 dimensions.resize(numDims);
109 for (unsigned int outputIdx = 0u; outputIdx < numDims; ++outputIdx)
110 {
111 dimensions[outputIdx] = tensorShape[outputIdx];
112 }
113 shape.dimensions = dimensions;
114 }
115
116 shape.isSufficient = true;
117
118 return shape;
119}
120
121void CommitPools(std::vector<::android::nn::RunTimePoolInfo>& memPools);
122
123} // namespace armnn_driver