//
// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
//

#pragma once

#include "HalInterfaces.h"
#include "NeuralNetworks.h"
#include "ActivationFunctor.h"

#include <armnn/ArmNN.hpp>
#include <armnn/INetwork.hpp>
#include <CpuExecutor.h>

#include "Utils.hpp"

#include <map>
#include <memory>
#include <set>
#include <vector>

namespace armnn_driver
{

class ConstTensorPin;
class LayerInputHandle;

enum class ConversionResult
{
    Success,
    ErrorMappingPools,
    UnsupportedFeature
};

// A helper class performing the conversion from an AndroidNN driver Model representation
// to an armnn::INetwork object.
class ModelToINetworkConverter
{
public:
    ModelToINetworkConverter(armnn::Compute compute, const Model& model,
                             const std::set<unsigned int>& forcedUnsupportedOperations);

    ConversionResult GetConversionResult() const { return m_ConversionResult; }

    // Returns the ArmNN INetwork corresponding to the input model if the conversion succeeded, nullptr otherwise.
    armnn::INetwork* GetINetwork() const { return m_Network.get(); }

    bool IsOperationSupported(uint32_t operationIndex) const;

private:
    void Convert();

    bool ConvertOperation(const Operation& operation);

    bool ConvertAdd(const Operation& operation);

    bool ConvertAveragePool2d(const Operation& operation);

    bool ConvertConcatenation(const Operation& operation);

    bool ConvertConv2d(const Operation& operation);

    bool ConvertDepthwiseConv2d(const Operation& operation);

    bool ConvertFloor(const Operation& operation);

    bool ConvertFullyConnected(const Operation& operation);

    bool ConvertLogistic(const Operation& operation);

    bool ConvertLocalResponseNormalization(const Operation& operation);

    bool ConvertL2Normalization(const Operation& operation);

    bool ConvertL2Pool2d(const Operation& operation);

    bool ConvertMaxPool2d(const Operation& operation);

    bool ConvertMul(const Operation& operation);

    bool ConvertReLu(const Operation& operation);

    bool ConvertReLu1(const Operation& operation);

    bool ConvertReLu6(const Operation& operation);

    bool ConvertSoftmax(const Operation& operation);

    bool ConvertTanH(const Operation& operation);

    bool ConvertReshape(const Operation& operation);

    bool ConvertResizeBilinear(const Operation& operation);

    bool ConvertToActivation(const Operation& operation, const char* operationName,
                             const armnn::ActivationDescriptor& activationDesc);

    bool ConvertPooling2d(const Operation& operation, const char* name, armnn::PoolingAlgorithm poolType);

    const void* GetOperandValueReadOnlyAddress(const Operand& operand) const;

    const Operand* GetInputOperand(const Operation& operation, uint32_t inputIndex) const;

    const Operand* GetOutputOperand(const Operation& operation, uint32_t outputIndex) const;

    template<typename T>
    bool GetInputScalar(const Operation& operation, uint32_t inputIndex, OperandType type, T& outValue) const;

    bool GetInputInt32(const Operation& operation, uint32_t inputIndex, int32_t& outValue) const;

    bool GetInputFloat32(const Operation& operation, uint32_t inputIndex, float& outValue) const;

    bool GetInputActivationFunction(const Operation& operation, uint32_t inputIndex,
                                    ActivationFn& outActivationFunction) const;

    bool GetInputPaddingScheme(const Operation& operation, uint32_t inputIndex,
                               android::nn::PaddingScheme& outPaddingScheme) const;

    LayerInputHandle ConvertToLayerInputHandle(const Operation& operation, uint32_t inputIndex);

    ConstTensorPin ConvertOperationInputToConstTensorPin(const Operation& operation, uint32_t inputIndex,
        const armnn::PermutationVector& dimensionMappings = g_DontPermute,
        const armnn::TensorShape* overrideTensorShape = nullptr);

    ConstTensorPin ConvertOperandToConstTensorPin(const Operand& operand,
        const armnn::PermutationVector& dimensionMappings = g_DontPermute,
        const armnn::TensorShape* overrideTensorShape = nullptr);

    bool GetTensorInt32Values(const Operand& operand, std::vector<int32_t>& outValues) const;

    armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo, ActivationFn activation,
                                                armnn::IConnectableLayer* prevLayer);

    bool SetupAndTrackLayerOutputSlot(const Operation& operation, uint32_t outputIndex,
                                      armnn::IConnectableLayer& layer);

    // Input data
    armnn::Compute m_Compute;
    const Model& m_Model;
    const std::set<unsigned int>& m_ForcedUnsupportedOperations;

    // Output data
    armnn::INetworkPtr m_Network;
    ConversionResult m_ConversionResult;
    std::map<uint32_t, bool> m_OperationSupported;

    // Working/intermediate data
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
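
// Illustrative usage sketch (not part of the driver source): it assumes 'model' is a
// valid android::nn Model received from the NNAPI runtime, and omits the subsequent
// armnn::Optimize()/IRuntime load steps performed by the driver.
//
//     ModelToINetworkConverter converter(armnn::Compute::CpuAcc, model,
//                                        std::set<unsigned int>{});
//     if (converter.GetConversionResult() == ConversionResult::Success)
//     {
//         armnn::INetwork* network = converter.GetINetwork();
//         // ... optimize 'network' and load it into an armnn::IRuntime ...
//     }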

} // namespace armnn_driver