//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "HalPolicy.hpp"

#include "../1.0/HalPolicy.hpp"
namespace armnn_driver
{
namespace hal_1_1
{
15bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
16{
17 if (compliantWithV1_0(operation))
18 {
19 hal_1_0::HalPolicy::Operation v10Operation = convertToV1_0(operation);
20 hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);
21
22 return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
23 }
24 else
25 {
26 switch (operation.type)
27 {
28 case V1_1::OperationType::DIV:
29 return ConvertDiv(operation, model, data);
David Beck38e12942018-09-12 16:02:24 +010030 case V1_1::OperationType::SUB:
31 return ConvertSub(operation, model, data);
narpra013c052562018-09-17 14:25:04 +010032 case V1_1::OperationType::MEAN:
33 return ConvertMean(operation, model, data);
Nina Drozd62a4a9f2018-10-01 14:20:25 +010034 case V1_1::OperationType::PAD:
35 return ConvertPad(operation, model, data);
saoste01b8471482018-10-10 09:44:51 +010036 case V1_1::OperationType::SQUEEZE:
37 return ConvertSqueeze(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +010038 default:
39 return Fail("%s: Operation type %s not supported in ArmnnDriver",
40 __func__, toString(operation.type).c_str());
41 }
42 }
43}
44
45bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
46{
47 LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
48 LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
49
50 if (!input0.IsValid() || !input1.IsValid())
51 {
52 return Fail("%s: Operation has invalid inputs", __func__);
53 }
54
55 // The FuseActivation parameter is always the input index 2
56 // and it should be optional
57 ActivationFn activationFunction;
58 if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
59 {
60 return Fail("%s: Operation has invalid inputs", __func__);
61 }
62
63 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
64 if (!outputOperand)
65 {
66 return false;
67 }
68
69 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
70
71 if (!IsLayerSupported(__func__,
72 armnn::IsDivisionSupported,
73 data.m_Compute,
74 input0.GetTensorInfo(),
75 input1.GetTensorInfo(),
76 outInfo))
77 {
78 return false;
79 }
80
81 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
82 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
83
84 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
85 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
86
87 if (endLayer)
88 {
89 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
90 return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
91 }
92
93 return Fail("%s: ProcessActivation failed", __func__);
94}
95
David Beck38e12942018-09-12 16:02:24 +010096bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
97{
98 LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
99 LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
100
101 if (!input0.IsValid() || !input1.IsValid())
102 {
103 return Fail("%s: Operation has invalid inputs", __func__);
104 }
105
106 // The FuseActivation parameter is always the input index 2
107 // and it should be optional
108 ActivationFn activationFunction;
109 if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
110 {
111 return Fail("%s: Operation has invalid inputs", __func__);
112 }
113
114 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
115 if (!outputOperand)
116 {
117 return false;
118 }
119
120 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
121
122 if (!IsLayerSupported(__func__,
123 armnn::IsSubtractionSupported,
124 data.m_Compute,
125 input0.GetTensorInfo(),
126 input1.GetTensorInfo(),
127 outInfo))
128 {
129 return false;
130 }
131
132 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
133 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
134
135 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
136 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
137
138 if (endLayer)
139 {
140 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
141 return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
142 }
143
144 return Fail("%s: ProcessActivation failed", __func__);
145}
146
narpra013c052562018-09-17 14:25:04 +0100147bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
148{
149 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
150
151 if (!input.IsValid())
152 {
153 return Fail("%s: Operation has invalid inputs", __func__);
154 }
155
156 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
157
158 armnn::MeanDescriptor descriptor;
159
160 const Operand* axisOperand = GetInputOperand(operation, 1, model);
161 if (axisOperand)
162 {
163 std::vector<int32_t> axis;
164 GetTensorInt32Values(*axisOperand, axis, model, data);
165 unsigned int rank = inputInfo.GetNumDimensions();
166 // convert the axis to unsigned int.
167 for (auto& i : axis)
168 {
169 unsigned int unsignedAxis = (i + rank) % rank;
170 if (std::find(descriptor.m_Axis.begin(), descriptor.m_Axis.end(), unsignedAxis) == descriptor.m_Axis.end())
171 {
172 descriptor.m_Axis.push_back(unsignedAxis);
173 }
174 }
175 }
176
177 int32_t keepDims;
178 GetInputInt32(operation, 2, keepDims, model, data);
179 if (keepDims > 0)
180 {
181 descriptor.m_KeepDims = true;
182 }
183
184 const Operand* output = GetOutputOperand(operation, 0, model);
185 if (!output)
186 {
187 return Fail("%s: Could not read output 0", __func__);
188 }
189
190 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
191
192 if (!IsLayerSupported(__func__,
193 armnn::IsMeanSupported,
194 data.m_Compute,
195 inputInfo,
196 outputInfo,
197 descriptor))
198 {
199 return false;
200 }
201
202 armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
narpra0196bedf02018-09-26 16:57:28 +0100203 assert(layer != nullptr);
204 input.Connect(layer->GetInputSlot(0));
205 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
narpra013c052562018-09-17 14:25:04 +0100206
207 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
208}
209
Nina Drozd62a4a9f2018-10-01 14:20:25 +0100210bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
211{
212 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
213
214 if (!input.IsValid())
215 {
216 return Fail("%s: Operation has invalid inputs", __func__);
217 }
218
219 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
220
221 const Operand* paddingsOperand = GetInputOperand(operation, 1, model);
222
223 if (!paddingsOperand)
224 {
225 return Fail("%s: Could not read paddings operand", __func__);
226 }
227
228 unsigned int rank = inputInfo.GetNumDimensions();
229 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
230 if (paddingsOperandShape.GetNumDimensions() != rank || paddingsOperandShape.GetNumElements() != 2)
231 {
232 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
233 }
234
235 std::vector<int32_t> paddings;
236 GetTensorInt32Values(*paddingsOperand, paddings, model, data);
237
238 // add padding for each dimension of input tensor.
239 armnn::PadDescriptor descriptor;
240 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
241 {
242 int paddingBeforeInput = paddings[i];
243 int paddingAfterInput = paddings[i + 1];
244 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
245 {
246 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
247 }
248 descriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
249 }
250
251 const Operand* output = GetOutputOperand(operation, 0, model);
252 if (!output)
253 {
254 return Fail("%s: Could not read output 0", __func__);
255 }
256
257 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
258
259 if (!IsLayerSupported(__func__,
260 armnn::IsPadSupported,
261 data.m_Compute,
262 inputInfo,
263 outputInfo,
264 descriptor))
265 {
266 return false;
267 }
268
269 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
270 assert(layer != nullptr);
271 input.Connect(layer->GetInputSlot(0));
272 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
273
274 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
275}
276
saoste01b8471482018-10-10 09:44:51 +0100277bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
278{
279 static const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
280 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
281
282 if (!input.IsValid())
283 {
284 return Fail("%s: Operation has invalid inputs", __func__);
285 }
286
287 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
288
289 unsigned int rank = inputInfo.GetNumDimensions();
290 if( rank > 4 )
291 {
292 Fail("%s: Inputs with rank greater than: %i are not supported", __func__, rank);
293 }
294
295 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
296 // if the operand index is out of bounds.
297 const Operand* axisOperand = GetInputOperand(operation, 1, model, false);
298
299 std::vector<int32_t> axis;
300 if(!axisOperand)
301 {
302 axis.assign(dimensionSequence,
303 dimensionSequence+inputInfo.GetNumDimensions());
304 }
305 else
306 {
307 GetTensorInt32Values(*axisOperand, axis, model, data);
308 }
309
310 std::vector<uint32_t> outputDims;
311 for (auto& i : axis)
312 {
313 auto currentDimension = inputInfo.GetShape()[i];
314 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
315
316 if (skipSqueeze || currentDimension != 1)
317 {
318 outputDims.push_back(currentDimension);
319 }
320 }
321
322 armnn::TensorShape outShape = armnn::TensorShape(static_cast<unsigned int>(outputDims.size()), outputDims.data());
323
324 armnn::TensorInfo outputInfo = inputInfo;
325 outputInfo.SetShape(outShape);
326
327 armnn::ReshapeDescriptor reshapeDesc;
328 reshapeDesc.m_TargetShape = outputInfo.GetShape();
329
330 const Operand* output = GetOutputOperand(operation, 0, model);
331 if (!output)
332 {
333 return Fail("%s: Could not read output 0", __func__);
334 }
335
336 if (!IsLayerSupported(__func__,
337 armnn::IsReshapeSupported,
338 data.m_Compute,
339 inputInfo))
340 {
341 return false;
342 }
343
344 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
345 assert(layer != nullptr);
346 input.Connect(layer->GetInputSlot(0));
347 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
348
349 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
350}
351
} // namespace hal_1_1
} // namespace armnn_driver