//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "HalPolicy.hpp"

#include <armnn/Optional.hpp>

#include "FullyConnected.hpp"
#include "Utils.hpp"

namespace armnn_driver
{
namespace hal_1_0
{

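// ConvertOperation() is the single entry point for this HAL 1.0 policy: it dispatches each
// NNAPI operation to the matching Convert*() helper below and fails for anything unsupported.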
bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
{
    switch (operation.type)
    {
        case V1_0::OperationType::ADD:
            return ConvertAdd(operation, model, data);
        case V1_0::OperationType::AVERAGE_POOL_2D:
            return ConvertAveragePool2d(operation, model, data);
        case V1_0::OperationType::CONCATENATION:
            return ConvertConcatenation(operation, model, data);
        case V1_0::OperationType::CONV_2D:
            return ConvertConv2d(operation, model, data);
        case V1_0::OperationType::DEPTHWISE_CONV_2D:
            return ConvertDepthwiseConv2d(operation, model, data);
        case V1_0::OperationType::DEQUANTIZE:
            return ConvertDequantize(operation, model, data);
        case V1_0::OperationType::FLOOR:
            return ConvertFloor(operation, model, data);
        case V1_0::OperationType::FULLY_CONNECTED:
            return ConvertFullyConnected(operation, model, data);
        case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
            return ConvertLocalResponseNormalization(operation, model, data);
        case V1_0::OperationType::LOGISTIC:
            return ConvertLogistic(operation, model, data);
        case V1_0::OperationType::LSTM:
            return ConvertLstm(operation, model, data);
        case V1_0::OperationType::L2_NORMALIZATION:
            return ConvertL2Normalization(operation, model, data);
        case V1_0::OperationType::L2_POOL_2D:
            return ConvertL2Pool2d(operation, model, data);
        case V1_0::OperationType::MAX_POOL_2D:
            return ConvertMaxPool2d(operation, model, data);
        case V1_0::OperationType::MUL:
            return ConvertMul(operation, model, data);
        case V1_0::OperationType::RELU:
            return ConvertReLu(operation, model, data);
        case V1_0::OperationType::RELU1:
            return ConvertReLu1(operation, model, data);
        case V1_0::OperationType::RELU6:
            return ConvertReLu6(operation, model, data);
        case V1_0::OperationType::SOFTMAX:
            return ConvertSoftmax(operation, model, data);
        case V1_0::OperationType::SPACE_TO_DEPTH:
            return ConvertSpaceToDepth(operation, model, data);
        case V1_0::OperationType::TANH:
            return ConvertTanH(operation, model, data);
        case V1_0::OperationType::RESHAPE:
            return ConvertReshape(operation, model, data);
        case V1_0::OperationType::RESIZE_BILINEAR:
            return ConvertResizeBilinear(operation, model, data);
        default:
            return Fail("%s: Operation type %s not supported in ArmnnDriver",
                        __func__, toString(operation.type).c_str());
    }
}

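// Each converter below follows the same overall pattern: resolve the operation's inputs and
// outputs, reject cases the driver cannot handle (for example dynamic output tensors), ask the
// configured backends whether the layer is supported via FORWARD_LAYER_SUPPORT_FUNC, and only
// then add the ArmNN layer and register its output slot.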
bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertAdd()");

    LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always input index 2, and it is optional.
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<hal_1_0::HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsAdditionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo0,
                               inputInfo1,
                               outputInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
        return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}

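// The pooling, concatenation and convolution conversions below delegate to shared converter
// templates, parameterised on hal_1_0::HalPolicy, so the same implementation can be reused by
// the other HAL version policies.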
bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertAveragePool2d()");
    return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
}

bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertConcatenation()");
    return ::ConvertConcatenation<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertConv2d()");
    return ::ConvertConv2d<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertDepthwiseConv2d()");
    return ::ConvertDepthwiseConv2d<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertDequantize()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid input", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDequantizeSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outputInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertFloor()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFloorSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outputInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

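// FULLY_CONNECTED expects constant weights (2-D) and bias (1-D). Inputs with more than two
// dimensions are flattened to the 2-D shape computed by FlattenFullyConnectedInput() before
// being fed to the ArmNN FullyConnected layer.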
bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertFullyConnected()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 1, model, data); // 2D
    ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias    = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    // Ensure the bias quantization scale is consistent with the input and weights scales
    // (small float differences can exist).
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<hal_1_0::HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFullyConnectedSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               outputInfo,
                               weights.GetInfo(),
                               bias.GetInfo(),
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}

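// LOCAL_RESPONSE_NORMALIZATION maps to an ArmNN Normalization layer (across-channel,
// LocalBrightness). AndroidNN specifies the radius while ArmNN expects the full window size,
// hence the adjustment of m_NormSize below.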
bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
                                                  const Model& model,
                                                  ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertLocalResponseNormalization()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::NormalizationDescriptor descriptor;
    descriptor.m_DataLayout      = armnn::DataLayout::NHWC;
    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
    descriptor.m_NormMethodType  = armnn::NormalizationAlgorithmMethod::LocalBrightness;

    if (!input.IsValid() ||
        !GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
        !GetInputFloat32<hal_1_0::HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
        !GetInputFloat32<hal_1_0::HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
        !GetInputFloat32<hal_1_0::HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // ArmNN expects normSize to be the full size of the normalization
    // window rather than the radius as in AndroidNN.
    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsNormalizationSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertLogistic()");

    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::Sigmoid;

    return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
}

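// LSTM: inputs 0-22 of the NNAPI operation carry the data tensor, the state tensors, the
// weight/bias tensors and three scalar parameters. The CIFG, peephole and projection variants
// are inferred from which of the optional weight tensors are actually present.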
bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertLstm()");

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0: input", __func__);
    }
    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    LayerInputHandle outputStateIn = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 18, model, data);
    if (!outputStateIn.IsValid())
    {
        return Fail("%s: Could not read input 18: outputStateIn", __func__);
    }
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    LayerInputHandle cellStateIn = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 19, model, data);
    if (!cellStateIn.IsValid())
    {
        return Fail("%s: Could not read input 19: cellStateIn", __func__);
    }

    // Get the mandatory input tensors:
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 2, model, data);
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToCellWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 3, model, data);
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 4, model, data);
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 6, model, data);
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToCellWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 7, model, data);
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 8, model, data);
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin forgetGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 13, model, data);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 14, model, data);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin outputGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 15, model, data);

    if (!inputToForgetWeightsPin.IsValid() ||
        !inputToCellWeightsPin.IsValid() ||
        !inputToOutputWeightsPin.IsValid() ||
        !recurrentToForgetWeightsPin.IsValid() ||
        !recurrentToCellWeightsPin.IsValid() ||
        !recurrentToOutputWeightsPin.IsValid() ||
        !forgetGateBiasPin.IsValid() ||
        !cellBiasPin.IsValid() ||
        !outputGateBiasPin.IsValid())
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the optional input tensors:
    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    const ConstTensorPin inputToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(
            operation, 1, model, data, g_DontPermute, nullptr, true);

    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    const ConstTensorPin recurrentToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(
            operation, 5, model, data, g_DontPermute, nullptr, true);

    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(
            operation, 9, model, data, g_DontPermute, nullptr, true);

    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(
            operation, 10, model, data, g_DontPermute, nullptr, true);

    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(
            operation, 11, model, data, g_DontPermute, nullptr, true);

    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin inputGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(
            operation, 12, model, data, g_DontPermute, nullptr, true);

    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    const ConstTensorPin projectionWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(
            operation, 16, model, data, g_DontPermute, nullptr, true);

    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    const ConstTensorPin projectionBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(
            operation, 17, model, data, g_DontPermute, nullptr, true);

    if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
        (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
        (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
        (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
        (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
        (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
        (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
        (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the mandatory input scalars (actually 1-D tensors of size 1):
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    ActivationFn activation;
    float cellClip;
    float projClip;
    if (!GetInputActivationFunctionFromTensor<hal_1_0::HalPolicy>(operation, 20, activation, model, data) ||
        !GetInputScalar<hal_1_0::HalPolicy>(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
        !GetInputScalar<hal_1_0::HalPolicy>(operation, 22, OperandType::FLOAT32, projClip, model, data))
    {
        return Fail("%s: Operation has invalid scalar inputs", __func__);
    }

    // Outputs:
    // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 3]
    //     with CIFG, or [batch_size, num_units * 4] without CIFG.
    const Operand* scratchBuffer = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!scratchBuffer)
    {
        return Fail("%s: Could not read output 0: scratchBuffer", __func__);
    }
    // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    const Operand* outputStateOut = GetOutputOperand<hal_1_0::HalPolicy>(operation, 1, model);
    if (!outputStateOut)
    {
        return Fail("%s: Could not read output 1: outputStateOut", __func__);
    }
    // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    const Operand* cellStateOut = GetOutputOperand<hal_1_0::HalPolicy>(operation, 2, model);
    if (!cellStateOut)
    {
        return Fail("%s: Could not read output 2: cellStateOut", __func__);
    }
    // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //     effectively the same as the current “output state (out)” value.
    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 3, model);
    if (!output)
    {
        return Fail("%s: Could not read output 3: output", __func__);
    }

    // Set the params structure for the AddLstmLayer call
    armnn::LstmInputParams params;
    params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
    params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
    params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
    params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
    params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
    params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
    params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
    params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
    params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
    params.m_CellBias = cellBiasPin.GetConstTensorPtr();
    params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
    params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
    params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();

    // Set the layer descriptor
    armnn::LstmDescriptor desc;
    desc.m_ActivationFunc = activation;
    desc.m_ClippingThresCell = cellClip;
    desc.m_ClippingThresProj = projClip;
    desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
                          params.m_RecurrentToInputWeights == nullptr ||
                          params.m_InputGateBias == nullptr);
    desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
                              params.m_CellToOutputWeights != nullptr);
    desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);

    // Validate the optional input groups
    if (desc.m_CifgEnabled &&
        (params.m_InputToInputWeights != nullptr ||
         params.m_RecurrentToInputWeights != nullptr ||
         params.m_InputGateBias != nullptr))
    {
        return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
                    " and input gate bias must be provided", __func__);
    }

    if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
    {
        return Fail("%s: projection bias should not be provided without projection weights", __func__);
    }

    if (desc.m_PeepholeEnabled &&
        (params.m_CellToForgetWeights == nullptr ||
         params.m_CellToOutputWeights == nullptr ||
         (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
    {
        return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
                    " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
    }

    // Check if the layer is supported
    // Inputs
    const armnn::TensorInfo& inputInfo         = input.GetTensorInfo();
    const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
    const armnn::TensorInfo& cellStateInInfo   = cellStateIn.GetTensorInfo();

    // Outputs
    const armnn::TensorInfo& scratchBufferInfo  = GetTensorInfoForOperand(*scratchBuffer);
    const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
    const armnn::TensorInfo& cellStateOutInfo   = GetTensorInfoForOperand(*cellStateOut);
    const armnn::TensorInfo& outputInfo         = GetTensorInfoForOperand(*output);

    // Basic parameters
    armnn::LstmInputParamsInfo paramsInfo;
    paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
    paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
    paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
    paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
    paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
    paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
    paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
    paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
    paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());

    // Optional parameters
    if (!desc.m_CifgEnabled)
    {
        paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
        paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
        if (params.m_CellToInputWeights != nullptr)
        {
            paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
        }
        paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
    }

    if (desc.m_ProjectionEnabled)
    {
        paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
        if (params.m_ProjectionBias != nullptr)
        {
            paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
        }
    }

    if (desc.m_PeepholeEnabled)
    {
        paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
        paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsLstmSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputStateInInfo,
                               cellStateInInfo,
                               scratchBufferInfo,
                               outputStateOutInfo,
                               cellStateOutInfo,
                               outputInfo,
                               desc,
                               paramsInfo);
    if (!isSupported)
    {
        return false;
    }

    // Add the layer
    armnn::IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");

    input.Connect(layer->GetInputSlot(0));
    outputStateIn.Connect(layer->GetInputSlot(1));
    cellStateIn.Connect(layer->GetInputSlot(2));

    return (SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, 0, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 1, *layer, 1, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 2, *layer, 2, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 3, *layer, 3, model, data));
}

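// L2_NORMALIZATION is converted assuming NHWC data layout, in line with the other HAL 1.0 converters.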
bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertL2Normalization()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::L2NormalizationDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsL2NormalizationSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertL2Pool2d()");
    return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
}

bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertMaxPool2d()");
    return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
}

bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertMul()");

    LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always input index 2, and it is optional.
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<hal_1_0::HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);

    if (outputOperand == nullptr)
    {
        return false;
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMultiplicationSupported,
                               data.m_Backends,
                               isSupported,
                               input0.GetTensorInfo(),
                               input1.GetTensorInfo(),
                               outputInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
        return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}

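// RELU, RELU1, RELU6 and (further below) TANH are handled by the shared ::ConvertReLu*/::ConvertTanH
// converter templates, parameterised on hal_1_0::HalPolicy.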
bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertReLu()");
    return ::ConvertReLu<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertReLu1()");
    return ::ConvertReLu1<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertReLu6()");
    return ::ConvertReLu6<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertSoftmax()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::SoftmaxDescriptor desc;
    if (!GetInputFloat32<hal_1_0::HalPolicy>(operation, 1, desc.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsSoftmaxSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

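// SPACE_TO_DEPTH only supports rank-4 inputs and a block size greater than 1; the block size is
// read from input 1 of the operation.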
bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertSpaceToDepth()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }

    armnn::SpaceToDepthDescriptor desc;

    GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);

    if (desc.m_BlockSize <= 1)
    {
        return Fail("%s: Block size must be greater than 1", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsSpaceToDepthSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertTanH()");
    return ::ConvertTanH<hal_1_0::HalPolicy>(operation, model, data);
}

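// RESHAPE reads the requested shape from a constant 1-D operand, resolves any wildcard dimensions
// (e.g. -1) via reshapePrepare(), and checks the result against the shape of the output operand
// before adding the ArmNN Reshape layer.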
bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertReshape()");

    const Operand* inputOperand          = GetInputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    const Operand* requestedShapeOperand = GetInputOperand<hal_1_0::HalPolicy>(operation, 1, model);
    const Operand* outputOperand         = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);

    if (inputOperand == nullptr
        || requestedShapeOperand == nullptr
        || outputOperand == nullptr)
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (requestedShapeOperand->dimensions.size() != 1)
    {
        return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
                    __func__, requestedShapeOperand->dimensions.size());
    }

    std::vector<int32_t> targetDimensions;
    if (!GetTensorInt32Values<hal_1_0::HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
    {
        return Fail("%s: Could not read values of input 1", __func__);
    }

    const Shape inputOperandShape = GetOperandShape(*inputOperand);

    Shape requestedShape;
    // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
    // function that resolves these values into a fully specified tensor shape.
    if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
    {
        return Fail("%s: Failed to resolve the requested shape", __func__);
    }

    const Shape outputOperandShape = GetOperandShape(*outputOperand);
    if (!SameShape(requestedShape, outputOperandShape))
    {
        return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
    }

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
                                                         requestedShape.dimensions.data());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

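// RESIZE_BILINEAR maps to an ArmNN Resize layer using bilinear interpolation in NHWC layout; the
// target width and height are read from scalar inputs 1 and 2 of the operation.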
bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertResizeBilinear()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::ResizeDescriptor desc;
    desc.m_Method     = armnn::ResizeMethod::Bilinear;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsResizeSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    if (!GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, desc.m_TargetWidth, model, data) ||
        !GetInputScalar<hal_1_0::HalPolicy>(operation, 2, OperandType::INT32, desc.m_TargetHeight, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(desc);

    assert(layer != nullptr);

    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

} // namespace hal_1_0
} // namespace armnn_driver