blob: dee4a7a57f3cdf1152fbbb4ad03ec8150f80a456 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "HalPolicy.hpp"
7
arovir015602b192018-10-04 16:15:02 +01008#include "armnn/Optional.hpp"
9
arovir01b0717b52018-09-05 17:03:25 +010010namespace armnn_driver
11{
12namespace hal_1_0
13{
14
15bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
16{
17 switch (operation.type)
18 {
19 case V1_0::OperationType::ADD:
20 return ConvertAdd(operation, model, data);
21 case V1_0::OperationType::AVERAGE_POOL_2D:
22 return ConvertAveragePool2d(operation, model, data);
23 case V1_0::OperationType::CONCATENATION:
24 return ConvertConcatenation(operation, model, data);
25 case V1_0::OperationType::CONV_2D:
26 return ConvertConv2d(operation, model, data);
27 case V1_0::OperationType::DEPTHWISE_CONV_2D:
28 return ConvertDepthwiseConv2d(operation, model, data);
29 case V1_0::OperationType::FLOOR:
30 return ConvertFloor(operation, model, data);
31 case V1_0::OperationType::FULLY_CONNECTED:
32 return ConvertFullyConnected(operation, model, data);
33 case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
34 return ConvertLocalResponseNormalization(operation, model, data);
35 case V1_0::OperationType::LOGISTIC:
36 return ConvertLogistic(operation, model, data);
37 case V1_0::OperationType::LSTM:
38 return ConvertLstm(operation, model, data);
39 case V1_0::OperationType::L2_NORMALIZATION:
40 return ConvertL2Normalization(operation, model, data);
41 case V1_0::OperationType::L2_POOL_2D:
42 return ConvertL2Pool2d(operation, model, data);
43 case V1_0::OperationType::MAX_POOL_2D:
44 return ConvertMaxPool2d(operation, model, data);
45 case V1_0::OperationType::MUL:
46 return ConvertMul(operation, model, data);
47 case V1_0::OperationType::RELU:
48 return ConvertReLu(operation, model, data);
49 case V1_0::OperationType::RELU1:
50 return ConvertReLu1(operation, model, data);
51 case V1_0::OperationType::RELU6:
52 return ConvertReLu6(operation, model, data);
53 case V1_0::OperationType::SOFTMAX:
54 return ConvertSoftmax(operation, model, data);
55 case V1_0::OperationType::TANH:
56 return ConvertTanH(operation, model, data);
57 case V1_0::OperationType::RESHAPE:
58 return ConvertReshape(operation, model, data);
59 case V1_0::OperationType::RESIZE_BILINEAR:
60 return ConvertResizeBilinear(operation, model, data);
61 default:
62 return Fail("%s: Operation type %s not supported in ArmnnDriver",
63 __func__, toString(operation.type).c_str());
64 }
65}
66
67bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
68{
69 LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
70 LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
71
72 if (!input0.IsValid() || !input1.IsValid())
73 {
74 return Fail("%s: Operation has invalid inputs", __func__);
75 }
76
77 // The FuseActivation parameter is always the input index 2
78 // and it should be optional
79 ActivationFn activationFunction;
80 if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
81 {
82 return Fail("%s: Operation has invalid inputs", __func__);
83 }
84
85 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
86 if (!outputOperand)
87 {
88 return false;
89 }
90
91 const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
92
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010093 if (!IsLayerSupportedForAnyBackend(__func__,
94 armnn::IsAdditionSupported,
95 data.m_Backends,
96 input0.GetTensorInfo(),
97 input1.GetTensorInfo(),
98 outInfo))
arovir01b0717b52018-09-05 17:03:25 +010099 {
100 return false;
101 }
102
103 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
104 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
105
106 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
107 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
108
109 if (endLayer != nullptr)
110 {
111 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
112 return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
113 }
114 else
115 {
116 return Fail("%s: ProcessActivation failed", __func__);
117 }
118}
119
120bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
121{
122 return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
123}
124
125bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
126{
127 // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
128 if (operation.inputs.size() <= 1)
129 {
130 return Fail("%s: Operation has insufficient arguments", __func__);
131 }
132
133 // Get inputs and outputs
134 const std::size_t numInputTensors = operation.inputs.size() - 1;
135
136 int32_t concatDim;
137 if (!GetInputScalar(operation, numInputTensors, OperandType::INT32, concatDim, model, data))
138 {
139 return Fail("%s: Operation has invalid inputs", __func__);
140 }
141
142 const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
143 if (!outputOperand)
144 {
145 return Fail("%s: Operation has no outputs", __func__);
146 }
147
148
149 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
150 armnn::TensorShape outputShape = outputInfo.GetShape();
151
152 //
153 // handle negative concat dims along the lines of tensorflow as described here:
154 // https://www.tensorflow.org/api_docs/python/tf/concat
155 // "negative axis refers to axis + rank(values)-th dimension"
156 //
157 if (concatDim < 0)
158 {
159 concatDim += outputShape.GetNumDimensions();
160 }
161
162 if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
163 {
164 return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
165 }
166
167 std::vector<LayerInputHandle> inputHandles;
168 std::vector<armnn::TensorShape> inputShapes;
169
170 inputHandles.reserve(numInputTensors);
171 inputShapes.reserve(numInputTensors);
172
173 bool inputsHaveBeenReshaped = false;
174 unsigned int tensorDimensionsAdded = 0;
175
176 for (uint32_t i = 0; i < numInputTensors; ++i)
177 {
178 const Operand* const operand = GetInputOperand(operation, i, model);
179 if (!operand)
180 {
181 return Fail("%s: Operation has invalid inputs", __func__);
182 }
183
184 armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
185 LayerInputHandle operandInputHandle = ConvertToLayerInputHandle(operation, i, model, data);
186
187 if (operandShape.GetNumDimensions() == 0)
188 {
189 return Fail("%s: Operands with rank 0 are not supported", __func__);
190 }
191
192 if (RequiresReshape(operandShape))
193 {
194 inputsHaveBeenReshaped = true;
195
196 armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();
197
198 // Expand the tensor to three dimensions
199 if (operandShape.GetNumDimensions() == 2)
200 {
201 reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
202 tensorDimensionsAdded = 1;
203 }
204 else
205 {
206 reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
207 tensorDimensionsAdded = 2;
208 }
209
210 armnn::IConnectableLayer& newReshape = AddReshapeLayer(
211 *data.m_Network,
212 operandInputHandle,
213 reshapeInfo
214 );
215
216 // Point to the reshape operation rather then the input operation
217 operandShape = reshapeInfo.GetShape();
218 operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
219 }
220
221 inputShapes.emplace_back(operandShape);
222 inputHandles.emplace_back(operandInputHandle);
223
224 if (!inputHandles.back().IsValid())
225 {
226 return Fail("%s: Operation has invalid inputs", __func__);
227 }
228 }
229
230 BOOST_ASSERT(inputShapes.size() == inputHandles.size());
231
232 if (inputsHaveBeenReshaped)
233 {
234 // Adjust the concatenation dimension by the amount of dimensions added (if any)
235 concatDim += tensorDimensionsAdded;
236
237 // Add extra dimensions to the output shape to reflect the addition of the reshape layers
238 if (tensorDimensionsAdded == 1)
239 {
240 outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
241 }
242 else if (tensorDimensionsAdded == 2)
243 {
narpra01f176d5a2018-11-18 20:17:48 +0000244 outputShape = armnn::TensorShape({1, 1, outputShape[0]});
arovir01b0717b52018-09-05 17:03:25 +0100245 }
246 }
247
narpra01f176d5a2018-11-18 20:17:48 +0000248 // Check if permutations is required and get the pair of permutations required for the concatenation.
249 // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
arovir01b0717b52018-09-05 17:03:25 +0100250 std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
251 std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
252
narpra01f176d5a2018-11-18 20:17:48 +0000253 bool needPermute = CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);
arovir01b0717b52018-09-05 17:03:25 +0100254
narpra01f176d5a2018-11-18 20:17:48 +0000255 if (needPermute)
256 {
257 outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
258 }
259
arovir01b0717b52018-09-05 17:03:25 +0100260 outputInfo.SetShape(outputShape);
261
262 // this is no-op for identity swizzles, otherwise it replaces both
263 // the handles and shapes with the swizzled layer output handles and shapes
264 SwizzleInputs(*data.m_Network, inputHandles, inputShapes, permutationPair.first);
265
266 // Create an armnn merger layer descriptor - this will also perform validation on the input shapes
267 armnn::OriginsDescriptor mergerDescriptor;
narpra01f176d5a2018-11-18 20:17:48 +0000268
arovir01b0717b52018-09-05 17:03:25 +0100269 try
270 {
narpra01f176d5a2018-11-18 20:17:48 +0000271 // The merger descriptor is always created across the only supported concat dimension
272 // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
arovir01b0717b52018-09-05 17:03:25 +0100273 mergerDescriptor =
274 armnn::CreateMergerDescriptorForConcatenation(
275 inputShapes.begin(), inputShapes.end(), concatDim);
276 }
277 catch (const armnn::Exception& error)
278 {
279 return Fail("%s: Error preparing merger descriptor. %s", __func__, error.what());
280 }
281
282 // Validate the output shape is correct given the input shapes based on the
narpra01f176d5a2018-11-18 20:17:48 +0000283 // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
arovir01b0717b52018-09-05 17:03:25 +0100284 if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
285 {
286 return Fail("%s: Error validating the output shape for concat", __func__);
287 }
288
289 std::vector<const armnn::TensorInfo*> inputTensorInfos;
290 std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
291 [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100292 if (!IsLayerSupportedForAnyBackend(__func__,
293 armnn::IsMergerSupported,
294 data.m_Backends,
295 inputTensorInfos,
296 outputInfo,
297 mergerDescriptor))
arovir01b0717b52018-09-05 17:03:25 +0100298 {
299 return false;
300 }
301
302 armnn::IConnectableLayer* layer = data.m_Network->AddMergerLayer(mergerDescriptor);
303 assert(layer != nullptr);
304 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
305
306 // Connect inputs to the layer
307 const int numInputSlots = layer->GetNumInputSlots();
308 assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
309 for (int i = 0; i < numInputSlots; ++i)
310 {
311 // connect the input directly to the merge (concat) layer
312 inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
313 }
314
narpra01f176d5a2018-11-18 20:17:48 +0000315 if (needPermute)
316 {
317 // Add permutation layer and connect the output to it, the permutation becomes the output layer
318 armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
319 layer->GetOutputSlot(0),
320 permutationPair.second);
321 layer = &deswizzleLayer;
322 }
arovir01b0717b52018-09-05 17:03:25 +0100323
324 if (inputsHaveBeenReshaped)
325 {
326 armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
327
328 // Undo the reshape knowing the amount of dimensions added
329 if (tensorDimensionsAdded == 1)
330 {
331 afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
332 afterConcatInfo.GetShape()[2] }));
333 }
334 else if (tensorDimensionsAdded == 2)
335 {
narpra01f176d5a2018-11-18 20:17:48 +0000336 afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
arovir01b0717b52018-09-05 17:03:25 +0100337 }
338
339 layer = &AddReshapeLayer(
340 *data.m_Network,
341 layer->GetOutputSlot(0),
342 afterConcatInfo
343 );
344 }
345
346 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
347}
348
// Converts an ANEURALNETWORKS_CONV_2D operation to an ArmNN Convolution2d layer
// (NHWC layout, constant weights/bias, optional fused activation).
// Supports both NNAPI signatures: explicit padding (10 inputs) and
// implicit padding scheme (7 inputs).
bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1, model, data);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input_scale * weights_scale if they drifted.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    // 10 inputs: explicit per-edge padding (pad left/right/top/bottom, strides, activation).
    if (operation.inputs.size() == 10)
    {
        if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    // 7 inputs: implicit padding scheme (SAME/VALID); padding is derived below.
    else if (operation.inputs.size() == 7)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Indices assume NHWC-style layouts: weights [out, H, W, in], input [N, H, W, C].
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Reject the operation unless at least one backend can execute it.
    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsConvolution2dSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc,
                                       weights.GetInfo(),
                                       biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer = data.m_Network->AddConvolution2dLayer(desc, weights, bias);

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer produces the operation's output.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
}
453
// Converts an ANEURALNETWORKS_DEPTHWISE_CONV_2D operation to an ArmNN
// DepthwiseConvolution2d layer (NHWC input, constant weights/bias, optional
// fused activation). NNAPI stores the weights as [1, H, W, I * M]; they are
// reinterpreted and permuted into ArmNN's expected layout below.
// Supports both signatures: explicit padding (11 inputs) and implicit padding
// scheme (8 inputs).
bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias

    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const Operand* weightsOperand = GetInputOperand(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }

    // Reinterpret weight data as [ H, W, I, M ]
    // (I = input channels from the input tensor, M = depth multiplier derived
    // from the combined I * M dimension).
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1, model, data,
                                                                            HWIMToMIHW, &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input_scale * weights_scale if they drifted.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    // 11 inputs: explicit per-edge padding.
    if (operation.inputs.size() == 11)
    {
        if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    // 8 inputs: implicit padding scheme (SAME/VALID); padding is derived below.
    else if (operation.inputs.size() == 8)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights are in [ M, I, H, W ] after the swizzle above, hence kernel W/H
        // are at indices 3/2. Input is NHWC.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Reject the operation unless at least one backend can execute it.
    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsDepthwiseConvolutionSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc,
                                       weights.GetInfo(),
                                       biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer = data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, bias);
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer produces the operation's output.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
}
577
578bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
579{
580 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
581 if (!input.IsValid())
582 {
583 return Fail("%s: Operation has invalid inputs", __func__);
584 }
585
586 const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
587 if (!outputOperand)
588 {
589 return Fail("%s: Operation has invalid outputs", __func__);
590 }
591
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100592 if (!IsLayerSupportedForAnyBackend(__func__,
593 armnn::IsFloorSupported,
594 data.m_Backends,
595 input.GetTensorInfo(),
596 GetTensorInfoForOperand(*outputOperand)))
arovir01b0717b52018-09-05 17:03:25 +0100597 {
598 return false;
599 }
600
601 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
602 assert(layer != nullptr);
603 input.Connect(layer->GetInputSlot(0));
604
605 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
606}
607
608bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
609{
610 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
611 if (!input.IsValid())
612 {
613 return Fail("%s: Operation has invalid inputs", __func__);
614 }
615
616 const Operand* output = GetOutputOperand(operation, 0, model);
617 if (!output)
618 {
619 return Fail("%s: Could not read output 0", __func__);
620 }
621
622 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
623 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
624
625 // ArmNN does not currently support non-fixed weights or bias
626 ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1, model, data); // 2D
627 ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data); // 1D
628
629 if (!weightsPin.IsValid() || !biasPin.IsValid())
630 {
631 return Fail("%s: Operation has invalid inputs", __func__);
632 }
633
634 armnn::ConstTensor weights = weightsPin.GetConstTensor();
635 armnn::ConstTensor bias = biasPin.GetConstTensor();
636
637 armnn::TensorInfo reshapedInfo = inputInfo;
638 if (inputInfo.GetNumDimensions() > 2U)
639 {
640 unsigned int dim0 = inputInfo.GetShape()[0];
641 unsigned int dim1 = inputInfo.GetShape()[1];
642
643 for (unsigned int i = 2U; i < inputInfo.GetNumDimensions(); ++i)
644 {
645 dim1 *= inputInfo.GetShape()[i];
646 }
647
648 unsigned int divisor = weights.GetInfo().GetShape()[1] / dim1;
649 if(dim0 % divisor != 0)
650 {
651 return Fail("%s: Failed to deduce tensor shape", __func__);
652 }
653
654 reshapedInfo.SetShape(armnn::TensorShape({dim0 / divisor, dim1 * divisor}));
655 }
656
657 // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
658 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);
659
660 ActivationFn activationFunction;
661 if (!GetInputActivationFunction(operation, 3, activationFunction, model, data))
662 {
663 return Fail("%s: Operation has invalid inputs", __func__);
664 }
665
666 armnn::FullyConnectedDescriptor desc;
667 desc.m_TransposeWeightMatrix = true;
668 desc.m_BiasEnabled = true;
669
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100670 if (!IsLayerSupportedForAnyBackend(__func__,
671 armnn::IsFullyConnectedSupported,
672 data.m_Backends,
673 reshapedInfo,
674 outputInfo,
675 weights.GetInfo(),
676 bias.GetInfo(),
677 desc))
arovir01b0717b52018-09-05 17:03:25 +0100678 {
679 return false;
680 }
681
682 armnn::IConnectableLayer* startLayer = data.m_Network->AddFullyConnectedLayer(desc, weights, bias);
683 armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
684
685 if (endLayer != nullptr)
686 {
687 if (inputInfo.GetNumDimensions() > 2U)
688 {
689 armnn::ReshapeDescriptor reshapeDescriptor;
690 reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
691
692 armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
693 assert(reshapeLayer != nullptr);
694 input.Connect(reshapeLayer->GetInputSlot(0));
695 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
696 reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
697 }
698 else
699 {
700 input.Connect(startLayer->GetInputSlot(0));
701 }
702
703 return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
704 }
705 else
706 {
707 return Fail("%s: ProcessActivation failed", __func__);
708 }
709}
710
711bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
712 const Model& model,
713 ConversionData& data)
714{
715 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
716 if (!input.IsValid())
717 {
718 return Fail("%s: Operation has invalid inputs", __func__);
719 }
720
721 const Operand* output = GetOutputOperand(operation, 0, model);
722 if (!output)
723 {
724 return Fail("%s: Could not read output 0", __func__);
725 }
726
narpra012fb804a2018-10-22 14:52:32 +0100727 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
arovir01b0717b52018-09-05 17:03:25 +0100728 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
729
arovir01b0717b52018-09-05 17:03:25 +0100730 armnn::NormalizationDescriptor descriptor;
731
narpra012fb804a2018-10-22 14:52:32 +0100732 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +0100733 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
narpra012fb804a2018-10-22 14:52:32 +0100734 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
arovir01b0717b52018-09-05 17:03:25 +0100735
736 if (!input.IsValid() ||
737 !GetInputScalar(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
738 !GetInputFloat32(operation, 2, descriptor.m_K, model, data) ||
739 !GetInputFloat32(operation, 3, descriptor.m_Alpha, model, data) ||
740 !GetInputFloat32(operation, 4, descriptor.m_Beta, model, data))
741 {
742 return Fail("%s: Operation has invalid inputs", __func__);
743 }
744
745 // ArmNN expects normSize to be the full size of the normalization
746 // window rather than the radius as in AndroidNN.
747 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
748
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100749 if (!IsLayerSupportedForAnyBackend(__func__,
750 armnn::IsNormalizationSupported,
751 data.m_Backends,
752 inputInfo,
753 outputInfo,
754 descriptor))
arovir01b0717b52018-09-05 17:03:25 +0100755 {
756 return false;
757 }
758
759
760 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
761 assert(layer != nullptr);
narpra012fb804a2018-10-22 14:52:32 +0100762 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +0100763
narpra012fb804a2018-10-22 14:52:32 +0100764 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100765}
766
767bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
768{
769 armnn::ActivationDescriptor desc;
770 desc.m_Function = armnn::ActivationFunction::Sigmoid;
771
772 return ConvertToActivation(operation, __func__, desc, model, data);
773}
774
// Converts an ANEURALNETWORKS_LSTM operation to an armnn::LstmLayer.
// Reads the 23 operation inputs (tensors 0..19 plus three scalar-like inputs 20..22),
// derives the CIFG/peephole/projection configuration from which optional weights are
// present, validates the resulting combination, checks backend support, and finally
// adds and wires up the layer with its four outputs.
// Returns false (via Fail, which also logs) on any invalid or unsupported input.
bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
{
    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     "batch_size" corresponds to the batching dimension, and "input_size" is the size of the input.
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0: input", __func__);
    }
    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    LayerInputHandle outputStateIn = ConvertToLayerInputHandle(operation, 18, model, data);
    if (!outputStateIn.IsValid())
    {
        return Fail("%s: Could not read input 18: outputStateIn", __func__);
    }
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    LayerInputHandle cellStateIn = ConvertToLayerInputHandle(operation, 19, model, data);
    if (!cellStateIn.IsValid())
    {
        return Fail("%s: Could not read input 19: cellStateIn", __func__);
    }

    // Get the mandatory input tensors:
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToForgetWeightsPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data);
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
    const ConstTensorPin inputToCellWeightsPin = ConvertOperationInputToConstTensorPin(operation, 3, model, data);
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToOutputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 4, model, data);
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin(operation, 6, model, data);
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToCellWeightsPin = ConvertOperationInputToConstTensorPin(operation, 7, model, data);
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin(operation, 8, model, data);
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin forgetGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 13, model, data);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellBiasPin = ConvertOperationInputToConstTensorPin(operation, 14, model, data);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin outputGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 15, model, data);

    if (!inputToForgetWeightsPin.IsValid() ||
        !inputToCellWeightsPin.IsValid() ||
        !inputToOutputWeightsPin.IsValid() ||
        !recurrentToForgetWeightsPin.IsValid() ||
        !recurrentToCellWeightsPin.IsValid() ||
        !recurrentToOutputWeightsPin.IsValid() ||
        !forgetGateBiasPin.IsValid() ||
        !cellBiasPin.IsValid() ||
        !outputGateBiasPin.IsValid())
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the optional input tensors. These are converted with optional=true (last argument),
    // so an absent operand yields a pin that is invalid-but-optional rather than an error.
    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where "num_units" corresponds to the number of cell units.
    const ConstTensorPin inputToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 1, model, data,
                                                                                        g_DontPermute, nullptr, true);
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where "output_size" corresponds to either the number of cell units (i.e.,
    //     "num_units"), or the second dimension of the "projection_weights", if defined.
    const ConstTensorPin recurrentToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 5, model, data,
                                                                                            g_DontPermute, nullptr, true);
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 9, model, data,
                                                                                       g_DontPermute, nullptr, true);
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToForgetWeightsPin = ConvertOperationInputToConstTensorPin(operation, 10, model, data,
                                                                                        g_DontPermute, nullptr, true);
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToOutputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 11, model, data,
                                                                                        g_DontPermute, nullptr, true);
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin inputGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 12, model, data,
                                                                                  g_DontPermute, nullptr, true);
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    const ConstTensorPin projectionWeightsPin = ConvertOperationInputToConstTensorPin(operation, 16, model, data,
                                                                                      g_DontPermute, nullptr, true);
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    const ConstTensorPin projectionBiasPin = ConvertOperationInputToConstTensorPin(operation, 17, model, data,
                                                                                   g_DontPermute, nullptr, true);

    // Each optional pin is acceptable either when present-and-valid or when marked optional (absent).
    if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
        (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
        (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
        (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
        (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
        (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
        (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
        (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the mandatory input scalars (actually 1-D tensors of size 1):
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    ActivationFn activation;
    float cellClip;
    float projClip;
    if (!GetInputActivationFunctionFromTensor(operation, 20, activation, model, data) ||
        !GetInputScalar(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
        !GetInputScalar(operation, 22, OperandType::FLOAT32, projClip, model, data))
    {
        return Fail("%s: Operation has invalid scalar inputs", __func__);
    }

    // Outputs:
    // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
    //     CIFG, or [batch_size, num_units * 3] without CIFG.
    const Operand* scratchBuffer = GetOutputOperand(operation, 0, model);
    if (!scratchBuffer)
    {
        return Fail("%s: Could not read output 0: scratchBuffer", __func__);
    }
    // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    const Operand* outputStateOut = GetOutputOperand(operation, 1, model);
    if (!outputStateOut)
    {
        return Fail("%s: Could not read output 1: outputStateOut", __func__);
    }
    // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    const Operand* cellStateOut = GetOutputOperand(operation, 2, model);
    if (!cellStateOut)
    {
        return Fail("%s: Could not read output 2: cellStateOut", __func__);
    }
    // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //     effectively the same as the current "output state (out)" value.
    const Operand* output = GetOutputOperand(operation, 3, model);
    if (!output)
    {
        return Fail("%s: Could not read output 3: output", __func__);
    }

    // Set the params structure for the AddLstmLayer call.
    // Absent optional tensors yield nullptr here, which drives the descriptor flags below.
    armnn::LstmInputParams params;
    params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
    params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
    params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
    params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
    params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
    params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
    params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
    params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
    params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
    params.m_CellBias = cellBiasPin.GetConstTensorPtr();
    params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
    params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
    params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();

    // Set the layer descriptor. CIFG/peephole/projection modes are inferred from which
    // optional tensors were supplied rather than from explicit flags in the NN operation.
    armnn::LstmDescriptor desc;
    desc.m_ActivationFunc = activation;
    desc.m_ClippingThresCell = cellClip;
    desc.m_ClippingThresProj = projClip;
    desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
                          params.m_RecurrentToInputWeights == nullptr ||
                          params.m_InputGateBias == nullptr);
    desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
                              params.m_CellToOutputWeights != nullptr);
    desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);

    // Validate the optional input groups: the CIFG trio must be all-present or all-absent.
    if (desc.m_CifgEnabled &&
        (params.m_InputToInputWeights != nullptr ||
         params.m_RecurrentToInputWeights != nullptr ||
         params.m_InputGateBias != nullptr))
    {
        return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
                    " and input gate bias must be provided", __func__);
    }

    if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
    {
        return Fail("%s: projection bias should not be provided without projection weights", __func__);
    }

    if (desc.m_PeepholeEnabled &&
        (params.m_CellToForgetWeights == nullptr ||
         params.m_CellToOutputWeights == nullptr ||
         (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
    {
        return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
                    " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
    }

    // Check if the layer is supported
    // Inputs
    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
    const armnn::TensorInfo& cellStateInInfo = cellStateIn.GetTensorInfo();

    // Outputs
    const armnn::TensorInfo& scratchBufferInfo = GetTensorInfoForOperand(*scratchBuffer);
    const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
    const armnn::TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Basic parameters
    const armnn::TensorInfo& inputToForgetWeights = params.m_InputToForgetWeights->GetInfo();
    const armnn::TensorInfo& inputToCellWeights = params.m_InputToCellWeights->GetInfo();
    const armnn::TensorInfo& inputToOutputWeights = params.m_InputToOutputWeights->GetInfo();
    const armnn::TensorInfo& recurrentToForgetWeights = params.m_RecurrentToForgetWeights->GetInfo();
    const armnn::TensorInfo& recurrentToCellWeights = params.m_RecurrentToCellWeights->GetInfo();
    const armnn::TensorInfo& recurrentToOutputWeights = params.m_RecurrentToOutputWeights->GetInfo();
    const armnn::TensorInfo& forgetGateBias = params.m_ForgetGateBias->GetInfo();
    const armnn::TensorInfo& cellBias = params.m_CellBias->GetInfo();
    const armnn::TensorInfo& outputGateBias = params.m_OutputGateBias->GetInfo();

    // Optional parameters: nullptr signals "not present" to the support check below.
    const armnn::TensorInfo* inputToInputWeights = nullptr;
    const armnn::TensorInfo* recurrentToInputWeights = nullptr;
    const armnn::TensorInfo* cellToInputWeights = nullptr;
    const armnn::TensorInfo* inputGateBias = nullptr;
    const armnn::TensorInfo* projectionWeights = nullptr;
    const armnn::TensorInfo* projectionBias = nullptr;
    const armnn::TensorInfo* cellToForgetWeights = nullptr;
    const armnn::TensorInfo* cellToOutputWeights = nullptr;

    if(!desc.m_CifgEnabled)
    {
        // The earlier validation guarantees these pointers are non-null when CIFG is disabled.
        inputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
        recurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
        if (params.m_CellToInputWeights != nullptr)
        {
            cellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
        }
        inputGateBias = &(params.m_InputGateBias->GetInfo());
    }

    if(desc.m_ProjectionEnabled)
    {
        projectionWeights = &(params.m_ProjectionWeights->GetInfo());
        if (params.m_ProjectionBias != nullptr)
        {
            projectionBias = &(params.m_ProjectionBias->GetInfo());
        }
    }

    if(desc.m_PeepholeEnabled)
    {
        cellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
        cellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
    }

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsLstmSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputStateInInfo,
                                       cellStateInInfo,
                                       scratchBufferInfo,
                                       outputStateOutInfo,
                                       cellStateOutInfo,
                                       outputInfo,
                                       desc,
                                       inputToForgetWeights,
                                       inputToCellWeights,
                                       inputToOutputWeights,
                                       recurrentToForgetWeights,
                                       recurrentToCellWeights,
                                       recurrentToOutputWeights,
                                       forgetGateBias,
                                       cellBias,
                                       outputGateBias,
                                       inputToInputWeights,
                                       recurrentToInputWeights,
                                       cellToInputWeights,
                                       inputGateBias,
                                       projectionWeights,
                                       projectionBias,
                                       cellToForgetWeights,
                                       cellToOutputWeights))
    {
        return false;
    }

    // Add the layer
    armnn::IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");

    input.Connect(layer->GetInputSlot(0));
    outputStateIn.Connect(layer->GetInputSlot(1));
    cellStateIn.Connect(layer->GetInputSlot(2));

    // Register all four layer outputs against the corresponding operation outputs.
    return (SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
            SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data) &&
            SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data) &&
            SetupAndTrackLayerOutputSlot(operation, 3, *layer, 3, model, data));
}
1084
1085bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
1086{
1087 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1088 if (!input.IsValid())
1089 {
1090 return Fail("%s: Operation has invalid inputs", __func__);
1091 }
1092
1093 const Operand* output = GetOutputOperand(operation, 0, model);
1094 if (!output)
1095 {
1096 return Fail("%s: Could not read output 0", __func__);
1097 }
1098
1099 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1100 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1101
Matteo Martincigh58f71092018-09-25 15:58:52 +01001102 armnn::L2NormalizationDescriptor desc;
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +01001103 desc.m_DataLayout = armnn::DataLayout::NHWC;
Matteo Martincigh58f71092018-09-25 15:58:52 +01001104
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001105 if (!IsLayerSupportedForAnyBackend(__func__,
1106 armnn::IsL2NormalizationSupported,
1107 data.m_Backends,
1108 inputInfo,
1109 outputInfo,
1110 desc))
arovir01b0717b52018-09-05 17:03:25 +01001111 {
1112 return false;
1113 }
1114
Matteo Martincigh58f71092018-09-25 15:58:52 +01001115 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
arovir01b0717b52018-09-05 17:03:25 +01001116 assert(layer != nullptr);
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +01001117 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +01001118
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +01001119 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001120}
1121
1122bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
1123{
1124 return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
1125}
1126
1127bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
1128{
1129 return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
1130}
1131
1132bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
1133{
1134 LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
1135 LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
1136
1137 if (!input0.IsValid() || !input1.IsValid())
1138 {
1139 return Fail("%s: Operation has invalid inputs", __func__);
1140 }
1141
1142 // The FuseActivation parameter is always the input index 2
1143 // and it should be optional
1144 ActivationFn activationFunction;
1145 if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
1146 {
1147 return Fail("%s: Operation has invalid inputs", __func__);
1148 }
1149
1150 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
1151
1152 if (outputOperand == nullptr)
1153 {
1154 return false;
1155 }
1156
1157 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
1158
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001159 if (!IsLayerSupportedForAnyBackend(__func__,
1160 armnn::IsMultiplicationSupported,
1161 data.m_Backends,
1162 input0.GetTensorInfo(),
1163 input1.GetTensorInfo(),
1164 outInfo))
arovir01b0717b52018-09-05 17:03:25 +01001165 {
1166 return false;
1167 }
1168
1169 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
1170 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
1171
1172 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
1173 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
1174
1175 if (endLayer != nullptr)
1176 {
1177 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
1178 return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
1179 }
1180 else
1181 {
1182 return Fail("%s: ProcessActivation failed", __func__);
1183 }
1184}
1185
1186bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
1187{
1188 armnn::ActivationDescriptor desc;
1189 desc.m_Function = armnn::ActivationFunction::ReLu;
1190
1191 return ConvertToActivation(operation, __func__, desc, model, data);
1192}
1193
1194bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
1195{
1196 armnn::ActivationDescriptor desc;
1197 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1198 desc.m_A = 1.0f;
1199 desc.m_B = -1.0f;
1200
1201 return ConvertToActivation(operation, __func__, desc, model, data);
1202}
1203
1204bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
1205{
1206 armnn::ActivationDescriptor desc;
1207 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1208 desc.m_A = 6.0f;
1209
1210 return ConvertToActivation(operation, __func__, desc, model, data);
1211}
1212
1213bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
1214{
1215 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1216 if (!input.IsValid())
1217 {
1218 return Fail("%s: Operation has invalid inputs", __func__);
1219 }
1220
1221 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
1222 if (!outputOperand)
1223 {
1224 return Fail("%s: Operation has no outputs", __func__);
1225 }
1226
1227 const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
1228
1229 armnn::SoftmaxDescriptor desc;
1230 if (!GetInputFloat32(operation, 1, desc.m_Beta, model, data))
1231 {
1232 return Fail("%s: Operation has invalid inputs", __func__);
1233 }
1234
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001235 if (!IsLayerSupportedForAnyBackend(__func__,
1236 armnn::IsSoftmaxSupported,
1237 data.m_Backends,
1238 input.GetTensorInfo(),
1239 outInfo,
1240 desc))
arovir01b0717b52018-09-05 17:03:25 +01001241 {
1242 return false;
1243 }
1244
1245 armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
1246 assert(layer != nullptr);
1247 input.Connect(layer->GetInputSlot(0));
1248
1249 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
1250}
1251
1252bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
1253{
1254 armnn::ActivationDescriptor desc;
1255 desc.m_Function = armnn::ActivationFunction::TanH;
1256 desc.m_A = 1.0f; // android nn does not support tanH parameters
1257 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1258
1259 return ConvertToActivation(operation, __func__, desc, model, data);
1260}
1261
1262bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
1263{
1264 const Operand* inputOperand = GetInputOperand(operation, 0, model);
1265 const Operand* requestedShapeOperand = GetInputOperand(operation, 1, model);
1266 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
1267
1268 if (inputOperand == nullptr
1269 || requestedShapeOperand == nullptr
1270 || outputOperand == nullptr)
1271 {
1272 return Fail("%s: Operation has invalid inputs", __func__);
1273 }
1274
1275
1276 if (requestedShapeOperand->dimensions.size() != 1)
1277 {
1278 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
1279 __func__, requestedShapeOperand->dimensions.size());
1280 }
1281
1282 std::vector<int32_t> targetDimensions;
1283 if (!GetTensorInt32Values(*requestedShapeOperand, targetDimensions, model, data))
1284 {
1285 return Fail("%s: Could not read values of input 1", __func__);
1286 }
1287
1288 const Shape inputOperandShape = GetOperandShape(*inputOperand);
1289
1290 Shape requestedShape;
1291 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
1292 // function that resolves these values into a fully specified tensor shape.
1293 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
1294 {
1295 return Fail("%s: Failed to resolve the requested shape", __func__);
1296 }
1297
1298 const Shape outputOperandShape = GetOperandShape(*outputOperand);
1299 if (!SameShape(requestedShape, outputOperandShape))
1300 {
1301 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
1302 }
1303
1304 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1305 if (!input.IsValid())
1306 {
1307 return Fail("%s: Could not read input 0", __func__);
1308 }
1309
arovir01b0717b52018-09-05 17:03:25 +01001310 armnn::ReshapeDescriptor reshapeDescriptor;
1311 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
1312 requestedShape.dimensions.data());
1313
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001314 if (!IsLayerSupportedForAnyBackend(__func__,
1315 armnn::IsReshapeSupported,
1316 data.m_Backends,
1317 input.GetTensorInfo(),
1318 reshapeDescriptor))
Matteo Martincigh265d1ad2019-01-08 18:14:53 +00001319 {
1320 return false;
1321 }
1322
arovir01b0717b52018-09-05 17:03:25 +01001323 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
1324 assert(layer != nullptr);
1325 input.Connect(layer->GetInputSlot(0));
1326
1327 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
1328}
1329
1330bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data)
1331{
1332 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1333 if (!input.IsValid())
1334 {
1335 return Fail("%s: Could not read input 0", __func__);
1336 }
1337
1338 const Operand* output = GetOutputOperand(operation, 0, model);
1339 if (!output)
1340 {
1341 return Fail("%s: Could not read output 0", __func__);
1342 }
1343
1344 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1345 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1346
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001347 armnn::ResizeBilinearDescriptor desc;
1348 desc.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +01001349
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001350 if (!IsLayerSupportedForAnyBackend(__func__,
1351 armnn::IsResizeBilinearSupported,
1352 data.m_Backends,
1353 inputInfo,
1354 outputInfo))
arovir01b0717b52018-09-05 17:03:25 +01001355 {
1356 return false;
1357 }
1358
arovir01b0717b52018-09-05 17:03:25 +01001359
1360 if ( !GetInputScalar(operation, 1, OperandType::INT32, desc.m_TargetHeight, model, data)
1361 || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_TargetWidth, model, data))
1362 {
1363 return Fail("%s: Operation has invalid inputs", __func__);
1364 }
1365
1366 armnn::IConnectableLayer* layer = data.m_Network->AddResizeBilinearLayer(desc);
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001367
arovir01b0717b52018-09-05 17:03:25 +01001368 assert(layer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +01001369
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001370 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1371 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +01001372
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001373 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001374
1375}
1376
1377} // namespace hal_1_0
Matteo Martincigh58f71092018-09-25 15:58:52 +01001378} // namespace armnn_driver