blob: 17d3d3e1c88620ba7a0db7e320ca232d2cf3ca25 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "HalPolicy.hpp"
7
Matthew Benthamf61c2702019-04-23 16:43:27 +01008#include <armnn/Optional.hpp>
9
10#include "FullyConnected.hpp"
arovir015602b192018-10-04 16:15:02 +010011
arovir01b0717b52018-09-05 17:03:25 +010012namespace armnn_driver
13{
14namespace hal_1_0
15{
16
17bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
18{
19 switch (operation.type)
20 {
21 case V1_0::OperationType::ADD:
22 return ConvertAdd(operation, model, data);
23 case V1_0::OperationType::AVERAGE_POOL_2D:
24 return ConvertAveragePool2d(operation, model, data);
25 case V1_0::OperationType::CONCATENATION:
26 return ConvertConcatenation(operation, model, data);
27 case V1_0::OperationType::CONV_2D:
28 return ConvertConv2d(operation, model, data);
29 case V1_0::OperationType::DEPTHWISE_CONV_2D:
30 return ConvertDepthwiseConv2d(operation, model, data);
31 case V1_0::OperationType::FLOOR:
32 return ConvertFloor(operation, model, data);
33 case V1_0::OperationType::FULLY_CONNECTED:
34 return ConvertFullyConnected(operation, model, data);
35 case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
36 return ConvertLocalResponseNormalization(operation, model, data);
37 case V1_0::OperationType::LOGISTIC:
38 return ConvertLogistic(operation, model, data);
39 case V1_0::OperationType::LSTM:
40 return ConvertLstm(operation, model, data);
41 case V1_0::OperationType::L2_NORMALIZATION:
42 return ConvertL2Normalization(operation, model, data);
43 case V1_0::OperationType::L2_POOL_2D:
44 return ConvertL2Pool2d(operation, model, data);
45 case V1_0::OperationType::MAX_POOL_2D:
46 return ConvertMaxPool2d(operation, model, data);
47 case V1_0::OperationType::MUL:
48 return ConvertMul(operation, model, data);
49 case V1_0::OperationType::RELU:
50 return ConvertReLu(operation, model, data);
51 case V1_0::OperationType::RELU1:
52 return ConvertReLu1(operation, model, data);
53 case V1_0::OperationType::RELU6:
54 return ConvertReLu6(operation, model, data);
55 case V1_0::OperationType::SOFTMAX:
56 return ConvertSoftmax(operation, model, data);
57 case V1_0::OperationType::TANH:
58 return ConvertTanH(operation, model, data);
59 case V1_0::OperationType::RESHAPE:
60 return ConvertReshape(operation, model, data);
61 case V1_0::OperationType::RESIZE_BILINEAR:
62 return ConvertResizeBilinear(operation, model, data);
63 default:
64 return Fail("%s: Operation type %s not supported in ArmnnDriver",
65 __func__, toString(operation.type).c_str());
66 }
67}
68
69bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
70{
71 LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
72 LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
73
74 if (!input0.IsValid() || !input1.IsValid())
75 {
76 return Fail("%s: Operation has invalid inputs", __func__);
77 }
78
79 // The FuseActivation parameter is always the input index 2
80 // and it should be optional
81 ActivationFn activationFunction;
82 if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
83 {
84 return Fail("%s: Operation has invalid inputs", __func__);
85 }
86
87 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
88 if (!outputOperand)
89 {
90 return false;
91 }
92
93 const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
94
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010095 if (!IsLayerSupportedForAnyBackend(__func__,
96 armnn::IsAdditionSupported,
97 data.m_Backends,
98 input0.GetTensorInfo(),
99 input1.GetTensorInfo(),
100 outInfo))
arovir01b0717b52018-09-05 17:03:25 +0100101 {
102 return false;
103 }
104
105 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
106 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
107
108 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
109 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
110
111 if (endLayer != nullptr)
112 {
113 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
114 return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
115 }
116 else
117 {
118 return Fail("%s: ProcessActivation failed", __func__);
119 }
120}
121
122bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
123{
124 return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
125}
126
// Converts a HAL 1.0 CONCATENATION operation into an ArmNN Concat layer.
// Handles three complications on top of the plain concat:
//  - rank-1/rank-2 inputs are first reshaped up to rank 3 (and the output
//    reshaped back down afterwards);
//  - concat dimensions that ArmNN cannot handle directly are worked around
//    by permuting the inputs, concatenating, then permuting back;
//  - negative concat axes are normalized TensorFlow-style.
bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
{
    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    // The axis is the last operand, stored as an INT32 scalar.
    int32_t concatDim;
    if (!GetInputScalar(operation, numInputTensors, OperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }


    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    // https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    // After normalization the axis must index a real output dimension.
    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    // Track whether any input needed padding up to rank 3, and by how many
    // dimensions, so the concat axis and output shape can be adjusted to match.
    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    // Collect a handle and shape for every input tensor, inserting a reshape
    // layer in front of any input whose rank is too low for ArmNN's concat.
    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const Operand* const operand = GetInputOperand(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle(operation, i, model, data);

        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                *data.m_Network,
                operandInputHandle,
                reshapeInfo
            );

            // Point to the reshape operation rather then the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    BOOST_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check if permutations is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
        std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute = CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        // The output shape must describe the permuted (pre-deswizzle) tensor.
        outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // this is no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    SwizzleInputs(*data.m_Network, inputHandles, inputShapes, permutationPair.first);

    // Create an armnn merger layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor mergerDescriptor;

    try
    {
        // The merger descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        mergerDescriptor =
            armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (const armnn::Exception& error)
    {
        return Fail("%s: Error preparing merger descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    // Backend support check needs the per-input TensorInfos by pointer.
    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
        [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsConcatSupported,
                                       data.m_Backends,
                                       inputTensorInfos,
                                       outputInfo,
                                       mergerDescriptor))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(mergerDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
                                                                   layer->GetOutputSlot(0),
                                                                   permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        // The reshape layer becomes the final output of the whole conversion.
        layer = &AddReshapeLayer(
            *data.m_Network,
            layer->GetOutputSlot(0),
            afterConcatInfo
        );
    }

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}
349
// Converts a HAL 1.0 CONV_2D operation into an ArmNN Convolution2d layer
// (NHWC layout, constant weights/bias, optional fused activation).
bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    // Input 1 is the weights tensor, input 2 is the bias; both must be constants.
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1, model, data);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Keep the bias quantization scale consistent with input scale * weights scale.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    // Explicit-padding variant: inputs 3-6 are pad left/right/top/bottom,
    // 7-8 are strides, 9 is the fused activation.
    if (operation.inputs.size() == 10)
    {
        if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    // Implicit-padding variant: input 3 is the padding scheme, 4-5 are strides,
    // 6 is the fused activation; padding is derived from the scheme below.
    else if (operation.inputs.size() == 7)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // NHWC: dimension 1 is height (Y), dimension 2 is width (X),
        // for both the weights tensor and the input tensor.
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Check that at least one of the configured backends can run the layer.
    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsConvolution2dSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc,
                                       weights.GetInfo(),
                                       biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer is the conversion's output.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
}
455
// Converts a HAL 1.0 DEPTHWISE_CONV_2D operation into an ArmNN
// DepthwiseConvolution2d layer. The AndroidNN weight layout [1, H, W, I*M]
// is reinterpreted as [H, W, I, M] and swizzled to ArmNN's [M, I, H, W].
bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias

    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const Operand* weightsOperand = GetInputOperand(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }

    // Reinterpret weight data as [ H, W, I, M ]
    // I is taken from the input's channel dimension; M is derived as (I*M)/I.
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1, model, data,
                                                                            HWIMToMIHW, &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Keep the bias quantization scale consistent with input scale * weights scale.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    // Explicit-padding variant: inputs 3-6 are pad left/right/top/bottom,
    // 7-8 are strides, 9 is the depth multiplier (implied by the weight
    // shape above, so not read here), 10 is the fused activation.
    if (operation.inputs.size() == 11)
    {
        if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    // Implicit-padding variant: input 3 is the padding scheme, 4-5 are strides,
    // 7 is the fused activation; padding is derived from the scheme below.
    else if (operation.inputs.size() == 8)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights are now [M, I, H, W]: dimension 2 is height (Y), 3 is width (X).
        // The NHWC input has height at dimension 1 and width at dimension 2.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Check that at least one of the configured backends can run the layer.
    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsDepthwiseConvolutionSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc,
                                       weights.GetInfo(),
                                       biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer is the conversion's output.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
}
580
581bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
582{
583 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
584 if (!input.IsValid())
585 {
586 return Fail("%s: Operation has invalid inputs", __func__);
587 }
588
589 const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
590 if (!outputOperand)
591 {
592 return Fail("%s: Operation has invalid outputs", __func__);
593 }
594
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100595 if (!IsLayerSupportedForAnyBackend(__func__,
596 armnn::IsFloorSupported,
597 data.m_Backends,
598 input.GetTensorInfo(),
599 GetTensorInfoForOperand(*outputOperand)))
arovir01b0717b52018-09-05 17:03:25 +0100600 {
601 return false;
602 }
603
604 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
605 assert(layer != nullptr);
606 input.Connect(layer->GetInputSlot(0));
607
608 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
609}
610
611bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
612{
613 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
614 if (!input.IsValid())
615 {
616 return Fail("%s: Operation has invalid inputs", __func__);
617 }
618
619 const Operand* output = GetOutputOperand(operation, 0, model);
620 if (!output)
621 {
622 return Fail("%s: Could not read output 0", __func__);
623 }
624
625 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
626 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
627
628 // ArmNN does not currently support non-fixed weights or bias
629 ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1, model, data); // 2D
630 ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data); // 1D
631
632 if (!weightsPin.IsValid() || !biasPin.IsValid())
633 {
634 return Fail("%s: Operation has invalid inputs", __func__);
635 }
636
637 armnn::ConstTensor weights = weightsPin.GetConstTensor();
638 armnn::ConstTensor bias = biasPin.GetConstTensor();
arovir01b0717b52018-09-05 17:03:25 +0100639 armnn::TensorInfo reshapedInfo = inputInfo;
Matthew Benthamf61c2702019-04-23 16:43:27 +0100640
641 try
arovir01b0717b52018-09-05 17:03:25 +0100642 {
Matthew Benthamf61c2702019-04-23 16:43:27 +0100643 reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
644 } catch (const std::exception &e) {
645 return Fail("%s: %s", __func__, e.what());
arovir01b0717b52018-09-05 17:03:25 +0100646 }
647
648 // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
649 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);
650
651 ActivationFn activationFunction;
652 if (!GetInputActivationFunction(operation, 3, activationFunction, model, data))
653 {
654 return Fail("%s: Operation has invalid inputs", __func__);
655 }
656
657 armnn::FullyConnectedDescriptor desc;
658 desc.m_TransposeWeightMatrix = true;
659 desc.m_BiasEnabled = true;
660
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100661 if (!IsLayerSupportedForAnyBackend(__func__,
662 armnn::IsFullyConnectedSupported,
663 data.m_Backends,
664 reshapedInfo,
665 outputInfo,
666 weights.GetInfo(),
667 bias.GetInfo(),
668 desc))
arovir01b0717b52018-09-05 17:03:25 +0100669 {
670 return false;
671 }
672
Matteo Martincighba01f372019-05-14 13:28:21 +0100673 armnn::IConnectableLayer* startLayer =
674 data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
arovir01b0717b52018-09-05 17:03:25 +0100675 armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
676
677 if (endLayer != nullptr)
678 {
679 if (inputInfo.GetNumDimensions() > 2U)
680 {
681 armnn::ReshapeDescriptor reshapeDescriptor;
682 reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
683
684 armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
685 assert(reshapeLayer != nullptr);
686 input.Connect(reshapeLayer->GetInputSlot(0));
687 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
688 reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
689 }
690 else
691 {
692 input.Connect(startLayer->GetInputSlot(0));
693 }
694
695 return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
696 }
697 else
698 {
699 return Fail("%s: ProcessActivation failed", __func__);
700 }
701}
702
703bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
704 const Model& model,
705 ConversionData& data)
706{
707 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
708 if (!input.IsValid())
709 {
710 return Fail("%s: Operation has invalid inputs", __func__);
711 }
712
713 const Operand* output = GetOutputOperand(operation, 0, model);
714 if (!output)
715 {
716 return Fail("%s: Could not read output 0", __func__);
717 }
718
narpra012fb804a2018-10-22 14:52:32 +0100719 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
arovir01b0717b52018-09-05 17:03:25 +0100720 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
721
arovir01b0717b52018-09-05 17:03:25 +0100722 armnn::NormalizationDescriptor descriptor;
723
narpra012fb804a2018-10-22 14:52:32 +0100724 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +0100725 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
narpra012fb804a2018-10-22 14:52:32 +0100726 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
arovir01b0717b52018-09-05 17:03:25 +0100727
728 if (!input.IsValid() ||
729 !GetInputScalar(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
730 !GetInputFloat32(operation, 2, descriptor.m_K, model, data) ||
731 !GetInputFloat32(operation, 3, descriptor.m_Alpha, model, data) ||
732 !GetInputFloat32(operation, 4, descriptor.m_Beta, model, data))
733 {
734 return Fail("%s: Operation has invalid inputs", __func__);
735 }
736
737 // ArmNN expects normSize to be the full size of the normalization
738 // window rather than the radius as in AndroidNN.
739 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
740
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100741 if (!IsLayerSupportedForAnyBackend(__func__,
742 armnn::IsNormalizationSupported,
743 data.m_Backends,
744 inputInfo,
745 outputInfo,
746 descriptor))
arovir01b0717b52018-09-05 17:03:25 +0100747 {
748 return false;
749 }
750
751
752 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
753 assert(layer != nullptr);
narpra012fb804a2018-10-22 14:52:32 +0100754 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +0100755
narpra012fb804a2018-10-22 14:52:32 +0100756 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100757}
758
759bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
760{
761 armnn::ActivationDescriptor desc;
762 desc.m_Function = armnn::ActivationFunction::Sigmoid;
763
764 return ConvertToActivation(operation, __func__, desc, model, data);
765}
766
// Converts an ANEURALNETWORKS_LSTM operation (HAL 1.0 float LSTM) to an ArmNN Lstm layer.
// Reads the 23 documented operands, detects the optional feature groups
// (CIFG, peephole, projection), validates backend support and wires up the
// four outputs (scratch buffer, output state, cell state, output).
bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
{
    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0: input", __func__);
    }
    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    LayerInputHandle outputStateIn = ConvertToLayerInputHandle(operation, 18, model, data);
    if (!outputStateIn.IsValid())
    {
        return Fail("%s: Could not read input 18: outputStateIn", __func__);
    }
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    LayerInputHandle cellStateIn = ConvertToLayerInputHandle(operation, 19, model, data);
    if (!cellStateIn.IsValid())
    {
        return Fail("%s: Could not read input 19: cellStateIn", __func__);
    }

    // Get the mandatory input tensors:
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToForgetWeightsPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data);
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
    const ConstTensorPin inputToCellWeightsPin = ConvertOperationInputToConstTensorPin(operation, 3, model, data);
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToOutputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 4, model, data);
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin(operation, 6, model, data);
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToCellWeightsPin = ConvertOperationInputToConstTensorPin(operation, 7, model, data);
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin(operation, 8, model, data);
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin forgetGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 13, model, data);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellBiasPin = ConvertOperationInputToConstTensorPin(operation, 14, model, data);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin outputGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 15, model, data);

    if (!inputToForgetWeightsPin.IsValid() ||
        !inputToCellWeightsPin.IsValid() ||
        !inputToOutputWeightsPin.IsValid() ||
        !recurrentToForgetWeightsPin.IsValid() ||
        !recurrentToCellWeightsPin.IsValid() ||
        !recurrentToOutputWeightsPin.IsValid() ||
        !forgetGateBiasPin.IsValid() ||
        !cellBiasPin.IsValid() ||
        !outputGateBiasPin.IsValid())
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the optional input tensors:
    // The trailing (g_DontPermute, nullptr, true) arguments mark the operand as
    // optional, so an absent operand yields an invalid-but-optional pin instead of a failure.
    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    const ConstTensorPin inputToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 1, model, data,
                                                                                        g_DontPermute, nullptr, true);
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    const ConstTensorPin recurrentToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 5, model, data,
                                                                                            g_DontPermute, nullptr, true);
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 9, model, data,
                                                                                       g_DontPermute, nullptr, true);
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToForgetWeightsPin = ConvertOperationInputToConstTensorPin(operation, 10, model, data,
                                                                                        g_DontPermute, nullptr, true);
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToOutputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 11, model, data,
                                                                                        g_DontPermute, nullptr, true);
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin inputGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 12, model, data,
                                                                                  g_DontPermute, nullptr, true);
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    const ConstTensorPin projectionWeightsPin = ConvertOperationInputToConstTensorPin(operation, 16, model, data,
                                                                                      g_DontPermute, nullptr, true);
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    const ConstTensorPin projectionBiasPin = ConvertOperationInputToConstTensorPin(operation, 17, model, data,
                                                                                   g_DontPermute, nullptr, true);

    // An optional pin may be invalid only if the operand was genuinely absent
    // (IsOptional). Invalid AND non-optional means the operand was present but unreadable.
    if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
        (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
        (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
        (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
        (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
        (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
        (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
        (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the mandatory input scalars (actually 1-D tensors of size 1):
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    ActivationFn activation;
    float cellClip;
    float projClip;
    if (!GetInputActivationFunctionFromTensor(operation, 20, activation, model, data) ||
        !GetInputScalar(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
        !GetInputScalar(operation, 22, OperandType::FLOAT32, projClip, model, data))
    {
        return Fail("%s: Operation has invalid scalar inputs", __func__);
    }

    // Outputs:
    // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
    //     CIFG, or [batch_size, num_units * 3] without CIFG.
    const Operand* scratchBuffer = GetOutputOperand(operation, 0, model);
    if (!scratchBuffer)
    {
        return Fail("%s: Could not read output 0: scratchBuffer", __func__);
    }
    // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    const Operand* outputStateOut = GetOutputOperand(operation, 1, model);
    if (!outputStateOut)
    {
        return Fail("%s: Could not read output 1: outputStateOut", __func__);
    }
    // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    const Operand* cellStateOut = GetOutputOperand(operation, 2, model);
    if (!cellStateOut)
    {
        return Fail("%s: Could not read output 2: cellStateOut", __func__);
    }
    // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //     effectively the same as the current “output state (out)” value.
    const Operand* output = GetOutputOperand(operation, 3, model);
    if (!output)
    {
        return Fail("%s: Could not read output 3: output", __func__);
    }

    // set the params structure for the AddLstmLayer call
    // (absent optional tensors yield nullptr from GetConstTensorPtr)
    armnn::LstmInputParams params;
    params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
    params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
    params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
    params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
    params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
    params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
    params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
    params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
    params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
    params.m_CellBias = cellBiasPin.GetConstTensorPtr();
    params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
    params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
    params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();

    // set the layer descriptor
    armnn::LstmDescriptor desc;
    desc.m_ActivationFunc = activation;
    desc.m_ClippingThresCell = cellClip;
    desc.m_ClippingThresProj = projClip;
    // CIFG (coupled input-forget gate) is inferred from the absence of any
    // part of the input-gate group; the all-or-none check below rejects partial groups.
    desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
                          params.m_RecurrentToInputWeights == nullptr ||
                          params.m_InputGateBias == nullptr);
    desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
                              params.m_CellToOutputWeights != nullptr);
    desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);

    // validate the optional input groups
    if (desc.m_CifgEnabled &&
        (params.m_InputToInputWeights != nullptr ||
         params.m_RecurrentToInputWeights != nullptr ||
         params.m_InputGateBias != nullptr))
    {
        return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
                    " and input gate bias must be provided", __func__);
    }

    if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
    {
        return Fail("%s: projection bias should not be provided without projection weights", __func__);
    }

    if (desc.m_PeepholeEnabled &&
        (params.m_CellToForgetWeights == nullptr ||
         params.m_CellToOutputWeights == nullptr ||
         (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
    {
        return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
                    " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
    }

    // Check if the layer is supported
    // Inputs
    const armnn::TensorInfo& inputInfo         = input.GetTensorInfo();
    const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
    const armnn::TensorInfo& cellStateInInfo   = cellStateIn.GetTensorInfo();

    // Outputs
    const armnn::TensorInfo& scratchBufferInfo  = GetTensorInfoForOperand(*scratchBuffer);
    const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
    const armnn::TensorInfo& cellStateOutInfo   = GetTensorInfoForOperand(*cellStateOut);
    const armnn::TensorInfo& outputInfo         = GetTensorInfoForOperand(*output);

    // Basic parameters
    const armnn::TensorInfo& inputToForgetWeights = params.m_InputToForgetWeights->GetInfo();
    const armnn::TensorInfo& inputToCellWeights   = params.m_InputToCellWeights->GetInfo();
    const armnn::TensorInfo& inputToOutputWeights = params.m_InputToOutputWeights->GetInfo();
    const armnn::TensorInfo& recurrentToForgetWeights = params.m_RecurrentToForgetWeights->GetInfo();
    const armnn::TensorInfo& recurrentToCellWeights   = params.m_RecurrentToCellWeights->GetInfo();
    const armnn::TensorInfo& recurrentToOutputWeights = params.m_RecurrentToOutputWeights->GetInfo();
    const armnn::TensorInfo& forgetGateBias = params.m_ForgetGateBias->GetInfo();
    const armnn::TensorInfo& cellBias       = params.m_CellBias->GetInfo();
    const armnn::TensorInfo& outputGateBias = params.m_OutputGateBias->GetInfo();

    //Optional parameters
    // These stay nullptr unless the corresponding feature group is enabled below.
    const armnn::TensorInfo* inputToInputWeights = nullptr;
    const armnn::TensorInfo* recurrentToInputWeights = nullptr;
    const armnn::TensorInfo* cellToInputWeights = nullptr;
    const armnn::TensorInfo* inputGateBias = nullptr;
    const armnn::TensorInfo* projectionWeights = nullptr;
    const armnn::TensorInfo* projectionBias = nullptr;
    const armnn::TensorInfo* cellToForgetWeights = nullptr;
    const armnn::TensorInfo* cellToOutputWeights = nullptr;

    if(!desc.m_CifgEnabled)
    {
        inputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
        recurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
        if (params.m_CellToInputWeights != nullptr)
        {
            cellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
        }
        inputGateBias = &(params.m_InputGateBias->GetInfo());
    }

    if(desc.m_ProjectionEnabled)
    {
        projectionWeights = &(params.m_ProjectionWeights->GetInfo());
        if (params.m_ProjectionBias != nullptr)
        {
            projectionBias = &(params.m_ProjectionBias->GetInfo());
        }
    }

    if(desc.m_PeepholeEnabled)
    {
        cellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
        cellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
    }

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsLstmSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputStateInInfo,
                                       cellStateInInfo,
                                       scratchBufferInfo,
                                       outputStateOutInfo,
                                       cellStateOutInfo,
                                       outputInfo,
                                       desc,
                                       inputToForgetWeights,
                                       inputToCellWeights,
                                       inputToOutputWeights,
                                       recurrentToForgetWeights,
                                       recurrentToCellWeights,
                                       recurrentToOutputWeights,
                                       forgetGateBias,
                                       cellBias,
                                       outputGateBias,
                                       inputToInputWeights,
                                       recurrentToInputWeights,
                                       cellToInputWeights,
                                       inputGateBias,
                                       projectionWeights,
                                       projectionBias,
                                       cellToForgetWeights,
                                       cellToOutputWeights))
    {
        return false;
    }

    // Add the layer
    armnn::IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");

    input.Connect(layer->GetInputSlot(0));
    outputStateIn.Connect(layer->GetInputSlot(1));
    cellStateIn.Connect(layer->GetInputSlot(2));

    // Map the layer's four output slots onto the operation's four outputs, in order.
    return (SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
            SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data) &&
            SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data) &&
            SetupAndTrackLayerOutputSlot(operation, 3, *layer, 3, model, data));
}
1076
1077bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
1078{
1079 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1080 if (!input.IsValid())
1081 {
1082 return Fail("%s: Operation has invalid inputs", __func__);
1083 }
1084
1085 const Operand* output = GetOutputOperand(operation, 0, model);
1086 if (!output)
1087 {
1088 return Fail("%s: Could not read output 0", __func__);
1089 }
1090
1091 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1092 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1093
Matteo Martincigh58f71092018-09-25 15:58:52 +01001094 armnn::L2NormalizationDescriptor desc;
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +01001095 desc.m_DataLayout = armnn::DataLayout::NHWC;
Matteo Martincigh58f71092018-09-25 15:58:52 +01001096
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001097 if (!IsLayerSupportedForAnyBackend(__func__,
1098 armnn::IsL2NormalizationSupported,
1099 data.m_Backends,
1100 inputInfo,
1101 outputInfo,
1102 desc))
arovir01b0717b52018-09-05 17:03:25 +01001103 {
1104 return false;
1105 }
1106
Matteo Martincigh58f71092018-09-25 15:58:52 +01001107 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
arovir01b0717b52018-09-05 17:03:25 +01001108 assert(layer != nullptr);
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +01001109 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +01001110
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +01001111 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001112}
1113
1114bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
1115{
1116 return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
1117}
1118
1119bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
1120{
1121 return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
1122}
1123
1124bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
1125{
1126 LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
1127 LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
1128
1129 if (!input0.IsValid() || !input1.IsValid())
1130 {
1131 return Fail("%s: Operation has invalid inputs", __func__);
1132 }
1133
1134 // The FuseActivation parameter is always the input index 2
1135 // and it should be optional
1136 ActivationFn activationFunction;
1137 if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
1138 {
1139 return Fail("%s: Operation has invalid inputs", __func__);
1140 }
1141
1142 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
1143
1144 if (outputOperand == nullptr)
1145 {
1146 return false;
1147 }
1148
1149 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
1150
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001151 if (!IsLayerSupportedForAnyBackend(__func__,
1152 armnn::IsMultiplicationSupported,
1153 data.m_Backends,
1154 input0.GetTensorInfo(),
1155 input1.GetTensorInfo(),
1156 outInfo))
arovir01b0717b52018-09-05 17:03:25 +01001157 {
1158 return false;
1159 }
1160
1161 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
1162 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
1163
1164 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
1165 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
1166
1167 if (endLayer != nullptr)
1168 {
1169 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
1170 return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
1171 }
1172 else
1173 {
1174 return Fail("%s: ProcessActivation failed", __func__);
1175 }
1176}
1177
1178bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
1179{
1180 armnn::ActivationDescriptor desc;
1181 desc.m_Function = armnn::ActivationFunction::ReLu;
1182
1183 return ConvertToActivation(operation, __func__, desc, model, data);
1184}
1185
1186bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
1187{
1188 armnn::ActivationDescriptor desc;
1189 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1190 desc.m_A = 1.0f;
1191 desc.m_B = -1.0f;
1192
1193 return ConvertToActivation(operation, __func__, desc, model, data);
1194}
1195
1196bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
1197{
1198 armnn::ActivationDescriptor desc;
1199 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1200 desc.m_A = 6.0f;
1201
1202 return ConvertToActivation(operation, __func__, desc, model, data);
1203}
1204
1205bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
1206{
1207 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1208 if (!input.IsValid())
1209 {
1210 return Fail("%s: Operation has invalid inputs", __func__);
1211 }
1212
1213 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
1214 if (!outputOperand)
1215 {
1216 return Fail("%s: Operation has no outputs", __func__);
1217 }
1218
1219 const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
1220
1221 armnn::SoftmaxDescriptor desc;
1222 if (!GetInputFloat32(operation, 1, desc.m_Beta, model, data))
1223 {
1224 return Fail("%s: Operation has invalid inputs", __func__);
1225 }
1226
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001227 if (!IsLayerSupportedForAnyBackend(__func__,
1228 armnn::IsSoftmaxSupported,
1229 data.m_Backends,
1230 input.GetTensorInfo(),
1231 outInfo,
1232 desc))
arovir01b0717b52018-09-05 17:03:25 +01001233 {
1234 return false;
1235 }
1236
1237 armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
1238 assert(layer != nullptr);
1239 input.Connect(layer->GetInputSlot(0));
1240
1241 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
1242}
1243
1244bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
1245{
1246 armnn::ActivationDescriptor desc;
1247 desc.m_Function = armnn::ActivationFunction::TanH;
1248 desc.m_A = 1.0f; // android nn does not support tanH parameters
1249 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1250
1251 return ConvertToActivation(operation, __func__, desc, model, data);
1252}
1253
1254bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
1255{
1256 const Operand* inputOperand = GetInputOperand(operation, 0, model);
1257 const Operand* requestedShapeOperand = GetInputOperand(operation, 1, model);
1258 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
1259
1260 if (inputOperand == nullptr
1261 || requestedShapeOperand == nullptr
1262 || outputOperand == nullptr)
1263 {
1264 return Fail("%s: Operation has invalid inputs", __func__);
1265 }
1266
1267
1268 if (requestedShapeOperand->dimensions.size() != 1)
1269 {
1270 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
1271 __func__, requestedShapeOperand->dimensions.size());
1272 }
1273
1274 std::vector<int32_t> targetDimensions;
1275 if (!GetTensorInt32Values(*requestedShapeOperand, targetDimensions, model, data))
1276 {
1277 return Fail("%s: Could not read values of input 1", __func__);
1278 }
1279
1280 const Shape inputOperandShape = GetOperandShape(*inputOperand);
1281
1282 Shape requestedShape;
1283 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
1284 // function that resolves these values into a fully specified tensor shape.
1285 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
1286 {
1287 return Fail("%s: Failed to resolve the requested shape", __func__);
1288 }
1289
1290 const Shape outputOperandShape = GetOperandShape(*outputOperand);
1291 if (!SameShape(requestedShape, outputOperandShape))
1292 {
1293 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
1294 }
1295
1296 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1297 if (!input.IsValid())
1298 {
1299 return Fail("%s: Could not read input 0", __func__);
1300 }
1301
arovir01b0717b52018-09-05 17:03:25 +01001302 armnn::ReshapeDescriptor reshapeDescriptor;
1303 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
1304 requestedShape.dimensions.data());
1305
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001306 if (!IsLayerSupportedForAnyBackend(__func__,
1307 armnn::IsReshapeSupported,
1308 data.m_Backends,
1309 input.GetTensorInfo(),
1310 reshapeDescriptor))
Matteo Martincigh265d1ad2019-01-08 18:14:53 +00001311 {
1312 return false;
1313 }
1314
arovir01b0717b52018-09-05 17:03:25 +01001315 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
1316 assert(layer != nullptr);
1317 input.Connect(layer->GetInputSlot(0));
1318
1319 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
1320}
1321
1322bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data)
1323{
1324 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1325 if (!input.IsValid())
1326 {
1327 return Fail("%s: Could not read input 0", __func__);
1328 }
1329
1330 const Operand* output = GetOutputOperand(operation, 0, model);
1331 if (!output)
1332 {
1333 return Fail("%s: Could not read output 0", __func__);
1334 }
1335
1336 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1337 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1338
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001339 armnn::ResizeBilinearDescriptor desc;
1340 desc.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +01001341
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001342 if (!IsLayerSupportedForAnyBackend(__func__,
1343 armnn::IsResizeBilinearSupported,
1344 data.m_Backends,
1345 inputInfo,
1346 outputInfo))
arovir01b0717b52018-09-05 17:03:25 +01001347 {
1348 return false;
1349 }
1350
arovir01b0717b52018-09-05 17:03:25 +01001351
1352 if ( !GetInputScalar(operation, 1, OperandType::INT32, desc.m_TargetHeight, model, data)
1353 || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_TargetWidth, model, data))
1354 {
1355 return Fail("%s: Operation has invalid inputs", __func__);
1356 }
1357
1358 armnn::IConnectableLayer* layer = data.m_Network->AddResizeBilinearLayer(desc);
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001359
arovir01b0717b52018-09-05 17:03:25 +01001360 assert(layer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +01001361
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001362 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1363 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +01001364
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001365 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001366
1367}
1368
1369} // namespace hal_1_0
Matteo Martincigh58f71092018-09-25 15:58:52 +01001370} // namespace armnn_driver