blob: 121379acd98b90e2c0562ab54ec8a653bcf0105c [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "HalPolicy.hpp"
7
Matthew Benthamf61c2702019-04-23 16:43:27 +01008#include <armnn/Optional.hpp>
9
10#include "FullyConnected.hpp"
arovir015602b192018-10-04 16:15:02 +010011
arovir01b0717b52018-09-05 17:03:25 +010012namespace armnn_driver
13{
14namespace hal_1_0
15{
16
17bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
18{
19 switch (operation.type)
20 {
21 case V1_0::OperationType::ADD:
22 return ConvertAdd(operation, model, data);
23 case V1_0::OperationType::AVERAGE_POOL_2D:
24 return ConvertAveragePool2d(operation, model, data);
25 case V1_0::OperationType::CONCATENATION:
26 return ConvertConcatenation(operation, model, data);
27 case V1_0::OperationType::CONV_2D:
28 return ConvertConv2d(operation, model, data);
29 case V1_0::OperationType::DEPTHWISE_CONV_2D:
30 return ConvertDepthwiseConv2d(operation, model, data);
31 case V1_0::OperationType::FLOOR:
32 return ConvertFloor(operation, model, data);
33 case V1_0::OperationType::FULLY_CONNECTED:
34 return ConvertFullyConnected(operation, model, data);
35 case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
36 return ConvertLocalResponseNormalization(operation, model, data);
37 case V1_0::OperationType::LOGISTIC:
38 return ConvertLogistic(operation, model, data);
39 case V1_0::OperationType::LSTM:
40 return ConvertLstm(operation, model, data);
41 case V1_0::OperationType::L2_NORMALIZATION:
42 return ConvertL2Normalization(operation, model, data);
43 case V1_0::OperationType::L2_POOL_2D:
44 return ConvertL2Pool2d(operation, model, data);
45 case V1_0::OperationType::MAX_POOL_2D:
46 return ConvertMaxPool2d(operation, model, data);
47 case V1_0::OperationType::MUL:
48 return ConvertMul(operation, model, data);
49 case V1_0::OperationType::RELU:
50 return ConvertReLu(operation, model, data);
51 case V1_0::OperationType::RELU1:
52 return ConvertReLu1(operation, model, data);
53 case V1_0::OperationType::RELU6:
54 return ConvertReLu6(operation, model, data);
55 case V1_0::OperationType::SOFTMAX:
56 return ConvertSoftmax(operation, model, data);
57 case V1_0::OperationType::TANH:
58 return ConvertTanH(operation, model, data);
59 case V1_0::OperationType::RESHAPE:
60 return ConvertReshape(operation, model, data);
61 case V1_0::OperationType::RESIZE_BILINEAR:
62 return ConvertResizeBilinear(operation, model, data);
63 default:
64 return Fail("%s: Operation type %s not supported in ArmnnDriver",
65 __func__, toString(operation.type).c_str());
66 }
67}
68
69bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
70{
71 LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
72 LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
73
74 if (!input0.IsValid() || !input1.IsValid())
75 {
76 return Fail("%s: Operation has invalid inputs", __func__);
77 }
78
79 // The FuseActivation parameter is always the input index 2
80 // and it should be optional
81 ActivationFn activationFunction;
82 if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
83 {
84 return Fail("%s: Operation has invalid inputs", __func__);
85 }
86
87 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
88 if (!outputOperand)
89 {
90 return false;
91 }
92
93 const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
94
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010095 if (!IsLayerSupportedForAnyBackend(__func__,
96 armnn::IsAdditionSupported,
97 data.m_Backends,
98 input0.GetTensorInfo(),
99 input1.GetTensorInfo(),
100 outInfo))
arovir01b0717b52018-09-05 17:03:25 +0100101 {
102 return false;
103 }
104
105 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
106 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
107
108 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
109 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
110
111 if (endLayer != nullptr)
112 {
113 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
114 return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
115 }
116 else
117 {
118 return Fail("%s: ProcessActivation failed", __func__);
119 }
120}
121
122bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
123{
124 return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
125}
126
127bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
128{
129 // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
130 if (operation.inputs.size() <= 1)
131 {
132 return Fail("%s: Operation has insufficient arguments", __func__);
133 }
134
135 // Get inputs and outputs
136 const std::size_t numInputTensors = operation.inputs.size() - 1;
137
138 int32_t concatDim;
139 if (!GetInputScalar(operation, numInputTensors, OperandType::INT32, concatDim, model, data))
140 {
141 return Fail("%s: Operation has invalid inputs", __func__);
142 }
143
144 const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
145 if (!outputOperand)
146 {
147 return Fail("%s: Operation has no outputs", __func__);
148 }
149
150
151 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
152 armnn::TensorShape outputShape = outputInfo.GetShape();
153
154 //
155 // handle negative concat dims along the lines of tensorflow as described here:
156 // https://www.tensorflow.org/api_docs/python/tf/concat
157 // "negative axis refers to axis + rank(values)-th dimension"
158 //
159 if (concatDim < 0)
160 {
161 concatDim += outputShape.GetNumDimensions();
162 }
163
164 if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
165 {
166 return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
167 }
168
169 std::vector<LayerInputHandle> inputHandles;
170 std::vector<armnn::TensorShape> inputShapes;
171
172 inputHandles.reserve(numInputTensors);
173 inputShapes.reserve(numInputTensors);
174
175 bool inputsHaveBeenReshaped = false;
176 unsigned int tensorDimensionsAdded = 0;
177
178 for (uint32_t i = 0; i < numInputTensors; ++i)
179 {
180 const Operand* const operand = GetInputOperand(operation, i, model);
181 if (!operand)
182 {
183 return Fail("%s: Operation has invalid inputs", __func__);
184 }
185
186 armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
187 LayerInputHandle operandInputHandle = ConvertToLayerInputHandle(operation, i, model, data);
188
189 if (operandShape.GetNumDimensions() == 0)
190 {
191 return Fail("%s: Operands with rank 0 are not supported", __func__);
192 }
193
194 if (RequiresReshape(operandShape))
195 {
196 inputsHaveBeenReshaped = true;
197
198 armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();
199
200 // Expand the tensor to three dimensions
201 if (operandShape.GetNumDimensions() == 2)
202 {
203 reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
204 tensorDimensionsAdded = 1;
205 }
206 else
207 {
208 reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
209 tensorDimensionsAdded = 2;
210 }
211
212 armnn::IConnectableLayer& newReshape = AddReshapeLayer(
213 *data.m_Network,
214 operandInputHandle,
215 reshapeInfo
216 );
217
218 // Point to the reshape operation rather then the input operation
219 operandShape = reshapeInfo.GetShape();
220 operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
221 }
222
223 inputShapes.emplace_back(operandShape);
224 inputHandles.emplace_back(operandInputHandle);
225
226 if (!inputHandles.back().IsValid())
227 {
228 return Fail("%s: Operation has invalid inputs", __func__);
229 }
230 }
231
232 BOOST_ASSERT(inputShapes.size() == inputHandles.size());
233
234 if (inputsHaveBeenReshaped)
235 {
236 // Adjust the concatenation dimension by the amount of dimensions added (if any)
237 concatDim += tensorDimensionsAdded;
238
239 // Add extra dimensions to the output shape to reflect the addition of the reshape layers
240 if (tensorDimensionsAdded == 1)
241 {
242 outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
243 }
244 else if (tensorDimensionsAdded == 2)
245 {
narpra01f176d5a2018-11-18 20:17:48 +0000246 outputShape = armnn::TensorShape({1, 1, outputShape[0]});
arovir01b0717b52018-09-05 17:03:25 +0100247 }
248 }
249
narpra01f176d5a2018-11-18 20:17:48 +0000250 // Check if permutations is required and get the pair of permutations required for the concatenation.
251 // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
arovir01b0717b52018-09-05 17:03:25 +0100252 std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
253 std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
254
narpra01f176d5a2018-11-18 20:17:48 +0000255 bool needPermute = CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);
arovir01b0717b52018-09-05 17:03:25 +0100256
narpra01f176d5a2018-11-18 20:17:48 +0000257 if (needPermute)
258 {
259 outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
260 }
261
arovir01b0717b52018-09-05 17:03:25 +0100262 outputInfo.SetShape(outputShape);
263
264 // this is no-op for identity swizzles, otherwise it replaces both
265 // the handles and shapes with the swizzled layer output handles and shapes
266 SwizzleInputs(*data.m_Network, inputHandles, inputShapes, permutationPair.first);
267
268 // Create an armnn merger layer descriptor - this will also perform validation on the input shapes
269 armnn::OriginsDescriptor mergerDescriptor;
narpra01f176d5a2018-11-18 20:17:48 +0000270
arovir01b0717b52018-09-05 17:03:25 +0100271 try
272 {
narpra01f176d5a2018-11-18 20:17:48 +0000273 // The merger descriptor is always created across the only supported concat dimension
274 // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
arovir01b0717b52018-09-05 17:03:25 +0100275 mergerDescriptor =
276 armnn::CreateMergerDescriptorForConcatenation(
277 inputShapes.begin(), inputShapes.end(), concatDim);
278 }
279 catch (const armnn::Exception& error)
280 {
281 return Fail("%s: Error preparing merger descriptor. %s", __func__, error.what());
282 }
283
284 // Validate the output shape is correct given the input shapes based on the
narpra01f176d5a2018-11-18 20:17:48 +0000285 // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
arovir01b0717b52018-09-05 17:03:25 +0100286 if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
287 {
288 return Fail("%s: Error validating the output shape for concat", __func__);
289 }
290
291 std::vector<const armnn::TensorInfo*> inputTensorInfos;
292 std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
293 [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100294 if (!IsLayerSupportedForAnyBackend(__func__,
Jim Flynn073d7a32019-05-13 13:52:56 +0100295 armnn::IsConcatSupported,
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100296 data.m_Backends,
297 inputTensorInfos,
298 outputInfo,
299 mergerDescriptor))
arovir01b0717b52018-09-05 17:03:25 +0100300 {
301 return false;
302 }
303
Jim Flynn073d7a32019-05-13 13:52:56 +0100304 armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(mergerDescriptor);
arovir01b0717b52018-09-05 17:03:25 +0100305 assert(layer != nullptr);
306 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
307
308 // Connect inputs to the layer
309 const int numInputSlots = layer->GetNumInputSlots();
310 assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
311 for (int i = 0; i < numInputSlots; ++i)
312 {
313 // connect the input directly to the merge (concat) layer
314 inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
315 }
316
narpra01f176d5a2018-11-18 20:17:48 +0000317 if (needPermute)
318 {
319 // Add permutation layer and connect the output to it, the permutation becomes the output layer
320 armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
321 layer->GetOutputSlot(0),
322 permutationPair.second);
323 layer = &deswizzleLayer;
324 }
arovir01b0717b52018-09-05 17:03:25 +0100325
326 if (inputsHaveBeenReshaped)
327 {
328 armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
329
330 // Undo the reshape knowing the amount of dimensions added
331 if (tensorDimensionsAdded == 1)
332 {
333 afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
334 afterConcatInfo.GetShape()[2] }));
335 }
336 else if (tensorDimensionsAdded == 2)
337 {
narpra01f176d5a2018-11-18 20:17:48 +0000338 afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
arovir01b0717b52018-09-05 17:03:25 +0100339 }
340
341 layer = &AddReshapeLayer(
342 *data.m_Network,
343 layer->GetOutputSlot(0),
344 afterConcatInfo
345 );
346 }
347
348 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
349}
350
351bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
352{
353 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
354 if (!input.IsValid())
355 {
356 return Fail("%s: Operation has invalid inputs", __func__);
357 }
358
359 const Operand* output = GetOutputOperand(operation, 0, model);
360 if (!output)
361 {
362 return Fail("%s: Could not read output 0", __func__);
363 }
364
365 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
366 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
367
arovir01b0717b52018-09-05 17:03:25 +0100368 // ArmNN does not currently support non-fixed weights or bias
narpra01fb60a562018-10-30 15:46:01 +0000369 const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100370 const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data);
371
372 if (!weightsPin.IsValid() || !biasPin.IsValid())
373 {
374 return Fail("%s: Operation has invalid inputs", __func__);
375 }
376
377 armnn::ConstTensor weights = weightsPin.GetConstTensor();
378 armnn::ConstTensor bias = biasPin.GetConstTensor();
narpra01fb60a562018-10-30 15:46:01 +0000379 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
arovir01b0717b52018-09-05 17:03:25 +0100380
381 armnn::Convolution2dDescriptor desc;
narpra01fb60a562018-10-30 15:46:01 +0000382 desc.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +0100383 ActivationFn activation;
384
385 if (operation.inputs.size() == 10)
386 {
387 if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
388 !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
389 !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
390 !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
391 !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
392 !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
393 !GetInputActivationFunction(operation, 9, activation, model, data))
394 {
395 return Fail("%s: Operation has invalid inputs", __func__);
396 }
397 }
398 else if (operation.inputs.size() == 7)
399 {
400 android::nn::PaddingScheme paddingScheme;
401 if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data) ||
402 !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
403 !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
404 !GetInputActivationFunction(operation, 6, activation, model, data))
405 {
406 return Fail("%s: Operation has invalid inputs", __func__);
407 }
408
narpra01fb60a562018-10-30 15:46:01 +0000409 const uint32_t kernelX = weights.GetShape()[2];
410 const uint32_t kernelY = weights.GetShape()[1];
411 const uint32_t inputX = inputInfo.GetShape()[2];
412 const uint32_t inputY = inputInfo.GetShape()[1];
arovir01b0717b52018-09-05 17:03:25 +0100413
414 CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
415 CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
416 }
417 else
418 {
419 return Fail("%s: Unsupported number of operation inputs", __func__);
420 }
421
422 desc.m_BiasEnabled = true;
arovir015602b192018-10-04 16:15:02 +0100423 armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100424
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100425 if (!IsLayerSupportedForAnyBackend(__func__,
426 armnn::IsConvolution2dSupported,
427 data.m_Backends,
428 inputInfo,
429 outputInfo,
430 desc,
431 weights.GetInfo(),
432 biases))
arovir01b0717b52018-09-05 17:03:25 +0100433 {
434 return false;
435 }
436
Matteo Martincighba01f372019-05-14 13:28:21 +0100437 armnn::IConnectableLayer* startLayer =
438 data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
arovir01b0717b52018-09-05 17:03:25 +0100439
narpra01fb60a562018-10-30 15:46:01 +0000440 if (!startLayer)
arovir01b0717b52018-09-05 17:03:25 +0100441 {
narpra01fb60a562018-10-30 15:46:01 +0000442 return Fail("%s: AddConvolution2dLayer failed", __func__);
arovir01b0717b52018-09-05 17:03:25 +0100443 }
narpra01fb60a562018-10-30 15:46:01 +0000444
445 armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
446
447 if (!endLayer)
arovir01b0717b52018-09-05 17:03:25 +0100448 {
449 return Fail("%s: ProcessActivation failed", __func__);
450 }
narpra01fb60a562018-10-30 15:46:01 +0000451
452 input.Connect(startLayer->GetInputSlot(0));
453
454 return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100455}
456
457bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
458{
459 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
460 if (!input.IsValid())
461 {
462 return Fail("%s: Operation has invalid inputs", __func__);
463 }
464
465 const Operand* output = GetOutputOperand(operation, 0, model);
466 if (!output)
467 {
468 return Fail("%s: Could not read output 0", __func__);
469 }
470
471 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
472 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
473
arovir01b0717b52018-09-05 17:03:25 +0100474 // ArmNN does not currently support non-fixed weights or bias
475
476 // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
arovir01b0717b52018-09-05 17:03:25 +0100477 const Operand* weightsOperand = GetInputOperand(operation, 1, model);
478
479 if (weightsOperand == nullptr)
480 {
481 return Fail("%s: Operand is invalid", __func__);
482 }
483
484 // Reinterpret weight data as [ H, W, I, M ]
Matteo Martincigh361ccc82018-12-18 09:32:02 +0000485 armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
486 weightsOperand->dimensions[2],
arovir01b0717b52018-09-05 17:03:25 +0100487 inputInfo.GetShape()[3],
488 weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });
489
Matteo Martincigh361ccc82018-12-18 09:32:02 +0000490 // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
491 const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };
James Conroy6bf1cf02018-10-12 14:13:18 +0100492
Matteo Martincigh361ccc82018-12-18 09:32:02 +0000493 const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1, model, data,
494 HWIMToMIHW, &weightsShape);
arovir01b0717b52018-09-05 17:03:25 +0100495
496 // Bias is a 1D tensor
Matteo Martincigh361ccc82018-12-18 09:32:02 +0000497 const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100498
499 if (!weightsPin.IsValid() || !biasPin.IsValid())
500 {
501 return Fail("%s: Operation has invalid inputs", __func__);
502 }
503
504 armnn::ConstTensor weights = weightsPin.GetConstTensor();
505 armnn::ConstTensor bias = biasPin.GetConstTensor();
James Conroy6bf1cf02018-10-12 14:13:18 +0100506 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
arovir01b0717b52018-09-05 17:03:25 +0100507
508 armnn::DepthwiseConvolution2dDescriptor desc;
James Conroy6bf1cf02018-10-12 14:13:18 +0100509 desc.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +0100510 ActivationFn activation;
511
512 if (operation.inputs.size() == 11)
513 {
James Conroy6bf1cf02018-10-12 14:13:18 +0100514 if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
515 !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
516 !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
517 !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
518 !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
519 !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
arovir01b0717b52018-09-05 17:03:25 +0100520 !GetInputActivationFunction(operation, 10, activation, model, data))
521 {
522 return Fail("%s: Operation has invalid inputs", __func__);
523 }
524 }
525 else if (operation.inputs.size() == 8)
526 {
527 android::nn::PaddingScheme paddingScheme;
James Conroy6bf1cf02018-10-12 14:13:18 +0100528 if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data) ||
529 !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
530 !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
arovir01b0717b52018-09-05 17:03:25 +0100531 !GetInputActivationFunction(operation, 7, activation, model, data))
532 {
533 return Fail("%s: Operation has invalid inputs", __func__);
534 }
535
Matteo Martincigh361ccc82018-12-18 09:32:02 +0000536 const uint32_t kernelX = weights.GetShape()[3];
537 const uint32_t kernelY = weights.GetShape()[2];
James Conroy6bf1cf02018-10-12 14:13:18 +0100538 const uint32_t inputX = inputInfo.GetShape()[2];
539 const uint32_t inputY = inputInfo.GetShape()[1];
arovir01b0717b52018-09-05 17:03:25 +0100540
541 CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
542 CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
543 }
544 else
545 {
546 return Fail("%s: Unsupported number of operation inputs", __func__);
547 }
548
549 desc.m_BiasEnabled = true;
arovir015602b192018-10-04 16:15:02 +0100550 armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100551
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100552 if (!IsLayerSupportedForAnyBackend(__func__,
553 armnn::IsDepthwiseConvolutionSupported,
554 data.m_Backends,
555 inputInfo,
556 outputInfo,
557 desc,
558 weights.GetInfo(),
559 biases))
arovir01b0717b52018-09-05 17:03:25 +0100560 {
561 return false;
562 }
563
Matteo Martincighba01f372019-05-14 13:28:21 +0100564 armnn::IConnectableLayer* startLayer =
565 data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
James Conroy6bf1cf02018-10-12 14:13:18 +0100566 if (!startLayer)
arovir01b0717b52018-09-05 17:03:25 +0100567 {
James Conroy6bf1cf02018-10-12 14:13:18 +0100568 return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
arovir01b0717b52018-09-05 17:03:25 +0100569 }
James Conroy6bf1cf02018-10-12 14:13:18 +0100570
571 armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
572 if (!endLayer)
arovir01b0717b52018-09-05 17:03:25 +0100573 {
574 return Fail("%s: ProcessActivation failed", __func__);
575 }
James Conroy6bf1cf02018-10-12 14:13:18 +0100576
577 input.Connect(startLayer->GetInputSlot(0));
578
579 return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100580}
581
582bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
583{
584 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
585 if (!input.IsValid())
586 {
587 return Fail("%s: Operation has invalid inputs", __func__);
588 }
589
590 const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
591 if (!outputOperand)
592 {
593 return Fail("%s: Operation has invalid outputs", __func__);
594 }
595
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100596 if (!IsLayerSupportedForAnyBackend(__func__,
597 armnn::IsFloorSupported,
598 data.m_Backends,
599 input.GetTensorInfo(),
600 GetTensorInfoForOperand(*outputOperand)))
arovir01b0717b52018-09-05 17:03:25 +0100601 {
602 return false;
603 }
604
605 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
606 assert(layer != nullptr);
607 input.Connect(layer->GetInputSlot(0));
608
609 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
610}
611
612bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
613{
614 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
615 if (!input.IsValid())
616 {
617 return Fail("%s: Operation has invalid inputs", __func__);
618 }
619
620 const Operand* output = GetOutputOperand(operation, 0, model);
621 if (!output)
622 {
623 return Fail("%s: Could not read output 0", __func__);
624 }
625
626 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
627 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
628
629 // ArmNN does not currently support non-fixed weights or bias
630 ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1, model, data); // 2D
631 ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data); // 1D
632
633 if (!weightsPin.IsValid() || !biasPin.IsValid())
634 {
635 return Fail("%s: Operation has invalid inputs", __func__);
636 }
637
638 armnn::ConstTensor weights = weightsPin.GetConstTensor();
639 armnn::ConstTensor bias = biasPin.GetConstTensor();
arovir01b0717b52018-09-05 17:03:25 +0100640 armnn::TensorInfo reshapedInfo = inputInfo;
Matthew Benthamf61c2702019-04-23 16:43:27 +0100641
642 try
arovir01b0717b52018-09-05 17:03:25 +0100643 {
Matthew Benthamf61c2702019-04-23 16:43:27 +0100644 reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
645 } catch (const std::exception &e) {
646 return Fail("%s: %s", __func__, e.what());
arovir01b0717b52018-09-05 17:03:25 +0100647 }
648
649 // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
650 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);
651
652 ActivationFn activationFunction;
653 if (!GetInputActivationFunction(operation, 3, activationFunction, model, data))
654 {
655 return Fail("%s: Operation has invalid inputs", __func__);
656 }
657
658 armnn::FullyConnectedDescriptor desc;
659 desc.m_TransposeWeightMatrix = true;
660 desc.m_BiasEnabled = true;
661
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100662 if (!IsLayerSupportedForAnyBackend(__func__,
663 armnn::IsFullyConnectedSupported,
664 data.m_Backends,
665 reshapedInfo,
666 outputInfo,
667 weights.GetInfo(),
668 bias.GetInfo(),
669 desc))
arovir01b0717b52018-09-05 17:03:25 +0100670 {
671 return false;
672 }
673
Matteo Martincighba01f372019-05-14 13:28:21 +0100674 armnn::IConnectableLayer* startLayer =
675 data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
arovir01b0717b52018-09-05 17:03:25 +0100676 armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
677
678 if (endLayer != nullptr)
679 {
680 if (inputInfo.GetNumDimensions() > 2U)
681 {
682 armnn::ReshapeDescriptor reshapeDescriptor;
683 reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
684
685 armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
686 assert(reshapeLayer != nullptr);
687 input.Connect(reshapeLayer->GetInputSlot(0));
688 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
689 reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
690 }
691 else
692 {
693 input.Connect(startLayer->GetInputSlot(0));
694 }
695
696 return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
697 }
698 else
699 {
700 return Fail("%s: ProcessActivation failed", __func__);
701 }
702}
703
704bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
705 const Model& model,
706 ConversionData& data)
707{
708 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
709 if (!input.IsValid())
710 {
711 return Fail("%s: Operation has invalid inputs", __func__);
712 }
713
714 const Operand* output = GetOutputOperand(operation, 0, model);
715 if (!output)
716 {
717 return Fail("%s: Could not read output 0", __func__);
718 }
719
narpra012fb804a2018-10-22 14:52:32 +0100720 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
arovir01b0717b52018-09-05 17:03:25 +0100721 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
722
arovir01b0717b52018-09-05 17:03:25 +0100723 armnn::NormalizationDescriptor descriptor;
724
narpra012fb804a2018-10-22 14:52:32 +0100725 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +0100726 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
narpra012fb804a2018-10-22 14:52:32 +0100727 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
arovir01b0717b52018-09-05 17:03:25 +0100728
729 if (!input.IsValid() ||
730 !GetInputScalar(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
731 !GetInputFloat32(operation, 2, descriptor.m_K, model, data) ||
732 !GetInputFloat32(operation, 3, descriptor.m_Alpha, model, data) ||
733 !GetInputFloat32(operation, 4, descriptor.m_Beta, model, data))
734 {
735 return Fail("%s: Operation has invalid inputs", __func__);
736 }
737
738 // ArmNN expects normSize to be the full size of the normalization
739 // window rather than the radius as in AndroidNN.
740 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
741
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100742 if (!IsLayerSupportedForAnyBackend(__func__,
743 armnn::IsNormalizationSupported,
744 data.m_Backends,
745 inputInfo,
746 outputInfo,
747 descriptor))
arovir01b0717b52018-09-05 17:03:25 +0100748 {
749 return false;
750 }
751
752
753 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
754 assert(layer != nullptr);
narpra012fb804a2018-10-22 14:52:32 +0100755 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +0100756
narpra012fb804a2018-10-22 14:52:32 +0100757 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100758}
759
760bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
761{
762 armnn::ActivationDescriptor desc;
763 desc.m_Function = armnn::ActivationFunction::Sigmoid;
764
765 return ConvertToActivation(operation, __func__, desc, model, data);
766}
767
// Converts an ANEURALNETWORKS_LSTM operation (HAL 1.0 operand layout) into an ArmNN
// Lstm layer. Inputs 0-22 and outputs 0-3 follow the Android NN LSTM specification;
// which optional weight/bias inputs are present determines the CIFG, peephole and
// projection variants encoded in the armnn::LstmDescriptor.
bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
{
    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     "batch_size" corresponds to the batching dimension, and "input_size" is the size of the input.
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0: input", __func__);
    }
    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    LayerInputHandle outputStateIn = ConvertToLayerInputHandle(operation, 18, model, data);
    if (!outputStateIn.IsValid())
    {
        return Fail("%s: Could not read input 18: outputStateIn", __func__);
    }
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    LayerInputHandle cellStateIn = ConvertToLayerInputHandle(operation, 19, model, data);
    if (!cellStateIn.IsValid())
    {
        return Fail("%s: Could not read input 19: cellStateIn", __func__);
    }

    // Get the mandatory input tensors:
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToForgetWeightsPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data);
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
    const ConstTensorPin inputToCellWeightsPin = ConvertOperationInputToConstTensorPin(operation, 3, model, data);
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToOutputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 4, model, data);
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin(operation, 6, model, data);
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToCellWeightsPin = ConvertOperationInputToConstTensorPin(operation, 7, model, data);
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin(operation, 8, model, data);
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin forgetGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 13, model, data);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellBiasPin = ConvertOperationInputToConstTensorPin(operation, 14, model, data);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin outputGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 15, model, data);

    // The mandatory tensors must all be present for the operation to be convertible.
    if (!inputToForgetWeightsPin.IsValid() ||
        !inputToCellWeightsPin.IsValid() ||
        !inputToOutputWeightsPin.IsValid() ||
        !recurrentToForgetWeightsPin.IsValid() ||
        !recurrentToCellWeightsPin.IsValid() ||
        !recurrentToOutputWeightsPin.IsValid() ||
        !forgetGateBiasPin.IsValid() ||
        !cellBiasPin.IsValid() ||
        !outputGateBiasPin.IsValid())
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the optional input tensors:
    // The trailing 'true' argument marks each pin as optional, so a missing operand
    // produces an invalid-but-optional pin instead of an outright failure (checked below).
    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where "num_units" corresponds to the number of cell units.
    const ConstTensorPin inputToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 1, model, data,
                                                                                        g_DontPermute, nullptr, true);
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where "output_size" corresponds to either the number of cell units (i.e.,
    //     "num_units"), or the second dimension of the "projection_weights", if defined.
    const ConstTensorPin recurrentToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 5, model, data,
                                                                                            g_DontPermute, nullptr, true);
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 9, model, data,
                                                                                       g_DontPermute, nullptr, true);
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToForgetWeightsPin = ConvertOperationInputToConstTensorPin(operation, 10, model, data,
                                                                                        g_DontPermute, nullptr, true);
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToOutputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 11, model, data,
                                                                                        g_DontPermute, nullptr, true);
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin inputGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 12, model, data,
                                                                                  g_DontPermute, nullptr, true);
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    const ConstTensorPin projectionWeightsPin = ConvertOperationInputToConstTensorPin(operation, 16, model, data,
                                                                                      g_DontPermute, nullptr, true);
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    const ConstTensorPin projectionBiasPin = ConvertOperationInputToConstTensorPin(operation, 17, model, data,
                                                                                   g_DontPermute, nullptr, true);

    // An optional pin is acceptable either as a valid tensor or as an absent-but-optional
    // placeholder; any other state means the operand was present but unreadable.
    if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
        (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
        (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
        (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
        (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
        (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
        (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
        (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the mandatory input scalars (actually 1-D tensors of size 1):
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    ActivationFn activation;
    float cellClip;
    float projClip;
    if (!GetInputActivationFunctionFromTensor(operation, 20, activation, model, data) ||
        !GetInputScalar(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
        !GetInputScalar(operation, 22, OperandType::FLOAT32, projClip, model, data))
    {
        return Fail("%s: Operation has invalid scalar inputs", __func__);
    }

    // Outputs:
    // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
    //     CIFG, or [batch_size, num_units * 3] without CIFG.
    const Operand* scratchBuffer = GetOutputOperand(operation, 0, model);
    if (!scratchBuffer)
    {
        return Fail("%s: Could not read output 0: scratchBuffer", __func__);
    }
    // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    const Operand* outputStateOut = GetOutputOperand(operation, 1, model);
    if (!outputStateOut)
    {
        return Fail("%s: Could not read output 1: outputStateOut", __func__);
    }
    // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    const Operand* cellStateOut = GetOutputOperand(operation, 2, model);
    if (!cellStateOut)
    {
        return Fail("%s: Could not read output 2: cellStateOut", __func__);
    }
    // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //     effectively the same as the current "output state (out)" value.
    const Operand* output = GetOutputOperand(operation, 3, model);
    if (!output)
    {
        return Fail("%s: Could not read output 3: output", __func__);
    }

    // set the params structure for the AddLstmLayer call
    // Optional pins yield nullptr here, which the descriptor logic below interprets
    // as "feature absent".
    armnn::LstmInputParams params;
    params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
    params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
    params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
    params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
    params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
    params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
    params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
    params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
    params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
    params.m_CellBias = cellBiasPin.GetConstTensorPtr();
    params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
    params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
    params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();

    // set the layer descriptor
    armnn::LstmDescriptor desc;
    desc.m_ActivationFunc = activation;
    desc.m_ClippingThresCell = cellClip;
    desc.m_ClippingThresProj = projClip;
    // CIFG (coupled input-forget gate) is enabled when any of the input-gate tensors
    // is missing; the validation below then requires that ALL of them are missing.
    desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
                          params.m_RecurrentToInputWeights == nullptr ||
                          params.m_InputGateBias == nullptr);
    desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
                              params.m_CellToOutputWeights != nullptr);
    desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);

    // validate the optional input groups
    if (desc.m_CifgEnabled &&
        (params.m_InputToInputWeights != nullptr ||
         params.m_RecurrentToInputWeights != nullptr ||
         params.m_InputGateBias != nullptr))
    {
        return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
                    " and input gate bias must be provided", __func__);
    }

    if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
    {
        return Fail("%s: projection bias should not be provided without projection weights", __func__);
    }

    if (desc.m_PeepholeEnabled &&
        (params.m_CellToForgetWeights == nullptr ||
         params.m_CellToOutputWeights == nullptr ||
         (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
    {
        return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
                    " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
    }

    // Check if the layer is supported
    // Inputs
    const armnn::TensorInfo& inputInfo         = input.GetTensorInfo();
    const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
    const armnn::TensorInfo& cellStateInInfo   = cellStateIn.GetTensorInfo();

    // Outputs
    const armnn::TensorInfo& scratchBufferInfo  = GetTensorInfoForOperand(*scratchBuffer);
    const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
    const armnn::TensorInfo& cellStateOutInfo   = GetTensorInfoForOperand(*cellStateOut);
    const armnn::TensorInfo& outputInfo         = GetTensorInfoForOperand(*output);

    // Basic parameters
    const armnn::TensorInfo& inputToForgetWeights = params.m_InputToForgetWeights->GetInfo();
    const armnn::TensorInfo& inputToCellWeights = params.m_InputToCellWeights->GetInfo();
    const armnn::TensorInfo& inputToOutputWeights = params.m_InputToOutputWeights->GetInfo();
    const armnn::TensorInfo& recurrentToForgetWeights = params.m_RecurrentToForgetWeights->GetInfo();
    const armnn::TensorInfo& recurrentToCellWeights = params.m_RecurrentToCellWeights->GetInfo();
    const armnn::TensorInfo& recurrentToOutputWeights = params.m_RecurrentToOutputWeights->GetInfo();
    const armnn::TensorInfo& forgetGateBias = params.m_ForgetGateBias->GetInfo();
    const armnn::TensorInfo& cellBias = params.m_CellBias->GetInfo();
    const armnn::TensorInfo& outputGateBias = params.m_OutputGateBias->GetInfo();

    // Optional parameters: left as nullptr unless the corresponding feature
    // (CIFG-disabled / projection / peephole) was detected above.
    const armnn::TensorInfo* inputToInputWeights = nullptr;
    const armnn::TensorInfo* recurrentToInputWeights = nullptr;
    const armnn::TensorInfo* cellToInputWeights = nullptr;
    const armnn::TensorInfo* inputGateBias = nullptr;
    const armnn::TensorInfo* projectionWeights = nullptr;
    const armnn::TensorInfo* projectionBias = nullptr;
    const armnn::TensorInfo* cellToForgetWeights = nullptr;
    const armnn::TensorInfo* cellToOutputWeights = nullptr;

    if(!desc.m_CifgEnabled)
    {
        inputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
        recurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
        if (params.m_CellToInputWeights != nullptr)
        {
            cellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
        }
        inputGateBias = &(params.m_InputGateBias->GetInfo());
    }

    if(desc.m_ProjectionEnabled)
    {
        projectionWeights = &(params.m_ProjectionWeights->GetInfo());
        if (params.m_ProjectionBias != nullptr)
        {
            projectionBias = &(params.m_ProjectionBias->GetInfo());
        }
    }

    if(desc.m_PeepholeEnabled)
    {
        cellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
        cellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
    }

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsLstmSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputStateInInfo,
                                       cellStateInInfo,
                                       scratchBufferInfo,
                                       outputStateOutInfo,
                                       cellStateOutInfo,
                                       outputInfo,
                                       desc,
                                       inputToForgetWeights,
                                       inputToCellWeights,
                                       inputToOutputWeights,
                                       recurrentToForgetWeights,
                                       recurrentToCellWeights,
                                       recurrentToOutputWeights,
                                       forgetGateBias,
                                       cellBias,
                                       outputGateBias,
                                       inputToInputWeights,
                                       recurrentToInputWeights,
                                       cellToInputWeights,
                                       inputGateBias,
                                       projectionWeights,
                                       projectionBias,
                                       cellToForgetWeights,
                                       cellToOutputWeights))
    {
        return false;
    }

    // Add the layer
    armnn::IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");

    input.Connect(layer->GetInputSlot(0));
    outputStateIn.Connect(layer->GetInputSlot(1));
    cellStateIn.Connect(layer->GetInputSlot(2));

    // Register all four LSTM outputs (scratch buffer, output state, cell state, output).
    return (SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
            SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data) &&
            SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data) &&
            SetupAndTrackLayerOutputSlot(operation, 3, *layer, 3, model, data));
}
1077
1078bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
1079{
1080 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1081 if (!input.IsValid())
1082 {
1083 return Fail("%s: Operation has invalid inputs", __func__);
1084 }
1085
1086 const Operand* output = GetOutputOperand(operation, 0, model);
1087 if (!output)
1088 {
1089 return Fail("%s: Could not read output 0", __func__);
1090 }
1091
1092 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1093 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1094
Matteo Martincigh58f71092018-09-25 15:58:52 +01001095 armnn::L2NormalizationDescriptor desc;
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +01001096 desc.m_DataLayout = armnn::DataLayout::NHWC;
Matteo Martincigh58f71092018-09-25 15:58:52 +01001097
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001098 if (!IsLayerSupportedForAnyBackend(__func__,
1099 armnn::IsL2NormalizationSupported,
1100 data.m_Backends,
1101 inputInfo,
1102 outputInfo,
1103 desc))
arovir01b0717b52018-09-05 17:03:25 +01001104 {
1105 return false;
1106 }
1107
Matteo Martincigh58f71092018-09-25 15:58:52 +01001108 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
arovir01b0717b52018-09-05 17:03:25 +01001109 assert(layer != nullptr);
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +01001110 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +01001111
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +01001112 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001113}
1114
1115bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
1116{
1117 return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
1118}
1119
1120bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
1121{
1122 return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
1123}
1124
1125bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
1126{
1127 LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
1128 LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
1129
1130 if (!input0.IsValid() || !input1.IsValid())
1131 {
1132 return Fail("%s: Operation has invalid inputs", __func__);
1133 }
1134
1135 // The FuseActivation parameter is always the input index 2
1136 // and it should be optional
1137 ActivationFn activationFunction;
1138 if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
1139 {
1140 return Fail("%s: Operation has invalid inputs", __func__);
1141 }
1142
1143 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
1144
1145 if (outputOperand == nullptr)
1146 {
1147 return false;
1148 }
1149
1150 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
1151
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001152 if (!IsLayerSupportedForAnyBackend(__func__,
1153 armnn::IsMultiplicationSupported,
1154 data.m_Backends,
1155 input0.GetTensorInfo(),
1156 input1.GetTensorInfo(),
1157 outInfo))
arovir01b0717b52018-09-05 17:03:25 +01001158 {
1159 return false;
1160 }
1161
1162 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
1163 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
1164
1165 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
1166 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
1167
1168 if (endLayer != nullptr)
1169 {
1170 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
1171 return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
1172 }
1173 else
1174 {
1175 return Fail("%s: ProcessActivation failed", __func__);
1176 }
1177}
1178
1179bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
1180{
1181 armnn::ActivationDescriptor desc;
1182 desc.m_Function = armnn::ActivationFunction::ReLu;
1183
1184 return ConvertToActivation(operation, __func__, desc, model, data);
1185}
1186
1187bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
1188{
1189 armnn::ActivationDescriptor desc;
1190 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1191 desc.m_A = 1.0f;
1192 desc.m_B = -1.0f;
1193
1194 return ConvertToActivation(operation, __func__, desc, model, data);
1195}
1196
1197bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
1198{
1199 armnn::ActivationDescriptor desc;
1200 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1201 desc.m_A = 6.0f;
1202
1203 return ConvertToActivation(operation, __func__, desc, model, data);
1204}
1205
1206bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
1207{
1208 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1209 if (!input.IsValid())
1210 {
1211 return Fail("%s: Operation has invalid inputs", __func__);
1212 }
1213
1214 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
1215 if (!outputOperand)
1216 {
1217 return Fail("%s: Operation has no outputs", __func__);
1218 }
1219
1220 const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
1221
1222 armnn::SoftmaxDescriptor desc;
1223 if (!GetInputFloat32(operation, 1, desc.m_Beta, model, data))
1224 {
1225 return Fail("%s: Operation has invalid inputs", __func__);
1226 }
1227
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001228 if (!IsLayerSupportedForAnyBackend(__func__,
1229 armnn::IsSoftmaxSupported,
1230 data.m_Backends,
1231 input.GetTensorInfo(),
1232 outInfo,
1233 desc))
arovir01b0717b52018-09-05 17:03:25 +01001234 {
1235 return false;
1236 }
1237
1238 armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
1239 assert(layer != nullptr);
1240 input.Connect(layer->GetInputSlot(0));
1241
1242 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
1243}
1244
1245bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
1246{
1247 armnn::ActivationDescriptor desc;
1248 desc.m_Function = armnn::ActivationFunction::TanH;
1249 desc.m_A = 1.0f; // android nn does not support tanH parameters
1250 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1251
1252 return ConvertToActivation(operation, __func__, desc, model, data);
1253}
1254
1255bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
1256{
1257 const Operand* inputOperand = GetInputOperand(operation, 0, model);
1258 const Operand* requestedShapeOperand = GetInputOperand(operation, 1, model);
1259 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
1260
1261 if (inputOperand == nullptr
1262 || requestedShapeOperand == nullptr
1263 || outputOperand == nullptr)
1264 {
1265 return Fail("%s: Operation has invalid inputs", __func__);
1266 }
1267
1268
1269 if (requestedShapeOperand->dimensions.size() != 1)
1270 {
1271 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
1272 __func__, requestedShapeOperand->dimensions.size());
1273 }
1274
1275 std::vector<int32_t> targetDimensions;
1276 if (!GetTensorInt32Values(*requestedShapeOperand, targetDimensions, model, data))
1277 {
1278 return Fail("%s: Could not read values of input 1", __func__);
1279 }
1280
1281 const Shape inputOperandShape = GetOperandShape(*inputOperand);
1282
1283 Shape requestedShape;
1284 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
1285 // function that resolves these values into a fully specified tensor shape.
1286 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
1287 {
1288 return Fail("%s: Failed to resolve the requested shape", __func__);
1289 }
1290
1291 const Shape outputOperandShape = GetOperandShape(*outputOperand);
1292 if (!SameShape(requestedShape, outputOperandShape))
1293 {
1294 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
1295 }
1296
1297 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1298 if (!input.IsValid())
1299 {
1300 return Fail("%s: Could not read input 0", __func__);
1301 }
1302
arovir01b0717b52018-09-05 17:03:25 +01001303 armnn::ReshapeDescriptor reshapeDescriptor;
1304 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
1305 requestedShape.dimensions.data());
1306
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001307 if (!IsLayerSupportedForAnyBackend(__func__,
1308 armnn::IsReshapeSupported,
1309 data.m_Backends,
1310 input.GetTensorInfo(),
1311 reshapeDescriptor))
Matteo Martincigh265d1ad2019-01-08 18:14:53 +00001312 {
1313 return false;
1314 }
1315
arovir01b0717b52018-09-05 17:03:25 +01001316 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
1317 assert(layer != nullptr);
1318 input.Connect(layer->GetInputSlot(0));
1319
1320 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
1321}
1322
1323bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data)
1324{
1325 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1326 if (!input.IsValid())
1327 {
1328 return Fail("%s: Could not read input 0", __func__);
1329 }
1330
1331 const Operand* output = GetOutputOperand(operation, 0, model);
1332 if (!output)
1333 {
1334 return Fail("%s: Could not read output 0", __func__);
1335 }
1336
1337 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1338 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1339
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001340 armnn::ResizeBilinearDescriptor desc;
1341 desc.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +01001342
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001343 if (!IsLayerSupportedForAnyBackend(__func__,
1344 armnn::IsResizeBilinearSupported,
1345 data.m_Backends,
1346 inputInfo,
1347 outputInfo))
arovir01b0717b52018-09-05 17:03:25 +01001348 {
1349 return false;
1350 }
1351
arovir01b0717b52018-09-05 17:03:25 +01001352
1353 if ( !GetInputScalar(operation, 1, OperandType::INT32, desc.m_TargetHeight, model, data)
1354 || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_TargetWidth, model, data))
1355 {
1356 return Fail("%s: Operation has invalid inputs", __func__);
1357 }
1358
1359 armnn::IConnectableLayer* layer = data.m_Network->AddResizeBilinearLayer(desc);
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001360
arovir01b0717b52018-09-05 17:03:25 +01001361 assert(layer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +01001362
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001363 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1364 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +01001365
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001366 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001367
1368}
1369
1370} // namespace hal_1_0
Matteo Martincigh58f71092018-09-25 15:58:52 +01001371} // namespace armnn_driver