blob: fa29f86075c2c35886ad787801f42da3420d5427 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "HalPolicy.hpp"
7
Matthew Benthamf61c2702019-04-23 16:43:27 +01008#include <armnn/Optional.hpp>
9
10#include "FullyConnected.hpp"
arovir015602b192018-10-04 16:15:02 +010011
arovir01b0717b52018-09-05 17:03:25 +010012namespace armnn_driver
13{
14namespace hal_1_0
15{
16
// Entry point for converting a single HAL 1.0 operation into ArmNN layers.
// Dispatches to the per-operation converter; returns false (via Fail) for
// operation types this driver does not support.
bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
{
    switch (operation.type)
    {
        case V1_0::OperationType::ADD:
            return ConvertAdd(operation, model, data);
        case V1_0::OperationType::AVERAGE_POOL_2D:
            return ConvertAveragePool2d(operation, model, data);
        case V1_0::OperationType::CONCATENATION:
            return ConvertConcatenation(operation, model, data);
        case V1_0::OperationType::CONV_2D:
            return ConvertConv2d(operation, model, data);
        case V1_0::OperationType::DEPTHWISE_CONV_2D:
            return ConvertDepthwiseConv2d(operation, model, data);
        case V1_0::OperationType::FLOOR:
            return ConvertFloor(operation, model, data);
        case V1_0::OperationType::FULLY_CONNECTED:
            return ConvertFullyConnected(operation, model, data);
        case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
            return ConvertLocalResponseNormalization(operation, model, data);
        case V1_0::OperationType::LOGISTIC:
            return ConvertLogistic(operation, model, data);
        case V1_0::OperationType::LSTM:
            return ConvertLstm(operation, model, data);
        case V1_0::OperationType::L2_NORMALIZATION:
            return ConvertL2Normalization(operation, model, data);
        case V1_0::OperationType::L2_POOL_2D:
            return ConvertL2Pool2d(operation, model, data);
        case V1_0::OperationType::MAX_POOL_2D:
            return ConvertMaxPool2d(operation, model, data);
        case V1_0::OperationType::MUL:
            return ConvertMul(operation, model, data);
        case V1_0::OperationType::RELU:
            return ConvertReLu(operation, model, data);
        case V1_0::OperationType::RELU1:
            return ConvertReLu1(operation, model, data);
        case V1_0::OperationType::RELU6:
            return ConvertReLu6(operation, model, data);
        case V1_0::OperationType::SOFTMAX:
            return ConvertSoftmax(operation, model, data);
        case V1_0::OperationType::TANH:
            return ConvertTanH(operation, model, data);
        case V1_0::OperationType::RESHAPE:
            return ConvertReshape(operation, model, data);
        case V1_0::OperationType::RESIZE_BILINEAR:
            return ConvertResizeBilinear(operation, model, data);
        default:
            return Fail("%s: Operation type %s not supported in ArmnnDriver",
                        __func__, toString(operation.type).c_str());
    }
}
68
69bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
70{
71 LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
72 LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
73
74 if (!input0.IsValid() || !input1.IsValid())
75 {
76 return Fail("%s: Operation has invalid inputs", __func__);
77 }
78
79 // The FuseActivation parameter is always the input index 2
80 // and it should be optional
81 ActivationFn activationFunction;
82 if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
83 {
84 return Fail("%s: Operation has invalid inputs", __func__);
85 }
86
87 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
88 if (!outputOperand)
89 {
90 return false;
91 }
92
93 const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
94
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010095 if (!IsLayerSupportedForAnyBackend(__func__,
96 armnn::IsAdditionSupported,
97 data.m_Backends,
98 input0.GetTensorInfo(),
99 input1.GetTensorInfo(),
100 outInfo))
arovir01b0717b52018-09-05 17:03:25 +0100101 {
102 return false;
103 }
104
105 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
106 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
107
108 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
109 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
110
111 if (endLayer != nullptr)
112 {
113 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
114 return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
115 }
116 else
117 {
118 return Fail("%s: ProcessActivation failed", __func__);
119 }
120}
121
// AVERAGE_POOL_2D: delegate to the shared pooling converter, selecting the
// Average algorithm.
bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
}
126
// Converts a CONCATENATION operation into an ArmNN Concat layer.
// Handles: negative axes (TensorFlow-style), rank-1/2 inputs (expanded to
// rank 3 via reshape layers and undone afterwards), and axes that require
// a permutation before/after the concat.
bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
{
    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    // The concat axis is the trailing INT32 scalar input.
    int32_t concatDim;
    if (!GetInputScalar(operation, numInputTensors, OperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }


    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    // https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    // Rank-1/2 inputs are expanded to rank 3 below; remember how many
    // dimensions were prepended so the output shape can be adjusted to match.
    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const Operand* const operand = GetInputOperand(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle(operation, i, model, data);

        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
            );

            // Point to the reshape operation rather then the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    BOOST_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check if permutations is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
        std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute = CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // this is no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    SwizzleInputs(*data.m_Network, inputHandles, inputShapes, permutationPair.first);

    // Create an armnn merger layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor mergerDescriptor;

    try
    {
        // The merger descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        mergerDescriptor =
            armnn::CreateMergerDescriptorForConcatenation(
                inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (const armnn::Exception& error)
    {
        return Fail("%s: Error preparing merger descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    // Gather pointers to the (possibly swizzled) input infos for the backend query.
    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
        [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsConcatSupported,
                                       data.m_Backends,
                                       inputTensorInfos,
                                       outputInfo,
                                       mergerDescriptor))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(mergerDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
                                                                   layer->GetOutputSlot(0),
                                                                   permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        layer = &AddReshapeLayer(
                *data.m_Network,
                layer->GetOutputSlot(0),
                afterConcatInfo
        );
    }

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}
350
351bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
352{
353 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
354 if (!input.IsValid())
355 {
356 return Fail("%s: Operation has invalid inputs", __func__);
357 }
358
359 const Operand* output = GetOutputOperand(operation, 0, model);
360 if (!output)
361 {
362 return Fail("%s: Could not read output 0", __func__);
363 }
364
365 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
366 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
367
arovir01b0717b52018-09-05 17:03:25 +0100368 // ArmNN does not currently support non-fixed weights or bias
narpra01fb60a562018-10-30 15:46:01 +0000369 const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100370 const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data);
371
372 if (!weightsPin.IsValid() || !biasPin.IsValid())
373 {
374 return Fail("%s: Operation has invalid inputs", __func__);
375 }
376
377 armnn::ConstTensor weights = weightsPin.GetConstTensor();
378 armnn::ConstTensor bias = biasPin.GetConstTensor();
narpra01fb60a562018-10-30 15:46:01 +0000379 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
arovir01b0717b52018-09-05 17:03:25 +0100380
381 armnn::Convolution2dDescriptor desc;
narpra01fb60a562018-10-30 15:46:01 +0000382 desc.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +0100383 ActivationFn activation;
384
385 if (operation.inputs.size() == 10)
386 {
387 if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
388 !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
389 !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
390 !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
391 !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
392 !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
393 !GetInputActivationFunction(operation, 9, activation, model, data))
394 {
395 return Fail("%s: Operation has invalid inputs", __func__);
396 }
397 }
398 else if (operation.inputs.size() == 7)
399 {
400 android::nn::PaddingScheme paddingScheme;
401 if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data) ||
402 !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
403 !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
404 !GetInputActivationFunction(operation, 6, activation, model, data))
405 {
406 return Fail("%s: Operation has invalid inputs", __func__);
407 }
408
narpra01fb60a562018-10-30 15:46:01 +0000409 const uint32_t kernelX = weights.GetShape()[2];
410 const uint32_t kernelY = weights.GetShape()[1];
411 const uint32_t inputX = inputInfo.GetShape()[2];
412 const uint32_t inputY = inputInfo.GetShape()[1];
arovir01b0717b52018-09-05 17:03:25 +0100413
414 CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
415 CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
416 }
417 else
418 {
419 return Fail("%s: Unsupported number of operation inputs", __func__);
420 }
421
422 desc.m_BiasEnabled = true;
arovir015602b192018-10-04 16:15:02 +0100423 armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100424
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100425 if (!IsLayerSupportedForAnyBackend(__func__,
426 armnn::IsConvolution2dSupported,
427 data.m_Backends,
428 inputInfo,
429 outputInfo,
430 desc,
431 weights.GetInfo(),
432 biases))
arovir01b0717b52018-09-05 17:03:25 +0100433 {
434 return false;
435 }
436
437 armnn::IConnectableLayer* startLayer = data.m_Network->AddConvolution2dLayer(desc, weights, bias);
arovir01b0717b52018-09-05 17:03:25 +0100438
narpra01fb60a562018-10-30 15:46:01 +0000439 if (!startLayer)
arovir01b0717b52018-09-05 17:03:25 +0100440 {
narpra01fb60a562018-10-30 15:46:01 +0000441 return Fail("%s: AddConvolution2dLayer failed", __func__);
arovir01b0717b52018-09-05 17:03:25 +0100442 }
narpra01fb60a562018-10-30 15:46:01 +0000443
444 armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
445
446 if (!endLayer)
arovir01b0717b52018-09-05 17:03:25 +0100447 {
448 return Fail("%s: ProcessActivation failed", __func__);
449 }
narpra01fb60a562018-10-30 15:46:01 +0000450
451 input.Connect(startLayer->GetInputSlot(0));
452
453 return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100454}
455
// Converts a DEPTHWISE_CONV_2D operation into an ArmNN DepthwiseConvolution2d
// layer (NHWC input). The AndroidNN weights [1, H, W, I*M] are reinterpreted
// as [H, W, I, M] and swizzled to the [M, I, H, W] layout ArmNN expects.
bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias

    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const Operand* weightsOperand = GetInputOperand(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }

    // Reinterpret weight data as [ H, W, I, M ]
    // (M = channel multiplier = total weight channels / input channels)
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1, model, data,
                                                                            HWIMToMIHW, &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    // 11 inputs: explicit padding form; 8 inputs: implicit padding form.
    if (operation.inputs.size() == 11)
    {
        if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 8)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights are now [ M, I, H, W ], so W is index 3 and H is index 2.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsDepthwiseConvolutionSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc,
                                       weights.GetInfo(),
                                       biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer = data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, bias);
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
}
579
580bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
581{
582 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
583 if (!input.IsValid())
584 {
585 return Fail("%s: Operation has invalid inputs", __func__);
586 }
587
588 const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
589 if (!outputOperand)
590 {
591 return Fail("%s: Operation has invalid outputs", __func__);
592 }
593
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100594 if (!IsLayerSupportedForAnyBackend(__func__,
595 armnn::IsFloorSupported,
596 data.m_Backends,
597 input.GetTensorInfo(),
598 GetTensorInfoForOperand(*outputOperand)))
arovir01b0717b52018-09-05 17:03:25 +0100599 {
600 return false;
601 }
602
603 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
604 assert(layer != nullptr);
605 input.Connect(layer->GetInputSlot(0));
606
607 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
608}
609
610bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
611{
612 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
613 if (!input.IsValid())
614 {
615 return Fail("%s: Operation has invalid inputs", __func__);
616 }
617
618 const Operand* output = GetOutputOperand(operation, 0, model);
619 if (!output)
620 {
621 return Fail("%s: Could not read output 0", __func__);
622 }
623
624 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
625 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
626
627 // ArmNN does not currently support non-fixed weights or bias
628 ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1, model, data); // 2D
629 ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data); // 1D
630
631 if (!weightsPin.IsValid() || !biasPin.IsValid())
632 {
633 return Fail("%s: Operation has invalid inputs", __func__);
634 }
635
636 armnn::ConstTensor weights = weightsPin.GetConstTensor();
637 armnn::ConstTensor bias = biasPin.GetConstTensor();
arovir01b0717b52018-09-05 17:03:25 +0100638 armnn::TensorInfo reshapedInfo = inputInfo;
Matthew Benthamf61c2702019-04-23 16:43:27 +0100639
640 try
arovir01b0717b52018-09-05 17:03:25 +0100641 {
Matthew Benthamf61c2702019-04-23 16:43:27 +0100642 reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
643 } catch (const std::exception &e) {
644 return Fail("%s: %s", __func__, e.what());
arovir01b0717b52018-09-05 17:03:25 +0100645 }
646
647 // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
648 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);
649
650 ActivationFn activationFunction;
651 if (!GetInputActivationFunction(operation, 3, activationFunction, model, data))
652 {
653 return Fail("%s: Operation has invalid inputs", __func__);
654 }
655
656 armnn::FullyConnectedDescriptor desc;
657 desc.m_TransposeWeightMatrix = true;
658 desc.m_BiasEnabled = true;
659
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100660 if (!IsLayerSupportedForAnyBackend(__func__,
661 armnn::IsFullyConnectedSupported,
662 data.m_Backends,
663 reshapedInfo,
664 outputInfo,
665 weights.GetInfo(),
666 bias.GetInfo(),
667 desc))
arovir01b0717b52018-09-05 17:03:25 +0100668 {
669 return false;
670 }
671
672 armnn::IConnectableLayer* startLayer = data.m_Network->AddFullyConnectedLayer(desc, weights, bias);
673 armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
674
675 if (endLayer != nullptr)
676 {
677 if (inputInfo.GetNumDimensions() > 2U)
678 {
679 armnn::ReshapeDescriptor reshapeDescriptor;
680 reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
681
682 armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
683 assert(reshapeLayer != nullptr);
684 input.Connect(reshapeLayer->GetInputSlot(0));
685 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
686 reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
687 }
688 else
689 {
690 input.Connect(startLayer->GetInputSlot(0));
691 }
692
693 return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
694 }
695 else
696 {
697 return Fail("%s: ProcessActivation failed", __func__);
698 }
699}
700
701bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
702 const Model& model,
703 ConversionData& data)
704{
705 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
706 if (!input.IsValid())
707 {
708 return Fail("%s: Operation has invalid inputs", __func__);
709 }
710
711 const Operand* output = GetOutputOperand(operation, 0, model);
712 if (!output)
713 {
714 return Fail("%s: Could not read output 0", __func__);
715 }
716
narpra012fb804a2018-10-22 14:52:32 +0100717 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
arovir01b0717b52018-09-05 17:03:25 +0100718 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
719
arovir01b0717b52018-09-05 17:03:25 +0100720 armnn::NormalizationDescriptor descriptor;
721
narpra012fb804a2018-10-22 14:52:32 +0100722 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +0100723 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
narpra012fb804a2018-10-22 14:52:32 +0100724 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
arovir01b0717b52018-09-05 17:03:25 +0100725
726 if (!input.IsValid() ||
727 !GetInputScalar(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
728 !GetInputFloat32(operation, 2, descriptor.m_K, model, data) ||
729 !GetInputFloat32(operation, 3, descriptor.m_Alpha, model, data) ||
730 !GetInputFloat32(operation, 4, descriptor.m_Beta, model, data))
731 {
732 return Fail("%s: Operation has invalid inputs", __func__);
733 }
734
735 // ArmNN expects normSize to be the full size of the normalization
736 // window rather than the radius as in AndroidNN.
737 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
738
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100739 if (!IsLayerSupportedForAnyBackend(__func__,
740 armnn::IsNormalizationSupported,
741 data.m_Backends,
742 inputInfo,
743 outputInfo,
744 descriptor))
arovir01b0717b52018-09-05 17:03:25 +0100745 {
746 return false;
747 }
748
749
750 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
751 assert(layer != nullptr);
narpra012fb804a2018-10-22 14:52:32 +0100752 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +0100753
narpra012fb804a2018-10-22 14:52:32 +0100754 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100755}
756
757bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
758{
759 armnn::ActivationDescriptor desc;
760 desc.m_Function = armnn::ActivationFunction::Sigmoid;
761
762 return ConvertToActivation(operation, __func__, desc, model, data);
763}
764
bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
{
    // Converts an Android NN LSTM operation into an ArmNN LSTM layer.
    // The operation carries three runtime tensor inputs (0, 18, 19), a set of
    // mandatory constant weight/bias tensors, a set of optional constant
    // tensors that select the CIFG / peephole / projection variants, three
    // scalar parameters (20-22), and four outputs (0-3).

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0: input", __func__);
    }
    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    LayerInputHandle outputStateIn = ConvertToLayerInputHandle(operation, 18, model, data);
    if (!outputStateIn.IsValid())
    {
        return Fail("%s: Could not read input 18: outputStateIn", __func__);
    }
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    LayerInputHandle cellStateIn = ConvertToLayerInputHandle(operation, 19, model, data);
    if (!cellStateIn.IsValid())
    {
        return Fail("%s: Could not read input 19: cellStateIn", __func__);
    }

    // Get the mandatory input tensors:
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToForgetWeightsPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data);
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
    const ConstTensorPin inputToCellWeightsPin = ConvertOperationInputToConstTensorPin(operation, 3, model, data);
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToOutputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 4, model, data);
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin(operation, 6, model, data);
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToCellWeightsPin = ConvertOperationInputToConstTensorPin(operation, 7, model, data);
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin(operation, 8, model, data);
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin forgetGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 13, model, data);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellBiasPin = ConvertOperationInputToConstTensorPin(operation, 14, model, data);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin outputGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 15, model, data);

    // All of the mandatory constant tensors must have been read successfully.
    if (!inputToForgetWeightsPin.IsValid() ||
        !inputToCellWeightsPin.IsValid() ||
        !inputToOutputWeightsPin.IsValid() ||
        !recurrentToForgetWeightsPin.IsValid() ||
        !recurrentToCellWeightsPin.IsValid() ||
        !recurrentToOutputWeightsPin.IsValid() ||
        !forgetGateBiasPin.IsValid() ||
        !cellBiasPin.IsValid() ||
        !outputGateBiasPin.IsValid())
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the optional input tensors:
    // The trailing (g_DontPermute, nullptr, true) arguments mark these as optional,
    // so a missing operand yields an "optional" pin rather than an error.
    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    const ConstTensorPin inputToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 1, model, data,
                                                                                       g_DontPermute, nullptr, true);
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    const ConstTensorPin recurrentToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 5, model, data,
                                                                                           g_DontPermute, nullptr, true);
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 9, model, data,
                                                                                      g_DontPermute, nullptr, true);
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToForgetWeightsPin = ConvertOperationInputToConstTensorPin(operation, 10, model, data,
                                                                                       g_DontPermute, nullptr, true);
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToOutputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 11, model, data,
                                                                                       g_DontPermute, nullptr, true);
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin inputGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 12, model, data,
                                                                                 g_DontPermute, nullptr, true);
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    const ConstTensorPin projectionWeightsPin = ConvertOperationInputToConstTensorPin(operation, 16, model, data,
                                                                                     g_DontPermute, nullptr, true);
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    const ConstTensorPin projectionBiasPin = ConvertOperationInputToConstTensorPin(operation, 17, model, data,
                                                                                  g_DontPermute, nullptr, true);

    // An optional pin may be invalid only if it is also marked optional
    // (i.e. the operand was genuinely absent, not malformed).
    if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
        (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
        (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
        (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
        (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
        (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
        (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
        (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the mandatory input scalars (actually 1-D tensors of size 1):
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    ActivationFn activation;
    float cellClip;
    float projClip;
    if (!GetInputActivationFunctionFromTensor(operation, 20, activation, model, data) ||
        !GetInputScalar(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
        !GetInputScalar(operation, 22, OperandType::FLOAT32, projClip, model, data))
    {
        return Fail("%s: Operation has invalid scalar inputs", __func__);
    }

    // Outputs:
    // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
    //     CIFG, or [batch_size, num_units * 3] without CIFG.
    const Operand* scratchBuffer = GetOutputOperand(operation, 0, model);
    if (!scratchBuffer)
    {
        return Fail("%s: Could not read output 0: scratchBuffer", __func__);
    }
    // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    const Operand* outputStateOut = GetOutputOperand(operation, 1, model);
    if (!outputStateOut)
    {
        return Fail("%s: Could not read output 1: outputStateOut", __func__);
    }
    // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    const Operand* cellStateOut = GetOutputOperand(operation, 2, model);
    if (!cellStateOut)
    {
        return Fail("%s: Could not read output 2: cellStateOut", __func__);
    }
    // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //     effectively the same as the current “output state (out)” value.
    const Operand* output = GetOutputOperand(operation, 3, model);
    if (!output)
    {
        return Fail("%s: Could not read output 3: output", __func__);
    }

    // set the params structure for the AddLstmLayer call
    // Optional entries resolve to nullptr when the corresponding operand was omitted.
    armnn::LstmInputParams params;
    params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
    params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
    params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
    params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
    params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
    params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
    params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
    params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
    params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
    params.m_CellBias = cellBiasPin.GetConstTensorPtr();
    params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
    params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
    params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();

    // set the layer descriptor
    // The LSTM variant is inferred from which optional tensors were supplied:
    // CIFG (coupled input-forget gate) when any input-gate tensor is missing,
    // peephole when either cell-to-forget or cell-to-output weights are present,
    // projection when projection weights are present.
    armnn::LstmDescriptor desc;
    desc.m_ActivationFunc = activation;
    desc.m_ClippingThresCell = cellClip;
    desc.m_ClippingThresProj = projClip;
    desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
                          params.m_RecurrentToInputWeights == nullptr ||
                          params.m_InputGateBias == nullptr);
    desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
                              params.m_CellToOutputWeights != nullptr);
    desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);

    // validate the optional input groups
    if (desc.m_CifgEnabled &&
        (params.m_InputToInputWeights != nullptr ||
         params.m_RecurrentToInputWeights != nullptr ||
         params.m_InputGateBias != nullptr))
    {
        return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
                    " and input gate bias must be provided", __func__);
    }

    if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
    {
        return Fail("%s: projection bias should not be provided without projection weights", __func__);
    }

    if (desc.m_PeepholeEnabled &&
        (params.m_CellToForgetWeights == nullptr ||
         params.m_CellToOutputWeights == nullptr ||
         (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
    {
        return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
                    " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
    }

    // Check if the layer is supported
    // Inputs
    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
    const armnn::TensorInfo& cellStateInInfo = cellStateIn.GetTensorInfo();

    // Outputs
    const armnn::TensorInfo& scratchBufferInfo = GetTensorInfoForOperand(*scratchBuffer);
    const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
    const armnn::TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Basic parameters
    const armnn::TensorInfo& inputToForgetWeights = params.m_InputToForgetWeights->GetInfo();
    const armnn::TensorInfo& inputToCellWeights = params.m_InputToCellWeights->GetInfo();
    const armnn::TensorInfo& inputToOutputWeights = params.m_InputToOutputWeights->GetInfo();
    const armnn::TensorInfo& recurrentToForgetWeights = params.m_RecurrentToForgetWeights->GetInfo();
    const armnn::TensorInfo& recurrentToCellWeights = params.m_RecurrentToCellWeights->GetInfo();
    const armnn::TensorInfo& recurrentToOutputWeights = params.m_RecurrentToOutputWeights->GetInfo();
    const armnn::TensorInfo& forgetGateBias = params.m_ForgetGateBias->GetInfo();
    const armnn::TensorInfo& cellBias = params.m_CellBias->GetInfo();
    const armnn::TensorInfo& outputGateBias = params.m_OutputGateBias->GetInfo();

    //Optional parameters
    // These stay nullptr unless the corresponding feature is enabled below,
    // matching what IsLstmSupported expects for absent tensors.
    const armnn::TensorInfo* inputToInputWeights = nullptr;
    const armnn::TensorInfo* recurrentToInputWeights = nullptr;
    const armnn::TensorInfo* cellToInputWeights = nullptr;
    const armnn::TensorInfo* inputGateBias = nullptr;
    const armnn::TensorInfo* projectionWeights = nullptr;
    const armnn::TensorInfo* projectionBias = nullptr;
    const armnn::TensorInfo* cellToForgetWeights = nullptr;
    const armnn::TensorInfo* cellToOutputWeights = nullptr;

    if(!desc.m_CifgEnabled)
    {
        inputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
        recurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
        if (params.m_CellToInputWeights != nullptr)
        {
            cellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
        }
        inputGateBias = &(params.m_InputGateBias->GetInfo());
    }

    if(desc.m_ProjectionEnabled)
    {
        projectionWeights = &(params.m_ProjectionWeights->GetInfo());
        if (params.m_ProjectionBias != nullptr)
        {
            projectionBias = &(params.m_ProjectionBias->GetInfo());
        }
    }

    if(desc.m_PeepholeEnabled)
    {
        cellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
        cellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
    }

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsLstmSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputStateInInfo,
                                       cellStateInInfo,
                                       scratchBufferInfo,
                                       outputStateOutInfo,
                                       cellStateOutInfo,
                                       outputInfo,
                                       desc,
                                       inputToForgetWeights,
                                       inputToCellWeights,
                                       inputToOutputWeights,
                                       recurrentToForgetWeights,
                                       recurrentToCellWeights,
                                       recurrentToOutputWeights,
                                       forgetGateBias,
                                       cellBias,
                                       outputGateBias,
                                       inputToInputWeights,
                                       recurrentToInputWeights,
                                       cellToInputWeights,
                                       inputGateBias,
                                       projectionWeights,
                                       projectionBias,
                                       cellToForgetWeights,
                                       cellToOutputWeights))
    {
        return false;
    }

    // Add the layer
    armnn::IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");

    // Runtime inputs: data, previous output state, previous cell state.
    input.Connect(layer->GetInputSlot(0));
    outputStateIn.Connect(layer->GetInputSlot(1));
    cellStateIn.Connect(layer->GetInputSlot(2));

    // Track all four outputs (scratch buffer, output state, cell state, output).
    return (SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
            SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data) &&
            SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data) &&
            SetupAndTrackLayerOutputSlot(operation, 3, *layer, 3, model, data));
}
1074
1075bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
1076{
1077 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1078 if (!input.IsValid())
1079 {
1080 return Fail("%s: Operation has invalid inputs", __func__);
1081 }
1082
1083 const Operand* output = GetOutputOperand(operation, 0, model);
1084 if (!output)
1085 {
1086 return Fail("%s: Could not read output 0", __func__);
1087 }
1088
1089 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1090 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1091
Matteo Martincigh58f71092018-09-25 15:58:52 +01001092 armnn::L2NormalizationDescriptor desc;
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +01001093 desc.m_DataLayout = armnn::DataLayout::NHWC;
Matteo Martincigh58f71092018-09-25 15:58:52 +01001094
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001095 if (!IsLayerSupportedForAnyBackend(__func__,
1096 armnn::IsL2NormalizationSupported,
1097 data.m_Backends,
1098 inputInfo,
1099 outputInfo,
1100 desc))
arovir01b0717b52018-09-05 17:03:25 +01001101 {
1102 return false;
1103 }
1104
Matteo Martincigh58f71092018-09-25 15:58:52 +01001105 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
arovir01b0717b52018-09-05 17:03:25 +01001106 assert(layer != nullptr);
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +01001107 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +01001108
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +01001109 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001110}
1111
1112bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
1113{
1114 return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
1115}
1116
1117bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
1118{
1119 return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
1120}
1121
1122bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
1123{
1124 LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
1125 LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
1126
1127 if (!input0.IsValid() || !input1.IsValid())
1128 {
1129 return Fail("%s: Operation has invalid inputs", __func__);
1130 }
1131
1132 // The FuseActivation parameter is always the input index 2
1133 // and it should be optional
1134 ActivationFn activationFunction;
1135 if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
1136 {
1137 return Fail("%s: Operation has invalid inputs", __func__);
1138 }
1139
1140 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
1141
1142 if (outputOperand == nullptr)
1143 {
1144 return false;
1145 }
1146
1147 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
1148
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001149 if (!IsLayerSupportedForAnyBackend(__func__,
1150 armnn::IsMultiplicationSupported,
1151 data.m_Backends,
1152 input0.GetTensorInfo(),
1153 input1.GetTensorInfo(),
1154 outInfo))
arovir01b0717b52018-09-05 17:03:25 +01001155 {
1156 return false;
1157 }
1158
1159 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
1160 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
1161
1162 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
1163 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
1164
1165 if (endLayer != nullptr)
1166 {
1167 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
1168 return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
1169 }
1170 else
1171 {
1172 return Fail("%s: ProcessActivation failed", __func__);
1173 }
1174}
1175
1176bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
1177{
1178 armnn::ActivationDescriptor desc;
1179 desc.m_Function = armnn::ActivationFunction::ReLu;
1180
1181 return ConvertToActivation(operation, __func__, desc, model, data);
1182}
1183
1184bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
1185{
1186 armnn::ActivationDescriptor desc;
1187 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1188 desc.m_A = 1.0f;
1189 desc.m_B = -1.0f;
1190
1191 return ConvertToActivation(operation, __func__, desc, model, data);
1192}
1193
1194bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
1195{
1196 armnn::ActivationDescriptor desc;
1197 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1198 desc.m_A = 6.0f;
1199
1200 return ConvertToActivation(operation, __func__, desc, model, data);
1201}
1202
1203bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
1204{
1205 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1206 if (!input.IsValid())
1207 {
1208 return Fail("%s: Operation has invalid inputs", __func__);
1209 }
1210
1211 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
1212 if (!outputOperand)
1213 {
1214 return Fail("%s: Operation has no outputs", __func__);
1215 }
1216
1217 const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
1218
1219 armnn::SoftmaxDescriptor desc;
1220 if (!GetInputFloat32(operation, 1, desc.m_Beta, model, data))
1221 {
1222 return Fail("%s: Operation has invalid inputs", __func__);
1223 }
1224
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001225 if (!IsLayerSupportedForAnyBackend(__func__,
1226 armnn::IsSoftmaxSupported,
1227 data.m_Backends,
1228 input.GetTensorInfo(),
1229 outInfo,
1230 desc))
arovir01b0717b52018-09-05 17:03:25 +01001231 {
1232 return false;
1233 }
1234
1235 armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
1236 assert(layer != nullptr);
1237 input.Connect(layer->GetInputSlot(0));
1238
1239 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
1240}
1241
1242bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
1243{
1244 armnn::ActivationDescriptor desc;
1245 desc.m_Function = armnn::ActivationFunction::TanH;
1246 desc.m_A = 1.0f; // android nn does not support tanH parameters
1247 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1248
1249 return ConvertToActivation(operation, __func__, desc, model, data);
1250}
1251
1252bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
1253{
1254 const Operand* inputOperand = GetInputOperand(operation, 0, model);
1255 const Operand* requestedShapeOperand = GetInputOperand(operation, 1, model);
1256 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
1257
1258 if (inputOperand == nullptr
1259 || requestedShapeOperand == nullptr
1260 || outputOperand == nullptr)
1261 {
1262 return Fail("%s: Operation has invalid inputs", __func__);
1263 }
1264
1265
1266 if (requestedShapeOperand->dimensions.size() != 1)
1267 {
1268 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
1269 __func__, requestedShapeOperand->dimensions.size());
1270 }
1271
1272 std::vector<int32_t> targetDimensions;
1273 if (!GetTensorInt32Values(*requestedShapeOperand, targetDimensions, model, data))
1274 {
1275 return Fail("%s: Could not read values of input 1", __func__);
1276 }
1277
1278 const Shape inputOperandShape = GetOperandShape(*inputOperand);
1279
1280 Shape requestedShape;
1281 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
1282 // function that resolves these values into a fully specified tensor shape.
1283 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
1284 {
1285 return Fail("%s: Failed to resolve the requested shape", __func__);
1286 }
1287
1288 const Shape outputOperandShape = GetOperandShape(*outputOperand);
1289 if (!SameShape(requestedShape, outputOperandShape))
1290 {
1291 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
1292 }
1293
1294 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1295 if (!input.IsValid())
1296 {
1297 return Fail("%s: Could not read input 0", __func__);
1298 }
1299
arovir01b0717b52018-09-05 17:03:25 +01001300 armnn::ReshapeDescriptor reshapeDescriptor;
1301 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
1302 requestedShape.dimensions.data());
1303
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001304 if (!IsLayerSupportedForAnyBackend(__func__,
1305 armnn::IsReshapeSupported,
1306 data.m_Backends,
1307 input.GetTensorInfo(),
1308 reshapeDescriptor))
Matteo Martincigh265d1ad2019-01-08 18:14:53 +00001309 {
1310 return false;
1311 }
1312
arovir01b0717b52018-09-05 17:03:25 +01001313 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
1314 assert(layer != nullptr);
1315 input.Connect(layer->GetInputSlot(0));
1316
1317 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
1318}
1319
1320bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data)
1321{
1322 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1323 if (!input.IsValid())
1324 {
1325 return Fail("%s: Could not read input 0", __func__);
1326 }
1327
1328 const Operand* output = GetOutputOperand(operation, 0, model);
1329 if (!output)
1330 {
1331 return Fail("%s: Could not read output 0", __func__);
1332 }
1333
1334 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1335 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1336
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001337 armnn::ResizeBilinearDescriptor desc;
1338 desc.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +01001339
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001340 if (!IsLayerSupportedForAnyBackend(__func__,
1341 armnn::IsResizeBilinearSupported,
1342 data.m_Backends,
1343 inputInfo,
1344 outputInfo))
arovir01b0717b52018-09-05 17:03:25 +01001345 {
1346 return false;
1347 }
1348
arovir01b0717b52018-09-05 17:03:25 +01001349
1350 if ( !GetInputScalar(operation, 1, OperandType::INT32, desc.m_TargetHeight, model, data)
1351 || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_TargetWidth, model, data))
1352 {
1353 return Fail("%s: Operation has invalid inputs", __func__);
1354 }
1355
1356 armnn::IConnectableLayer* layer = data.m_Network->AddResizeBilinearLayer(desc);
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001357
arovir01b0717b52018-09-05 17:03:25 +01001358 assert(layer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +01001359
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001360 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1361 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +01001362
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001363 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001364
1365}
1366
1367} // namespace hal_1_0
Matteo Martincigh58f71092018-09-25 15:58:52 +01001368} // namespace armnn_driver