//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "HalPolicy.hpp"

#include "armnn/Optional.hpp"

namespace armnn_driver
{
namespace hal_1_0
{

15bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
16{
17 switch (operation.type)
18 {
19 case V1_0::OperationType::ADD:
20 return ConvertAdd(operation, model, data);
21 case V1_0::OperationType::AVERAGE_POOL_2D:
22 return ConvertAveragePool2d(operation, model, data);
23 case V1_0::OperationType::CONCATENATION:
24 return ConvertConcatenation(operation, model, data);
25 case V1_0::OperationType::CONV_2D:
26 return ConvertConv2d(operation, model, data);
27 case V1_0::OperationType::DEPTHWISE_CONV_2D:
28 return ConvertDepthwiseConv2d(operation, model, data);
29 case V1_0::OperationType::FLOOR:
30 return ConvertFloor(operation, model, data);
31 case V1_0::OperationType::FULLY_CONNECTED:
32 return ConvertFullyConnected(operation, model, data);
33 case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
34 return ConvertLocalResponseNormalization(operation, model, data);
35 case V1_0::OperationType::LOGISTIC:
36 return ConvertLogistic(operation, model, data);
37 case V1_0::OperationType::LSTM:
38 return ConvertLstm(operation, model, data);
39 case V1_0::OperationType::L2_NORMALIZATION:
40 return ConvertL2Normalization(operation, model, data);
41 case V1_0::OperationType::L2_POOL_2D:
42 return ConvertL2Pool2d(operation, model, data);
43 case V1_0::OperationType::MAX_POOL_2D:
44 return ConvertMaxPool2d(operation, model, data);
45 case V1_0::OperationType::MUL:
46 return ConvertMul(operation, model, data);
47 case V1_0::OperationType::RELU:
48 return ConvertReLu(operation, model, data);
49 case V1_0::OperationType::RELU1:
50 return ConvertReLu1(operation, model, data);
51 case V1_0::OperationType::RELU6:
52 return ConvertReLu6(operation, model, data);
53 case V1_0::OperationType::SOFTMAX:
54 return ConvertSoftmax(operation, model, data);
55 case V1_0::OperationType::TANH:
56 return ConvertTanH(operation, model, data);
57 case V1_0::OperationType::RESHAPE:
58 return ConvertReshape(operation, model, data);
59 case V1_0::OperationType::RESIZE_BILINEAR:
60 return ConvertResizeBilinear(operation, model, data);
61 default:
62 return Fail("%s: Operation type %s not supported in ArmnnDriver",
63 __func__, toString(operation.type).c_str());
64 }
65}
66
67bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
68{
69 LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
70 LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
71
72 if (!input0.IsValid() || !input1.IsValid())
73 {
74 return Fail("%s: Operation has invalid inputs", __func__);
75 }
76
77 // The FuseActivation parameter is always the input index 2
78 // and it should be optional
79 ActivationFn activationFunction;
80 if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
81 {
82 return Fail("%s: Operation has invalid inputs", __func__);
83 }
84
85 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
86 if (!outputOperand)
87 {
88 return false;
89 }
90
91 const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
92
93 if (!IsLayerSupported(__func__,
94 armnn::IsAdditionSupported,
95 data.m_Compute,
96 input0.GetTensorInfo(),
97 input1.GetTensorInfo(),
98 outInfo))
99 {
100 return false;
101 }
102
103 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
104 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
105
106 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
107 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
108
109 if (endLayer != nullptr)
110 {
111 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
112 return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
113 }
114 else
115 {
116 return Fail("%s: ProcessActivation failed", __func__);
117 }
118}
119
bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    // Delegates to the shared pooling converter with the Average pooling algorithm.
    return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
}
124
bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
{
    // Converts a CONCATENATION operation into an ArmNN merger layer.
    // Low-rank inputs are first reshaped up to a supported rank, inputs may be
    // permuted (swizzled) so the concat axis lands on a supported dimension, and
    // the inverse permutation/reshape is applied after the merger.
    //
    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    // The axis is the last input (index numInputTensors).
    int32_t concatDim;
    if (!GetInputScalar(operation, numInputTensors, OperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }


    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    // https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    // After normalisation the axis must be a valid dimension index.
    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    // Track whether any input had to be expanded so the output can be adjusted to match.
    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const Operand* const operand = GetInputOperand(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle(operation, i, model, data);

        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
            );

            // Point to the reshape operation rather then the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    BOOST_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0], outputShape[1]});
        }
    }

    // Get the pair of permutations required for the concatenation
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
            std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    CreatePermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    // The output shape must be permuted the same way as the inputs will be.
    outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
    outputInfo.SetShape(outputShape);

    // this is no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    SwizzleInputs(*data.m_Network, inputHandles, inputShapes, permutationPair.first);

    // Create an armnn merger layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor mergerDescriptor;
    try
    {
        // The merger descriptor is always created across the only supported concat
        // dimension, which is 0 or 1
        mergerDescriptor =
                armnn::CreateMergerDescriptorForConcatenation(
                        inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (const armnn::Exception& error)
    {
        return Fail("%s: Error preparing merger descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0 or 1
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    // Backend support is queried against the (possibly swizzled) input tensor infos.
    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
        [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
    if (!IsLayerSupported(__func__,
                          armnn::IsMergerSupported,
                          data.m_Compute,
                          inputTensorInfos,
                          mergerDescriptor))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddMergerLayer(mergerDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    // Add permutation layer and connect the output to it, the permutation becomes the output layer
    armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
                                                               layer->GetOutputSlot(0),
                                                               permutationPair.second);
    layer = &deswizzleLayer;

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2],
                                                          afterConcatInfo.GetShape()[3] }));
        }

        layer = &AddReshapeLayer(
                *data.m_Network,
                layer->GetOutputSlot(0),
                afterConcatInfo
        );
    }

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}
339
bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    // Converts a CONV_2D operation into an ArmNN convolution layer.
    // The NNAPI tensors are NHWC; this path permutes input/output (NHWCToArmNN)
    // and wraps the layer in swizzle/deswizzle permute layers.
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Tensor infos as seen by the ArmNN layer (after the NHWC -> ArmNN permutation).
    const armnn::TensorInfo swizzledInputInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1, model, data, NHWCToArmNN);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Keep the bias quantization scale consistent with input scale * weights scale.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), swizzledInputInfo);

    armnn::Convolution2dDescriptor desc;
    ActivationFn activation;

    // NNAPI CONV_2D has two forms: 10 inputs with explicit padding values,
    // or 7 inputs with an implicit padding scheme that is resolved via CalcPadding.
    if (operation.inputs.size() == 10)
    {
        if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Kernel and input extents are read from the swizzled (ArmNN-order) shapes.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = swizzledInputInfo.GetShape()[3];
        const uint32_t inputY = swizzledInputInfo.GetShape()[2];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    if (!IsLayerSupported(__func__,
                          armnn::IsConvolution2dSupported,
                          data.m_Compute,
                          swizzledInputInfo,
                          swizzledOutputInfo,
                          desc,
                          weights.GetInfo(),
                          biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer = data.m_Network->AddConvolution2dLayer(desc, weights, bias);
    armnn::IConnectableLayer* endLayer = ProcessActivation(swizzledOutputInfo, activation, startLayer, data);

    if (endLayer != nullptr)
    {
        // Wrap the convolution (plus any fused activation) in swizzle/deswizzle permutes
        // so the network still consumes/produces NHWC tensors.
        armnn::IConnectableLayer& outSwizzleLayer =
            SwizzleInDeswizzleOut(*data.m_Network, input, *startLayer, *endLayer);
        return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
442
bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    // Converts a DEPTHWISE_CONV_2D operation into an ArmNN depthwise convolution layer,
    // running directly in NHWC (desc.m_DataLayout below) with the weights permuted to
    // the [ M, H, W, I ] order ArmNN expects.
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias

    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    // which is equal to [ M, H, W, I ]
    const Operand* weightsOperand = GetInputOperand(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }

    // Reinterpret weight data as [ H, W, I, M ]
    // (I is taken from the input's channel dimension; M is the depth multiplier.)
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1], weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, H, W, I ]
    const armnn::PermutationVector HWIMToMHWI = { 1U, 2U, 3U, 0U };

    ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin(operation, 1, model, data, HWIMToMHWI, &weightsShape);

    // Bias is a 1D tensor
    ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Keep the bias quantization scale consistent with input scale * weights scale.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    // NNAPI DEPTHWISE_CONV_2D has two forms: 11 inputs with explicit padding values,
    // or 8 inputs with an implicit padding scheme resolved via CalcPadding.
    if (operation.inputs.size() == 11)
    {
        if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 8)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Weights are in [ M, H, W, I ] order here; input is NHWC.
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    if (!IsLayerSupported(__func__,
                          armnn::IsDepthwiseConvolutionSupported,
                          data.m_Compute,
                          inputInfo,
                          outputInfo,
                          desc,
                          weights.GetInfo(),
                          biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer = data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, bias);
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
}
566
567bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
568{
569 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
570 if (!input.IsValid())
571 {
572 return Fail("%s: Operation has invalid inputs", __func__);
573 }
574
575 const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
576 if (!outputOperand)
577 {
578 return Fail("%s: Operation has invalid outputs", __func__);
579 }
580
581 if (!IsLayerSupported(__func__,
582 armnn::IsFloorSupported,
583 data.m_Compute,
584 input.GetTensorInfo(),
585 GetTensorInfoForOperand(*outputOperand)))
586 {
587 return false;
588 }
589
590 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
591 assert(layer != nullptr);
592 input.Connect(layer->GetInputSlot(0));
593
594 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
595}
596
bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
{
    // Converts a FULLY_CONNECTED operation into an ArmNN fully-connected layer.
    // Inputs with rank > 2 are flattened to 2D via an inserted reshape layer so
    // they match the 2D weights.
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1, model, data); // 2D
    ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();

    // Deduce a 2D shape [batch, inputSize] from a higher-rank input by folding
    // trailing dimensions into dim1 and redistributing against the weights' input size.
    armnn::TensorInfo reshapedInfo = inputInfo;
    if (inputInfo.GetNumDimensions() > 2U)
    {
        unsigned int dim0 = inputInfo.GetShape()[0];
        unsigned int dim1 = inputInfo.GetShape()[1];

        for (unsigned int i = 2U; i < inputInfo.GetNumDimensions(); ++i)
        {
            dim1 *= inputInfo.GetShape()[i];
        }

        unsigned int divisor = weights.GetInfo().GetShape()[1] / dim1;
        if(dim0 % divisor != 0)
        {
            return Fail("%s: Failed to deduce tensor shape", __func__);
        }

        reshapedInfo.SetShape(armnn::TensorShape({dim0 / divisor, dim1 * divisor}));
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    // NOTE(review): support is queried with the original inputInfo even when the layer
    // will actually be fed the reshaped 2D tensor — confirm this is intentional.
    if (!IsLayerSupported(__func__,
                          armnn::IsFullyConnectedSupported,
                          data.m_Compute,
                          inputInfo,
                          outputInfo,
                          weights.GetInfo(),
                          bias.GetInfo(),
                          desc))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer = data.m_Network->AddFullyConnectedLayer(desc, weights, bias);
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            // Insert a reshape between the input and the fully-connected layer.
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
699
700bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
701 const Model& model,
702 ConversionData& data)
703{
704 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
705 if (!input.IsValid())
706 {
707 return Fail("%s: Operation has invalid inputs", __func__);
708 }
709
710 const Operand* output = GetOutputOperand(operation, 0, model);
711 if (!output)
712 {
713 return Fail("%s: Could not read output 0", __func__);
714 }
715
narpra012fb804a2018-10-22 14:52:32 +0100716 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
arovir01b0717b52018-09-05 17:03:25 +0100717 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
718
arovir01b0717b52018-09-05 17:03:25 +0100719 armnn::NormalizationDescriptor descriptor;
720
narpra012fb804a2018-10-22 14:52:32 +0100721 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +0100722 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
narpra012fb804a2018-10-22 14:52:32 +0100723 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
arovir01b0717b52018-09-05 17:03:25 +0100724
725 if (!input.IsValid() ||
726 !GetInputScalar(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
727 !GetInputFloat32(operation, 2, descriptor.m_K, model, data) ||
728 !GetInputFloat32(operation, 3, descriptor.m_Alpha, model, data) ||
729 !GetInputFloat32(operation, 4, descriptor.m_Beta, model, data))
730 {
731 return Fail("%s: Operation has invalid inputs", __func__);
732 }
733
734 // ArmNN expects normSize to be the full size of the normalization
735 // window rather than the radius as in AndroidNN.
736 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
737
738 if (!IsLayerSupported(__func__,
739 armnn::IsNormalizationSupported,
740 data.m_Compute,
narpra012fb804a2018-10-22 14:52:32 +0100741 inputInfo,
742 outputInfo,
arovir01b0717b52018-09-05 17:03:25 +0100743 descriptor))
744 {
745 return false;
746 }
747
748
749 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
750 assert(layer != nullptr);
narpra012fb804a2018-10-22 14:52:32 +0100751 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +0100752
narpra012fb804a2018-10-22 14:52:32 +0100753 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100754}
755
756bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
757{
758 armnn::ActivationDescriptor desc;
759 desc.m_Function = armnn::ActivationFunction::Sigmoid;
760
761 return ConvertToActivation(operation, __func__, desc, model, data);
762}
763
764bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
765{
766 // Inputs:
767 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
768 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
769 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
770 if (!input.IsValid())
771 {
772 return Fail("%s: Could not read input 0: input", __func__);
773 }
774 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
775 LayerInputHandle outputStateIn = ConvertToLayerInputHandle(operation, 18, model, data);
776 if (!outputStateIn.IsValid())
777 {
778 return Fail("%s: Could not read input 18: outputStateIn", __func__);
779 }
780 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
781 LayerInputHandle cellStateIn = ConvertToLayerInputHandle(operation, 19, model, data);
782 if (!cellStateIn.IsValid())
783 {
784 return Fail("%s: Could not read input 19: cellStateIn", __func__);
785 }
786
787 // Get the mandatory input tensors:
788 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
789 // [num_units, input_size].
790 const ConstTensorPin inputToForgetWeightsPin = ConvertOperationInputToConstTensorPin(operation, 2, model, data);
791 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
792 const ConstTensorPin inputToCellWeightsPin = ConvertOperationInputToConstTensorPin(operation, 3, model, data);
793 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
794 // [num_units, input_size].
795 const ConstTensorPin inputToOutputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 4, model, data);
796 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
797 // [num_units, output_size].
798 const ConstTensorPin recurrentToForgetWeightsPin =
799 ConvertOperationInputToConstTensorPin(operation, 6, model, data);
800 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
801 // [num_units, output_size].
802 const ConstTensorPin recurrentToCellWeightsPin = ConvertOperationInputToConstTensorPin(operation, 7, model, data);
803 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
804 // [num_units, output_size].
805 const ConstTensorPin recurrentToOutputWeightsPin =
806 ConvertOperationInputToConstTensorPin(operation, 8, model, data);
807 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
808 const ConstTensorPin forgetGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 13, model, data);
809 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
810 const ConstTensorPin cellBiasPin = ConvertOperationInputToConstTensorPin(operation, 14, model, data);
811 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
812 const ConstTensorPin outputGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 15, model, data);
813
814 if (!inputToForgetWeightsPin.IsValid() ||
815 !inputToCellWeightsPin.IsValid() ||
816 !inputToOutputWeightsPin.IsValid() ||
817 !recurrentToForgetWeightsPin.IsValid() ||
818 !recurrentToCellWeightsPin.IsValid() ||
819 !recurrentToOutputWeightsPin.IsValid() ||
820 !forgetGateBiasPin.IsValid() ||
821 !cellBiasPin.IsValid() ||
822 !outputGateBiasPin.IsValid())
823 {
824 return Fail("%s: Operation has invalid tensor inputs", __func__);
825 }
826
827 // Get the optional input tensors:
828 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
829 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
830 const ConstTensorPin inputToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 1, model, data);
831 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
832 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
833 // “num_units”), or the second dimension of the “projection_weights”, if defined.
834 const ConstTensorPin recurrentToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 5, model, data);
835 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
836 const ConstTensorPin cellToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 9, model, data);
837 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
838 const ConstTensorPin cellToForgetWeightsPin = ConvertOperationInputToConstTensorPin(operation, 10, model, data);
839 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
840 const ConstTensorPin cellToOutputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 11, model, data);
841 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
842 const ConstTensorPin inputGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 12, model, data);
843 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
844 // [output_size, num_units].
845 const ConstTensorPin projectionWeightsPin = ConvertOperationInputToConstTensorPin(operation, 16, model, data);
846 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
847 const ConstTensorPin projectionBiasPin = ConvertOperationInputToConstTensorPin(operation, 17, model, data);
848
849 if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
850 (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
851 (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
852 (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
853 (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
854 (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
855 (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
856 (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
857 {
858 return Fail("%s: Operation has invalid tensor inputs", __func__);
859 }
860
861 // Get the mandatory input scalars (actually 1-D tensors of size 1):
862 // 20: The activation function: A value indicating the activation function:
863 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
864 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
865 // If set to 0.0 then clipping is disabled.
866 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
867 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
868 ActivationFn activation;
869 float cellClip;
870 float projClip;
871 if (!GetInputActivationFunctionFromTensor(operation, 20, activation, model, data) ||
872 !GetInputScalar(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
873 !GetInputScalar(operation, 22, OperandType::FLOAT32, projClip, model, data))
874 {
875 return Fail("%s: Operation has invalid scalar inputs", __func__);
876 }
877
878 // Outputs:
879 // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
880 // CIFG, or [batch_size, num_units * 3] without CIFG.
881 const Operand* scratchBuffer = GetOutputOperand(operation, 0, model);
882 if (!scratchBuffer)
883 {
884 return Fail("%s: Could not read output 0: scratchBuffer", __func__);
885 }
886 // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
887 const Operand* outputStateOut = GetOutputOperand(operation, 1, model);
888 if (!outputStateOut)
889 {
890 return Fail("%s: Could not read output 1: outputStateOut", __func__);
891 }
892 // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
893 const Operand* cellStateOut = GetOutputOperand(operation, 2, model);
894 if (!cellStateOut)
895 {
896 return Fail("%s: Could not read output 2: cellStateOut", __func__);
897 }
898 // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
899 // effectively the same as the current “output state (out)” value.
900 const Operand* output = GetOutputOperand(operation, 3, model);
901 if (!output)
902 {
903 return Fail("%s: Could not read output 3: output", __func__);
904 }
905
906 // set the params structure for the AddLstmLayer call
907 armnn::LstmInputParams params;
908 params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
909 params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
910 params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
911 params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
912 params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
913 params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
914 params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
915 params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
916 params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
917 params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
918 params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
919 params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
920 params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
921 params.m_CellBias = cellBiasPin.GetConstTensorPtr();
922 params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
923 params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
924 params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
925
926 // set the layer descriptor
927 armnn::LstmDescriptor desc;
928 desc.m_ActivationFunc = activation;
929 desc.m_ClippingThresCell = cellClip;
930 desc.m_ClippingThresProj = projClip;
931 desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
932 params.m_RecurrentToInputWeights == nullptr ||
933 params.m_InputGateBias == nullptr);
934 desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
935 params.m_CellToOutputWeights != nullptr);
936 desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
937
938 // validate the optional input groups
939 if (desc.m_CifgEnabled &&
940 (params.m_InputToInputWeights != nullptr ||
941 params.m_RecurrentToInputWeights != nullptr ||
942 params.m_InputGateBias != nullptr))
943 {
944 return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
945 " and input gate bias must be provided", __func__);
946 }
947
948 if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
949 {
950 return Fail("%s: projection bias should not be provided without projection weights", __func__);
951 }
952
953 if (desc.m_PeepholeEnabled &&
954 (params.m_CellToForgetWeights == nullptr ||
955 params.m_CellToOutputWeights == nullptr ||
956 (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
957 {
958 return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
959 " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
960 }
961
962 // Check if the layer is supported
963 // Inputs
964 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
965 const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
966 const armnn::TensorInfo& cellStateInInfo = cellStateIn.GetTensorInfo();
967
968 // Outputs
969 const armnn::TensorInfo& scratchBufferInfo = GetTensorInfoForOperand(*scratchBuffer);
970 const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
971 const armnn::TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
972 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
973
974 // Basic parameters
975 const armnn::TensorInfo& inputToForgetWeights = params.m_InputToForgetWeights->GetInfo();
976 const armnn::TensorInfo& inputToCellWeights = params.m_InputToCellWeights->GetInfo();
977 const armnn::TensorInfo& inputToOutputWeights = params.m_InputToOutputWeights->GetInfo();
978 const armnn::TensorInfo& recurrentToForgetWeights = params.m_RecurrentToForgetWeights->GetInfo();
979 const armnn::TensorInfo& recurrentToCellWeights = params.m_RecurrentToCellWeights->GetInfo();
980 const armnn::TensorInfo& recurrentToOutputWeights = params.m_RecurrentToOutputWeights->GetInfo();
981 const armnn::TensorInfo& forgetGateBias = params.m_ForgetGateBias->GetInfo();
982 const armnn::TensorInfo& cellBias = params.m_CellBias->GetInfo();
983 const armnn::TensorInfo& outputGateBias = params.m_OutputGateBias->GetInfo();
984
985 //Optional parameters
986 const armnn::TensorInfo* inputToInputWeights = nullptr;
987 const armnn::TensorInfo* recurrentToInputWeights = nullptr;
988 const armnn::TensorInfo* cellToInputWeights = nullptr;
989 const armnn::TensorInfo* inputGateBias = nullptr;
990 const armnn::TensorInfo* projectionWeights = nullptr;
991 const armnn::TensorInfo* projectionBias = nullptr;
992 const armnn::TensorInfo* cellToForgetWeights = nullptr;
993 const armnn::TensorInfo* cellToOutputWeights = nullptr;
994
995 if(!desc.m_CifgEnabled)
996 {
997 inputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
998 recurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
999 if (params.m_CellToInputWeights != nullptr)
1000 {
1001 cellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
1002 }
1003 inputGateBias = &(params.m_InputGateBias->GetInfo());
1004 }
1005
1006 if(desc.m_ProjectionEnabled)
1007 {
1008 projectionWeights = &(params.m_ProjectionWeights->GetInfo());
1009 if (params.m_ProjectionBias != nullptr)
1010 {
1011 projectionBias = &(params.m_ProjectionBias->GetInfo());
1012 }
1013 }
1014
1015 if(desc.m_PeepholeEnabled)
1016 {
1017 cellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
1018 cellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
1019 }
1020
1021 if (!IsLayerSupported(__func__,
1022 armnn::IsLstmSupported,
1023 data.m_Compute,
1024 inputInfo,
1025 outputStateInInfo,
1026 cellStateInInfo,
1027 scratchBufferInfo,
1028 outputStateOutInfo,
1029 cellStateOutInfo,
1030 outputInfo,
1031 desc,
1032 inputToForgetWeights,
1033 inputToCellWeights,
1034 inputToOutputWeights,
1035 recurrentToForgetWeights,
1036 recurrentToCellWeights,
1037 recurrentToOutputWeights,
1038 forgetGateBias,
1039 cellBias,
1040 outputGateBias,
1041 inputToInputWeights,
1042 recurrentToInputWeights,
1043 cellToInputWeights,
1044 inputGateBias,
1045 projectionWeights,
1046 projectionBias,
1047 cellToForgetWeights,
1048 cellToOutputWeights))
1049 {
1050 return false;
1051 }
1052
1053 // Add the layer
1054 armnn::IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");
1055
1056 input.Connect(layer->GetInputSlot(0));
1057 outputStateIn.Connect(layer->GetInputSlot(1));
1058 cellStateIn.Connect(layer->GetInputSlot(2));
1059
1060 return (SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
1061 SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data) &&
1062 SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data) &&
1063 SetupAndTrackLayerOutputSlot(operation, 3, *layer, 3, model, data));
1064}
1065
1066bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
1067{
1068 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1069 if (!input.IsValid())
1070 {
1071 return Fail("%s: Operation has invalid inputs", __func__);
1072 }
1073
1074 const Operand* output = GetOutputOperand(operation, 0, model);
1075 if (!output)
1076 {
1077 return Fail("%s: Could not read output 0", __func__);
1078 }
1079
1080 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1081 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1082
Matteo Martincigh58f71092018-09-25 15:58:52 +01001083 armnn::L2NormalizationDescriptor desc;
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +01001084 desc.m_DataLayout = armnn::DataLayout::NHWC;
Matteo Martincigh58f71092018-09-25 15:58:52 +01001085
arovir01b0717b52018-09-05 17:03:25 +01001086 if (!IsLayerSupported(__func__,
1087 armnn::IsL2NormalizationSupported,
1088 data.m_Compute,
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +01001089 inputInfo,
1090 outputInfo,
Matteo Martincigh58f71092018-09-25 15:58:52 +01001091 desc))
arovir01b0717b52018-09-05 17:03:25 +01001092 {
1093 return false;
1094 }
1095
Matteo Martincigh58f71092018-09-25 15:58:52 +01001096 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
arovir01b0717b52018-09-05 17:03:25 +01001097 assert(layer != nullptr);
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +01001098 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +01001099
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +01001100 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001101}
1102
// Converts an L2_POOL_2D operation by delegating to the shared Pooling2d
// conversion helper with the L2 pooling algorithm selected.
bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
}
1107
// Converts a MAX_POOL_2D operation by delegating to the shared Pooling2d
// conversion helper with the Max pooling algorithm selected.
bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
}
1112
1113bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
1114{
1115 LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
1116 LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
1117
1118 if (!input0.IsValid() || !input1.IsValid())
1119 {
1120 return Fail("%s: Operation has invalid inputs", __func__);
1121 }
1122
1123 // The FuseActivation parameter is always the input index 2
1124 // and it should be optional
1125 ActivationFn activationFunction;
1126 if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
1127 {
1128 return Fail("%s: Operation has invalid inputs", __func__);
1129 }
1130
1131 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
1132
1133 if (outputOperand == nullptr)
1134 {
1135 return false;
1136 }
1137
1138 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
1139
1140 if (!IsLayerSupported(__func__,
1141 armnn::IsMultiplicationSupported,
1142 data.m_Compute,
1143 input0.GetTensorInfo(),
1144 input1.GetTensorInfo(),
1145 outInfo))
1146 {
1147 return false;
1148 }
1149
1150 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
1151 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
1152
1153 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
1154 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
1155
1156 if (endLayer != nullptr)
1157 {
1158 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
1159 return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
1160 }
1161 else
1162 {
1163 return Fail("%s: ProcessActivation failed", __func__);
1164 }
1165}
1166
1167bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
1168{
1169 armnn::ActivationDescriptor desc;
1170 desc.m_Function = armnn::ActivationFunction::ReLu;
1171
1172 return ConvertToActivation(operation, __func__, desc, model, data);
1173}
1174
1175bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
1176{
1177 armnn::ActivationDescriptor desc;
1178 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1179 desc.m_A = 1.0f;
1180 desc.m_B = -1.0f;
1181
1182 return ConvertToActivation(operation, __func__, desc, model, data);
1183}
1184
1185bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
1186{
1187 armnn::ActivationDescriptor desc;
1188 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1189 desc.m_A = 6.0f;
1190
1191 return ConvertToActivation(operation, __func__, desc, model, data);
1192}
1193
1194bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
1195{
1196 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1197 if (!input.IsValid())
1198 {
1199 return Fail("%s: Operation has invalid inputs", __func__);
1200 }
1201
1202 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
1203 if (!outputOperand)
1204 {
1205 return Fail("%s: Operation has no outputs", __func__);
1206 }
1207
1208 const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
1209
1210 armnn::SoftmaxDescriptor desc;
1211 if (!GetInputFloat32(operation, 1, desc.m_Beta, model, data))
1212 {
1213 return Fail("%s: Operation has invalid inputs", __func__);
1214 }
1215
1216 if (!IsLayerSupported(__func__,
1217 armnn::IsSoftmaxSupported,
1218 data.m_Compute,
1219 input.GetTensorInfo(),
1220 outInfo,
1221 desc))
1222 {
1223 return false;
1224 }
1225
1226 armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
1227 assert(layer != nullptr);
1228 input.Connect(layer->GetInputSlot(0));
1229
1230 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
1231}
1232
1233bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
1234{
1235 armnn::ActivationDescriptor desc;
1236 desc.m_Function = armnn::ActivationFunction::TanH;
1237 desc.m_A = 1.0f; // android nn does not support tanH parameters
1238 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1239
1240 return ConvertToActivation(operation, __func__, desc, model, data);
1241}
1242
1243bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
1244{
1245 const Operand* inputOperand = GetInputOperand(operation, 0, model);
1246 const Operand* requestedShapeOperand = GetInputOperand(operation, 1, model);
1247 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
1248
1249 if (inputOperand == nullptr
1250 || requestedShapeOperand == nullptr
1251 || outputOperand == nullptr)
1252 {
1253 return Fail("%s: Operation has invalid inputs", __func__);
1254 }
1255
1256
1257 if (requestedShapeOperand->dimensions.size() != 1)
1258 {
1259 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
1260 __func__, requestedShapeOperand->dimensions.size());
1261 }
1262
1263 std::vector<int32_t> targetDimensions;
1264 if (!GetTensorInt32Values(*requestedShapeOperand, targetDimensions, model, data))
1265 {
1266 return Fail("%s: Could not read values of input 1", __func__);
1267 }
1268
1269 const Shape inputOperandShape = GetOperandShape(*inputOperand);
1270
1271 Shape requestedShape;
1272 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
1273 // function that resolves these values into a fully specified tensor shape.
1274 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
1275 {
1276 return Fail("%s: Failed to resolve the requested shape", __func__);
1277 }
1278
1279 const Shape outputOperandShape = GetOperandShape(*outputOperand);
1280 if (!SameShape(requestedShape, outputOperandShape))
1281 {
1282 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
1283 }
1284
1285 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1286 if (!input.IsValid())
1287 {
1288 return Fail("%s: Could not read input 0", __func__);
1289 }
1290
1291 if (!IsLayerSupported(__func__,
1292 armnn::IsReshapeSupported,
1293 data.m_Compute,
1294 input.GetTensorInfo()))
1295 {
1296 return false;
1297 }
1298
1299
1300 armnn::ReshapeDescriptor reshapeDescriptor;
1301 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
1302 requestedShape.dimensions.data());
1303
1304 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
1305 assert(layer != nullptr);
1306 input.Connect(layer->GetInputSlot(0));
1307
1308 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
1309}
1310
1311bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data)
1312{
1313 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1314 if (!input.IsValid())
1315 {
1316 return Fail("%s: Could not read input 0", __func__);
1317 }
1318
1319 const Operand* output = GetOutputOperand(operation, 0, model);
1320 if (!output)
1321 {
1322 return Fail("%s: Could not read output 0", __func__);
1323 }
1324
1325 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1326 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1327
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001328 armnn::ResizeBilinearDescriptor desc;
1329 desc.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +01001330
1331 if (!IsLayerSupported(__func__,
1332 armnn::IsResizeBilinearSupported,
1333 data.m_Compute,
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001334 inputInfo))
arovir01b0717b52018-09-05 17:03:25 +01001335 {
1336 return false;
1337 }
1338
arovir01b0717b52018-09-05 17:03:25 +01001339
1340 if ( !GetInputScalar(operation, 1, OperandType::INT32, desc.m_TargetHeight, model, data)
1341 || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_TargetWidth, model, data))
1342 {
1343 return Fail("%s: Operation has invalid inputs", __func__);
1344 }
1345
1346 armnn::IConnectableLayer* layer = data.m_Network->AddResizeBilinearLayer(desc);
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001347
arovir01b0717b52018-09-05 17:03:25 +01001348 assert(layer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +01001349
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001350 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1351 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +01001352
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001353 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001354
1355}
1356
1357} // namespace hal_1_0
Matteo Martincigh58f71092018-09-25 15:58:52 +01001358} // namespace armnn_driver