//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "HalPolicy.hpp"

#include <armnn/Optional.hpp>

#include "FullyConnected.hpp"

namespace armnn_driver
{
namespace hal_1_0
{

bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
{
    switch (operation.type)
    {
        case V1_0::OperationType::ADD:
            return ConvertAdd(operation, model, data);
        case V1_0::OperationType::AVERAGE_POOL_2D:
            return ConvertAveragePool2d(operation, model, data);
        case V1_0::OperationType::CONCATENATION:
            return ConvertConcatenation(operation, model, data);
        case V1_0::OperationType::CONV_2D:
            return ValidateConv2dParameters(operation)
                   && ConvertConv2d<Operand, OperandType, Operation, Model>(operation, model, data);
        case V1_0::OperationType::DEPTHWISE_CONV_2D:
            return ValidateDepthwiseConv2dParameters(operation)
                   && ConvertDepthwiseConv2d<Operand, OperandType, Operation, Model>(operation, model, data);
        case V1_0::OperationType::DEQUANTIZE:
            return ConvertDequantize(operation, model, data);
        case V1_0::OperationType::FLOOR:
            return ConvertFloor(operation, model, data);
        case V1_0::OperationType::FULLY_CONNECTED:
            return ConvertFullyConnected(operation, model, data);
        case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
            return ConvertLocalResponseNormalization(operation, model, data);
        case V1_0::OperationType::LOGISTIC:
            return ConvertLogistic(operation, model, data);
        case V1_0::OperationType::LSTM:
            return ConvertLstm(operation, model, data);
        case V1_0::OperationType::L2_NORMALIZATION:
            return ConvertL2Normalization(operation, model, data);
        case V1_0::OperationType::L2_POOL_2D:
            return ConvertL2Pool2d(operation, model, data);
        case V1_0::OperationType::MAX_POOL_2D:
            return ConvertMaxPool2d(operation, model, data);
        case V1_0::OperationType::MUL:
            return ConvertMul(operation, model, data);
        case V1_0::OperationType::RELU:
            return ConvertReLu(operation, model, data);
        case V1_0::OperationType::RELU1:
            return ConvertReLu1(operation, model, data);
        case V1_0::OperationType::RELU6:
            return ConvertReLu6(operation, model, data);
        case V1_0::OperationType::SOFTMAX:
            return ConvertSoftmax(operation, model, data);
        case V1_0::OperationType::TANH:
            return ConvertTanH(operation, model, data);
        case V1_0::OperationType::RESHAPE:
            return ConvertReshape(operation, model, data);
        case V1_0::OperationType::RESIZE_BILINEAR:
            return ConvertResizeBilinear(operation, model, data);
        default:
            return Fail("%s: Operation type %s not supported in ArmnnDriver",
                        __func__, toString(operation.type).c_str());
    }
}

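// CONV_2D in the V1.0 HAL has either 10 inputs (explicit padding) or 7 inputs (implicit padding).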
bool HalPolicy::ValidateConv2dParameters(const Operation& operation)
{
    if (operation.inputs.size() != 10 && operation.inputs.size() != 7)
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }
    return true;
}

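// DEPTHWISE_CONV_2D in the V1.0 HAL has either 11 inputs (explicit padding) or 8 inputs (implicit padding).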
bool HalPolicy::ValidateDepthwiseConv2dParameters(const Operation& operation)
{
    if (operation.inputs.size() != 11 && operation.inputs.size() != 8)
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }
    return true;
}

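// Converts an NNAPI ADD operation into an Arm NN addition layer, with an optional fused activation
// appended via ProcessActivation.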
bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input0 = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<Operand>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2
    // and it is optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<Operand, OperandType>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<Operand>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsAdditionSupported,
                                       data.m_Backends,
                                       input0.GetTensorInfo(),
                                       input1.GetTensorInfo(),
                                       outInfo))
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);

    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();

    if (endLayer != nullptr)
    {
        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
        return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}

bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    return ConvertPooling2d<Operand, OperandType>(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
}

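// Converts an NNAPI CONCATENATION operation. Lower-rank inputs are reshaped to 3D and, where the
// concat axis requires it, the inputs are permuted so the Arm NN concat layer can operate on a
// supported dimension.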
bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
{
    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar<Operand, OperandType>(operation, numInputTensors, OperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand<Operand>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // Handle negative concat dims along the lines of TensorFlow as described here:
    // https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const Operand* const operand = GetInputOperand<Operand>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle<Operand>(operation, i, model, data);

        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
            );

            // Point to the reshape operation rather than the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    BOOST_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check whether a permutation is required and get the pair of permutations needed for the concatenation.
    // A permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
        std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute = CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // This is a no-op for identity swizzles; otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    SwizzleInputs(*data.m_Network, inputHandles, inputShapes, permutationPair.first);

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor =
            armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (const armnn::Exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
        [](const LayerInputHandle& h) -> const armnn::TensorInfo* { return &h.GetTensorInfo(); });
    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsConcatSupported,
                                       data.m_Backends,
                                       inputTensorInfos,
                                       outputInfo,
                                       concatDescriptor))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
                                                                   layer->GetOutputSlot(0),
                                                                   permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        layer = &AddReshapeLayer(
                *data.m_Network,
                layer->GetOutputSlot(0),
                afterConcatInfo
        );
    }

    return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid input", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand<Operand>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsDequantizeSupported,
                                       data.m_Backends,
                                       input.GetTensorInfo(),
                                       GetTensorInfoForOperand(*outputOperand)))
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand<Operand>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsFloorSupported,
                                       data.m_Backends,
                                       input.GetTensorInfo(),
                                       GetTensorInfoForOperand(*outputOperand)))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
}

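// Converts an NNAPI FULLY_CONNECTED operation. The input is flattened to match the 2D weights,
// constant weights and bias are required, and an optional fused activation is appended via ProcessActivation.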
bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<Operand>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 1, model, data); // 2D
    ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 2, model, data);    // 1D

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    // Ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<Operand, OperandType>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsFullyConnectedSupported,
                                       data.m_Backends,
                                       reshapedInfo,
                                       outputInfo,
                                       weights.GetInfo(),
                                       bias.GetInfo(),
                                       desc))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}

bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
                                                  const Model& model,
                                                  ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<Operand>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::NormalizationDescriptor descriptor;

    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
    descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;

    if (!input.IsValid() ||
        !GetInputScalar<Operand, OperandType>(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
        !GetInputFloat32<Operand, OperandType>(operation, 2, descriptor.m_K, model, data) ||
        !GetInputFloat32<Operand, OperandType>(operation, 3, descriptor.m_Alpha, model, data) ||
        !GetInputFloat32<Operand, OperandType>(operation, 4, descriptor.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // ArmNN expects normSize to be the full size of the normalization
    // window rather than the radius as in AndroidNN.
    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsNormalizationSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       descriptor))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::Sigmoid;

    return ConvertToActivation<Operand>(operation, __func__, desc, model, data);
}

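// Converts an NNAPI LSTM operation. Inputs follow the ANEURALNETWORKS_LSTM operand order; the optional
// weight groups determine whether CIFG, peephole and projection are enabled on the Arm NN LSTM layer.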
bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
{
    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0: input", __func__);
    }
    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    LayerInputHandle outputStateIn = ConvertToLayerInputHandle<Operand>(operation, 18, model, data);
    if (!outputStateIn.IsValid())
    {
        return Fail("%s: Could not read input 18: outputStateIn", __func__);
    }
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    LayerInputHandle cellStateIn = ConvertToLayerInputHandle<Operand>(operation, 19, model, data);
    if (!cellStateIn.IsValid())
    {
        return Fail("%s: Could not read input 19: cellStateIn", __func__);
    }

    // Get the mandatory input tensors:
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<Operand>(operation, 2, model, data);
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
    const ConstTensorPin inputToCellWeightsPin =
        ConvertOperationInputToConstTensorPin<Operand>(operation, 3, model, data);
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<Operand>(operation, 4, model, data);
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<Operand>(operation, 6, model, data);
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToCellWeightsPin =
        ConvertOperationInputToConstTensorPin<Operand>(operation, 7, model, data);
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<Operand>(operation, 8, model, data);
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin forgetGateBiasPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 13, model, data);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellBiasPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 14, model, data);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin outputGateBiasPin = ConvertOperationInputToConstTensorPin<Operand>(operation, 15, model, data);

    if (!inputToForgetWeightsPin.IsValid() ||
        !inputToCellWeightsPin.IsValid() ||
        !inputToOutputWeightsPin.IsValid() ||
        !recurrentToForgetWeightsPin.IsValid() ||
        !recurrentToCellWeightsPin.IsValid() ||
        !recurrentToOutputWeightsPin.IsValid() ||
        !forgetGateBiasPin.IsValid() ||
        !cellBiasPin.IsValid() ||
        !outputGateBiasPin.IsValid())
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the optional input tensors:
    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    const ConstTensorPin inputToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<Operand>(operation, 1, model, data, g_DontPermute, nullptr, true);
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    const ConstTensorPin recurrentToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<Operand>(operation, 5, model, data, g_DontPermute, nullptr, true);
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<Operand>(operation, 9, model, data, g_DontPermute, nullptr, true);
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<Operand>(operation, 10, model, data, g_DontPermute, nullptr, true);
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<Operand>(operation, 11, model, data, g_DontPermute, nullptr, true);
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin inputGateBiasPin =
        ConvertOperationInputToConstTensorPin<Operand>(operation, 12, model, data, g_DontPermute, nullptr, true);
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    const ConstTensorPin projectionWeightsPin =
        ConvertOperationInputToConstTensorPin<Operand>(operation, 16, model, data, g_DontPermute, nullptr, true);
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    const ConstTensorPin projectionBiasPin =
        ConvertOperationInputToConstTensorPin<Operand>(operation, 17, model, data, g_DontPermute, nullptr, true);

    if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
        (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
        (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
        (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
        (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
        (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
        (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
        (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the mandatory input scalars (actually 1-D tensors of size 1):
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    ActivationFn activation;
    float cellClip;
    float projClip;
    if (!GetInputActivationFunctionFromTensor<Operand, OperandType>(operation, 20, activation, model, data) ||
        !GetInputScalar<Operand, OperandType>(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
        !GetInputScalar<Operand, OperandType>(operation, 22, OperandType::FLOAT32, projClip, model, data))
    {
        return Fail("%s: Operation has invalid scalar inputs", __func__);
    }

    // Outputs:
    // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4] with
    //     CIFG, or [batch_size, num_units * 3] without CIFG.
    const Operand* scratchBuffer = GetOutputOperand<Operand>(operation, 0, model);
    if (!scratchBuffer)
    {
        return Fail("%s: Could not read output 0: scratchBuffer", __func__);
    }
    // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    const Operand* outputStateOut = GetOutputOperand<Operand>(operation, 1, model);
    if (!outputStateOut)
    {
        return Fail("%s: Could not read output 1: outputStateOut", __func__);
    }
    // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    const Operand* cellStateOut = GetOutputOperand<Operand>(operation, 2, model);
    if (!cellStateOut)
    {
        return Fail("%s: Could not read output 2: cellStateOut", __func__);
    }
    // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //     effectively the same as the current “output state (out)” value.
    const Operand* output = GetOutputOperand<Operand>(operation, 3, model);
    if (!output)
    {
        return Fail("%s: Could not read output 3: output", __func__);
    }

    // Set the params structure for the AddLstmLayer call
    armnn::LstmInputParams params;
    params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
    params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
    params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
    params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
    params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
    params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
    params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
    params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
    params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
    params.m_CellBias = cellBiasPin.GetConstTensorPtr();
    params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
    params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
    params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();

    // Set the layer descriptor
    armnn::LstmDescriptor desc;
    desc.m_ActivationFunc = activation;
    desc.m_ClippingThresCell = cellClip;
    desc.m_ClippingThresProj = projClip;
    desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
                          params.m_RecurrentToInputWeights == nullptr ||
                          params.m_InputGateBias == nullptr);
    desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
                              params.m_CellToOutputWeights != nullptr);
    desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);

    // Validate the optional input groups
    if (desc.m_CifgEnabled &&
        (params.m_InputToInputWeights != nullptr ||
         params.m_RecurrentToInputWeights != nullptr ||
         params.m_InputGateBias != nullptr))
    {
        return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
                    " and input gate bias must be provided", __func__);
    }

    if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
    {
        return Fail("%s: projection bias should not be provided without projection weights", __func__);
    }

    if (desc.m_PeepholeEnabled &&
        (params.m_CellToForgetWeights == nullptr ||
         params.m_CellToOutputWeights == nullptr ||
         (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
    {
        return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
                    " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
    }

    // Check if the layer is supported
    // Inputs
    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
    const armnn::TensorInfo& cellStateInInfo = cellStateIn.GetTensorInfo();

    // Outputs
    const armnn::TensorInfo& scratchBufferInfo = GetTensorInfoForOperand(*scratchBuffer);
    const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
    const armnn::TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Basic parameters
    const armnn::TensorInfo& inputToForgetWeights = params.m_InputToForgetWeights->GetInfo();
    const armnn::TensorInfo& inputToCellWeights = params.m_InputToCellWeights->GetInfo();
    const armnn::TensorInfo& inputToOutputWeights = params.m_InputToOutputWeights->GetInfo();
    const armnn::TensorInfo& recurrentToForgetWeights = params.m_RecurrentToForgetWeights->GetInfo();
    const armnn::TensorInfo& recurrentToCellWeights = params.m_RecurrentToCellWeights->GetInfo();
    const armnn::TensorInfo& recurrentToOutputWeights = params.m_RecurrentToOutputWeights->GetInfo();
    const armnn::TensorInfo& forgetGateBias = params.m_ForgetGateBias->GetInfo();
    const armnn::TensorInfo& cellBias = params.m_CellBias->GetInfo();
    const armnn::TensorInfo& outputGateBias = params.m_OutputGateBias->GetInfo();

    // Optional parameters
    const armnn::TensorInfo* inputToInputWeights = nullptr;
    const armnn::TensorInfo* recurrentToInputWeights = nullptr;
    const armnn::TensorInfo* cellToInputWeights = nullptr;
    const armnn::TensorInfo* inputGateBias = nullptr;
    const armnn::TensorInfo* projectionWeights = nullptr;
    const armnn::TensorInfo* projectionBias = nullptr;
    const armnn::TensorInfo* cellToForgetWeights = nullptr;
    const armnn::TensorInfo* cellToOutputWeights = nullptr;

    if (!desc.m_CifgEnabled)
    {
        inputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
        recurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
        if (params.m_CellToInputWeights != nullptr)
        {
            cellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
        }
        inputGateBias = &(params.m_InputGateBias->GetInfo());
    }

    if (desc.m_ProjectionEnabled)
    {
        projectionWeights = &(params.m_ProjectionWeights->GetInfo());
        if (params.m_ProjectionBias != nullptr)
        {
            projectionBias = &(params.m_ProjectionBias->GetInfo());
        }
    }

    if (desc.m_PeepholeEnabled)
    {
        cellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
        cellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
    }

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsLstmSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputStateInInfo,
                                       cellStateInInfo,
                                       scratchBufferInfo,
                                       outputStateOutInfo,
                                       cellStateOutInfo,
                                       outputInfo,
                                       desc,
                                       inputToForgetWeights,
                                       inputToCellWeights,
                                       inputToOutputWeights,
                                       recurrentToForgetWeights,
                                       recurrentToCellWeights,
                                       recurrentToOutputWeights,
                                       forgetGateBias,
                                       cellBias,
                                       outputGateBias,
                                       inputToInputWeights,
                                       recurrentToInputWeights,
                                       cellToInputWeights,
                                       inputGateBias,
                                       projectionWeights,
                                       projectionBias,
                                       cellToForgetWeights,
                                       cellToOutputWeights))
    {
        return false;
    }

    // Add the layer
    armnn::IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");

    input.Connect(layer->GetInputSlot(0));
    outputStateIn.Connect(layer->GetInputSlot(1));
    cellStateIn.Connect(layer->GetInputSlot(2));

    return (SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, 0, model, data) &&
            SetupAndTrackLayerOutputSlot<Operand>(operation, 1, *layer, 1, model, data) &&
            SetupAndTrackLayerOutputSlot<Operand>(operation, 2, *layer, 2, model, data) &&
            SetupAndTrackLayerOutputSlot<Operand>(operation, 3, *layer, 3, model, data));
}

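// Converts an NNAPI L2_NORMALIZATION operation, assuming NHWC data layout.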
bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<Operand>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::L2NormalizationDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsL2NormalizationSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    return ConvertPooling2d<Operand, OperandType>(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
}

bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    return ConvertPooling2d<Operand, OperandType>(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
}

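// Converts an NNAPI MUL operation into an Arm NN multiplication layer, with an optional fused activation
// appended via ProcessActivation.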
bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input0 = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<Operand>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2
    // and it is optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<Operand, OperandType>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<Operand>(operation, 0, model);

    if (outputOperand == nullptr)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsMultiplicationSupported,
                                       data.m_Backends,
                                       input0.GetTensorInfo(),
                                       input1.GetTensorInfo(),
                                       outInfo))
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);

    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();

    if (endLayer != nullptr)
    {
        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
        return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}

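// RELU, RELU1 and RELU6 all map onto the Arm NN activation layer; RELU1 and RELU6 use BoundedReLu
// with bounds (m_A, m_B) of (1.0f, -1.0f) and (6.0f, 0.0f by default) respectively.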
bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::ReLu;

    return ConvertToActivation<Operand>(operation, __func__, desc, model, data);
}

bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
    desc.m_A = 1.0f;
    desc.m_B = -1.0f;

    return ConvertToActivation<Operand>(operation, __func__, desc, model, data);
}

bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
    desc.m_A = 6.0f;

    return ConvertToActivation<Operand>(operation, __func__, desc, model, data);
}

bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<Operand>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);

    armnn::SoftmaxDescriptor desc;
    if (!GetInputFloat32<Operand, OperandType>(operation, 1, desc.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsSoftmaxSupported,
                                       data.m_Backends,
                                       input.GetTensorInfo(),
                                       outInfo,
                                       desc))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::TanH;
    desc.m_A = 1.0f; // Android NN does not support TanH parameters
    desc.m_B = 1.0f; // Set to 1.0f for unity scaling

    return ConvertToActivation<Operand>(operation, __func__, desc, model, data);
}

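// Converts an NNAPI RESHAPE operation. Input 1 is a 1-D INT32 tensor holding the requested shape;
// reshapePrepare() resolves any wildcard (-1) dimensions before the Arm NN reshape layer is added.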
bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
{
    const Operand* inputOperand = GetInputOperand<Operand>(operation, 0, model);
    const Operand* requestedShapeOperand = GetInputOperand<Operand>(operation, 1, model);
    const Operand* outputOperand = GetOutputOperand<Operand>(operation, 0, model);

    if (inputOperand == nullptr
        || requestedShapeOperand == nullptr
        || outputOperand == nullptr)
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (requestedShapeOperand->dimensions.size() != 1)
    {
        return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
                    __func__, requestedShapeOperand->dimensions.size());
    }

    std::vector<int32_t> targetDimensions;
    if (!GetTensorInt32Values<Operand, OperandType>(*requestedShapeOperand, targetDimensions, model, data))
    {
        return Fail("%s: Could not read values of input 1", __func__);
    }

    const Shape inputOperandShape = GetOperandShape(*inputOperand);

    Shape requestedShape;
    // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
    // function that resolves these values into a fully specified tensor shape.
    if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
    {
        return Fail("%s: Failed to resolve the requested shape", __func__);
    }

    const Shape outputOperandShape = GetOperandShape(*outputOperand);
    if (!SameShape(requestedShape, outputOperandShape))
    {
        return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
    }

    LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
                                                         requestedShape.dimensions.data());

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsReshapeSupported,
                                       data.m_Backends,
                                       input.GetTensorInfo(),
                                       reshapeDescriptor))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
}

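// Converts an NNAPI RESIZE_BILINEAR operation. Inputs 1 and 2 supply the target height and width;
// NHWC data layout is assumed.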
bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<Operand>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand<Operand>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::ResizeBilinearDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsResizeBilinearSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo))
    {
        return false;
    }

    if (!GetInputScalar<Operand, OperandType>(operation, 1, OperandType::INT32, desc.m_TargetHeight, model, data) ||
        !GetInputScalar<Operand, OperandType>(operation, 2, OperandType::INT32, desc.m_TargetWidth, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddResizeBilinearLayer(desc);
    assert(layer != nullptr);

    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<Operand>(operation, 0, *layer, model, data);
}

} // namespace hal_1_0
} // namespace armnn_driver