blob: b6074b423afaf1486b4bfadc82ea53aa5c9fdd1e [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "HalPolicy.hpp"
7
Matthew Benthamf61c2702019-04-23 16:43:27 +01008#include <armnn/Optional.hpp>
9
10#include "FullyConnected.hpp"
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +010011#include "Utils.hpp"
arovir015602b192018-10-04 16:15:02 +010012
arovir01b0717b52018-09-05 17:03:25 +010013namespace armnn_driver
14{
15namespace hal_1_0
16{
17
18bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
19{
20 switch (operation.type)
21 {
22 case V1_0::OperationType::ADD:
23 return ConvertAdd(operation, model, data);
24 case V1_0::OperationType::AVERAGE_POOL_2D:
25 return ConvertAveragePool2d(operation, model, data);
26 case V1_0::OperationType::CONCATENATION:
27 return ConvertConcatenation(operation, model, data);
28 case V1_0::OperationType::CONV_2D:
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +010029 return ConvertConv2d(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +010030 case V1_0::OperationType::DEPTHWISE_CONV_2D:
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +010031 return ConvertDepthwiseConv2d(operation, model, data);
David Monahanacf479a2019-05-29 14:27:04 +010032 case V1_0::OperationType::DEQUANTIZE:
33 return ConvertDequantize(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +010034 case V1_0::OperationType::FLOOR:
35 return ConvertFloor(operation, model, data);
36 case V1_0::OperationType::FULLY_CONNECTED:
37 return ConvertFullyConnected(operation, model, data);
38 case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
39 return ConvertLocalResponseNormalization(operation, model, data);
40 case V1_0::OperationType::LOGISTIC:
41 return ConvertLogistic(operation, model, data);
42 case V1_0::OperationType::LSTM:
43 return ConvertLstm(operation, model, data);
44 case V1_0::OperationType::L2_NORMALIZATION:
45 return ConvertL2Normalization(operation, model, data);
46 case V1_0::OperationType::L2_POOL_2D:
47 return ConvertL2Pool2d(operation, model, data);
48 case V1_0::OperationType::MAX_POOL_2D:
49 return ConvertMaxPool2d(operation, model, data);
50 case V1_0::OperationType::MUL:
51 return ConvertMul(operation, model, data);
52 case V1_0::OperationType::RELU:
53 return ConvertReLu(operation, model, data);
54 case V1_0::OperationType::RELU1:
55 return ConvertReLu1(operation, model, data);
56 case V1_0::OperationType::RELU6:
57 return ConvertReLu6(operation, model, data);
58 case V1_0::OperationType::SOFTMAX:
59 return ConvertSoftmax(operation, model, data);
Keith Davisa6bc52f2019-06-26 09:39:49 +010060 case V1_0::OperationType::SPACE_TO_DEPTH:
61 return ConvertSpaceToDepth(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +010062 case V1_0::OperationType::TANH:
63 return ConvertTanH(operation, model, data);
64 case V1_0::OperationType::RESHAPE:
65 return ConvertReshape(operation, model, data);
66 case V1_0::OperationType::RESIZE_BILINEAR:
67 return ConvertResizeBilinear(operation, model, data);
68 default:
69 return Fail("%s: Operation type %s not supported in ArmnnDriver",
70 __func__, toString(operation.type).c_str());
71 }
72}
73
Mike Kellyb5fdf382019-06-11 16:35:25 +010074bool HalPolicy::ValidateConv2dParameters(const Operation &operation)
75{
76 if (operation.inputs.size() != 10 && operation.inputs.size() != 7)
77 {
78 return Fail("%s: Unsupported number of operation inputs", __func__);
79 }
80 return true;
81}
82
83bool HalPolicy::ValidateDepthwiseConv2dParameters(const Operation &operation)
84{
85 if (operation.inputs.size() != 11 && operation.inputs.size() != 8)
86 {
87 return Fail("%s: Unsupported number of operation inputs", __func__);
88 }
89 return true;
90}
91
arovir01b0717b52018-09-05 17:03:25 +010092bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
93{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +010094 ALOGV("hal_1_0::HalPolicy::ConvertAdd()");
95
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +010096 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
97 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 1, model, data);
arovir01b0717b52018-09-05 17:03:25 +010098
99 if (!input0.IsValid() || !input1.IsValid())
100 {
101 return Fail("%s: Operation has invalid inputs", __func__);
102 }
103
104 // The FuseActivation parameter is always the input index 2
105 // and it should be optional
106 ActivationFn activationFunction;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100107 if (!GetOptionalInputActivation<hal_1_0::HalPolicy>(operation, 2, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100108 {
109 return Fail("%s: Operation has invalid inputs", __func__);
110 }
111
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100112 const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100113 if (!outputOperand)
114 {
115 return false;
116 }
117
Aron Virginas-Tar4b862132019-07-24 16:26:57 +0100118 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
119 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
120
121 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
122 if (IsDynamicTensor(outputInfo))
123 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100124 return Fail("%s: Dynamic output tensors are not supported", __func__);
Aron Virginas-Tar4b862132019-07-24 16:26:57 +0100125 }
arovir01b0717b52018-09-05 17:03:25 +0100126
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100127 bool isSupported = false;
128 FORWARD_LAYER_SUPPORT_FUNC(__func__,
129 IsAdditionSupported,
130 data.m_Backends,
131 isSupported,
Aron Virginas-Tar4b862132019-07-24 16:26:57 +0100132 inputInfo0,
133 inputInfo1,
134 outputInfo);
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100135 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +0100136 {
137 return false;
138 }
139
140 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
Aron Virginas-Tar4b862132019-07-24 16:26:57 +0100141 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
arovir01b0717b52018-09-05 17:03:25 +0100142
143 if (endLayer != nullptr)
144 {
145 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100146 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100147 }
148 else
149 {
150 return Fail("%s: ProcessActivation failed", __func__);
151 }
152}
153
154bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
155{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100156 ALOGV("hal_1_0::HalPolicy::ConvertAveragePool2d()");
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100157 return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100158}
159
160bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
161{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100162 ALOGV("hal_1_0::HalPolicy::ConvertConcatenation()");
163
arovir01b0717b52018-09-05 17:03:25 +0100164 // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
165 if (operation.inputs.size() <= 1)
166 {
167 return Fail("%s: Operation has insufficient arguments", __func__);
168 }
169
170 // Get inputs and outputs
171 const std::size_t numInputTensors = operation.inputs.size() - 1;
172
173 int32_t concatDim;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100174 if (!GetInputScalar<hal_1_0::HalPolicy>(operation, numInputTensors, OperandType::INT32, concatDim, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100175 {
176 return Fail("%s: Operation has invalid inputs", __func__);
177 }
178
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100179 const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100180 if (!outputOperand)
181 {
182 return Fail("%s: Operation has no outputs", __func__);
183 }
184
185
186 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
187 armnn::TensorShape outputShape = outputInfo.GetShape();
188
189 //
190 // handle negative concat dims along the lines of tensorflow as described here:
191 // https://www.tensorflow.org/api_docs/python/tf/concat
192 // "negative axis refers to axis + rank(values)-th dimension"
193 //
194 if (concatDim < 0)
195 {
196 concatDim += outputShape.GetNumDimensions();
197 }
198
199 if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
200 {
201 return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
202 }
203
204 std::vector<LayerInputHandle> inputHandles;
205 std::vector<armnn::TensorShape> inputShapes;
206
207 inputHandles.reserve(numInputTensors);
208 inputShapes.reserve(numInputTensors);
209
210 bool inputsHaveBeenReshaped = false;
211 unsigned int tensorDimensionsAdded = 0;
212
213 for (uint32_t i = 0; i < numInputTensors; ++i)
214 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100215 const Operand* const operand = GetInputOperand<hal_1_0::HalPolicy>(operation, i, model);
arovir01b0717b52018-09-05 17:03:25 +0100216 if (!operand)
217 {
218 return Fail("%s: Operation has invalid inputs", __func__);
219 }
220
221 armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100222 LayerInputHandle operandInputHandle =
223 ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, i, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100224
225 if (operandShape.GetNumDimensions() == 0)
226 {
227 return Fail("%s: Operands with rank 0 are not supported", __func__);
228 }
229
230 if (RequiresReshape(operandShape))
231 {
232 inputsHaveBeenReshaped = true;
233
234 armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();
235
236 // Expand the tensor to three dimensions
237 if (operandShape.GetNumDimensions() == 2)
238 {
239 reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
240 tensorDimensionsAdded = 1;
241 }
242 else
243 {
244 reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
245 tensorDimensionsAdded = 2;
246 }
247
248 armnn::IConnectableLayer& newReshape = AddReshapeLayer(
249 *data.m_Network,
250 operandInputHandle,
251 reshapeInfo
252 );
253
254 // Point to the reshape operation rather then the input operation
255 operandShape = reshapeInfo.GetShape();
256 operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
257 }
258
259 inputShapes.emplace_back(operandShape);
260 inputHandles.emplace_back(operandInputHandle);
261
262 if (!inputHandles.back().IsValid())
263 {
264 return Fail("%s: Operation has invalid inputs", __func__);
265 }
266 }
267
268 BOOST_ASSERT(inputShapes.size() == inputHandles.size());
269
270 if (inputsHaveBeenReshaped)
271 {
272 // Adjust the concatenation dimension by the amount of dimensions added (if any)
273 concatDim += tensorDimensionsAdded;
274
275 // Add extra dimensions to the output shape to reflect the addition of the reshape layers
276 if (tensorDimensionsAdded == 1)
277 {
278 outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
279 }
280 else if (tensorDimensionsAdded == 2)
281 {
narpra01f176d5a2018-11-18 20:17:48 +0000282 outputShape = armnn::TensorShape({1, 1, outputShape[0]});
arovir01b0717b52018-09-05 17:03:25 +0100283 }
284 }
285
narpra01f176d5a2018-11-18 20:17:48 +0000286 // Check if permutations is required and get the pair of permutations required for the concatenation.
287 // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
arovir01b0717b52018-09-05 17:03:25 +0100288 std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
289 std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
290
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100291 bool needPermute =
292 CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);
arovir01b0717b52018-09-05 17:03:25 +0100293
narpra01f176d5a2018-11-18 20:17:48 +0000294 if (needPermute)
295 {
296 outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
297 }
298
arovir01b0717b52018-09-05 17:03:25 +0100299 outputInfo.SetShape(outputShape);
300
301 // this is no-op for identity swizzles, otherwise it replaces both
302 // the handles and shapes with the swizzled layer output handles and shapes
303 SwizzleInputs(*data.m_Network, inputHandles, inputShapes, permutationPair.first);
304
Jim Flynn7b1e41f2019-05-22 18:00:04 +0100305 // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
306 armnn::OriginsDescriptor concatDescriptor;
narpra01f176d5a2018-11-18 20:17:48 +0000307
arovir01b0717b52018-09-05 17:03:25 +0100308 try
309 {
Jim Flynn7b1e41f2019-05-22 18:00:04 +0100310 // The concat descriptor is always created across the only supported concat dimension
narpra01f176d5a2018-11-18 20:17:48 +0000311 // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
Jim Flynn7b1e41f2019-05-22 18:00:04 +0100312 concatDescriptor =
Jim Flynn52aa9352019-05-20 12:52:30 +0100313 armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
arovir01b0717b52018-09-05 17:03:25 +0100314 }
315 catch (const armnn::Exception& error)
316 {
Jim Flynn7b1e41f2019-05-22 18:00:04 +0100317 return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
arovir01b0717b52018-09-05 17:03:25 +0100318 }
319
320 // Validate the output shape is correct given the input shapes based on the
narpra01f176d5a2018-11-18 20:17:48 +0000321 // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
arovir01b0717b52018-09-05 17:03:25 +0100322 if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
323 {
324 return Fail("%s: Error validating the output shape for concat", __func__);
325 }
326
327 std::vector<const armnn::TensorInfo*> inputTensorInfos;
328 std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
329 [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100330
331 bool isSupported = false;
332 FORWARD_LAYER_SUPPORT_FUNC(__func__,
333 IsConcatSupported,
334 data.m_Backends,
335 isSupported,
336 inputTensorInfos,
337 outputInfo,
338 concatDescriptor);
339 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +0100340 {
341 return false;
342 }
343
Jim Flynn7b1e41f2019-05-22 18:00:04 +0100344 armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
arovir01b0717b52018-09-05 17:03:25 +0100345 assert(layer != nullptr);
346 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
347
348 // Connect inputs to the layer
349 const int numInputSlots = layer->GetNumInputSlots();
350 assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
351 for (int i = 0; i < numInputSlots; ++i)
352 {
353 // connect the input directly to the merge (concat) layer
354 inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
355 }
356
narpra01f176d5a2018-11-18 20:17:48 +0000357 if (needPermute)
358 {
359 // Add permutation layer and connect the output to it, the permutation becomes the output layer
360 armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
361 layer->GetOutputSlot(0),
362 permutationPair.second);
363 layer = &deswizzleLayer;
364 }
arovir01b0717b52018-09-05 17:03:25 +0100365
366 if (inputsHaveBeenReshaped)
367 {
368 armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
369
370 // Undo the reshape knowing the amount of dimensions added
371 if (tensorDimensionsAdded == 1)
372 {
373 afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
374 afterConcatInfo.GetShape()[2] }));
375 }
376 else if (tensorDimensionsAdded == 2)
377 {
narpra01f176d5a2018-11-18 20:17:48 +0000378 afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
arovir01b0717b52018-09-05 17:03:25 +0100379 }
380
381 layer = &AddReshapeLayer(
382 *data.m_Network,
383 layer->GetOutputSlot(0),
384 afterConcatInfo
385 );
386 }
387
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100388 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100389}
390
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100391bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
392{
393 ALOGV("hal_1_0::HalPolicy::ConvertConv2d()");
394 return ValidateConv2dParameters(operation) && ::ConvertConv2d<hal_1_0::HalPolicy>(operation, model, data);
395}
396
397bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
398{
399 ALOGV("hal_1_0::HalPolicy::ConvertDepthwiseConv2d()");
400 return ValidateDepthwiseConv2dParameters(operation) &&
401 ::ConvertDepthwiseConv2d<hal_1_0::HalPolicy>(operation, model, data);
402}
403
David Monahanacf479a2019-05-29 14:27:04 +0100404bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
405{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100406 ALOGV("hal_1_0::HalPolicy::ConvertDequantize()");
David Monahanacf479a2019-05-29 14:27:04 +0100407
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100408 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
David Monahanacf479a2019-05-29 14:27:04 +0100409 if (!input.IsValid())
410 {
411 return Fail("%s: Operation has invalid input", __func__);
412 }
413
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100414 const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
David Monahanacf479a2019-05-29 14:27:04 +0100415 if (!outputOperand)
416 {
417 return Fail("%s: Operation has invalid outputs", __func__);
418 }
419
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100420 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100421 if (IsDynamicTensor(outputInfo))
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +0100422 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100423 return Fail("%s: Dynamic output tensors are not supported", __func__);
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +0100424 }
425
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100426 bool isSupported = false;
427 FORWARD_LAYER_SUPPORT_FUNC(__func__,
428 IsDequantizeSupported,
429 data.m_Backends,
430 isSupported,
431 input.GetTensorInfo(),
432 GetTensorInfoForOperand(*outputOperand));
433 if (!isSupported)
David Monahanacf479a2019-05-29 14:27:04 +0100434 {
435 return false;
436 }
437
438 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
439 assert(layer != nullptr);
440 input.Connect(layer->GetInputSlot(0));
441
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100442 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
David Monahanacf479a2019-05-29 14:27:04 +0100443}
444
arovir01b0717b52018-09-05 17:03:25 +0100445bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
446{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100447 ALOGV("hal_1_0::HalPolicy::ConvertFloor()");
448
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100449 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100450 if (!input.IsValid())
451 {
452 return Fail("%s: Operation has invalid inputs", __func__);
453 }
454
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100455 const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100456 if (!outputOperand)
457 {
458 return Fail("%s: Operation has invalid outputs", __func__);
459 }
460
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100461 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
462 if (IsDynamicTensor(outputInfo))
463 {
464 return Fail("%s: Dynamic output tensors are not supported", __func__);
465 }
466
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100467 bool isSupported = false;
468 FORWARD_LAYER_SUPPORT_FUNC(__func__,
469 IsFloorSupported,
470 data.m_Backends,
471 isSupported,
472 input.GetTensorInfo(),
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100473 outputInfo);
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100474 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +0100475 {
476 return false;
477 }
478
479 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
480 assert(layer != nullptr);
481 input.Connect(layer->GetInputSlot(0));
482
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100483 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100484}
485
// Convert a FULLY_CONNECTED operation into an ArmNN fully-connected layer.
//
// Inputs (per the NNAPI FULLY_CONNECTED contract):
//   0 = input tensor, 1 = 2-D weights, 2 = 1-D bias, 3 = fused activation enum.
// Inputs of rank > 2 are flattened to 2-D via an inserted reshape layer before
// being fed to the fully-connected layer; the fused activation is appended
// after it via ProcessActivation.
bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertFullyConnected()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 1, model, data); // 2D
    ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    // Compute the 2-D shape the input must be flattened to; the helper throws
    // if the input cannot be flattened consistently with the weights shape.
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    } catch (const std::exception &e) {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<hal_1_0::HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    // Ask the configured backends whether they can execute this layer, using
    // the flattened (2-D) input shape that will actually be fed to it.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFullyConnectedSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               outputInfo,
                               weights.GetInfo(),
                               bias.GetInfo(),
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            // Insert a reshape layer to flatten the >2-D input down to the 2-D
            // shape computed above before it reaches the fully-connected layer.
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
589
590bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
591 const Model& model,
592 ConversionData& data)
593{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100594 ALOGV("hal_1_0::HalPolicy::ConvertLocalResponseNormalization()");
595
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100596 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100597 if (!input.IsValid())
598 {
599 return Fail("%s: Operation has invalid inputs", __func__);
600 }
601
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100602 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100603 if (!output)
604 {
605 return Fail("%s: Could not read output 0", __func__);
606 }
607
Aron Virginas-Tara3609cc2019-07-29 10:50:25 +0100608 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
arovir01b0717b52018-09-05 17:03:25 +0100609 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
610
Aron Virginas-Tara3609cc2019-07-29 10:50:25 +0100611 if (IsDynamicTensor(outputInfo))
612 {
613 return Fail("%s: Dynamic output tensors are not supported", __func__);
614 }
arovir01b0717b52018-09-05 17:03:25 +0100615
Aron Virginas-Tara3609cc2019-07-29 10:50:25 +0100616 armnn::NormalizationDescriptor descriptor;
617 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +0100618 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
Aron Virginas-Tara3609cc2019-07-29 10:50:25 +0100619 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
arovir01b0717b52018-09-05 17:03:25 +0100620
621 if (!input.IsValid() ||
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100622 !GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
623 !GetInputFloat32<hal_1_0::HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
624 !GetInputFloat32<hal_1_0::HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
625 !GetInputFloat32<hal_1_0::HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100626 {
627 return Fail("%s: Operation has invalid inputs", __func__);
628 }
629
630 // ArmNN expects normSize to be the full size of the normalization
631 // window rather than the radius as in AndroidNN.
632 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
633
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100634 bool isSupported = false;
635 FORWARD_LAYER_SUPPORT_FUNC(__func__,
636 IsNormalizationSupported,
637 data.m_Backends,
638 isSupported,
639 inputInfo,
640 outputInfo,
641 descriptor);
642 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +0100643 {
644 return false;
645 }
646
647
648 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
649 assert(layer != nullptr);
narpra012fb804a2018-10-22 14:52:32 +0100650 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +0100651
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100652 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100653}
654
655bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
656{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100657 ALOGV("hal_1_0::HalPolicy::ConvertLogistic()");
658
arovir01b0717b52018-09-05 17:03:25 +0100659 armnn::ActivationDescriptor desc;
660 desc.m_Function = armnn::ActivationFunction::Sigmoid;
661
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100662 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100663}
664
// Converts an NNAPI LSTM operation (23 inputs, 4 outputs) into a single ArmNN LSTM layer.
// Fails if any mandatory operand is missing, if the optional-operand groups
// (CIFG / peephole / projection) are internally inconsistent, or if no backend
// supports the resulting configuration.
bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertLstm()");

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0: input", __func__);
    }
    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    LayerInputHandle outputStateIn = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 18, model, data);
    if (!outputStateIn.IsValid())
    {
        return Fail("%s: Could not read input 18: outputStateIn", __func__);
    }
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    LayerInputHandle cellStateIn = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 19, model, data);
    if (!cellStateIn.IsValid())
    {
        return Fail("%s: Could not read input 19: cellStateIn", __func__);
    }

    // Get the mandatory input tensors:
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 2, model, data);
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToCellWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 3, model, data);
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 4, model, data);
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 6, model, data);
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToCellWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 7, model, data);
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 8, model, data);
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin forgetGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 13, model, data);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 14, model, data);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin outputGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 15, model, data);

    // All of the above are mandatory; any invalid pin means the model is malformed.
    if (!inputToForgetWeightsPin.IsValid() ||
        !inputToCellWeightsPin.IsValid() ||
        !inputToOutputWeightsPin.IsValid() ||
        !recurrentToForgetWeightsPin.IsValid() ||
        !recurrentToCellWeightsPin.IsValid() ||
        !recurrentToOutputWeightsPin.IsValid() ||
        !forgetGateBiasPin.IsValid() ||
        !cellBiasPin.IsValid() ||
        !outputGateBiasPin.IsValid())
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the optional input tensors. The trailing 'true' argument marks the operand
    // as optional, so a missing operand yields a pin that is invalid-but-optional
    // instead of a hard failure (checked via IsValid()/IsOptional() below).
    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    const ConstTensorPin inputToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    const ConstTensorPin recurrentToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  5,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  9,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  10,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  11,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin inputGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  12,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    const ConstTensorPin projectionWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  16,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    const ConstTensorPin projectionBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  17,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // An optional pin may be invalid only if it is also flagged optional (i.e. the
    // operand was genuinely absent); invalid AND non-optional means a read error.
    if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
        (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
        (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
        (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
        (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
        (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
        (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
        (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the mandatory input scalars (actually 1-D tensors of size 1):
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    ActivationFn activation;
    float cellClip;
    float projClip;
    if (!GetInputActivationFunctionFromTensor<hal_1_0::HalPolicy>(operation, 20, activation, model, data) ||
        !GetInputScalar<hal_1_0::HalPolicy>(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
        !GetInputScalar<hal_1_0::HalPolicy>(operation, 22, OperandType::FLOAT32, projClip, model, data))
    {
        return Fail("%s: Operation has invalid scalar inputs", __func__);
    }

    // Outputs:
    // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [batch_size, num_units * 3] with CIFG, or [batch_size, num_units * 4] without CIFG.
    //     (NOTE(review): the previous comment had the two cases swapped relative to the NNAPI spec.)
    const Operand* scratchBuffer = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!scratchBuffer)
    {
        return Fail("%s: Could not read output 0: scratchBuffer", __func__);
    }
    // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    const Operand* outputStateOut = GetOutputOperand<hal_1_0::HalPolicy>(operation, 1, model);
    if (!outputStateOut)
    {
        return Fail("%s: Could not read output 1: outputStateOut", __func__);
    }
    // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    const Operand* cellStateOut = GetOutputOperand<hal_1_0::HalPolicy>(operation, 2, model);
    if (!cellStateOut)
    {
        return Fail("%s: Could not read output 2: cellStateOut", __func__);
    }
    // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //     effectively the same as the current “output state (out)” value.
    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 3, model);
    if (!output)
    {
        return Fail("%s: Could not read output 3: output", __func__);
    }

    // Set the params structure for the AddLstmLayer call.
    // Optional tensors that were absent yield nullptr here, which is what the
    // descriptor-derivation logic below relies on.
    armnn::LstmInputParams params;
    params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
    params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
    params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
    params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
    params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
    params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
    params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
    params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
    params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
    params.m_CellBias = cellBiasPin.GetConstTensorPtr();
    params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
    params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
    params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();

    // Set the layer descriptor. The CIFG / peephole / projection flags are inferred
    // from which optional tensors were actually supplied.
    armnn::LstmDescriptor desc;
    desc.m_ActivationFunc = activation;
    desc.m_ClippingThresCell = cellClip;
    desc.m_ClippingThresProj = projClip;
    desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
                          params.m_RecurrentToInputWeights == nullptr ||
                          params.m_InputGateBias == nullptr);
    desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
                              params.m_CellToOutputWeights != nullptr);
    desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);

    // Validate the optional input groups: each group must be provided either in full or
    // not at all, otherwise the model is ambiguous.
    if (desc.m_CifgEnabled &&
        (params.m_InputToInputWeights != nullptr ||
         params.m_RecurrentToInputWeights != nullptr ||
         params.m_InputGateBias != nullptr))
    {
        return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
                    " and input gate bias must be provided", __func__);
    }

    if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
    {
        return Fail("%s: projection bias should not be provided without projection weights", __func__);
    }

    if (desc.m_PeepholeEnabled &&
        (params.m_CellToForgetWeights == nullptr ||
         params.m_CellToOutputWeights == nullptr ||
         (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
    {
        return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
                    " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
    }

    // Check if the layer is supported
    // Inputs
    const armnn::TensorInfo& inputInfo         = input.GetTensorInfo();
    const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
    const armnn::TensorInfo& cellStateInInfo   = cellStateIn.GetTensorInfo();

    // Outputs
    const armnn::TensorInfo& scratchBufferInfo  = GetTensorInfoForOperand(*scratchBuffer);
    const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
    const armnn::TensorInfo& cellStateOutInfo   = GetTensorInfoForOperand(*cellStateOut);
    const armnn::TensorInfo& outputInfo         = GetTensorInfoForOperand(*output);

    // Basic parameters
    armnn::LstmInputParamsInfo paramsInfo;
    paramsInfo.m_InputToForgetWeights     = &(params.m_InputToForgetWeights->GetInfo());
    paramsInfo.m_InputToCellWeights       = &(params.m_InputToCellWeights->GetInfo());
    paramsInfo.m_InputToOutputWeights     = &(params.m_InputToOutputWeights->GetInfo());
    paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
    paramsInfo.m_RecurrentToCellWeights   = &(params.m_RecurrentToCellWeights->GetInfo());
    paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
    paramsInfo.m_ForgetGateBias           = &(params.m_ForgetGateBias->GetInfo());
    paramsInfo.m_CellBias                 = &(params.m_CellBias->GetInfo());
    paramsInfo.m_OutputGateBias           = &(params.m_OutputGateBias->GetInfo());

    // Optional parameters: only dereference tensors the group-validation above has
    // established to be present for the active configuration.
    if(!desc.m_CifgEnabled)
    {
        paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
        paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
        if (params.m_CellToInputWeights != nullptr)
        {
            paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
        }
        paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
    }

    if(desc.m_ProjectionEnabled)
    {
        paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
        if (params.m_ProjectionBias != nullptr)
        {
            paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
        }
    }

    if(desc.m_PeepholeEnabled)
    {
        paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
        paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsLstmSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputStateInInfo,
                               cellStateInInfo,
                               scratchBufferInfo,
                               outputStateOutInfo,
                               cellStateOutInfo,
                               outputInfo,
                               desc,
                               paramsInfo);
    if (!isSupported)
    {
        return false;
    }

    // Add the layer
    armnn::IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");

    input.Connect(layer->GetInputSlot(0));
    outputStateIn.Connect(layer->GetInputSlot(1));
    cellStateIn.Connect(layer->GetInputSlot(2));

    // Register all four outputs (scratch, output state, cell state, output).
    return (SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, 0, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 1, *layer, 1, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 2, *layer, 2, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 3, *layer, 3, model, data));
}
1018
1019bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
1020{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001021 ALOGV("hal_1_0::HalPolicy::ConvertL2Normalization()");
1022
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001023 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001024 if (!input.IsValid())
1025 {
1026 return Fail("%s: Operation has invalid inputs", __func__);
1027 }
1028
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001029 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001030 if (!output)
1031 {
1032 return Fail("%s: Could not read output 0", __func__);
1033 }
1034
1035 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001036 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
arovir01b0717b52018-09-05 17:03:25 +01001037
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +01001038 if (IsDynamicTensor(outputInfo))
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +01001039 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001040 return Fail("%s: Dynamic output tensors are not supported", __func__);
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +01001041 }
1042
Matteo Martincigh58f71092018-09-25 15:58:52 +01001043 armnn::L2NormalizationDescriptor desc;
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +01001044 desc.m_DataLayout = armnn::DataLayout::NHWC;
Matteo Martincigh58f71092018-09-25 15:58:52 +01001045
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001046 bool isSupported = false;
1047 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1048 IsL2NormalizationSupported,
1049 data.m_Backends,
1050 isSupported,
1051 inputInfo,
1052 outputInfo,
1053 desc);
1054 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001055 {
1056 return false;
1057 }
1058
Matteo Martincigh58f71092018-09-25 15:58:52 +01001059 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
arovir01b0717b52018-09-05 17:03:25 +01001060 assert(layer != nullptr);
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +01001061 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +01001062
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001063 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001064}
1065
1066bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
1067{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001068 ALOGV("hal_1_0::HalPolicy::ConvertL2Pool2d()");
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001069 return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001070}
1071
1072bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
1073{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001074 ALOGV("hal_1_0::HalPolicy::ConvertMaxPool2d()");
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001075 return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001076}
1077
1078bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
1079{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001080 ALOGV("hal_1_0::HalPolicy::ConvertMul()");
1081
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001082 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
1083 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 1, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001084
1085 if (!input0.IsValid() || !input1.IsValid())
1086 {
1087 return Fail("%s: Operation has invalid inputs", __func__);
1088 }
1089
1090 // The FuseActivation parameter is always the input index 2
1091 // and it should be optional
1092 ActivationFn activationFunction;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001093 if (!GetOptionalInputActivation<hal_1_0::HalPolicy>(operation, 2, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001094 {
1095 return Fail("%s: Operation has invalid inputs", __func__);
1096 }
1097
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001098 const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001099
1100 if (outputOperand == nullptr)
1101 {
1102 return false;
1103 }
1104
Aron Virginas-Tara3609cc2019-07-29 10:50:25 +01001105 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
1106 if (IsDynamicTensor(outputInfo))
1107 {
1108 return Fail("%s: Dynamic output tensors are not supported", __func__);
1109 }
arovir01b0717b52018-09-05 17:03:25 +01001110
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001111 bool isSupported = false;
1112 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1113 IsMultiplicationSupported,
1114 data.m_Backends,
1115 isSupported,
1116 input0.GetTensorInfo(),
1117 input1.GetTensorInfo(),
Aron Virginas-Tara3609cc2019-07-29 10:50:25 +01001118 outputInfo);
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001119 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001120 {
1121 return false;
1122 }
1123
1124 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
Aron Virginas-Tara3609cc2019-07-29 10:50:25 +01001125 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
arovir01b0717b52018-09-05 17:03:25 +01001126
1127 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
1128 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
1129
1130 if (endLayer != nullptr)
1131 {
1132 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001133 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001134 }
1135 else
1136 {
1137 return Fail("%s: ProcessActivation failed", __func__);
1138 }
1139}
1140
1141bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
1142{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001143 ALOGV("hal_1_0::HalPolicy::ConvertReLu()");
Sadik Armagan61113162019-07-25 09:09:40 +01001144 return ::ConvertReLu<hal_1_0::HalPolicy>(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001145}
1146
1147bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
1148{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001149 ALOGV("hal_1_0::HalPolicy::ConvertReLu1()");
Sadik Armagan61113162019-07-25 09:09:40 +01001150 return ::ConvertReLu1<hal_1_0::HalPolicy>(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001151}
1152
1153bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
1154{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001155 ALOGV("hal_1_0::HalPolicy::ConvertReLu6()");
Sadik Armagan61113162019-07-25 09:09:40 +01001156 return ::ConvertReLu6<hal_1_0::HalPolicy>(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001157}
1158
1159bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
1160{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001161 ALOGV("hal_1_0::HalPolicy::ConvertSoftmax()");
1162
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001163 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001164 if (!input.IsValid())
1165 {
1166 return Fail("%s: Operation has invalid inputs", __func__);
1167 }
1168
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001169 const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001170 if (!outputOperand)
1171 {
1172 return Fail("%s: Operation has no outputs", __func__);
1173 }
1174
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001175 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +01001176 if (IsDynamicTensor(outputInfo))
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +01001177 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001178 return Fail("%s: Dynamic output tensors are not supported", __func__);
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +01001179 }
arovir01b0717b52018-09-05 17:03:25 +01001180
1181 armnn::SoftmaxDescriptor desc;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001182 if (!GetInputFloat32<hal_1_0::HalPolicy>(operation, 1, desc.m_Beta, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001183 {
1184 return Fail("%s: Operation has invalid inputs", __func__);
1185 }
1186
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001187 bool isSupported = false;
1188 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1189 IsSoftmaxSupported,
1190 data.m_Backends,
1191 isSupported,
1192 input.GetTensorInfo(),
1193 outputInfo,
1194 desc);
1195 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001196 {
1197 return false;
1198 }
1199
1200 armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
1201 assert(layer != nullptr);
1202 input.Connect(layer->GetInputSlot(0));
1203
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001204 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001205}
1206
Keith Davisa6bc52f2019-06-26 09:39:49 +01001207bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
1208{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001209 ALOGV("hal_1_0::HalPolicy::ConvertSpaceToDepth()");
Keith Davisa6bc52f2019-06-26 09:39:49 +01001210
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001211 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
Keith Davisa6bc52f2019-06-26 09:39:49 +01001212 if (!input.IsValid() )
1213 {
1214 return Fail("%s: Operation has invalid inputs", __func__);
1215 }
1216
1217 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1218 unsigned int rank = inputInfo.GetNumDimensions();
1219
1220 if (rank != 4)
1221 {
1222 return Fail("%s: Only inputs with rank 4 are supported", __func__);
1223 }
1224
1225 armnn::SpaceToDepthDescriptor desc;
1226 bool dataLayoutCheck;
1227
1228 GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
1229
1230 if (desc.m_BlockSize <= 1)
1231 {
1232 return Fail("%s: Block size must be at least 1 in all dimensions");
1233 }
1234
1235 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
1236 if (!output)
1237 {
1238 return Fail("%s: Could not read output 0", __func__);
1239 }
1240
1241 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tara3609cc2019-07-29 10:50:25 +01001242 if (IsDynamicTensor(outputInfo))
1243 {
1244 return Fail("%s: Dynamic output tensors are not supported", __func__);
1245 }
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001246
1247 bool isSupported = false;
1248 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1249 IsSpaceToDepthSupported,
1250 data.m_Backends,
1251 isSupported,
1252 inputInfo,
1253 outputInfo,
1254 desc);
1255 if (!isSupported)
Keith Davisa6bc52f2019-06-26 09:39:49 +01001256 {
1257 return false;
1258 }
1259
1260 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
1261 assert(layer != nullptr);
1262 input.Connect(layer->GetInputSlot(0));
1263
1264 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
1265}
1266
arovir01b0717b52018-09-05 17:03:25 +01001267bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
1268{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001269 ALOGV("hal_1_0::HalPolicy::ConvertTanH()");
Sadik Armagan61113162019-07-25 09:09:40 +01001270 return ::ConvertTanH<hal_1_0::HalPolicy>(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001271}
1272
1273bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
1274{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001275 ALOGV("hal_1_0::HalPolicy::ConvertReshape()");
1276
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001277 const Operand* inputOperand = GetInputOperand<hal_1_0::HalPolicy>(operation, 0, model);
1278 const Operand* requestedShapeOperand = GetInputOperand<hal_1_0::HalPolicy>(operation, 1, model);
1279 const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001280
1281 if (inputOperand == nullptr
1282 || requestedShapeOperand == nullptr
1283 || outputOperand == nullptr)
1284 {
1285 return Fail("%s: Operation has invalid inputs", __func__);
1286 }
1287
1288
1289 if (requestedShapeOperand->dimensions.size() != 1)
1290 {
1291 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
1292 __func__, requestedShapeOperand->dimensions.size());
1293 }
1294
1295 std::vector<int32_t> targetDimensions;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001296 if (!GetTensorInt32Values<hal_1_0::HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001297 {
1298 return Fail("%s: Could not read values of input 1", __func__);
1299 }
1300
1301 const Shape inputOperandShape = GetOperandShape(*inputOperand);
1302
1303 Shape requestedShape;
1304 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
1305 // function that resolves these values into a fully specified tensor shape.
1306 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
1307 {
1308 return Fail("%s: Failed to resolve the requested shape", __func__);
1309 }
1310
1311 const Shape outputOperandShape = GetOperandShape(*outputOperand);
1312 if (!SameShape(requestedShape, outputOperandShape))
1313 {
1314 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
1315 }
1316
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001317 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001318 if (!input.IsValid())
1319 {
1320 return Fail("%s: Could not read input 0", __func__);
1321 }
1322
arovir01b0717b52018-09-05 17:03:25 +01001323 armnn::ReshapeDescriptor reshapeDescriptor;
1324 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
1325 requestedShape.dimensions.data());
1326
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001327 bool isSupported = false;
1328 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1329 IsReshapeSupported,
1330 data.m_Backends,
1331 isSupported,
1332 input.GetTensorInfo(),
1333 reshapeDescriptor);
1334 if (!isSupported)
Matteo Martincigh265d1ad2019-01-08 18:14:53 +00001335 {
1336 return false;
1337 }
1338
arovir01b0717b52018-09-05 17:03:25 +01001339 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
1340 assert(layer != nullptr);
1341 input.Connect(layer->GetInputSlot(0));
1342
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001343 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001344}
1345
1346bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data)
1347{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001348 ALOGV("hal_1_0::HalPolicy::ConvertResizeBilinear()");
1349
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001350 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001351 if (!input.IsValid())
1352 {
1353 return Fail("%s: Could not read input 0", __func__);
1354 }
1355
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001356 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001357 if (!output)
1358 {
1359 return Fail("%s: Could not read output 0", __func__);
1360 }
1361
Aron Virginas-Tara3609cc2019-07-29 10:50:25 +01001362 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
arovir01b0717b52018-09-05 17:03:25 +01001363 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1364
Aron Virginas-Tara3609cc2019-07-29 10:50:25 +01001365 if (IsDynamicTensor(outputInfo))
1366 {
1367 return Fail("%s: Dynamic output tensors are not supported", __func__);
1368 }
1369
Aron Virginas-Tara5daf862019-07-01 19:07:20 +01001370 armnn::ResizeDescriptor desc;
1371 desc.m_Method = armnn::ResizeMethod::Bilinear;
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001372 desc.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +01001373
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001374 bool isSupported = false;
1375 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1376 IsResizeSupported,
1377 data.m_Backends,
1378 isSupported,
1379 inputInfo,
1380 outputInfo,
1381 desc);
1382 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001383 {
1384 return false;
1385 }
1386
Aron Virginas-Tar535607d2019-07-03 15:46:15 +01001387 if (!GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, desc.m_TargetWidth, model, data) ||
1388 !GetInputScalar<hal_1_0::HalPolicy>(operation, 2, OperandType::INT32, desc.m_TargetHeight, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001389 {
1390 return Fail("%s: Operation has invalid inputs", __func__);
1391 }
1392
Aron Virginas-Tara5daf862019-07-01 19:07:20 +01001393 armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(desc);
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001394
arovir01b0717b52018-09-05 17:03:25 +01001395 assert(layer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +01001396
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001397 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1398 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +01001399
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001400 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001401
1402}
1403
1404} // namespace hal_1_0
Matteo Martincigh58f71092018-09-25 15:58:52 +01001405} // namespace armnn_driver