//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "HalPolicy.hpp"

#include <armnn/Optional.hpp>

#include "FullyConnected.hpp"
#include "OutputShapeUtils.hpp"
#include "Utils.hpp"

namespace armnn_driver
{
namespace hal_1_0
{

bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
{
    switch (operation.type)
    {
        case V1_0::OperationType::ADD:
            return ConvertAdd(operation, model, data);
        case V1_0::OperationType::AVERAGE_POOL_2D:
            return ConvertAveragePool2d(operation, model, data);
        case V1_0::OperationType::CONCATENATION:
            return ConvertConcatenation(operation, model, data);
        case V1_0::OperationType::CONV_2D:
            return ConvertConv2d(operation, model, data);
        case V1_0::OperationType::DEPTHWISE_CONV_2D:
            return ConvertDepthwiseConv2d(operation, model, data);
        case V1_0::OperationType::DEQUANTIZE:
            return ConvertDequantize(operation, model, data);
        case V1_0::OperationType::FLOOR:
            return ConvertFloor(operation, model, data);
        case V1_0::OperationType::FULLY_CONNECTED:
            return ConvertFullyConnected(operation, model, data);
        case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
            return ConvertLocalResponseNormalization(operation, model, data);
        case V1_0::OperationType::LOGISTIC:
            return ConvertLogistic(operation, model, data);
        case V1_0::OperationType::LSTM:
            return ConvertLstm(operation, model, data);
        case V1_0::OperationType::L2_NORMALIZATION:
            return ConvertL2Normalization(operation, model, data);
        case V1_0::OperationType::L2_POOL_2D:
            return ConvertL2Pool2d(operation, model, data);
        case V1_0::OperationType::MAX_POOL_2D:
            return ConvertMaxPool2d(operation, model, data);
        case V1_0::OperationType::MUL:
            return ConvertMul(operation, model, data);
        case V1_0::OperationType::RELU:
            return ConvertReLu(operation, model, data);
        case V1_0::OperationType::RELU1:
            return ConvertReLu1(operation, model, data);
        case V1_0::OperationType::RELU6:
            return ConvertReLu6(operation, model, data);
        case V1_0::OperationType::SOFTMAX:
            return ConvertSoftmax(operation, model, data);
        case V1_0::OperationType::SPACE_TO_DEPTH:
            return ConvertSpaceToDepth(operation, model, data);
        case V1_0::OperationType::TANH:
            return ConvertTanH(operation, model, data);
        case V1_0::OperationType::RESHAPE:
            return ConvertReshape(operation, model, data);
        case V1_0::OperationType::RESIZE_BILINEAR:
            return ConvertResizeBilinear(operation, model, data);
        default:
            return Fail("%s: Operation type %s not supported in ArmnnDriver",
                        __func__, toString(operation.type).c_str());
    }
}

bool HalPolicy::ValidateConv2dParameters(const Operation& operation)
{
    if (operation.inputs.size() != 10 && operation.inputs.size() != 7)
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }
    return true;
}

bool HalPolicy::ValidateDepthwiseConv2dParameters(const Operation& operation)
{
    if (operation.inputs.size() != 11 && operation.inputs.size() != 8)
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }
    return true;
}

bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertAdd()");

    LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always input index 2, and it is optional.
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<hal_1_0::HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);

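    // FORWARD_LAYER_SUPPORT_FUNC queries the configured backends in data.m_Backends
    // with the named IsXxxSupported check and sets isSupported accordingly (see the
    // driver's conversion utilities for the macro itself).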
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsAdditionSupported,
                               data.m_Backends,
                               isSupported,
                               input0.GetTensorInfo(),
                               input1.GetTensorInfo(),
                               outInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
    armnn::IConnectableLayer* const endLayer   = ProcessActivation(outInfo, activationFunction, startLayer, data);

    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();

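    // ProcessActivation appends any fused activation layer after the addition;
    // BroadcastTensor (below) wires up the two inputs, inserting a reshape on the
    // lower-rank operand where NNAPI-style broadcasting is needed.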
    if (endLayer != nullptr)
    {
        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
        return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}

bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertAveragePool2d()");
    return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
}

bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertConcatenation()");

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar<hal_1_0::HalPolicy>(operation, numInputTensors, OperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    armnn::TensorInfo  outputInfo  = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // Handle negative concat dims along the lines of tensorflow as described here:
    //    https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const Operand* const operand = GetInputOperand<hal_1_0::HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        LayerInputHandle operandInputHandle =
            ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, i, model, data);

        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
                );

            // Point to the reshape operation rather than the input operation
            operandShape       = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    BOOST_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

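    // Example: a concatenation along dimension 2 of a 4-D tensor cannot be handled
    // directly, so the inputs are swizzled below so the merge happens along a
    // supported axis, and the result is permuted back (deswizzled) after the layer.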
    // Check if a permutation is required and get the pair of permutations required for the concatenation.
    // A permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
        std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute =
        CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // This is a no-op for identity swizzles; otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes.
    SwizzleInputs(*data.m_Network, inputHandles, inputShapes, permutationPair.first);

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor =
            armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (const armnn::Exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
                   [](const LayerInputHandle& h) -> const armnn::TensorInfo* { return &h.GetTensorInfo(); });

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConcatSupported,
                               data.m_Backends,
                               isSupported,
                               inputTensorInfos,
                               outputInfo,
                               concatDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // Connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        // Add a permute layer and connect the output to it; the permute becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
                                                                   layer->GetOutputSlot(0),
                                                                   permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape, knowing the number of dimensions that were added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        layer = &AddReshapeLayer(
                *data.m_Network,
                layer->GetOutputSlot(0),
                afterConcatInfo
            );
    }

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertConv2d()");
    return ValidateConv2dParameters(operation) && ::ConvertConv2d<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertDepthwiseConv2d()");
    return ValidateDepthwiseConv2dParameters(operation) &&
           ::ConvertDepthwiseConv2d<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertDequantize()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid input", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

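    // The model may leave the output shape unspecified (dynamic). Dequantize is
    // element-wise, so in that case the output shape can be taken from the input.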
    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo))
    {
        ALOGD("Output shape not set, will infer from input");
        outputInfo.SetShape(input.GetTensorInfo().GetShape());
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDequantizeSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               GetTensorInfoForOperand(*outputOperand));
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertFloor()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFloorSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               GetTensorInfoForOperand(*outputOperand));
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertFullyConnected()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    armnn::TensorInfo outputInfo       = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        ALOGD("Output shape not set, will infer from inputs");
        outputInfo.SetShape(inputInfo.GetShape());
    }

    // ArmNN does not currently support non-fixed weights or bias
    ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 1, model, data); // 2D
    ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias    = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

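    // Inputs with a rank greater than 2 are flattened to the 2-D shape the fully
    // connected layer expects; FlattenFullyConnectedInput derives that shape from
    // the input and weights dimensions and throws if they are incompatible.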
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<hal_1_0::HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled           = true;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFullyConnectedSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               outputInfo,
                               weights.GetInfo(),
                               bias.GetInfo(),
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation,
                                                                0,
                                                                *endLayer,
                                                                model,
                                                                data,
                                                                armnn::Optional<armnn::TensorInfo>(outputInfo));
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}

bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
                                                  const Model& model,
                                                  ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertLocalResponseNormalization()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::NormalizationDescriptor descriptor;

    descriptor.m_DataLayout      = armnn::DataLayout::NHWC;
    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
    descriptor.m_NormMethodType  = armnn::NormalizationAlgorithmMethod::LocalBrightness;

    if (!input.IsValid() ||
        !GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
        !GetInputFloat32<hal_1_0::HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
        !GetInputFloat32<hal_1_0::HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
        !GetInputFloat32<hal_1_0::HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // ArmNN expects normSize to be the full size of the normalization
    // window rather than the radius as in AndroidNN.
    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
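    // For example, an AndroidNN radius of 2 becomes a window size of 5
    // (the centre element plus two neighbours on each side).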

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsNormalizationSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertLogistic()");

    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::Sigmoid;

    return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
}

bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertLstm()");

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0: input", __func__);
    }
    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    LayerInputHandle outputStateIn = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 18, model, data);
    if (!outputStateIn.IsValid())
    {
        return Fail("%s: Could not read input 18: outputStateIn", __func__);
    }
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    LayerInputHandle cellStateIn = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 19, model, data);
    if (!cellStateIn.IsValid())
    {
        return Fail("%s: Could not read input 19: cellStateIn", __func__);
    }

    // Get the mandatory input tensors:
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 2, model, data);
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToCellWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 3, model, data);
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 4, model, data);
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 6, model, data);
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToCellWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 7, model, data);
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 8, model, data);
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin forgetGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 13, model, data);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 14, model, data);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin outputGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 15, model, data);

    if (!inputToForgetWeightsPin.IsValid() ||
        !inputToCellWeightsPin.IsValid() ||
        !inputToOutputWeightsPin.IsValid() ||
        !recurrentToForgetWeightsPin.IsValid() ||
        !recurrentToCellWeightsPin.IsValid() ||
        !recurrentToOutputWeightsPin.IsValid() ||
        !forgetGateBiasPin.IsValid() ||
        !cellBiasPin.IsValid() ||
        !outputGateBiasPin.IsValid())
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the optional input tensors:
    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    const ConstTensorPin inputToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    const ConstTensorPin recurrentToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  5,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  9,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  10,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  11,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin inputGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  12,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    const ConstTensorPin projectionWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  16,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    const ConstTensorPin projectionBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  17,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
        (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
        (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
        (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
        (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
        (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
        (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
        (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the mandatory input scalars (actually 1-D tensors of size 1):
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    ActivationFn activation;
    float cellClip;
    float projClip;
    if (!GetInputActivationFunctionFromTensor<hal_1_0::HalPolicy>(operation, 20, activation, model, data) ||
        !GetInputScalar<hal_1_0::HalPolicy>(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
        !GetInputScalar<hal_1_0::HalPolicy>(operation, 22, OperandType::FLOAT32, projClip, model, data))
    {
        return Fail("%s: Operation has invalid scalar inputs", __func__);
    }

    // Outputs:
    // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
    //     with CIFG, or [batch_size, num_units * 3] without CIFG.
    const Operand* scratchBuffer = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!scratchBuffer)
    {
        return Fail("%s: Could not read output 0: scratchBuffer", __func__);
    }
    // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    const Operand* outputStateOut = GetOutputOperand<hal_1_0::HalPolicy>(operation, 1, model);
    if (!outputStateOut)
    {
        return Fail("%s: Could not read output 1: outputStateOut", __func__);
    }
    // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    const Operand* cellStateOut = GetOutputOperand<hal_1_0::HalPolicy>(operation, 2, model);
    if (!cellStateOut)
    {
        return Fail("%s: Could not read output 2: cellStateOut", __func__);
    }
    // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //     effectively the same as the current “output state (out)” value.
    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 3, model);
    if (!output)
    {
        return Fail("%s: Could not read output 3: output", __func__);
    }

    // set the params structure for the AddLstmLayer call
    armnn::LstmInputParams params;
    params.m_InputToInputWeights      = inputToInputWeightsPin.GetConstTensorPtr();
    params.m_InputToForgetWeights     = inputToForgetWeightsPin.GetConstTensorPtr();
    params.m_InputToCellWeights       = inputToCellWeightsPin.GetConstTensorPtr();
    params.m_InputToOutputWeights     = inputToOutputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToInputWeights  = recurrentToInputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToCellWeights   = recurrentToCellWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
    params.m_CellToInputWeights       = cellToInputWeightsPin.GetConstTensorPtr();
    params.m_CellToForgetWeights      = cellToForgetWeightsPin.GetConstTensorPtr();
    params.m_CellToOutputWeights      = cellToOutputWeightsPin.GetConstTensorPtr();
    params.m_InputGateBias            = inputGateBiasPin.GetConstTensorPtr();
    params.m_ForgetGateBias           = forgetGateBiasPin.GetConstTensorPtr();
    params.m_CellBias                 = cellBiasPin.GetConstTensorPtr();
    params.m_OutputGateBias           = outputGateBiasPin.GetConstTensorPtr();
    params.m_ProjectionWeights        = projectionWeightsPin.GetConstTensorPtr();
    params.m_ProjectionBias           = projectionBiasPin.GetConstTensorPtr();

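    // CIFG (Coupled Input and Forget Gate), peephole and projection are not flagged
    // explicitly in the model; they are deduced below from which optional tensors
    // were actually supplied.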
    // set the layer descriptor
    armnn::LstmDescriptor desc;
    desc.m_ActivationFunc    = activation;
    desc.m_ClippingThresCell = cellClip;
    desc.m_ClippingThresProj = projClip;
    desc.m_CifgEnabled       = (params.m_InputToInputWeights == nullptr ||
                                params.m_RecurrentToInputWeights == nullptr ||
                                params.m_InputGateBias == nullptr);
    desc.m_PeepholeEnabled   = (params.m_CellToForgetWeights != nullptr ||
                                params.m_CellToOutputWeights != nullptr);
    desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);

    // validate the optional input groups
    if (desc.m_CifgEnabled &&
        (params.m_InputToInputWeights != nullptr ||
         params.m_RecurrentToInputWeights != nullptr ||
         params.m_InputGateBias != nullptr))
    {
        return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
                    " and input gate bias must be provided", __func__);
    }

    if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
    {
        return Fail("%s: projection bias should not be provided without projection weights", __func__);
    }

    if (desc.m_PeepholeEnabled &&
        (params.m_CellToForgetWeights == nullptr ||
         params.m_CellToOutputWeights == nullptr ||
         (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
    {
        return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
                    " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
    }

    // Check if the layer is supported
    // Inputs
    const armnn::TensorInfo& inputInfo         = input.GetTensorInfo();
    const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
    const armnn::TensorInfo& cellStateInInfo   = cellStateIn.GetTensorInfo();

    // Outputs
    const armnn::TensorInfo& scratchBufferInfo  = GetTensorInfoForOperand(*scratchBuffer);
    const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
    const armnn::TensorInfo& cellStateOutInfo   = GetTensorInfoForOperand(*cellStateOut);
    const armnn::TensorInfo& outputInfo         = GetTensorInfoForOperand(*output);

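    // IsLstmSupported is given the TensorInfo of every tensor that will be passed to
    // the layer; the optional groups are only filled in when the corresponding
    // feature (CIFG disabled, projection, peephole) is actually in use.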
    // Basic parameters
    armnn::LstmInputParamsInfo paramsInfo;
    paramsInfo.m_InputToForgetWeights     = &(params.m_InputToForgetWeights->GetInfo());
    paramsInfo.m_InputToCellWeights       = &(params.m_InputToCellWeights->GetInfo());
    paramsInfo.m_InputToOutputWeights     = &(params.m_InputToOutputWeights->GetInfo());
    paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
    paramsInfo.m_RecurrentToCellWeights   = &(params.m_RecurrentToCellWeights->GetInfo());
    paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
    paramsInfo.m_ForgetGateBias           = &(params.m_ForgetGateBias->GetInfo());
    paramsInfo.m_CellBias                 = &(params.m_CellBias->GetInfo());
    paramsInfo.m_OutputGateBias           = &(params.m_OutputGateBias->GetInfo());

    // Optional parameters
    if (!desc.m_CifgEnabled)
    {
        paramsInfo.m_InputToInputWeights     = &(params.m_InputToInputWeights->GetInfo());
        paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
        if (params.m_CellToInputWeights != nullptr)
        {
            paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
        }
        paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
    }

    if (desc.m_ProjectionEnabled)
    {
        paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
        if (params.m_ProjectionBias != nullptr)
        {
            paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
        }
    }

    if (desc.m_PeepholeEnabled)
    {
        paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
        paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsLstmSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputStateInInfo,
                               cellStateInInfo,
                               scratchBufferInfo,
                               outputStateOutInfo,
                               cellStateOutInfo,
                               outputInfo,
                               desc,
                               paramsInfo);
    if (!isSupported)
    {
        return false;
    }

    // Add the layer
    armnn::IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");

    input.Connect(layer->GetInputSlot(0));
    outputStateIn.Connect(layer->GetInputSlot(1));
    cellStateIn.Connect(layer->GetInputSlot(2));

    return (SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, 0, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 1, *layer, 1, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 2, *layer, 2, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 3, *layer, 3, model, data));
}

bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertL2Normalization()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    armnn::TensorInfo outputInfo       = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        ALOGD("Output shape not set, will infer from inputs");
        outputInfo.SetShape(inputInfo.GetShape());
    }

    armnn::L2NormalizationDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsL2NormalizationSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertL2Pool2d()");
    return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
}

bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertMaxPool2d()");
    return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
}

bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertMul()");

    LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always input index 2, and it is optional.
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<hal_1_0::HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);

    if (outputOperand == nullptr)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMultiplicationSupported,
                               data.m_Backends,
                               isSupported,
                               input0.GetTensorInfo(),
                               input1.GetTensorInfo(),
                               outInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
    armnn::IConnectableLayer* const endLayer   = ProcessActivation(outInfo, activationFunction, startLayer, data);

    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();

    if (endLayer != nullptr)
    {
        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
        return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}

bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertReLu()");
    return ::ConvertReLu<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertReLu1()");
    return ::ConvertReLu1<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertReLu6()");
    return ::ConvertReLu6<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertSoftmax()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo))
    {
        ALOGD("Output shape not set, will infer from input");
        outputInfo.SetShape(input.GetTensorInfo().GetShape());
    }

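    // Input 1 of the NNAPI SOFTMAX operation is the positive scaling factor beta; it is read
    // here into the armnn::SoftmaxDescriptor.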
    armnn::SoftmaxDescriptor desc;
    if (!GetInputFloat32<hal_1_0::HalPolicy>(operation, 1, desc.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsSoftmaxSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

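    // Pass the (possibly inferred) output tensor info explicitly so the tracked output slot
    // uses it rather than the shape recorded on the model operand.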
    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertSpaceToDepth()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }

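    // The block size is a scalar INT32 on input 1. A block size of 1 would leave the tensor
    // unchanged and anything smaller is invalid, so both are rejected.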
    armnn::SpaceToDepthDescriptor desc;

    if (!GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (desc.m_BlockSize <= 1)
    {
        return Fail("%s: Block size must be greater than 1", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsSpaceToDepthSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertTanH()");
    return ::ConvertTanH<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertReshape()");

    const Operand* inputOperand = GetInputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    const Operand* requestedShapeOperand = GetInputOperand<hal_1_0::HalPolicy>(operation, 1, model);
    const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);

    if (inputOperand == nullptr
        || requestedShapeOperand == nullptr
        || outputOperand == nullptr)
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (requestedShapeOperand->dimensions.size() != 1)
    {
        return Fail("%s: Input 1 expected to be one-dimensional (found %zu dimensions)",
                    __func__, requestedShapeOperand->dimensions.size());
    }

    std::vector<int32_t> targetDimensions;
    if (!GetTensorInt32Values<hal_1_0::HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
    {
        return Fail("%s: Could not read values of input 1", __func__);
    }

    const Shape inputOperandShape = GetOperandShape(*inputOperand);

    Shape requestedShape;
    // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
    // function that resolves these values into a fully specified tensor shape.
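    // For example, an input of shape [1, 2, 3, 4] reshaped with targetDimensions [2, -1]
    // resolves to the shape [2, 12].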
    if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
    {
        return Fail("%s: Failed to resolve the requested shape", __func__);
    }

    const Shape outputOperandShape = GetOperandShape(*outputOperand);
    if (!SameShape(requestedShape, outputOperandShape))
    {
        return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
    }

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

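    // Copy the fully resolved requested shape into the armnn::ReshapeDescriptor as the
    // layer's target shape.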
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
                                                         requestedShape.dimensions.data());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertResizeBilinear()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

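    // HAL 1.0 tensors use the NHWC layout and RESIZE_BILINEAR has no layout operand, so the
    // descriptor is fixed to bilinear resizing in NHWC.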
    armnn::ResizeDescriptor desc;
    desc.m_Method = armnn::ResizeMethod::Bilinear;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

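    // Read the target width and height (inputs 1 and 2) before querying backend support so
    // that the descriptor passed to IsResizeSupported is fully populated.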
    if (!GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, desc.m_TargetWidth, model, data) ||
        !GetInputScalar<hal_1_0::HalPolicy>(operation, 2, OperandType::INT32, desc.m_TargetHeight, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsResizeSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(desc);
    assert(layer != nullptr);

    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

} // namespace hal_1_0
} // namespace armnn_driver