blob: 8c6170016d16658c2a6232622f1d07e459e3fb55 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "HalPolicy.hpp"
7
Matthew Benthamf61c2702019-04-23 16:43:27 +01008#include <armnn/Optional.hpp>
9
10#include "FullyConnected.hpp"
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +010011#include "OutputShapeUtils.hpp"
arovir015602b192018-10-04 16:15:02 +010012
arovir01b0717b52018-09-05 17:03:25 +010013namespace armnn_driver
14{
15namespace hal_1_0
16{
17
18bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
19{
20 switch (operation.type)
21 {
22 case V1_0::OperationType::ADD:
23 return ConvertAdd(operation, model, data);
24 case V1_0::OperationType::AVERAGE_POOL_2D:
25 return ConvertAveragePool2d(operation, model, data);
26 case V1_0::OperationType::CONCATENATION:
27 return ConvertConcatenation(operation, model, data);
28 case V1_0::OperationType::CONV_2D:
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +010029 return ValidateConv2dParameters(operation) &&
30 ConvertConv2d<hal_1_0::HalPolicy>(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +010031 case V1_0::OperationType::DEPTHWISE_CONV_2D:
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +010032 return ValidateDepthwiseConv2dParameters(operation) &&
33 ConvertDepthwiseConv2d<hal_1_0::HalPolicy>(operation, model, data);
David Monahanacf479a2019-05-29 14:27:04 +010034 case V1_0::OperationType::DEQUANTIZE:
35 return ConvertDequantize(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +010036 case V1_0::OperationType::FLOOR:
37 return ConvertFloor(operation, model, data);
38 case V1_0::OperationType::FULLY_CONNECTED:
39 return ConvertFullyConnected(operation, model, data);
40 case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
41 return ConvertLocalResponseNormalization(operation, model, data);
42 case V1_0::OperationType::LOGISTIC:
43 return ConvertLogistic(operation, model, data);
44 case V1_0::OperationType::LSTM:
45 return ConvertLstm(operation, model, data);
46 case V1_0::OperationType::L2_NORMALIZATION:
47 return ConvertL2Normalization(operation, model, data);
48 case V1_0::OperationType::L2_POOL_2D:
49 return ConvertL2Pool2d(operation, model, data);
50 case V1_0::OperationType::MAX_POOL_2D:
51 return ConvertMaxPool2d(operation, model, data);
52 case V1_0::OperationType::MUL:
53 return ConvertMul(operation, model, data);
54 case V1_0::OperationType::RELU:
55 return ConvertReLu(operation, model, data);
56 case V1_0::OperationType::RELU1:
57 return ConvertReLu1(operation, model, data);
58 case V1_0::OperationType::RELU6:
59 return ConvertReLu6(operation, model, data);
60 case V1_0::OperationType::SOFTMAX:
61 return ConvertSoftmax(operation, model, data);
Keith Davisa6bc52f2019-06-26 09:39:49 +010062 case V1_0::OperationType::SPACE_TO_DEPTH:
63 return ConvertSpaceToDepth(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +010064 case V1_0::OperationType::TANH:
65 return ConvertTanH(operation, model, data);
66 case V1_0::OperationType::RESHAPE:
67 return ConvertReshape(operation, model, data);
68 case V1_0::OperationType::RESIZE_BILINEAR:
69 return ConvertResizeBilinear(operation, model, data);
70 default:
71 return Fail("%s: Operation type %s not supported in ArmnnDriver",
72 __func__, toString(operation.type).c_str());
73 }
74}
75
Mike Kellyb5fdf382019-06-11 16:35:25 +010076bool HalPolicy::ValidateConv2dParameters(const Operation &operation)
77{
78 if (operation.inputs.size() != 10 && operation.inputs.size() != 7)
79 {
80 return Fail("%s: Unsupported number of operation inputs", __func__);
81 }
82 return true;
83}
84
85bool HalPolicy::ValidateDepthwiseConv2dParameters(const Operation &operation)
86{
87 if (operation.inputs.size() != 11 && operation.inputs.size() != 8)
88 {
89 return Fail("%s: Unsupported number of operation inputs", __func__);
90 }
91 return true;
92}
93
arovir01b0717b52018-09-05 17:03:25 +010094bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
95{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +010096 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
97 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 1, model, data);
arovir01b0717b52018-09-05 17:03:25 +010098
99 if (!input0.IsValid() || !input1.IsValid())
100 {
101 return Fail("%s: Operation has invalid inputs", __func__);
102 }
103
104 // The FuseActivation parameter is always the input index 2
105 // and it should be optional
106 ActivationFn activationFunction;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100107 if (!GetOptionalInputActivation<hal_1_0::HalPolicy>(operation, 2, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100108 {
109 return Fail("%s: Operation has invalid inputs", __func__);
110 }
111
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100112 const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100113 if (!outputOperand)
114 {
115 return false;
116 }
117
118 const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
119
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100120 bool isSupported = false;
121 FORWARD_LAYER_SUPPORT_FUNC(__func__,
122 IsAdditionSupported,
123 data.m_Backends,
124 isSupported,
125 input0.GetTensorInfo(),
126 input1.GetTensorInfo(),
127 outInfo);
128 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +0100129 {
130 return false;
131 }
132
133 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
134 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
135
136 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
137 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
138
139 if (endLayer != nullptr)
140 {
141 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100142 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100143 }
144 else
145 {
146 return Fail("%s: ProcessActivation failed", __func__);
147 }
148}
149
150bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
151{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100152 return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100153}
154
155bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
156{
157 // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
158 if (operation.inputs.size() <= 1)
159 {
160 return Fail("%s: Operation has insufficient arguments", __func__);
161 }
162
163 // Get inputs and outputs
164 const std::size_t numInputTensors = operation.inputs.size() - 1;
165
166 int32_t concatDim;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100167 if (!GetInputScalar<hal_1_0::HalPolicy>(operation, numInputTensors, OperandType::INT32, concatDim, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100168 {
169 return Fail("%s: Operation has invalid inputs", __func__);
170 }
171
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100172 const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100173 if (!outputOperand)
174 {
175 return Fail("%s: Operation has no outputs", __func__);
176 }
177
178
179 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
180 armnn::TensorShape outputShape = outputInfo.GetShape();
181
182 //
183 // handle negative concat dims along the lines of tensorflow as described here:
184 // https://www.tensorflow.org/api_docs/python/tf/concat
185 // "negative axis refers to axis + rank(values)-th dimension"
186 //
187 if (concatDim < 0)
188 {
189 concatDim += outputShape.GetNumDimensions();
190 }
191
192 if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
193 {
194 return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
195 }
196
197 std::vector<LayerInputHandle> inputHandles;
198 std::vector<armnn::TensorShape> inputShapes;
199
200 inputHandles.reserve(numInputTensors);
201 inputShapes.reserve(numInputTensors);
202
203 bool inputsHaveBeenReshaped = false;
204 unsigned int tensorDimensionsAdded = 0;
205
206 for (uint32_t i = 0; i < numInputTensors; ++i)
207 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100208 const Operand* const operand = GetInputOperand<hal_1_0::HalPolicy>(operation, i, model);
arovir01b0717b52018-09-05 17:03:25 +0100209 if (!operand)
210 {
211 return Fail("%s: Operation has invalid inputs", __func__);
212 }
213
214 armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100215 LayerInputHandle operandInputHandle =
216 ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, i, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100217
218 if (operandShape.GetNumDimensions() == 0)
219 {
220 return Fail("%s: Operands with rank 0 are not supported", __func__);
221 }
222
223 if (RequiresReshape(operandShape))
224 {
225 inputsHaveBeenReshaped = true;
226
227 armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();
228
229 // Expand the tensor to three dimensions
230 if (operandShape.GetNumDimensions() == 2)
231 {
232 reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
233 tensorDimensionsAdded = 1;
234 }
235 else
236 {
237 reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
238 tensorDimensionsAdded = 2;
239 }
240
241 armnn::IConnectableLayer& newReshape = AddReshapeLayer(
242 *data.m_Network,
243 operandInputHandle,
244 reshapeInfo
245 );
246
247 // Point to the reshape operation rather then the input operation
248 operandShape = reshapeInfo.GetShape();
249 operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
250 }
251
252 inputShapes.emplace_back(operandShape);
253 inputHandles.emplace_back(operandInputHandle);
254
255 if (!inputHandles.back().IsValid())
256 {
257 return Fail("%s: Operation has invalid inputs", __func__);
258 }
259 }
260
261 BOOST_ASSERT(inputShapes.size() == inputHandles.size());
262
263 if (inputsHaveBeenReshaped)
264 {
265 // Adjust the concatenation dimension by the amount of dimensions added (if any)
266 concatDim += tensorDimensionsAdded;
267
268 // Add extra dimensions to the output shape to reflect the addition of the reshape layers
269 if (tensorDimensionsAdded == 1)
270 {
271 outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
272 }
273 else if (tensorDimensionsAdded == 2)
274 {
narpra01f176d5a2018-11-18 20:17:48 +0000275 outputShape = armnn::TensorShape({1, 1, outputShape[0]});
arovir01b0717b52018-09-05 17:03:25 +0100276 }
277 }
278
narpra01f176d5a2018-11-18 20:17:48 +0000279 // Check if permutations is required and get the pair of permutations required for the concatenation.
280 // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
arovir01b0717b52018-09-05 17:03:25 +0100281 std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
282 std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
283
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100284 bool needPermute =
285 CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);
arovir01b0717b52018-09-05 17:03:25 +0100286
narpra01f176d5a2018-11-18 20:17:48 +0000287 if (needPermute)
288 {
289 outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
290 }
291
arovir01b0717b52018-09-05 17:03:25 +0100292 outputInfo.SetShape(outputShape);
293
294 // this is no-op for identity swizzles, otherwise it replaces both
295 // the handles and shapes with the swizzled layer output handles and shapes
296 SwizzleInputs(*data.m_Network, inputHandles, inputShapes, permutationPair.first);
297
Jim Flynn7b1e41f2019-05-22 18:00:04 +0100298 // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
299 armnn::OriginsDescriptor concatDescriptor;
narpra01f176d5a2018-11-18 20:17:48 +0000300
arovir01b0717b52018-09-05 17:03:25 +0100301 try
302 {
Jim Flynn7b1e41f2019-05-22 18:00:04 +0100303 // The concat descriptor is always created across the only supported concat dimension
narpra01f176d5a2018-11-18 20:17:48 +0000304 // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
Jim Flynn7b1e41f2019-05-22 18:00:04 +0100305 concatDescriptor =
Jim Flynn52aa9352019-05-20 12:52:30 +0100306 armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
arovir01b0717b52018-09-05 17:03:25 +0100307 }
308 catch (const armnn::Exception& error)
309 {
Jim Flynn7b1e41f2019-05-22 18:00:04 +0100310 return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
arovir01b0717b52018-09-05 17:03:25 +0100311 }
312
313 // Validate the output shape is correct given the input shapes based on the
narpra01f176d5a2018-11-18 20:17:48 +0000314 // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
arovir01b0717b52018-09-05 17:03:25 +0100315 if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
316 {
317 return Fail("%s: Error validating the output shape for concat", __func__);
318 }
319
320 std::vector<const armnn::TensorInfo*> inputTensorInfos;
321 std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
322 [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100323
324 bool isSupported = false;
325 FORWARD_LAYER_SUPPORT_FUNC(__func__,
326 IsConcatSupported,
327 data.m_Backends,
328 isSupported,
329 inputTensorInfos,
330 outputInfo,
331 concatDescriptor);
332 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +0100333 {
334 return false;
335 }
336
Jim Flynn7b1e41f2019-05-22 18:00:04 +0100337 armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
arovir01b0717b52018-09-05 17:03:25 +0100338 assert(layer != nullptr);
339 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
340
341 // Connect inputs to the layer
342 const int numInputSlots = layer->GetNumInputSlots();
343 assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
344 for (int i = 0; i < numInputSlots; ++i)
345 {
346 // connect the input directly to the merge (concat) layer
347 inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
348 }
349
narpra01f176d5a2018-11-18 20:17:48 +0000350 if (needPermute)
351 {
352 // Add permutation layer and connect the output to it, the permutation becomes the output layer
353 armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
354 layer->GetOutputSlot(0),
355 permutationPair.second);
356 layer = &deswizzleLayer;
357 }
arovir01b0717b52018-09-05 17:03:25 +0100358
359 if (inputsHaveBeenReshaped)
360 {
361 armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
362
363 // Undo the reshape knowing the amount of dimensions added
364 if (tensorDimensionsAdded == 1)
365 {
366 afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
367 afterConcatInfo.GetShape()[2] }));
368 }
369 else if (tensorDimensionsAdded == 2)
370 {
narpra01f176d5a2018-11-18 20:17:48 +0000371 afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
arovir01b0717b52018-09-05 17:03:25 +0100372 }
373
374 layer = &AddReshapeLayer(
375 *data.m_Network,
376 layer->GetOutputSlot(0),
377 afterConcatInfo
378 );
379 }
380
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100381 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100382}
383
David Monahanacf479a2019-05-29 14:27:04 +0100384bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
385{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100386 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
David Monahanacf479a2019-05-29 14:27:04 +0100387
388 if (!input.IsValid())
389 {
390 return Fail("%s: Operation has invalid input", __func__);
391 }
392
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100393 const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
David Monahanacf479a2019-05-29 14:27:04 +0100394 if (!outputOperand)
395 {
396 return Fail("%s: Operation has invalid outputs", __func__);
397 }
398
Aron Virginas-Tarc16c9c12019-07-11 11:14:11 +0100399 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +0100400 if (IsDynamicOutput(outputInfo))
401 {
Aron Virginas-Tarc16c9c12019-07-11 11:14:11 +0100402 ALOGD("Output shape not set, will infer from input");
403 outputInfo.SetShape(input.GetTensorInfo().GetShape());
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +0100404 }
405
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100406 bool isSupported = false;
407 FORWARD_LAYER_SUPPORT_FUNC(__func__,
408 IsDequantizeSupported,
409 data.m_Backends,
410 isSupported,
411 input.GetTensorInfo(),
412 GetTensorInfoForOperand(*outputOperand));
413 if (!isSupported)
David Monahanacf479a2019-05-29 14:27:04 +0100414 {
415 return false;
416 }
417
418 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
419 assert(layer != nullptr);
420 input.Connect(layer->GetInputSlot(0));
421
Aron Virginas-Tarc16c9c12019-07-11 11:14:11 +0100422 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation,
423 0,
424 *layer,
425 model,
426 data,
427 armnn::Optional<armnn::TensorInfo>(outputInfo));
David Monahanacf479a2019-05-29 14:27:04 +0100428}
429
arovir01b0717b52018-09-05 17:03:25 +0100430bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
431{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100432 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100433 if (!input.IsValid())
434 {
435 return Fail("%s: Operation has invalid inputs", __func__);
436 }
437
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100438 const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100439 if (!outputOperand)
440 {
441 return Fail("%s: Operation has invalid outputs", __func__);
442 }
443
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100444 bool isSupported = false;
445 FORWARD_LAYER_SUPPORT_FUNC(__func__,
446 IsFloorSupported,
447 data.m_Backends,
448 isSupported,
449 input.GetTensorInfo(),
450 GetTensorInfoForOperand(*outputOperand));
451 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +0100452 {
453 return false;
454 }
455
456 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
457 assert(layer != nullptr);
458 input.Connect(layer->GetInputSlot(0));
459
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100460 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100461}
462
463bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
464{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100465 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100466 if (!input.IsValid())
467 {
468 return Fail("%s: Operation has invalid inputs", __func__);
469 }
470
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100471 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100472 if (!output)
473 {
474 return Fail("%s: Could not read output 0", __func__);
475 }
476
477 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
FinnWilliamsArm92ec7252019-07-16 12:15:18 +0100478 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
479
480 if (IsDynamicOutput(outputInfo))
481 {
482 ALOGD("Output shape not set, will infer from inputs");
483 outputInfo.SetShape(inputInfo.GetShape());
484 }
arovir01b0717b52018-09-05 17:03:25 +0100485
486 // ArmNN does not currently support non-fixed weights or bias
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100487 ConstTensorPin weightsPin =
488 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 1, model, data); // 2D
489 ConstTensorPin biasPin =
490 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 2, model, data); // 1D
arovir01b0717b52018-09-05 17:03:25 +0100491
492 if (!weightsPin.IsValid() || !biasPin.IsValid())
493 {
494 return Fail("%s: Operation has invalid inputs", __func__);
495 }
496
497 armnn::ConstTensor weights = weightsPin.GetConstTensor();
498 armnn::ConstTensor bias = biasPin.GetConstTensor();
arovir01b0717b52018-09-05 17:03:25 +0100499 armnn::TensorInfo reshapedInfo = inputInfo;
Matthew Benthamf61c2702019-04-23 16:43:27 +0100500
501 try
arovir01b0717b52018-09-05 17:03:25 +0100502 {
Matthew Benthamf61c2702019-04-23 16:43:27 +0100503 reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
504 } catch (const std::exception &e) {
505 return Fail("%s: %s", __func__, e.what());
arovir01b0717b52018-09-05 17:03:25 +0100506 }
507
508 // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
509 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);
510
511 ActivationFn activationFunction;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100512 if (!GetInputActivationFunction<hal_1_0::HalPolicy>(operation, 3, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100513 {
514 return Fail("%s: Operation has invalid inputs", __func__);
515 }
516
517 armnn::FullyConnectedDescriptor desc;
518 desc.m_TransposeWeightMatrix = true;
519 desc.m_BiasEnabled = true;
520
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100521 bool isSupported = false;
522 FORWARD_LAYER_SUPPORT_FUNC(__func__,
523 IsFullyConnectedSupported,
524 data.m_Backends,
525 isSupported,
526 reshapedInfo,
527 outputInfo,
528 weights.GetInfo(),
529 bias.GetInfo(),
530 desc);
531 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +0100532 {
533 return false;
534 }
535
Matteo Martincighba01f372019-05-14 13:28:21 +0100536 armnn::IConnectableLayer* startLayer =
537 data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
arovir01b0717b52018-09-05 17:03:25 +0100538 armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
539
540 if (endLayer != nullptr)
541 {
542 if (inputInfo.GetNumDimensions() > 2U)
543 {
544 armnn::ReshapeDescriptor reshapeDescriptor;
545 reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
546
547 armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
548 assert(reshapeLayer != nullptr);
549 input.Connect(reshapeLayer->GetInputSlot(0));
550 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
551 reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
552 }
553 else
554 {
555 input.Connect(startLayer->GetInputSlot(0));
556 }
557
FinnWilliamsArm92ec7252019-07-16 12:15:18 +0100558 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation,
559 0,
560 *endLayer,
561 model,
562 data,
563 armnn::Optional<armnn::TensorInfo>(outputInfo));
arovir01b0717b52018-09-05 17:03:25 +0100564 }
565 else
566 {
567 return Fail("%s: ProcessActivation failed", __func__);
568 }
569}
570
571bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
572 const Model& model,
573 ConversionData& data)
574{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100575 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100576 if (!input.IsValid())
577 {
578 return Fail("%s: Operation has invalid inputs", __func__);
579 }
580
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100581 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100582 if (!output)
583 {
584 return Fail("%s: Could not read output 0", __func__);
585 }
586
narpra012fb804a2018-10-22 14:52:32 +0100587 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
arovir01b0717b52018-09-05 17:03:25 +0100588 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
589
arovir01b0717b52018-09-05 17:03:25 +0100590 armnn::NormalizationDescriptor descriptor;
591
narpra012fb804a2018-10-22 14:52:32 +0100592 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +0100593 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
narpra012fb804a2018-10-22 14:52:32 +0100594 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
arovir01b0717b52018-09-05 17:03:25 +0100595
596 if (!input.IsValid() ||
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100597 !GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
598 !GetInputFloat32<hal_1_0::HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
599 !GetInputFloat32<hal_1_0::HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
600 !GetInputFloat32<hal_1_0::HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100601 {
602 return Fail("%s: Operation has invalid inputs", __func__);
603 }
604
605 // ArmNN expects normSize to be the full size of the normalization
606 // window rather than the radius as in AndroidNN.
607 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
608
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100609 bool isSupported = false;
610 FORWARD_LAYER_SUPPORT_FUNC(__func__,
611 IsNormalizationSupported,
612 data.m_Backends,
613 isSupported,
614 inputInfo,
615 outputInfo,
616 descriptor);
617 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +0100618 {
619 return false;
620 }
621
622
623 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
624 assert(layer != nullptr);
narpra012fb804a2018-10-22 14:52:32 +0100625 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +0100626
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100627 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100628}
629
630bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
631{
632 armnn::ActivationDescriptor desc;
633 desc.m_Function = armnn::ActivationFunction::Sigmoid;
634
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100635 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100636}
637
638bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
639{
640 // Inputs:
641 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
642 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100643 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100644 if (!input.IsValid())
645 {
646 return Fail("%s: Could not read input 0: input", __func__);
647 }
648 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100649 LayerInputHandle outputStateIn = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 18, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100650 if (!outputStateIn.IsValid())
651 {
652 return Fail("%s: Could not read input 18: outputStateIn", __func__);
653 }
654 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100655 LayerInputHandle cellStateIn = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 19, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100656 if (!cellStateIn.IsValid())
657 {
658 return Fail("%s: Could not read input 19: cellStateIn", __func__);
659 }
660
661 // Get the mandatory input tensors:
662 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
663 // [num_units, input_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100664 const ConstTensorPin inputToForgetWeightsPin =
665 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 2, model, data);
666 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
667 // [num_units, input_size].
668 const ConstTensorPin inputToCellWeightsPin =
669 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 3, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100670 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
671 // [num_units, input_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100672 const ConstTensorPin inputToOutputWeightsPin =
673 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 4, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100674 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
675 // [num_units, output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100676 const ConstTensorPin recurrentToForgetWeightsPin =
677 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 6, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100678 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
679 // [num_units, output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100680 const ConstTensorPin recurrentToCellWeightsPin =
681 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 7, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100682 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
683 // [num_units, output_size].
684 const ConstTensorPin recurrentToOutputWeightsPin =
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100685 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 8, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100686 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100687 const ConstTensorPin forgetGateBiasPin =
688 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 13, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100689 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100690 const ConstTensorPin cellBiasPin =
691 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 14, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100692 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100693 const ConstTensorPin outputGateBiasPin =
694 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 15, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100695
696 if (!inputToForgetWeightsPin.IsValid() ||
697 !inputToCellWeightsPin.IsValid() ||
698 !inputToOutputWeightsPin.IsValid() ||
699 !recurrentToForgetWeightsPin.IsValid() ||
700 !recurrentToCellWeightsPin.IsValid() ||
701 !recurrentToOutputWeightsPin.IsValid() ||
702 !forgetGateBiasPin.IsValid() ||
703 !cellBiasPin.IsValid() ||
704 !outputGateBiasPin.IsValid())
705 {
706 return Fail("%s: Operation has invalid tensor inputs", __func__);
707 }
708
709 // Get the optional input tensors:
710 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
711 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100712 const ConstTensorPin inputToInputWeightsPin =
713 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
714 1,
715 model,
716 data,
717 g_DontPermute,
718 nullptr,
719 true);
720
arovir01b0717b52018-09-05 17:03:25 +0100721 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
722 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
723 // “num_units”), or the second dimension of the “projection_weights”, if defined.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100724 const ConstTensorPin recurrentToInputWeightsPin =
725 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
726 5,
727 model,
728 data,
729 g_DontPermute,
730 nullptr,
731 true);
732
arovir01b0717b52018-09-05 17:03:25 +0100733 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100734 const ConstTensorPin cellToInputWeightsPin =
735 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
736 9,
737 model,
738 data,
739 g_DontPermute,
740 nullptr,
741 true);
742
arovir01b0717b52018-09-05 17:03:25 +0100743 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100744 const ConstTensorPin cellToForgetWeightsPin =
745 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
746 10,
747 model,
748 data,
749 g_DontPermute,
750 nullptr,
751 true);
752
arovir01b0717b52018-09-05 17:03:25 +0100753 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100754 const ConstTensorPin cellToOutputWeightsPin =
755 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
756 11,
757 model,
758 data,
759 g_DontPermute,
760 nullptr,
761 true);
762
arovir01b0717b52018-09-05 17:03:25 +0100763 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100764 const ConstTensorPin inputGateBiasPin =
765 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
766 12,
767 model,
768 data,
769 g_DontPermute,
770 nullptr,
771 true);
772
arovir01b0717b52018-09-05 17:03:25 +0100773 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
774 // [output_size, num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100775 const ConstTensorPin projectionWeightsPin =
776 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
777 16,
778 model,
779 data,
780 g_DontPermute,
781 nullptr,
782 true);
783
arovir01b0717b52018-09-05 17:03:25 +0100784 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100785 const ConstTensorPin projectionBiasPin =
786 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
787 17,
788 model,
789 data,
790 g_DontPermute,
791 nullptr,
792 true);
arovir01b0717b52018-09-05 17:03:25 +0100793
794 if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
795 (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
796 (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
797 (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
798 (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
799 (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
800 (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
801 (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
802 {
803 return Fail("%s: Operation has invalid tensor inputs", __func__);
804 }
805
806 // Get the mandatory input scalars (actually 1-D tensors of size 1):
807 // 20: The activation function: A value indicating the activation function:
808 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
809 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
810 // If set to 0.0 then clipping is disabled.
811 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
812 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
813 ActivationFn activation;
814 float cellClip;
815 float projClip;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100816 if (!GetInputActivationFunctionFromTensor<hal_1_0::HalPolicy>(operation, 20, activation, model, data) ||
817 !GetInputScalar<hal_1_0::HalPolicy>(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
818 !GetInputScalar<hal_1_0::HalPolicy>(operation, 22, OperandType::FLOAT32, projClip, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100819 {
820 return Fail("%s: Operation has invalid scalar inputs", __func__);
821 }
822
823 // Outputs:
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100824 // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
825 // with CIFG, or [batch_size, num_units * 3] without CIFG.
826 const Operand* scratchBuffer = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100827 if (!scratchBuffer)
828 {
829 return Fail("%s: Could not read output 0: scratchBuffer", __func__);
830 }
831 // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100832 const Operand* outputStateOut = GetOutputOperand<hal_1_0::HalPolicy>(operation, 1, model);
arovir01b0717b52018-09-05 17:03:25 +0100833 if (!outputStateOut)
834 {
835 return Fail("%s: Could not read output 1: outputStateOut", __func__);
836 }
837 // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100838 const Operand* cellStateOut = GetOutputOperand<hal_1_0::HalPolicy>(operation, 2, model);
arovir01b0717b52018-09-05 17:03:25 +0100839 if (!cellStateOut)
840 {
841 return Fail("%s: Could not read output 2: cellStateOut", __func__);
842 }
843 // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
844 // effectively the same as the current “output state (out)” value.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100845 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 3, model);
arovir01b0717b52018-09-05 17:03:25 +0100846 if (!output)
847 {
848 return Fail("%s: Could not read output 3: output", __func__);
849 }
850
851 // set the params structure for the AddLstmLayer call
852 armnn::LstmInputParams params;
853 params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
854 params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
855 params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
856 params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
857 params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
858 params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
859 params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
860 params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
861 params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
862 params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
863 params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
864 params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
865 params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
866 params.m_CellBias = cellBiasPin.GetConstTensorPtr();
867 params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
868 params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
869 params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
870
871 // set the layer descriptor
872 armnn::LstmDescriptor desc;
873 desc.m_ActivationFunc = activation;
874 desc.m_ClippingThresCell = cellClip;
875 desc.m_ClippingThresProj = projClip;
876 desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
877 params.m_RecurrentToInputWeights == nullptr ||
878 params.m_InputGateBias == nullptr);
879 desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
880 params.m_CellToOutputWeights != nullptr);
881 desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
882
883 // validate the optional input groups
884 if (desc.m_CifgEnabled &&
885 (params.m_InputToInputWeights != nullptr ||
886 params.m_RecurrentToInputWeights != nullptr ||
887 params.m_InputGateBias != nullptr))
888 {
889 return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
890 " and input gate bias must be provided", __func__);
891 }
892
893 if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
894 {
895 return Fail("%s: projection bias should not be provided without projection weights", __func__);
896 }
897
898 if (desc.m_PeepholeEnabled &&
899 (params.m_CellToForgetWeights == nullptr ||
900 params.m_CellToOutputWeights == nullptr ||
901 (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
902 {
903 return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
904 " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
905 }
906
907 // Check if the layer is supported
908 // Inputs
909 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
910 const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
911 const armnn::TensorInfo& cellStateInInfo = cellStateIn.GetTensorInfo();
912
913 // Outputs
914 const armnn::TensorInfo& scratchBufferInfo = GetTensorInfoForOperand(*scratchBuffer);
915 const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
916 const armnn::TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
917 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
918
919 // Basic parameters
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100920 armnn::LstmInputParamsInfo paramsInfo;
921 paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
922 paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
923 paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
924 paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
925 paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
926 paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
927 paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
928 paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
929 paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100930
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100931 // Optional parameters
arovir01b0717b52018-09-05 17:03:25 +0100932 if(!desc.m_CifgEnabled)
933 {
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100934 paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
935 paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100936 if (params.m_CellToInputWeights != nullptr)
937 {
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100938 paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100939 }
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100940 paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100941 }
942
943 if(desc.m_ProjectionEnabled)
944 {
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100945 paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100946 if (params.m_ProjectionBias != nullptr)
947 {
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100948 paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100949 }
950 }
951
952 if(desc.m_PeepholeEnabled)
953 {
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100954 paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
955 paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100956 }
957
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100958 bool isSupported = false;
959 FORWARD_LAYER_SUPPORT_FUNC(__func__,
960 IsLstmSupported,
961 data.m_Backends,
962 isSupported,
963 inputInfo,
964 outputStateInInfo,
965 cellStateInInfo,
966 scratchBufferInfo,
967 outputStateOutInfo,
968 cellStateOutInfo,
969 outputInfo,
970 desc,
971 paramsInfo);
972 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +0100973 {
974 return false;
975 }
976
977 // Add the layer
978 armnn::IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");
979
980 input.Connect(layer->GetInputSlot(0));
981 outputStateIn.Connect(layer->GetInputSlot(1));
982 cellStateIn.Connect(layer->GetInputSlot(2));
983
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100984 return (SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, 0, model, data) &&
985 SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 1, *layer, 1, model, data) &&
986 SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 2, *layer, 2, model, data) &&
987 SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 3, *layer, 3, model, data));
arovir01b0717b52018-09-05 17:03:25 +0100988}
989
990bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
991{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100992 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100993 if (!input.IsValid())
994 {
995 return Fail("%s: Operation has invalid inputs", __func__);
996 }
997
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100998 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100999 if (!output)
1000 {
1001 return Fail("%s: Could not read output 0", __func__);
1002 }
1003
1004 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
FinnWilliamsArm6bda94a2019-07-11 17:02:57 +01001005 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
arovir01b0717b52018-09-05 17:03:25 +01001006
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +01001007 if (IsDynamicOutput(outputInfo))
1008 {
FinnWilliamsArm6bda94a2019-07-11 17:02:57 +01001009 ALOGD("Output shape not set, will infer from inputs");
1010 outputInfo.SetShape(inputInfo.GetShape());
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +01001011 }
1012
Matteo Martincigh58f71092018-09-25 15:58:52 +01001013 armnn::L2NormalizationDescriptor desc;
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +01001014 desc.m_DataLayout = armnn::DataLayout::NHWC;
Matteo Martincigh58f71092018-09-25 15:58:52 +01001015
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001016 bool isSupported = false;
1017 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1018 IsL2NormalizationSupported,
1019 data.m_Backends,
1020 isSupported,
1021 inputInfo,
1022 outputInfo,
1023 desc);
1024 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001025 {
1026 return false;
1027 }
1028
Matteo Martincigh58f71092018-09-25 15:58:52 +01001029 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
arovir01b0717b52018-09-05 17:03:25 +01001030 assert(layer != nullptr);
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +01001031 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +01001032
FinnWilliamsArm6bda94a2019-07-11 17:02:57 +01001033 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation,
1034 0,
1035 *layer,
1036 model,
1037 data,
1038 armnn::Optional<armnn::TensorInfo>(outputInfo));
arovir01b0717b52018-09-05 17:03:25 +01001039}
1040
// Converts an ANEURALNETWORKS_L2_POOL_2D operation by delegating to the
// shared Pooling2d converter with the L2 pooling algorithm selected.
bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
}
1045
// Converts an ANEURALNETWORKS_MAX_POOL_2D operation by delegating to the
// shared Pooling2d converter with the Max pooling algorithm selected.
bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
}
1050
1051bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
1052{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001053 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
1054 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 1, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001055
1056 if (!input0.IsValid() || !input1.IsValid())
1057 {
1058 return Fail("%s: Operation has invalid inputs", __func__);
1059 }
1060
1061 // The FuseActivation parameter is always the input index 2
1062 // and it should be optional
1063 ActivationFn activationFunction;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001064 if (!GetOptionalInputActivation<hal_1_0::HalPolicy>(operation, 2, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001065 {
1066 return Fail("%s: Operation has invalid inputs", __func__);
1067 }
1068
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001069 const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001070
1071 if (outputOperand == nullptr)
1072 {
1073 return false;
1074 }
1075
1076 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
1077
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001078 bool isSupported = false;
1079 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1080 IsMultiplicationSupported,
1081 data.m_Backends,
1082 isSupported,
1083 input0.GetTensorInfo(),
1084 input1.GetTensorInfo(),
1085 outInfo);
1086 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001087 {
1088 return false;
1089 }
1090
1091 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
1092 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
1093
1094 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
1095 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
1096
1097 if (endLayer != nullptr)
1098 {
1099 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001100 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001101 }
1102 else
1103 {
1104 return Fail("%s: ProcessActivation failed", __func__);
1105 }
1106}
1107
1108bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
1109{
1110 armnn::ActivationDescriptor desc;
1111 desc.m_Function = armnn::ActivationFunction::ReLu;
1112
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001113 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001114}
1115
1116bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
1117{
1118 armnn::ActivationDescriptor desc;
1119 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1120 desc.m_A = 1.0f;
1121 desc.m_B = -1.0f;
1122
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001123 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001124}
1125
1126bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
1127{
1128 armnn::ActivationDescriptor desc;
1129 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1130 desc.m_A = 6.0f;
1131
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001132 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001133}
1134
1135bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
1136{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001137 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001138 if (!input.IsValid())
1139 {
1140 return Fail("%s: Operation has invalid inputs", __func__);
1141 }
1142
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001143 const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001144 if (!outputOperand)
1145 {
1146 return Fail("%s: Operation has no outputs", __func__);
1147 }
1148
Aron Virginas-Tar9adbb352019-07-11 11:00:43 +01001149 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +01001150 if (IsDynamicOutput(outputInfo))
1151 {
Aron Virginas-Tar9adbb352019-07-11 11:00:43 +01001152 ALOGD("Output shape not set, will infer from input");
1153 outputInfo.SetShape(input.GetTensorInfo().GetShape());
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +01001154 }
arovir01b0717b52018-09-05 17:03:25 +01001155
1156 armnn::SoftmaxDescriptor desc;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001157 if (!GetInputFloat32<hal_1_0::HalPolicy>(operation, 1, desc.m_Beta, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001158 {
1159 return Fail("%s: Operation has invalid inputs", __func__);
1160 }
1161
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001162 bool isSupported = false;
1163 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1164 IsSoftmaxSupported,
1165 data.m_Backends,
1166 isSupported,
1167 input.GetTensorInfo(),
1168 outputInfo,
1169 desc);
1170 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001171 {
1172 return false;
1173 }
1174
1175 armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
1176 assert(layer != nullptr);
1177 input.Connect(layer->GetInputSlot(0));
1178
Aron Virginas-Tar9adbb352019-07-11 11:00:43 +01001179 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation,
1180 0,
1181 *layer,
1182 model,
1183 data,
1184 armnn::Optional<armnn::TensorInfo>(outputInfo));
arovir01b0717b52018-09-05 17:03:25 +01001185}
1186
Keith Davisa6bc52f2019-06-26 09:39:49 +01001187bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
1188{
1189 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
1190
1191 if (!input.IsValid() )
1192 {
1193 return Fail("%s: Operation has invalid inputs", __func__);
1194 }
1195
1196 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1197 unsigned int rank = inputInfo.GetNumDimensions();
1198
1199 if (rank != 4)
1200 {
1201 return Fail("%s: Only inputs with rank 4 are supported", __func__);
1202 }
1203
1204 armnn::SpaceToDepthDescriptor desc;
1205 bool dataLayoutCheck;
1206
1207 GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
1208
1209 if (desc.m_BlockSize <= 1)
1210 {
1211 return Fail("%s: Block size must be at least 1 in all dimensions");
1212 }
1213
1214 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
1215 if (!output)
1216 {
1217 return Fail("%s: Could not read output 0", __func__);
1218 }
1219
1220 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001221
1222 bool isSupported = false;
1223 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1224 IsSpaceToDepthSupported,
1225 data.m_Backends,
1226 isSupported,
1227 inputInfo,
1228 outputInfo,
1229 desc);
1230 if (!isSupported)
Keith Davisa6bc52f2019-06-26 09:39:49 +01001231 {
1232 return false;
1233 }
1234
1235 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
1236 assert(layer != nullptr);
1237 input.Connect(layer->GetInputSlot(0));
1238
1239 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
1240}
1241
arovir01b0717b52018-09-05 17:03:25 +01001242bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
1243{
1244 armnn::ActivationDescriptor desc;
1245 desc.m_Function = armnn::ActivationFunction::TanH;
1246 desc.m_A = 1.0f; // android nn does not support tanH parameters
1247 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1248
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001249 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001250}
1251
1252bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
1253{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001254 const Operand* inputOperand = GetInputOperand<hal_1_0::HalPolicy>(operation, 0, model);
1255 const Operand* requestedShapeOperand = GetInputOperand<hal_1_0::HalPolicy>(operation, 1, model);
1256 const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001257
1258 if (inputOperand == nullptr
1259 || requestedShapeOperand == nullptr
1260 || outputOperand == nullptr)
1261 {
1262 return Fail("%s: Operation has invalid inputs", __func__);
1263 }
1264
1265
1266 if (requestedShapeOperand->dimensions.size() != 1)
1267 {
1268 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
1269 __func__, requestedShapeOperand->dimensions.size());
1270 }
1271
1272 std::vector<int32_t> targetDimensions;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001273 if (!GetTensorInt32Values<hal_1_0::HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001274 {
1275 return Fail("%s: Could not read values of input 1", __func__);
1276 }
1277
1278 const Shape inputOperandShape = GetOperandShape(*inputOperand);
1279
1280 Shape requestedShape;
1281 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
1282 // function that resolves these values into a fully specified tensor shape.
1283 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
1284 {
1285 return Fail("%s: Failed to resolve the requested shape", __func__);
1286 }
1287
1288 const Shape outputOperandShape = GetOperandShape(*outputOperand);
1289 if (!SameShape(requestedShape, outputOperandShape))
1290 {
1291 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
1292 }
1293
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001294 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001295 if (!input.IsValid())
1296 {
1297 return Fail("%s: Could not read input 0", __func__);
1298 }
1299
arovir01b0717b52018-09-05 17:03:25 +01001300 armnn::ReshapeDescriptor reshapeDescriptor;
1301 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
1302 requestedShape.dimensions.data());
1303
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001304 bool isSupported = false;
1305 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1306 IsReshapeSupported,
1307 data.m_Backends,
1308 isSupported,
1309 input.GetTensorInfo(),
1310 reshapeDescriptor);
1311 if (!isSupported)
Matteo Martincigh265d1ad2019-01-08 18:14:53 +00001312 {
1313 return false;
1314 }
1315
arovir01b0717b52018-09-05 17:03:25 +01001316 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
1317 assert(layer != nullptr);
1318 input.Connect(layer->GetInputSlot(0));
1319
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001320 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001321}
1322
1323bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data)
1324{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001325 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001326 if (!input.IsValid())
1327 {
1328 return Fail("%s: Could not read input 0", __func__);
1329 }
1330
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001331 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001332 if (!output)
1333 {
1334 return Fail("%s: Could not read output 0", __func__);
1335 }
1336
1337 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1338 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1339
Aron Virginas-Tara5daf862019-07-01 19:07:20 +01001340 armnn::ResizeDescriptor desc;
1341 desc.m_Method = armnn::ResizeMethod::Bilinear;
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001342 desc.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +01001343
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001344 bool isSupported = false;
1345 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1346 IsResizeSupported,
1347 data.m_Backends,
1348 isSupported,
1349 inputInfo,
1350 outputInfo,
1351 desc);
1352 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001353 {
1354 return false;
1355 }
1356
Aron Virginas-Tar535607d2019-07-03 15:46:15 +01001357 if (!GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, desc.m_TargetWidth, model, data) ||
1358 !GetInputScalar<hal_1_0::HalPolicy>(operation, 2, OperandType::INT32, desc.m_TargetHeight, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001359 {
1360 return Fail("%s: Operation has invalid inputs", __func__);
1361 }
1362
Aron Virginas-Tara5daf862019-07-01 19:07:20 +01001363 armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(desc);
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001364
arovir01b0717b52018-09-05 17:03:25 +01001365 assert(layer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +01001366
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001367 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1368 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +01001369
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001370 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001371
1372}
1373
1374} // namespace hal_1_0
Matteo Martincigh58f71092018-09-25 15:58:52 +01001375} // namespace armnn_driver