blob: b87727c6eff37bcc686dcddf9c1c0520aa4bd8e3 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "HalPolicy.hpp"
7
Matthew Benthamf61c2702019-04-23 16:43:27 +01008#include <armnn/Optional.hpp>
9
10#include "FullyConnected.hpp"
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +010011#include "OutputShapeUtils.hpp"
arovir015602b192018-10-04 16:15:02 +010012
arovir01b0717b52018-09-05 17:03:25 +010013namespace armnn_driver
14{
15namespace hal_1_0
16{
17
18bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
19{
20 switch (operation.type)
21 {
22 case V1_0::OperationType::ADD:
23 return ConvertAdd(operation, model, data);
24 case V1_0::OperationType::AVERAGE_POOL_2D:
25 return ConvertAveragePool2d(operation, model, data);
26 case V1_0::OperationType::CONCATENATION:
27 return ConvertConcatenation(operation, model, data);
28 case V1_0::OperationType::CONV_2D:
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +010029 return ValidateConv2dParameters(operation) &&
30 ConvertConv2d<hal_1_0::HalPolicy>(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +010031 case V1_0::OperationType::DEPTHWISE_CONV_2D:
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +010032 return ValidateDepthwiseConv2dParameters(operation) &&
33 ConvertDepthwiseConv2d<hal_1_0::HalPolicy>(operation, model, data);
David Monahanacf479a2019-05-29 14:27:04 +010034 case V1_0::OperationType::DEQUANTIZE:
35 return ConvertDequantize(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +010036 case V1_0::OperationType::FLOOR:
37 return ConvertFloor(operation, model, data);
38 case V1_0::OperationType::FULLY_CONNECTED:
39 return ConvertFullyConnected(operation, model, data);
40 case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
41 return ConvertLocalResponseNormalization(operation, model, data);
42 case V1_0::OperationType::LOGISTIC:
43 return ConvertLogistic(operation, model, data);
44 case V1_0::OperationType::LSTM:
45 return ConvertLstm(operation, model, data);
46 case V1_0::OperationType::L2_NORMALIZATION:
47 return ConvertL2Normalization(operation, model, data);
48 case V1_0::OperationType::L2_POOL_2D:
49 return ConvertL2Pool2d(operation, model, data);
50 case V1_0::OperationType::MAX_POOL_2D:
51 return ConvertMaxPool2d(operation, model, data);
52 case V1_0::OperationType::MUL:
53 return ConvertMul(operation, model, data);
54 case V1_0::OperationType::RELU:
55 return ConvertReLu(operation, model, data);
56 case V1_0::OperationType::RELU1:
57 return ConvertReLu1(operation, model, data);
58 case V1_0::OperationType::RELU6:
59 return ConvertReLu6(operation, model, data);
60 case V1_0::OperationType::SOFTMAX:
61 return ConvertSoftmax(operation, model, data);
Keith Davisa6bc52f2019-06-26 09:39:49 +010062 case V1_0::OperationType::SPACE_TO_DEPTH:
63 return ConvertSpaceToDepth(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +010064 case V1_0::OperationType::TANH:
65 return ConvertTanH(operation, model, data);
66 case V1_0::OperationType::RESHAPE:
67 return ConvertReshape(operation, model, data);
68 case V1_0::OperationType::RESIZE_BILINEAR:
69 return ConvertResizeBilinear(operation, model, data);
70 default:
71 return Fail("%s: Operation type %s not supported in ArmnnDriver",
72 __func__, toString(operation.type).c_str());
73 }
74}
75
Mike Kellyb5fdf382019-06-11 16:35:25 +010076bool HalPolicy::ValidateConv2dParameters(const Operation &operation)
77{
78 if (operation.inputs.size() != 10 && operation.inputs.size() != 7)
79 {
80 return Fail("%s: Unsupported number of operation inputs", __func__);
81 }
82 return true;
83}
84
85bool HalPolicy::ValidateDepthwiseConv2dParameters(const Operation &operation)
86{
87 if (operation.inputs.size() != 11 && operation.inputs.size() != 8)
88 {
89 return Fail("%s: Unsupported number of operation inputs", __func__);
90 }
91 return true;
92}
93
arovir01b0717b52018-09-05 17:03:25 +010094bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
95{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +010096 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
97 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 1, model, data);
arovir01b0717b52018-09-05 17:03:25 +010098
99 if (!input0.IsValid() || !input1.IsValid())
100 {
101 return Fail("%s: Operation has invalid inputs", __func__);
102 }
103
104 // The FuseActivation parameter is always the input index 2
105 // and it should be optional
106 ActivationFn activationFunction;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100107 if (!GetOptionalInputActivation<hal_1_0::HalPolicy>(operation, 2, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100108 {
109 return Fail("%s: Operation has invalid inputs", __func__);
110 }
111
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100112 const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100113 if (!outputOperand)
114 {
115 return false;
116 }
117
118 const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
119
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100120 bool isSupported = false;
121 FORWARD_LAYER_SUPPORT_FUNC(__func__,
122 IsAdditionSupported,
123 data.m_Backends,
124 isSupported,
125 input0.GetTensorInfo(),
126 input1.GetTensorInfo(),
127 outInfo);
128 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +0100129 {
130 return false;
131 }
132
133 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
134 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
135
136 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
137 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
138
139 if (endLayer != nullptr)
140 {
141 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100142 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100143 }
144 else
145 {
146 return Fail("%s: ProcessActivation failed", __func__);
147 }
148}
149
150bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
151{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100152 return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100153}
154
// Converts a HAL 1.0 CONCATENATION operation into an ArmNN concat layer.
// Inputs 0..N-1 are the tensors to join; input N is the concat axis (INT32,
// may be negative, TensorFlow-style). Low-rank (1-D/2-D) inputs are first
// expanded to 3-D with reshape layers, and axes that ArmNN cannot concat
// directly are handled by permuting inputs before and after the concat.
bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
{
    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar<hal_1_0::HalPolicy>(operation, numInputTensors, OperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }


    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    // https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    // After normalisation the axis must index an existing output dimension.
    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    // Track whether any input needed rank expansion, and by how many dims,
    // so the output can be expanded and later collapsed to match.
    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    // Collect a (shape, handle) pair per input tensor, reshaping low-rank
    // inputs up to 3-D where required.
    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const Operand* const operand = GetInputOperand<hal_1_0::HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        LayerInputHandle operandInputHandle =
            ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, i, model, data);

        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
            );

            // Point to the reshape operation rather then the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    BOOST_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check if permutations is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
        std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute =
        CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // this is no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    SwizzleInputs(*data.m_Network, inputHandles, inputShapes, permutationPair.first);

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor =
            armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (const armnn::Exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    // Gather pointers to the (possibly swizzled) input infos for the backend query.
    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
        [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConcatSupported,
                               data.m_Backends,
                               isSupported,
                               inputTensorInfos,
                               outputInfo,
                               concatDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
                                                                   layer->GetOutputSlot(0),
                                                                   permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        layer = &AddReshapeLayer(
                *data.m_Network,
                layer->GetOutputSlot(0),
                afterConcatInfo
        );
    }

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}
383
David Monahanacf479a2019-05-29 14:27:04 +0100384bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
385{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100386 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
David Monahanacf479a2019-05-29 14:27:04 +0100387
388 if (!input.IsValid())
389 {
390 return Fail("%s: Operation has invalid input", __func__);
391 }
392
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100393 const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
David Monahanacf479a2019-05-29 14:27:04 +0100394 if (!outputOperand)
395 {
396 return Fail("%s: Operation has invalid outputs", __func__);
397 }
398
Aron Virginas-Tarc16c9c12019-07-11 11:14:11 +0100399 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +0100400 if (IsDynamicOutput(outputInfo))
401 {
Aron Virginas-Tarc16c9c12019-07-11 11:14:11 +0100402 ALOGD("Output shape not set, will infer from input");
403 outputInfo.SetShape(input.GetTensorInfo().GetShape());
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +0100404 }
405
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100406 bool isSupported = false;
407 FORWARD_LAYER_SUPPORT_FUNC(__func__,
408 IsDequantizeSupported,
409 data.m_Backends,
410 isSupported,
411 input.GetTensorInfo(),
412 GetTensorInfoForOperand(*outputOperand));
413 if (!isSupported)
David Monahanacf479a2019-05-29 14:27:04 +0100414 {
415 return false;
416 }
417
418 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
419 assert(layer != nullptr);
420 input.Connect(layer->GetInputSlot(0));
421
Aron Virginas-Tarc16c9c12019-07-11 11:14:11 +0100422 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation,
423 0,
424 *layer,
425 model,
426 data,
427 armnn::Optional<armnn::TensorInfo>(outputInfo));
David Monahanacf479a2019-05-29 14:27:04 +0100428}
429
arovir01b0717b52018-09-05 17:03:25 +0100430bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
431{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100432 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100433 if (!input.IsValid())
434 {
435 return Fail("%s: Operation has invalid inputs", __func__);
436 }
437
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100438 const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100439 if (!outputOperand)
440 {
441 return Fail("%s: Operation has invalid outputs", __func__);
442 }
443
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100444 bool isSupported = false;
445 FORWARD_LAYER_SUPPORT_FUNC(__func__,
446 IsFloorSupported,
447 data.m_Backends,
448 isSupported,
449 input.GetTensorInfo(),
450 GetTensorInfoForOperand(*outputOperand));
451 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +0100452 {
453 return false;
454 }
455
456 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
457 assert(layer != nullptr);
458 input.Connect(layer->GetInputSlot(0));
459
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100460 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100461}
462
// Converts a HAL 1.0 FULLY_CONNECTED operation into an ArmNN fully-connected
// layer with constant weights/bias and an optional fused activation. Inputs
// with rank > 2 are flattened to 2-D via a reshape layer before the
// fully-connected layer.
bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 1, model, data); // 2D
    ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    // Compute the 2-D shape the input must be flattened to; throws if the
    // input cannot be flattened consistently with the weights shape.
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    } catch (const std::exception &e) {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<hal_1_0::HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    // Backend support is queried with the flattened (2-D) input info.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFullyConnectedSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               outputInfo,
                               weights.GetInfo(),
                               bias.GetInfo(),
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        // Rank > 2 inputs go through a reshape layer to the flattened shape
        // before feeding the fully-connected layer.
        if (inputInfo.GetNumDimensions() > 2U)
        {
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
559
560bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
561 const Model& model,
562 ConversionData& data)
563{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100564 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100565 if (!input.IsValid())
566 {
567 return Fail("%s: Operation has invalid inputs", __func__);
568 }
569
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100570 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100571 if (!output)
572 {
573 return Fail("%s: Could not read output 0", __func__);
574 }
575
narpra012fb804a2018-10-22 14:52:32 +0100576 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
arovir01b0717b52018-09-05 17:03:25 +0100577 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
578
arovir01b0717b52018-09-05 17:03:25 +0100579 armnn::NormalizationDescriptor descriptor;
580
narpra012fb804a2018-10-22 14:52:32 +0100581 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +0100582 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
narpra012fb804a2018-10-22 14:52:32 +0100583 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
arovir01b0717b52018-09-05 17:03:25 +0100584
585 if (!input.IsValid() ||
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100586 !GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
587 !GetInputFloat32<hal_1_0::HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
588 !GetInputFloat32<hal_1_0::HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
589 !GetInputFloat32<hal_1_0::HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100590 {
591 return Fail("%s: Operation has invalid inputs", __func__);
592 }
593
594 // ArmNN expects normSize to be the full size of the normalization
595 // window rather than the radius as in AndroidNN.
596 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
597
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100598 bool isSupported = false;
599 FORWARD_LAYER_SUPPORT_FUNC(__func__,
600 IsNormalizationSupported,
601 data.m_Backends,
602 isSupported,
603 inputInfo,
604 outputInfo,
605 descriptor);
606 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +0100607 {
608 return false;
609 }
610
611
612 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
613 assert(layer != nullptr);
narpra012fb804a2018-10-22 14:52:32 +0100614 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +0100615
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100616 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100617}
618
619bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
620{
621 armnn::ActivationDescriptor desc;
622 desc.m_Function = armnn::ActivationFunction::Sigmoid;
623
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100624 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100625}
626
627bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
628{
629 // Inputs:
630 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
631 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100632 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100633 if (!input.IsValid())
634 {
635 return Fail("%s: Could not read input 0: input", __func__);
636 }
637 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100638 LayerInputHandle outputStateIn = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 18, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100639 if (!outputStateIn.IsValid())
640 {
641 return Fail("%s: Could not read input 18: outputStateIn", __func__);
642 }
643 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100644 LayerInputHandle cellStateIn = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 19, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100645 if (!cellStateIn.IsValid())
646 {
647 return Fail("%s: Could not read input 19: cellStateIn", __func__);
648 }
649
650 // Get the mandatory input tensors:
651 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
652 // [num_units, input_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100653 const ConstTensorPin inputToForgetWeightsPin =
654 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 2, model, data);
655 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
656 // [num_units, input_size].
657 const ConstTensorPin inputToCellWeightsPin =
658 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 3, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100659 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
660 // [num_units, input_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100661 const ConstTensorPin inputToOutputWeightsPin =
662 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 4, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100663 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
664 // [num_units, output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100665 const ConstTensorPin recurrentToForgetWeightsPin =
666 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 6, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100667 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
668 // [num_units, output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100669 const ConstTensorPin recurrentToCellWeightsPin =
670 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 7, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100671 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
672 // [num_units, output_size].
673 const ConstTensorPin recurrentToOutputWeightsPin =
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100674 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 8, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100675 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100676 const ConstTensorPin forgetGateBiasPin =
677 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 13, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100678 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100679 const ConstTensorPin cellBiasPin =
680 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 14, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100681 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100682 const ConstTensorPin outputGateBiasPin =
683 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 15, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100684
685 if (!inputToForgetWeightsPin.IsValid() ||
686 !inputToCellWeightsPin.IsValid() ||
687 !inputToOutputWeightsPin.IsValid() ||
688 !recurrentToForgetWeightsPin.IsValid() ||
689 !recurrentToCellWeightsPin.IsValid() ||
690 !recurrentToOutputWeightsPin.IsValid() ||
691 !forgetGateBiasPin.IsValid() ||
692 !cellBiasPin.IsValid() ||
693 !outputGateBiasPin.IsValid())
694 {
695 return Fail("%s: Operation has invalid tensor inputs", __func__);
696 }
697
698 // Get the optional input tensors:
699 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
700 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100701 const ConstTensorPin inputToInputWeightsPin =
702 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
703 1,
704 model,
705 data,
706 g_DontPermute,
707 nullptr,
708 true);
709
arovir01b0717b52018-09-05 17:03:25 +0100710 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
711 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
712 // “num_units”), or the second dimension of the “projection_weights”, if defined.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100713 const ConstTensorPin recurrentToInputWeightsPin =
714 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
715 5,
716 model,
717 data,
718 g_DontPermute,
719 nullptr,
720 true);
721
arovir01b0717b52018-09-05 17:03:25 +0100722 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100723 const ConstTensorPin cellToInputWeightsPin =
724 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
725 9,
726 model,
727 data,
728 g_DontPermute,
729 nullptr,
730 true);
731
arovir01b0717b52018-09-05 17:03:25 +0100732 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100733 const ConstTensorPin cellToForgetWeightsPin =
734 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
735 10,
736 model,
737 data,
738 g_DontPermute,
739 nullptr,
740 true);
741
arovir01b0717b52018-09-05 17:03:25 +0100742 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100743 const ConstTensorPin cellToOutputWeightsPin =
744 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
745 11,
746 model,
747 data,
748 g_DontPermute,
749 nullptr,
750 true);
751
arovir01b0717b52018-09-05 17:03:25 +0100752 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100753 const ConstTensorPin inputGateBiasPin =
754 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
755 12,
756 model,
757 data,
758 g_DontPermute,
759 nullptr,
760 true);
761
arovir01b0717b52018-09-05 17:03:25 +0100762 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
763 // [output_size, num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100764 const ConstTensorPin projectionWeightsPin =
765 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
766 16,
767 model,
768 data,
769 g_DontPermute,
770 nullptr,
771 true);
772
arovir01b0717b52018-09-05 17:03:25 +0100773 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100774 const ConstTensorPin projectionBiasPin =
775 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
776 17,
777 model,
778 data,
779 g_DontPermute,
780 nullptr,
781 true);
arovir01b0717b52018-09-05 17:03:25 +0100782
783 if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
784 (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
785 (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
786 (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
787 (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
788 (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
789 (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
790 (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
791 {
792 return Fail("%s: Operation has invalid tensor inputs", __func__);
793 }
794
795 // Get the mandatory input scalars (actually 1-D tensors of size 1):
796 // 20: The activation function: A value indicating the activation function:
797 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
798 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
799 // If set to 0.0 then clipping is disabled.
800 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
801 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
802 ActivationFn activation;
803 float cellClip;
804 float projClip;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100805 if (!GetInputActivationFunctionFromTensor<hal_1_0::HalPolicy>(operation, 20, activation, model, data) ||
806 !GetInputScalar<hal_1_0::HalPolicy>(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
807 !GetInputScalar<hal_1_0::HalPolicy>(operation, 22, OperandType::FLOAT32, projClip, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100808 {
809 return Fail("%s: Operation has invalid scalar inputs", __func__);
810 }
811
812 // Outputs:
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100813 // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
814 // with CIFG, or [batch_size, num_units * 3] without CIFG.
815 const Operand* scratchBuffer = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100816 if (!scratchBuffer)
817 {
818 return Fail("%s: Could not read output 0: scratchBuffer", __func__);
819 }
820 // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100821 const Operand* outputStateOut = GetOutputOperand<hal_1_0::HalPolicy>(operation, 1, model);
arovir01b0717b52018-09-05 17:03:25 +0100822 if (!outputStateOut)
823 {
824 return Fail("%s: Could not read output 1: outputStateOut", __func__);
825 }
826 // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100827 const Operand* cellStateOut = GetOutputOperand<hal_1_0::HalPolicy>(operation, 2, model);
arovir01b0717b52018-09-05 17:03:25 +0100828 if (!cellStateOut)
829 {
830 return Fail("%s: Could not read output 2: cellStateOut", __func__);
831 }
832 // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
833 // effectively the same as the current “output state (out)” value.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100834 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 3, model);
arovir01b0717b52018-09-05 17:03:25 +0100835 if (!output)
836 {
837 return Fail("%s: Could not read output 3: output", __func__);
838 }
839
840 // set the params structure for the AddLstmLayer call
841 armnn::LstmInputParams params;
842 params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
843 params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
844 params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
845 params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
846 params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
847 params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
848 params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
849 params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
850 params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
851 params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
852 params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
853 params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
854 params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
855 params.m_CellBias = cellBiasPin.GetConstTensorPtr();
856 params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
857 params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
858 params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
859
860 // set the layer descriptor
861 armnn::LstmDescriptor desc;
862 desc.m_ActivationFunc = activation;
863 desc.m_ClippingThresCell = cellClip;
864 desc.m_ClippingThresProj = projClip;
865 desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
866 params.m_RecurrentToInputWeights == nullptr ||
867 params.m_InputGateBias == nullptr);
868 desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
869 params.m_CellToOutputWeights != nullptr);
870 desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
871
872 // validate the optional input groups
873 if (desc.m_CifgEnabled &&
874 (params.m_InputToInputWeights != nullptr ||
875 params.m_RecurrentToInputWeights != nullptr ||
876 params.m_InputGateBias != nullptr))
877 {
878 return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
879 " and input gate bias must be provided", __func__);
880 }
881
882 if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
883 {
884 return Fail("%s: projection bias should not be provided without projection weights", __func__);
885 }
886
887 if (desc.m_PeepholeEnabled &&
888 (params.m_CellToForgetWeights == nullptr ||
889 params.m_CellToOutputWeights == nullptr ||
890 (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
891 {
892 return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
893 " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
894 }
895
896 // Check if the layer is supported
897 // Inputs
898 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
899 const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
900 const armnn::TensorInfo& cellStateInInfo = cellStateIn.GetTensorInfo();
901
902 // Outputs
903 const armnn::TensorInfo& scratchBufferInfo = GetTensorInfoForOperand(*scratchBuffer);
904 const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
905 const armnn::TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
906 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
907
908 // Basic parameters
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100909 armnn::LstmInputParamsInfo paramsInfo;
910 paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
911 paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
912 paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
913 paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
914 paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
915 paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
916 paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
917 paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
918 paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100919
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100920 // Optional parameters
arovir01b0717b52018-09-05 17:03:25 +0100921 if(!desc.m_CifgEnabled)
922 {
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100923 paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
924 paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100925 if (params.m_CellToInputWeights != nullptr)
926 {
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100927 paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100928 }
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100929 paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100930 }
931
932 if(desc.m_ProjectionEnabled)
933 {
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100934 paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100935 if (params.m_ProjectionBias != nullptr)
936 {
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100937 paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100938 }
939 }
940
941 if(desc.m_PeepholeEnabled)
942 {
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100943 paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
944 paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100945 }
946
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100947 bool isSupported = false;
948 FORWARD_LAYER_SUPPORT_FUNC(__func__,
949 IsLstmSupported,
950 data.m_Backends,
951 isSupported,
952 inputInfo,
953 outputStateInInfo,
954 cellStateInInfo,
955 scratchBufferInfo,
956 outputStateOutInfo,
957 cellStateOutInfo,
958 outputInfo,
959 desc,
960 paramsInfo);
961 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +0100962 {
963 return false;
964 }
965
966 // Add the layer
967 armnn::IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");
968
969 input.Connect(layer->GetInputSlot(0));
970 outputStateIn.Connect(layer->GetInputSlot(1));
971 cellStateIn.Connect(layer->GetInputSlot(2));
972
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100973 return (SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, 0, model, data) &&
974 SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 1, *layer, 1, model, data) &&
975 SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 2, *layer, 2, model, data) &&
976 SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 3, *layer, 3, model, data));
arovir01b0717b52018-09-05 17:03:25 +0100977}
978
979bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
980{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100981 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100982 if (!input.IsValid())
983 {
984 return Fail("%s: Operation has invalid inputs", __func__);
985 }
986
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100987 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100988 if (!output)
989 {
990 return Fail("%s: Could not read output 0", __func__);
991 }
992
993 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
FinnWilliamsArm6bda94a2019-07-11 17:02:57 +0100994 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
arovir01b0717b52018-09-05 17:03:25 +0100995
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +0100996 if (IsDynamicOutput(outputInfo))
997 {
FinnWilliamsArm6bda94a2019-07-11 17:02:57 +0100998 ALOGD("Output shape not set, will infer from inputs");
999 outputInfo.SetShape(inputInfo.GetShape());
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +01001000 }
1001
Matteo Martincigh58f71092018-09-25 15:58:52 +01001002 armnn::L2NormalizationDescriptor desc;
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +01001003 desc.m_DataLayout = armnn::DataLayout::NHWC;
Matteo Martincigh58f71092018-09-25 15:58:52 +01001004
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001005 bool isSupported = false;
1006 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1007 IsL2NormalizationSupported,
1008 data.m_Backends,
1009 isSupported,
1010 inputInfo,
1011 outputInfo,
1012 desc);
1013 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001014 {
1015 return false;
1016 }
1017
Matteo Martincigh58f71092018-09-25 15:58:52 +01001018 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
arovir01b0717b52018-09-05 17:03:25 +01001019 assert(layer != nullptr);
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +01001020 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +01001021
FinnWilliamsArm6bda94a2019-07-11 17:02:57 +01001022 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation,
1023 0,
1024 *layer,
1025 model,
1026 data,
1027 armnn::Optional<armnn::TensorInfo>(outputInfo));
arovir01b0717b52018-09-05 17:03:25 +01001028}
1029
// Converts an ANEURALNETWORKS_L2_POOL_2D operation by delegating to the shared
// pooling converter, selecting the L2 pooling algorithm.
bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
}
1034
// Converts an ANEURALNETWORKS_MAX_POOL_2D operation by delegating to the shared
// pooling converter, selecting the Max pooling algorithm.
bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
}
1039
1040bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
1041{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001042 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
1043 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 1, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001044
1045 if (!input0.IsValid() || !input1.IsValid())
1046 {
1047 return Fail("%s: Operation has invalid inputs", __func__);
1048 }
1049
1050 // The FuseActivation parameter is always the input index 2
1051 // and it should be optional
1052 ActivationFn activationFunction;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001053 if (!GetOptionalInputActivation<hal_1_0::HalPolicy>(operation, 2, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001054 {
1055 return Fail("%s: Operation has invalid inputs", __func__);
1056 }
1057
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001058 const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001059
1060 if (outputOperand == nullptr)
1061 {
1062 return false;
1063 }
1064
1065 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
1066
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001067 bool isSupported = false;
1068 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1069 IsMultiplicationSupported,
1070 data.m_Backends,
1071 isSupported,
1072 input0.GetTensorInfo(),
1073 input1.GetTensorInfo(),
1074 outInfo);
1075 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001076 {
1077 return false;
1078 }
1079
1080 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
1081 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
1082
1083 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
1084 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
1085
1086 if (endLayer != nullptr)
1087 {
1088 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001089 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001090 }
1091 else
1092 {
1093 return Fail("%s: ProcessActivation failed", __func__);
1094 }
1095}
1096
1097bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
1098{
1099 armnn::ActivationDescriptor desc;
1100 desc.m_Function = armnn::ActivationFunction::ReLu;
1101
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001102 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001103}
1104
1105bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
1106{
1107 armnn::ActivationDescriptor desc;
1108 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1109 desc.m_A = 1.0f;
1110 desc.m_B = -1.0f;
1111
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001112 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001113}
1114
1115bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
1116{
1117 armnn::ActivationDescriptor desc;
1118 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1119 desc.m_A = 6.0f;
1120
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001121 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001122}
1123
1124bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
1125{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001126 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001127 if (!input.IsValid())
1128 {
1129 return Fail("%s: Operation has invalid inputs", __func__);
1130 }
1131
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001132 const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001133 if (!outputOperand)
1134 {
1135 return Fail("%s: Operation has no outputs", __func__);
1136 }
1137
Aron Virginas-Tar9adbb352019-07-11 11:00:43 +01001138 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +01001139 if (IsDynamicOutput(outputInfo))
1140 {
Aron Virginas-Tar9adbb352019-07-11 11:00:43 +01001141 ALOGD("Output shape not set, will infer from input");
1142 outputInfo.SetShape(input.GetTensorInfo().GetShape());
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +01001143 }
arovir01b0717b52018-09-05 17:03:25 +01001144
1145 armnn::SoftmaxDescriptor desc;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001146 if (!GetInputFloat32<hal_1_0::HalPolicy>(operation, 1, desc.m_Beta, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001147 {
1148 return Fail("%s: Operation has invalid inputs", __func__);
1149 }
1150
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001151 bool isSupported = false;
1152 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1153 IsSoftmaxSupported,
1154 data.m_Backends,
1155 isSupported,
1156 input.GetTensorInfo(),
1157 outputInfo,
1158 desc);
1159 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001160 {
1161 return false;
1162 }
1163
1164 armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
1165 assert(layer != nullptr);
1166 input.Connect(layer->GetInputSlot(0));
1167
Aron Virginas-Tar9adbb352019-07-11 11:00:43 +01001168 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation,
1169 0,
1170 *layer,
1171 model,
1172 data,
1173 armnn::Optional<armnn::TensorInfo>(outputInfo));
arovir01b0717b52018-09-05 17:03:25 +01001174}
1175
Keith Davisa6bc52f2019-06-26 09:39:49 +01001176bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
1177{
1178 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
1179
1180 if (!input.IsValid() )
1181 {
1182 return Fail("%s: Operation has invalid inputs", __func__);
1183 }
1184
1185 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1186 unsigned int rank = inputInfo.GetNumDimensions();
1187
1188 if (rank != 4)
1189 {
1190 return Fail("%s: Only inputs with rank 4 are supported", __func__);
1191 }
1192
1193 armnn::SpaceToDepthDescriptor desc;
1194 bool dataLayoutCheck;
1195
1196 GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
1197
1198 if (desc.m_BlockSize <= 1)
1199 {
1200 return Fail("%s: Block size must be at least 1 in all dimensions");
1201 }
1202
1203 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
1204 if (!output)
1205 {
1206 return Fail("%s: Could not read output 0", __func__);
1207 }
1208
1209 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001210
1211 bool isSupported = false;
1212 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1213 IsSpaceToDepthSupported,
1214 data.m_Backends,
1215 isSupported,
1216 inputInfo,
1217 outputInfo,
1218 desc);
1219 if (!isSupported)
Keith Davisa6bc52f2019-06-26 09:39:49 +01001220 {
1221 return false;
1222 }
1223
1224 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
1225 assert(layer != nullptr);
1226 input.Connect(layer->GetInputSlot(0));
1227
1228 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
1229}
1230
arovir01b0717b52018-09-05 17:03:25 +01001231bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
1232{
1233 armnn::ActivationDescriptor desc;
1234 desc.m_Function = armnn::ActivationFunction::TanH;
1235 desc.m_A = 1.0f; // android nn does not support tanH parameters
1236 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1237
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001238 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001239}
1240
1241bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
1242{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001243 const Operand* inputOperand = GetInputOperand<hal_1_0::HalPolicy>(operation, 0, model);
1244 const Operand* requestedShapeOperand = GetInputOperand<hal_1_0::HalPolicy>(operation, 1, model);
1245 const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001246
1247 if (inputOperand == nullptr
1248 || requestedShapeOperand == nullptr
1249 || outputOperand == nullptr)
1250 {
1251 return Fail("%s: Operation has invalid inputs", __func__);
1252 }
1253
1254
1255 if (requestedShapeOperand->dimensions.size() != 1)
1256 {
1257 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
1258 __func__, requestedShapeOperand->dimensions.size());
1259 }
1260
1261 std::vector<int32_t> targetDimensions;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001262 if (!GetTensorInt32Values<hal_1_0::HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001263 {
1264 return Fail("%s: Could not read values of input 1", __func__);
1265 }
1266
1267 const Shape inputOperandShape = GetOperandShape(*inputOperand);
1268
1269 Shape requestedShape;
1270 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
1271 // function that resolves these values into a fully specified tensor shape.
1272 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
1273 {
1274 return Fail("%s: Failed to resolve the requested shape", __func__);
1275 }
1276
1277 const Shape outputOperandShape = GetOperandShape(*outputOperand);
1278 if (!SameShape(requestedShape, outputOperandShape))
1279 {
1280 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
1281 }
1282
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001283 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001284 if (!input.IsValid())
1285 {
1286 return Fail("%s: Could not read input 0", __func__);
1287 }
1288
arovir01b0717b52018-09-05 17:03:25 +01001289 armnn::ReshapeDescriptor reshapeDescriptor;
1290 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
1291 requestedShape.dimensions.data());
1292
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001293 bool isSupported = false;
1294 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1295 IsReshapeSupported,
1296 data.m_Backends,
1297 isSupported,
1298 input.GetTensorInfo(),
1299 reshapeDescriptor);
1300 if (!isSupported)
Matteo Martincigh265d1ad2019-01-08 18:14:53 +00001301 {
1302 return false;
1303 }
1304
arovir01b0717b52018-09-05 17:03:25 +01001305 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
1306 assert(layer != nullptr);
1307 input.Connect(layer->GetInputSlot(0));
1308
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001309 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001310}
1311
1312bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data)
1313{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001314 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001315 if (!input.IsValid())
1316 {
1317 return Fail("%s: Could not read input 0", __func__);
1318 }
1319
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001320 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001321 if (!output)
1322 {
1323 return Fail("%s: Could not read output 0", __func__);
1324 }
1325
1326 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1327 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1328
Aron Virginas-Tara5daf862019-07-01 19:07:20 +01001329 armnn::ResizeDescriptor desc;
1330 desc.m_Method = armnn::ResizeMethod::Bilinear;
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001331 desc.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +01001332
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001333 bool isSupported = false;
1334 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1335 IsResizeSupported,
1336 data.m_Backends,
1337 isSupported,
1338 inputInfo,
1339 outputInfo,
1340 desc);
1341 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +01001342 {
1343 return false;
1344 }
1345
Aron Virginas-Tar535607d2019-07-03 15:46:15 +01001346 if (!GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, desc.m_TargetWidth, model, data) ||
1347 !GetInputScalar<hal_1_0::HalPolicy>(operation, 2, OperandType::INT32, desc.m_TargetHeight, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001348 {
1349 return Fail("%s: Operation has invalid inputs", __func__);
1350 }
1351
Aron Virginas-Tara5daf862019-07-01 19:07:20 +01001352 armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(desc);
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001353
arovir01b0717b52018-09-05 17:03:25 +01001354 assert(layer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +01001355
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001356 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1357 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +01001358
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001359 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001360
1361}
1362
1363} // namespace hal_1_0
Matteo Martincigh58f71092018-09-25 15:58:52 +01001364} // namespace armnn_driver