blob: eb594427ba2013699a8f8e4dc88ab4c1cdb0e8e4 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "HalPolicy.hpp"
7
Matthew Benthamf61c2702019-04-23 16:43:27 +01008#include <armnn/Optional.hpp>
9
10#include "FullyConnected.hpp"
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +010011#include "OutputShapeUtils.hpp"
arovir015602b192018-10-04 16:15:02 +010012
arovir01b0717b52018-09-05 17:03:25 +010013namespace armnn_driver
14{
15namespace hal_1_0
16{
17
18bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
19{
20 switch (operation.type)
21 {
22 case V1_0::OperationType::ADD:
23 return ConvertAdd(operation, model, data);
24 case V1_0::OperationType::AVERAGE_POOL_2D:
25 return ConvertAveragePool2d(operation, model, data);
26 case V1_0::OperationType::CONCATENATION:
27 return ConvertConcatenation(operation, model, data);
28 case V1_0::OperationType::CONV_2D:
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +010029 return ValidateConv2dParameters(operation) &&
30 ConvertConv2d<hal_1_0::HalPolicy>(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +010031 case V1_0::OperationType::DEPTHWISE_CONV_2D:
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +010032 return ValidateDepthwiseConv2dParameters(operation) &&
33 ConvertDepthwiseConv2d<hal_1_0::HalPolicy>(operation, model, data);
David Monahanacf479a2019-05-29 14:27:04 +010034 case V1_0::OperationType::DEQUANTIZE:
35 return ConvertDequantize(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +010036 case V1_0::OperationType::FLOOR:
37 return ConvertFloor(operation, model, data);
38 case V1_0::OperationType::FULLY_CONNECTED:
39 return ConvertFullyConnected(operation, model, data);
40 case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
41 return ConvertLocalResponseNormalization(operation, model, data);
42 case V1_0::OperationType::LOGISTIC:
43 return ConvertLogistic(operation, model, data);
44 case V1_0::OperationType::LSTM:
45 return ConvertLstm(operation, model, data);
46 case V1_0::OperationType::L2_NORMALIZATION:
47 return ConvertL2Normalization(operation, model, data);
48 case V1_0::OperationType::L2_POOL_2D:
49 return ConvertL2Pool2d(operation, model, data);
50 case V1_0::OperationType::MAX_POOL_2D:
51 return ConvertMaxPool2d(operation, model, data);
52 case V1_0::OperationType::MUL:
53 return ConvertMul(operation, model, data);
54 case V1_0::OperationType::RELU:
55 return ConvertReLu(operation, model, data);
56 case V1_0::OperationType::RELU1:
57 return ConvertReLu1(operation, model, data);
58 case V1_0::OperationType::RELU6:
59 return ConvertReLu6(operation, model, data);
60 case V1_0::OperationType::SOFTMAX:
61 return ConvertSoftmax(operation, model, data);
Keith Davisa6bc52f2019-06-26 09:39:49 +010062 case V1_0::OperationType::SPACE_TO_DEPTH:
63 return ConvertSpaceToDepth(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +010064 case V1_0::OperationType::TANH:
65 return ConvertTanH(operation, model, data);
66 case V1_0::OperationType::RESHAPE:
67 return ConvertReshape(operation, model, data);
68 case V1_0::OperationType::RESIZE_BILINEAR:
69 return ConvertResizeBilinear(operation, model, data);
70 default:
71 return Fail("%s: Operation type %s not supported in ArmnnDriver",
72 __func__, toString(operation.type).c_str());
73 }
74}
75
Mike Kellyb5fdf382019-06-11 16:35:25 +010076bool HalPolicy::ValidateConv2dParameters(const Operation &operation)
77{
78 if (operation.inputs.size() != 10 && operation.inputs.size() != 7)
79 {
80 return Fail("%s: Unsupported number of operation inputs", __func__);
81 }
82 return true;
83}
84
85bool HalPolicy::ValidateDepthwiseConv2dParameters(const Operation &operation)
86{
87 if (operation.inputs.size() != 11 && operation.inputs.size() != 8)
88 {
89 return Fail("%s: Unsupported number of operation inputs", __func__);
90 }
91 return true;
92}
93
arovir01b0717b52018-09-05 17:03:25 +010094bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
95{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +010096 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
97 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 1, model, data);
arovir01b0717b52018-09-05 17:03:25 +010098
99 if (!input0.IsValid() || !input1.IsValid())
100 {
101 return Fail("%s: Operation has invalid inputs", __func__);
102 }
103
104 // The FuseActivation parameter is always the input index 2
105 // and it should be optional
106 ActivationFn activationFunction;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100107 if (!GetOptionalInputActivation<hal_1_0::HalPolicy>(operation, 2, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100108 {
109 return Fail("%s: Operation has invalid inputs", __func__);
110 }
111
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100112 const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100113 if (!outputOperand)
114 {
115 return false;
116 }
117
118 const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
119
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100120 if (!IsLayerSupportedForAnyBackend(__func__,
121 armnn::IsAdditionSupported,
122 data.m_Backends,
123 input0.GetTensorInfo(),
124 input1.GetTensorInfo(),
125 outInfo))
arovir01b0717b52018-09-05 17:03:25 +0100126 {
127 return false;
128 }
129
130 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
131 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
132
133 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
134 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
135
136 if (endLayer != nullptr)
137 {
138 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100139 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100140 }
141 else
142 {
143 return Fail("%s: ProcessActivation failed", __func__);
144 }
145}
146
// Converts AVERAGE_POOL_2D by delegating to the shared pooling conversion
// helper, selecting the Average pooling algorithm.
bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
}
151
// Converts a CONCATENATION operation into an ArmNN concat layer.
// Handles three complications:
//  - inputs of rank 1/2 are reshaped up to rank 3 first (and the result is
//    reshaped back afterwards),
//  - negative concat axes are normalized TensorFlow-style,
//  - axes that ArmNN cannot concatenate directly are handled by permuting
//    (swizzling) the inputs before the concat and de-swizzling the output.
bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
{
    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar<hal_1_0::HalPolicy>(operation, numInputTensors, OperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }


    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    // https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    // Track whether any input had to be expanded to rank 3 so the inverse
    // reshape can be applied to the concat output below.
    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const Operand* const operand = GetInputOperand<hal_1_0::HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        LayerInputHandle operandInputHandle =
            ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, i, model, data);

        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
            );

            // Point to the reshape operation rather then the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    BOOST_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check if permutations is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
        std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute =
        CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // this is no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    SwizzleInputs(*data.m_Network, inputHandles, inputShapes, permutationPair.first);

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor =
            armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (const armnn::Exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    // Collect pointers to the (possibly swizzled) input infos for the backend query.
    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
        [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsConcatSupported,
                                       data.m_Backends,
                                       inputTensorInfos,
                                       outputInfo,
                                       concatDescriptor))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
                                                                   layer->GetOutputSlot(0),
                                                                   permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        layer = &AddReshapeLayer(
                *data.m_Network,
                layer->GetOutputSlot(0),
                afterConcatInfo
        );
    }

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}
376
// Converts a DEQUANTIZE operation into an ArmNN dequantize layer.
// If the model left the output shape dynamic (unspecified), the shape is
// inferred from the input tensor and passed explicitly when registering the
// output slot.
bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid input", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicOutput(outputInfo))
    {
        // Dequantize preserves shape, so the input shape can stand in for the
        // unspecified output shape.
        ALOGD("Output shape not set, will infer from input");
        outputInfo.SetShape(input.GetTensorInfo().GetShape());
    }

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsDequantizeSupported,
                                       data.m_Backends,
                                       input.GetTensorInfo(),
                                       outputInfo))
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    // Pass the (possibly inferred) output info so the tracked output slot is
    // set up with a concrete shape.
    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}
419
arovir01b0717b52018-09-05 17:03:25 +0100420bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
421{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100422 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100423 if (!input.IsValid())
424 {
425 return Fail("%s: Operation has invalid inputs", __func__);
426 }
427
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100428 const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100429 if (!outputOperand)
430 {
431 return Fail("%s: Operation has invalid outputs", __func__);
432 }
433
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100434 if (!IsLayerSupportedForAnyBackend(__func__,
435 armnn::IsFloorSupported,
436 data.m_Backends,
437 input.GetTensorInfo(),
438 GetTensorInfoForOperand(*outputOperand)))
arovir01b0717b52018-09-05 17:03:25 +0100439 {
440 return false;
441 }
442
443 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
444 assert(layer != nullptr);
445 input.Connect(layer->GetInputSlot(0));
446
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100447 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100448}
449
// Converts a FULLY_CONNECTED operation into an ArmNN fully-connected layer
// with constant weights (input 1, 2-D) and bias (input 2, 1-D), plus an
// optional fused activation (input 3). Inputs of rank > 2 are flattened to
// 2-D via an inserted reshape layer before the fully-connected layer.
bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 1, model, data); // 2D
    ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    // Compute the 2-D shape the input must be flattened to; throws if the
    // input cannot be flattened consistently with the weights shape.
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    } catch (const std::exception &e) {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<hal_1_0::HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsFullyConnectedSupported,
                                       data.m_Backends,
                                       reshapedInfo,
                                       outputInfo,
                                       weights.GetInfo(),
                                       bias.GetInfo(),
                                       desc))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            // Insert a reshape layer to flatten the input to 2-D before the
            // fully-connected layer.
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
543
544bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
545 const Model& model,
546 ConversionData& data)
547{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100548 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100549 if (!input.IsValid())
550 {
551 return Fail("%s: Operation has invalid inputs", __func__);
552 }
553
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100554 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100555 if (!output)
556 {
557 return Fail("%s: Could not read output 0", __func__);
558 }
559
narpra012fb804a2018-10-22 14:52:32 +0100560 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
arovir01b0717b52018-09-05 17:03:25 +0100561 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
562
arovir01b0717b52018-09-05 17:03:25 +0100563 armnn::NormalizationDescriptor descriptor;
564
narpra012fb804a2018-10-22 14:52:32 +0100565 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +0100566 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
narpra012fb804a2018-10-22 14:52:32 +0100567 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
arovir01b0717b52018-09-05 17:03:25 +0100568
569 if (!input.IsValid() ||
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100570 !GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
571 !GetInputFloat32<hal_1_0::HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
572 !GetInputFloat32<hal_1_0::HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
573 !GetInputFloat32<hal_1_0::HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100574 {
575 return Fail("%s: Operation has invalid inputs", __func__);
576 }
577
578 // ArmNN expects normSize to be the full size of the normalization
579 // window rather than the radius as in AndroidNN.
580 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
581
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100582 if (!IsLayerSupportedForAnyBackend(__func__,
583 armnn::IsNormalizationSupported,
584 data.m_Backends,
585 inputInfo,
586 outputInfo,
587 descriptor))
arovir01b0717b52018-09-05 17:03:25 +0100588 {
589 return false;
590 }
591
592
593 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
594 assert(layer != nullptr);
narpra012fb804a2018-10-22 14:52:32 +0100595 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +0100596
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100597 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100598}
599
600bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
601{
602 armnn::ActivationDescriptor desc;
603 desc.m_Function = armnn::ActivationFunction::Sigmoid;
604
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100605 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100606}
607
608bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
609{
610 // Inputs:
611 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
612 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100613 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100614 if (!input.IsValid())
615 {
616 return Fail("%s: Could not read input 0: input", __func__);
617 }
618 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100619 LayerInputHandle outputStateIn = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 18, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100620 if (!outputStateIn.IsValid())
621 {
622 return Fail("%s: Could not read input 18: outputStateIn", __func__);
623 }
624 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100625 LayerInputHandle cellStateIn = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 19, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100626 if (!cellStateIn.IsValid())
627 {
628 return Fail("%s: Could not read input 19: cellStateIn", __func__);
629 }
630
631 // Get the mandatory input tensors:
632 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
633 // [num_units, input_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100634 const ConstTensorPin inputToForgetWeightsPin =
635 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 2, model, data);
636 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
637 // [num_units, input_size].
638 const ConstTensorPin inputToCellWeightsPin =
639 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 3, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100640 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
641 // [num_units, input_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100642 const ConstTensorPin inputToOutputWeightsPin =
643 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 4, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100644 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
645 // [num_units, output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100646 const ConstTensorPin recurrentToForgetWeightsPin =
647 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 6, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100648 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
649 // [num_units, output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100650 const ConstTensorPin recurrentToCellWeightsPin =
651 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 7, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100652 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
653 // [num_units, output_size].
654 const ConstTensorPin recurrentToOutputWeightsPin =
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100655 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 8, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100656 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100657 const ConstTensorPin forgetGateBiasPin =
658 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 13, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100659 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100660 const ConstTensorPin cellBiasPin =
661 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 14, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100662 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100663 const ConstTensorPin outputGateBiasPin =
664 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 15, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100665
666 if (!inputToForgetWeightsPin.IsValid() ||
667 !inputToCellWeightsPin.IsValid() ||
668 !inputToOutputWeightsPin.IsValid() ||
669 !recurrentToForgetWeightsPin.IsValid() ||
670 !recurrentToCellWeightsPin.IsValid() ||
671 !recurrentToOutputWeightsPin.IsValid() ||
672 !forgetGateBiasPin.IsValid() ||
673 !cellBiasPin.IsValid() ||
674 !outputGateBiasPin.IsValid())
675 {
676 return Fail("%s: Operation has invalid tensor inputs", __func__);
677 }
678
679 // Get the optional input tensors:
680 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
681 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100682 const ConstTensorPin inputToInputWeightsPin =
683 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
684 1,
685 model,
686 data,
687 g_DontPermute,
688 nullptr,
689 true);
690
arovir01b0717b52018-09-05 17:03:25 +0100691 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
692 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
693 // “num_units”), or the second dimension of the “projection_weights”, if defined.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100694 const ConstTensorPin recurrentToInputWeightsPin =
695 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
696 5,
697 model,
698 data,
699 g_DontPermute,
700 nullptr,
701 true);
702
arovir01b0717b52018-09-05 17:03:25 +0100703 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100704 const ConstTensorPin cellToInputWeightsPin =
705 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
706 9,
707 model,
708 data,
709 g_DontPermute,
710 nullptr,
711 true);
712
arovir01b0717b52018-09-05 17:03:25 +0100713 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100714 const ConstTensorPin cellToForgetWeightsPin =
715 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
716 10,
717 model,
718 data,
719 g_DontPermute,
720 nullptr,
721 true);
722
arovir01b0717b52018-09-05 17:03:25 +0100723 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100724 const ConstTensorPin cellToOutputWeightsPin =
725 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
726 11,
727 model,
728 data,
729 g_DontPermute,
730 nullptr,
731 true);
732
arovir01b0717b52018-09-05 17:03:25 +0100733 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100734 const ConstTensorPin inputGateBiasPin =
735 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
736 12,
737 model,
738 data,
739 g_DontPermute,
740 nullptr,
741 true);
742
arovir01b0717b52018-09-05 17:03:25 +0100743 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
744 // [output_size, num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100745 const ConstTensorPin projectionWeightsPin =
746 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
747 16,
748 model,
749 data,
750 g_DontPermute,
751 nullptr,
752 true);
753
arovir01b0717b52018-09-05 17:03:25 +0100754 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100755 const ConstTensorPin projectionBiasPin =
756 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
757 17,
758 model,
759 data,
760 g_DontPermute,
761 nullptr,
762 true);
arovir01b0717b52018-09-05 17:03:25 +0100763
764 if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
765 (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
766 (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
767 (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
768 (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
769 (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
770 (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
771 (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
772 {
773 return Fail("%s: Operation has invalid tensor inputs", __func__);
774 }
775
776 // Get the mandatory input scalars (actually 1-D tensors of size 1):
777 // 20: The activation function: A value indicating the activation function:
778 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
779 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
780 // If set to 0.0 then clipping is disabled.
781 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
782 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
783 ActivationFn activation;
784 float cellClip;
785 float projClip;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100786 if (!GetInputActivationFunctionFromTensor<hal_1_0::HalPolicy>(operation, 20, activation, model, data) ||
787 !GetInputScalar<hal_1_0::HalPolicy>(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
788 !GetInputScalar<hal_1_0::HalPolicy>(operation, 22, OperandType::FLOAT32, projClip, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100789 {
790 return Fail("%s: Operation has invalid scalar inputs", __func__);
791 }
792
793 // Outputs:
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100794 // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
795 // with CIFG, or [batch_size, num_units * 3] without CIFG.
796 const Operand* scratchBuffer = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100797 if (!scratchBuffer)
798 {
799 return Fail("%s: Could not read output 0: scratchBuffer", __func__);
800 }
801 // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100802 const Operand* outputStateOut = GetOutputOperand<hal_1_0::HalPolicy>(operation, 1, model);
arovir01b0717b52018-09-05 17:03:25 +0100803 if (!outputStateOut)
804 {
805 return Fail("%s: Could not read output 1: outputStateOut", __func__);
806 }
807 // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100808 const Operand* cellStateOut = GetOutputOperand<hal_1_0::HalPolicy>(operation, 2, model);
arovir01b0717b52018-09-05 17:03:25 +0100809 if (!cellStateOut)
810 {
811 return Fail("%s: Could not read output 2: cellStateOut", __func__);
812 }
813 // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
814 // effectively the same as the current “output state (out)” value.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100815 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 3, model);
arovir01b0717b52018-09-05 17:03:25 +0100816 if (!output)
817 {
818 return Fail("%s: Could not read output 3: output", __func__);
819 }
820
821 // set the params structure for the AddLstmLayer call
822 armnn::LstmInputParams params;
823 params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
824 params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
825 params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
826 params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
827 params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
828 params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
829 params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
830 params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
831 params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
832 params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
833 params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
834 params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
835 params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
836 params.m_CellBias = cellBiasPin.GetConstTensorPtr();
837 params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
838 params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
839 params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
840
841 // set the layer descriptor
842 armnn::LstmDescriptor desc;
843 desc.m_ActivationFunc = activation;
844 desc.m_ClippingThresCell = cellClip;
845 desc.m_ClippingThresProj = projClip;
846 desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
847 params.m_RecurrentToInputWeights == nullptr ||
848 params.m_InputGateBias == nullptr);
849 desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
850 params.m_CellToOutputWeights != nullptr);
851 desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
852
853 // validate the optional input groups
854 if (desc.m_CifgEnabled &&
855 (params.m_InputToInputWeights != nullptr ||
856 params.m_RecurrentToInputWeights != nullptr ||
857 params.m_InputGateBias != nullptr))
858 {
859 return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
860 " and input gate bias must be provided", __func__);
861 }
862
863 if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
864 {
865 return Fail("%s: projection bias should not be provided without projection weights", __func__);
866 }
867
868 if (desc.m_PeepholeEnabled &&
869 (params.m_CellToForgetWeights == nullptr ||
870 params.m_CellToOutputWeights == nullptr ||
871 (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
872 {
873 return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
874 " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
875 }
876
877 // Check if the layer is supported
878 // Inputs
879 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
880 const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
881 const armnn::TensorInfo& cellStateInInfo = cellStateIn.GetTensorInfo();
882
883 // Outputs
884 const armnn::TensorInfo& scratchBufferInfo = GetTensorInfoForOperand(*scratchBuffer);
885 const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
886 const armnn::TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
887 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
888
889 // Basic parameters
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100890 armnn::LstmInputParamsInfo paramsInfo;
891 paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
892 paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
893 paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
894 paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
895 paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
896 paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
897 paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
898 paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
899 paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100900
901 if(!desc.m_CifgEnabled)
902 {
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100903 paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
904 paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100905 if (params.m_CellToInputWeights != nullptr)
906 {
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100907 paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100908 }
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100909 paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100910 }
911
912 if(desc.m_ProjectionEnabled)
913 {
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100914 paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100915 if (params.m_ProjectionBias != nullptr)
916 {
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100917 paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100918 }
919 }
920
921 if(desc.m_PeepholeEnabled)
922 {
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100923 paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
924 paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100925 }
926
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100927 if (!IsLayerSupportedForAnyBackend(__func__,
928 armnn::IsLstmSupported,
929 data.m_Backends,
930 inputInfo,
931 outputStateInInfo,
932 cellStateInInfo,
933 scratchBufferInfo,
934 outputStateOutInfo,
935 cellStateOutInfo,
936 outputInfo,
937 desc,
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100938 paramsInfo))
arovir01b0717b52018-09-05 17:03:25 +0100939 {
940 return false;
941 }
942
943 // Add the layer
944 armnn::IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");
945
946 input.Connect(layer->GetInputSlot(0));
947 outputStateIn.Connect(layer->GetInputSlot(1));
948 cellStateIn.Connect(layer->GetInputSlot(2));
949
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100950 return (SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, 0, model, data) &&
951 SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 1, *layer, 1, model, data) &&
952 SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 2, *layer, 2, model, data) &&
953 SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 3, *layer, 3, model, data));
arovir01b0717b52018-09-05 17:03:25 +0100954}
955
956bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
957{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100958 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100959 if (!input.IsValid())
960 {
961 return Fail("%s: Operation has invalid inputs", __func__);
962 }
963
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100964 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100965 if (!output)
966 {
967 return Fail("%s: Could not read output 0", __func__);
968 }
969
970 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
FinnWilliamsArm6bda94a2019-07-11 17:02:57 +0100971 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
arovir01b0717b52018-09-05 17:03:25 +0100972
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +0100973 if (IsDynamicOutput(outputInfo))
974 {
FinnWilliamsArm6bda94a2019-07-11 17:02:57 +0100975 ALOGD("Output shape not set, will infer from inputs");
976 outputInfo.SetShape(inputInfo.GetShape());
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +0100977 }
978
Matteo Martincigh58f71092018-09-25 15:58:52 +0100979 armnn::L2NormalizationDescriptor desc;
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +0100980 desc.m_DataLayout = armnn::DataLayout::NHWC;
Matteo Martincigh58f71092018-09-25 15:58:52 +0100981
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100982 if (!IsLayerSupportedForAnyBackend(__func__,
983 armnn::IsL2NormalizationSupported,
984 data.m_Backends,
985 inputInfo,
986 outputInfo,
987 desc))
arovir01b0717b52018-09-05 17:03:25 +0100988 {
989 return false;
990 }
991
Matteo Martincigh58f71092018-09-25 15:58:52 +0100992 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
arovir01b0717b52018-09-05 17:03:25 +0100993 assert(layer != nullptr);
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +0100994 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +0100995
FinnWilliamsArm6bda94a2019-07-11 17:02:57 +0100996 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation,
997 0,
998 *layer,
999 model,
1000 data,
1001 armnn::Optional<armnn::TensorInfo>(outputInfo));
arovir01b0717b52018-09-05 17:03:25 +01001002}
1003
// Converts an ANEURALNETWORKS_L2_POOL_2D operation by delegating to the
// shared Pooling2d conversion helper, selecting the L2 pooling algorithm.
bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
}
1008
// Converts an ANEURALNETWORKS_MAX_POOL_2D operation by delegating to the
// shared Pooling2d conversion helper, selecting the Max pooling algorithm.
bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
}
1013
1014bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
1015{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001016 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
1017 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 1, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001018
1019 if (!input0.IsValid() || !input1.IsValid())
1020 {
1021 return Fail("%s: Operation has invalid inputs", __func__);
1022 }
1023
1024 // The FuseActivation parameter is always the input index 2
1025 // and it should be optional
1026 ActivationFn activationFunction;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001027 if (!GetOptionalInputActivation<hal_1_0::HalPolicy>(operation, 2, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001028 {
1029 return Fail("%s: Operation has invalid inputs", __func__);
1030 }
1031
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001032 const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001033
1034 if (outputOperand == nullptr)
1035 {
1036 return false;
1037 }
1038
1039 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
1040
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001041 if (!IsLayerSupportedForAnyBackend(__func__,
1042 armnn::IsMultiplicationSupported,
1043 data.m_Backends,
1044 input0.GetTensorInfo(),
1045 input1.GetTensorInfo(),
1046 outInfo))
arovir01b0717b52018-09-05 17:03:25 +01001047 {
1048 return false;
1049 }
1050
1051 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
1052 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
1053
1054 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
1055 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
1056
1057 if (endLayer != nullptr)
1058 {
1059 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001060 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001061 }
1062 else
1063 {
1064 return Fail("%s: ProcessActivation failed", __func__);
1065 }
1066}
1067
1068bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
1069{
1070 armnn::ActivationDescriptor desc;
1071 desc.m_Function = armnn::ActivationFunction::ReLu;
1072
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001073 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001074}
1075
1076bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
1077{
1078 armnn::ActivationDescriptor desc;
1079 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1080 desc.m_A = 1.0f;
1081 desc.m_B = -1.0f;
1082
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001083 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001084}
1085
1086bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
1087{
1088 armnn::ActivationDescriptor desc;
1089 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1090 desc.m_A = 6.0f;
1091
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001092 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001093}
1094
1095bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
1096{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001097 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001098 if (!input.IsValid())
1099 {
1100 return Fail("%s: Operation has invalid inputs", __func__);
1101 }
1102
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001103 const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001104 if (!outputOperand)
1105 {
1106 return Fail("%s: Operation has no outputs", __func__);
1107 }
1108
Aron Virginas-Tar9adbb352019-07-11 11:00:43 +01001109 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +01001110 if (IsDynamicOutput(outputInfo))
1111 {
Aron Virginas-Tar9adbb352019-07-11 11:00:43 +01001112 ALOGD("Output shape not set, will infer from input");
1113 outputInfo.SetShape(input.GetTensorInfo().GetShape());
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +01001114 }
arovir01b0717b52018-09-05 17:03:25 +01001115
1116 armnn::SoftmaxDescriptor desc;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001117 if (!GetInputFloat32<hal_1_0::HalPolicy>(operation, 1, desc.m_Beta, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001118 {
1119 return Fail("%s: Operation has invalid inputs", __func__);
1120 }
1121
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001122 if (!IsLayerSupportedForAnyBackend(__func__,
1123 armnn::IsSoftmaxSupported,
1124 data.m_Backends,
1125 input.GetTensorInfo(),
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +01001126 outputInfo,
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001127 desc))
arovir01b0717b52018-09-05 17:03:25 +01001128 {
1129 return false;
1130 }
1131
1132 armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
1133 assert(layer != nullptr);
1134 input.Connect(layer->GetInputSlot(0));
1135
Aron Virginas-Tar9adbb352019-07-11 11:00:43 +01001136 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation,
1137 0,
1138 *layer,
1139 model,
1140 data,
1141 armnn::Optional<armnn::TensorInfo>(outputInfo));
arovir01b0717b52018-09-05 17:03:25 +01001142}
1143
Keith Davisa6bc52f2019-06-26 09:39:49 +01001144bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
1145{
1146 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
1147
1148 if (!input.IsValid() )
1149 {
1150 return Fail("%s: Operation has invalid inputs", __func__);
1151 }
1152
1153 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1154 unsigned int rank = inputInfo.GetNumDimensions();
1155
1156 if (rank != 4)
1157 {
1158 return Fail("%s: Only inputs with rank 4 are supported", __func__);
1159 }
1160
1161 armnn::SpaceToDepthDescriptor desc;
1162 bool dataLayoutCheck;
1163
1164 GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
1165
1166 if (desc.m_BlockSize <= 1)
1167 {
1168 return Fail("%s: Block size must be at least 1 in all dimensions");
1169 }
1170
1171 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
1172 if (!output)
1173 {
1174 return Fail("%s: Could not read output 0", __func__);
1175 }
1176
1177 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1178 if (!IsLayerSupportedForAnyBackend(__func__,
1179 armnn::IsSpaceToDepthSupported,
1180 data.m_Backends,
1181 inputInfo,
1182 outputInfo,
1183 desc))
1184 {
1185 return false;
1186 }
1187
1188 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
1189 assert(layer != nullptr);
1190 input.Connect(layer->GetInputSlot(0));
1191
1192 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
1193}
1194
arovir01b0717b52018-09-05 17:03:25 +01001195bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
1196{
1197 armnn::ActivationDescriptor desc;
1198 desc.m_Function = armnn::ActivationFunction::TanH;
1199 desc.m_A = 1.0f; // android nn does not support tanH parameters
1200 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1201
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001202 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001203}
1204
1205bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
1206{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001207 const Operand* inputOperand = GetInputOperand<hal_1_0::HalPolicy>(operation, 0, model);
1208 const Operand* requestedShapeOperand = GetInputOperand<hal_1_0::HalPolicy>(operation, 1, model);
1209 const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001210
1211 if (inputOperand == nullptr
1212 || requestedShapeOperand == nullptr
1213 || outputOperand == nullptr)
1214 {
1215 return Fail("%s: Operation has invalid inputs", __func__);
1216 }
1217
1218
1219 if (requestedShapeOperand->dimensions.size() != 1)
1220 {
1221 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
1222 __func__, requestedShapeOperand->dimensions.size());
1223 }
1224
1225 std::vector<int32_t> targetDimensions;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001226 if (!GetTensorInt32Values<hal_1_0::HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001227 {
1228 return Fail("%s: Could not read values of input 1", __func__);
1229 }
1230
1231 const Shape inputOperandShape = GetOperandShape(*inputOperand);
1232
1233 Shape requestedShape;
1234 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
1235 // function that resolves these values into a fully specified tensor shape.
1236 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
1237 {
1238 return Fail("%s: Failed to resolve the requested shape", __func__);
1239 }
1240
1241 const Shape outputOperandShape = GetOperandShape(*outputOperand);
1242 if (!SameShape(requestedShape, outputOperandShape))
1243 {
1244 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
1245 }
1246
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001247 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001248 if (!input.IsValid())
1249 {
1250 return Fail("%s: Could not read input 0", __func__);
1251 }
1252
arovir01b0717b52018-09-05 17:03:25 +01001253 armnn::ReshapeDescriptor reshapeDescriptor;
1254 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
1255 requestedShape.dimensions.data());
1256
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001257 if (!IsLayerSupportedForAnyBackend(__func__,
1258 armnn::IsReshapeSupported,
1259 data.m_Backends,
1260 input.GetTensorInfo(),
1261 reshapeDescriptor))
Matteo Martincigh265d1ad2019-01-08 18:14:53 +00001262 {
1263 return false;
1264 }
1265
arovir01b0717b52018-09-05 17:03:25 +01001266 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
1267 assert(layer != nullptr);
1268 input.Connect(layer->GetInputSlot(0));
1269
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001270 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001271}
1272
1273bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data)
1274{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001275 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001276 if (!input.IsValid())
1277 {
1278 return Fail("%s: Could not read input 0", __func__);
1279 }
1280
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001281 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001282 if (!output)
1283 {
1284 return Fail("%s: Could not read output 0", __func__);
1285 }
1286
1287 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1288 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1289
Aron Virginas-Tara5daf862019-07-01 19:07:20 +01001290 armnn::ResizeDescriptor desc;
1291 desc.m_Method = armnn::ResizeMethod::Bilinear;
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001292 desc.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +01001293
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001294 if (!IsLayerSupportedForAnyBackend(__func__,
Aron Virginas-Tara5daf862019-07-01 19:07:20 +01001295 armnn::IsResizeSupported,
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001296 data.m_Backends,
1297 inputInfo,
Aron Virginas-Tara5daf862019-07-01 19:07:20 +01001298 outputInfo,
1299 desc))
arovir01b0717b52018-09-05 17:03:25 +01001300 {
1301 return false;
1302 }
1303
Aron Virginas-Tar535607d2019-07-03 15:46:15 +01001304 if (!GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, desc.m_TargetWidth, model, data) ||
1305 !GetInputScalar<hal_1_0::HalPolicy>(operation, 2, OperandType::INT32, desc.m_TargetHeight, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001306 {
1307 return Fail("%s: Operation has invalid inputs", __func__);
1308 }
1309
Aron Virginas-Tara5daf862019-07-01 19:07:20 +01001310 armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(desc);
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001311
arovir01b0717b52018-09-05 17:03:25 +01001312 assert(layer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +01001313
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001314 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1315 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +01001316
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001317 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001318
1319}
1320
1321} // namespace hal_1_0
Matteo Martincigh58f71092018-09-25 15:58:52 +01001322} // namespace armnn_driver