blob: cd009011961a2db573bd656dcf5596541d1c79ab [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "HalPolicy.hpp"
7
Matthew Benthamf61c2702019-04-23 16:43:27 +01008#include <armnn/Optional.hpp>
9
10#include "FullyConnected.hpp"
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +010011#include "OutputShapeUtils.hpp"
arovir015602b192018-10-04 16:15:02 +010012
arovir01b0717b52018-09-05 17:03:25 +010013namespace armnn_driver
14{
15namespace hal_1_0
16{
17
// Top-level dispatcher: routes a NNAPI HAL 1.0 operation to the matching
// Convert* handler. Returns true when the operation was successfully added
// to the ArmNN network held in 'data', false (via Fail) otherwise.
bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
{
    switch (operation.type)
    {
        case V1_0::OperationType::ADD:
            return ConvertAdd(operation, model, data);
        case V1_0::OperationType::AVERAGE_POOL_2D:
            return ConvertAveragePool2d(operation, model, data);
        case V1_0::OperationType::CONCATENATION:
            return ConvertConcatenation(operation, model, data);
        case V1_0::OperationType::CONV_2D:
            // Input-count validation happens before the shared template conversion.
            return ValidateConv2dParameters(operation) &&
                   ConvertConv2d<hal_1_0::HalPolicy>(operation, model, data);
        case V1_0::OperationType::DEPTHWISE_CONV_2D:
            return ValidateDepthwiseConv2dParameters(operation) &&
                   ConvertDepthwiseConv2d<hal_1_0::HalPolicy>(operation, model, data);
        case V1_0::OperationType::DEQUANTIZE:
            return ConvertDequantize(operation, model, data);
        case V1_0::OperationType::FLOOR:
            return ConvertFloor(operation, model, data);
        case V1_0::OperationType::FULLY_CONNECTED:
            return ConvertFullyConnected(operation, model, data);
        case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
            return ConvertLocalResponseNormalization(operation, model, data);
        case V1_0::OperationType::LOGISTIC:
            return ConvertLogistic(operation, model, data);
        case V1_0::OperationType::LSTM:
            return ConvertLstm(operation, model, data);
        case V1_0::OperationType::L2_NORMALIZATION:
            return ConvertL2Normalization(operation, model, data);
        case V1_0::OperationType::L2_POOL_2D:
            return ConvertL2Pool2d(operation, model, data);
        case V1_0::OperationType::MAX_POOL_2D:
            return ConvertMaxPool2d(operation, model, data);
        case V1_0::OperationType::MUL:
            return ConvertMul(operation, model, data);
        case V1_0::OperationType::RELU:
            return ConvertReLu(operation, model, data);
        case V1_0::OperationType::RELU1:
            return ConvertReLu1(operation, model, data);
        case V1_0::OperationType::RELU6:
            return ConvertReLu6(operation, model, data);
        case V1_0::OperationType::SOFTMAX:
            return ConvertSoftmax(operation, model, data);
        case V1_0::OperationType::SPACE_TO_DEPTH:
            return ConvertSpaceToDepth(operation, model, data);
        case V1_0::OperationType::TANH:
            return ConvertTanH(operation, model, data);
        case V1_0::OperationType::RESHAPE:
            return ConvertReshape(operation, model, data);
        case V1_0::OperationType::RESIZE_BILINEAR:
            return ConvertResizeBilinear(operation, model, data);
        default:
            // Anything not listed above is not supported by this driver.
            return Fail("%s: Operation type %s not supported in ArmnnDriver",
                        __func__, toString(operation.type).c_str());
    }
}
75
Mike Kellyb5fdf382019-06-11 16:35:25 +010076bool HalPolicy::ValidateConv2dParameters(const Operation &operation)
77{
78 if (operation.inputs.size() != 10 && operation.inputs.size() != 7)
79 {
80 return Fail("%s: Unsupported number of operation inputs", __func__);
81 }
82 return true;
83}
84
85bool HalPolicy::ValidateDepthwiseConv2dParameters(const Operation &operation)
86{
87 if (operation.inputs.size() != 11 && operation.inputs.size() != 8)
88 {
89 return Fail("%s: Unsupported number of operation inputs", __func__);
90 }
91 return true;
92}
93
arovir01b0717b52018-09-05 17:03:25 +010094bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
95{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +010096 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
97 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 1, model, data);
arovir01b0717b52018-09-05 17:03:25 +010098
99 if (!input0.IsValid() || !input1.IsValid())
100 {
101 return Fail("%s: Operation has invalid inputs", __func__);
102 }
103
104 // The FuseActivation parameter is always the input index 2
105 // and it should be optional
106 ActivationFn activationFunction;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100107 if (!GetOptionalInputActivation<hal_1_0::HalPolicy>(operation, 2, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100108 {
109 return Fail("%s: Operation has invalid inputs", __func__);
110 }
111
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100112 const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100113 if (!outputOperand)
114 {
115 return false;
116 }
117
118 const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
119
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100120 if (!IsLayerSupportedForAnyBackend(__func__,
121 armnn::IsAdditionSupported,
122 data.m_Backends,
123 input0.GetTensorInfo(),
124 input1.GetTensorInfo(),
125 outInfo))
arovir01b0717b52018-09-05 17:03:25 +0100126 {
127 return false;
128 }
129
130 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
131 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
132
133 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
134 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
135
136 if (endLayer != nullptr)
137 {
138 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100139 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100140 }
141 else
142 {
143 return Fail("%s: ProcessActivation failed", __func__);
144 }
145}
146
// Converts a NNAPI AVERAGE_POOL_2D operation by delegating to the shared
// pooling conversion with the Average pooling algorithm.
bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
}
151
// Converts a NNAPI CONCATENATION operation into an ArmNN Concat layer.
// Handles: negative concat axes, rank-1/rank-2 inputs (expanded to rank 3 via
// reshape layers), and concat dimensions that require a permutation before and
// after the concat. Returns false on invalid inputs or unsupported layouts.
bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
{
    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar<hal_1_0::HalPolicy>(operation, numInputTensors, OperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }


    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    // https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    // Track whether any input needed a rank-expanding reshape, and by how many
    // dimensions, so the output can be adjusted to match (and undone at the end).
    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const Operand* const operand = GetInputOperand<hal_1_0::HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        LayerInputHandle operandInputHandle =
            ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, i, model, data);

        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
            );

            // Point to the reshape operation rather then the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    BOOST_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check if permutations is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
        std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute =
        CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // this is no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    SwizzleInputs(*data.m_Network, inputHandles, inputShapes, permutationPair.first);

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor =
            armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (const armnn::Exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    // Collect pointers to all input tensor infos for the backend support query.
    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
        [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsConcatSupported,
                                       data.m_Backends,
                                       inputTensorInfos,
                                       outputInfo,
                                       concatDescriptor))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
                                                                   layer->GetOutputSlot(0),
                                                                   permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        layer = &AddReshapeLayer(
                *data.m_Network,
                layer->GetOutputSlot(0),
                afterConcatInfo
        );
    }

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}
376
// Converts a NNAPI DEQUANTIZE operation into an ArmNN Dequantize layer.
// If the model left the output shape dynamic (unset), the output shape is
// inferred from the input tensor and passed through to output tracking.
bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid input", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicOutput(outputInfo))
    {
        // Dequantize preserves shape, so the input shape is the output shape.
        ALOGD("Output shape not set, will infer from input");
        outputInfo.SetShape(input.GetTensorInfo().GetShape());
    }

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsDequantizeSupported,
                                       data.m_Backends,
                                       input.GetTensorInfo(),
                                       outputInfo))
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    // Pass the (possibly inferred) output info explicitly so the tracked
    // output slot carries the resolved shape rather than the dynamic one.
    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}
419
arovir01b0717b52018-09-05 17:03:25 +0100420bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
421{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100422 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100423 if (!input.IsValid())
424 {
425 return Fail("%s: Operation has invalid inputs", __func__);
426 }
427
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100428 const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100429 if (!outputOperand)
430 {
431 return Fail("%s: Operation has invalid outputs", __func__);
432 }
433
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100434 if (!IsLayerSupportedForAnyBackend(__func__,
435 armnn::IsFloorSupported,
436 data.m_Backends,
437 input.GetTensorInfo(),
438 GetTensorInfoForOperand(*outputOperand)))
arovir01b0717b52018-09-05 17:03:25 +0100439 {
440 return false;
441 }
442
443 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
444 assert(layer != nullptr);
445 input.Connect(layer->GetInputSlot(0));
446
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100447 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100448}
449
// Converts a NNAPI FULLY_CONNECTED operation into an ArmNN FullyConnected
// layer (with fused activation). Inputs with more than 2 dimensions are
// flattened to 2-D via an inserted reshape layer, as required by ArmNN.
bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 1, model, data); // 2D
    ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias    = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    // FlattenFullyConnectedInput throws if the input cannot be flattened to
    // match the weights' 2-D shape; translate that into a conversion failure.
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    } catch (const std::exception &e) {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<hal_1_0::HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled           = true;

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsFullyConnectedSupported,
                                       data.m_Backends,
                                       reshapedInfo,
                                       outputInfo,
                                       weights.GetInfo(),
                                       bias.GetInfo(),
                                       desc))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            // Insert a reshape layer to flatten the input before the FC layer.
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
543
544bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
545 const Model& model,
546 ConversionData& data)
547{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100548 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100549 if (!input.IsValid())
550 {
551 return Fail("%s: Operation has invalid inputs", __func__);
552 }
553
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100554 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100555 if (!output)
556 {
557 return Fail("%s: Could not read output 0", __func__);
558 }
559
narpra012fb804a2018-10-22 14:52:32 +0100560 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
arovir01b0717b52018-09-05 17:03:25 +0100561 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
562
arovir01b0717b52018-09-05 17:03:25 +0100563 armnn::NormalizationDescriptor descriptor;
564
narpra012fb804a2018-10-22 14:52:32 +0100565 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +0100566 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
narpra012fb804a2018-10-22 14:52:32 +0100567 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
arovir01b0717b52018-09-05 17:03:25 +0100568
569 if (!input.IsValid() ||
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100570 !GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
571 !GetInputFloat32<hal_1_0::HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
572 !GetInputFloat32<hal_1_0::HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
573 !GetInputFloat32<hal_1_0::HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100574 {
575 return Fail("%s: Operation has invalid inputs", __func__);
576 }
577
578 // ArmNN expects normSize to be the full size of the normalization
579 // window rather than the radius as in AndroidNN.
580 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
581
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100582 if (!IsLayerSupportedForAnyBackend(__func__,
583 armnn::IsNormalizationSupported,
584 data.m_Backends,
585 inputInfo,
586 outputInfo,
587 descriptor))
arovir01b0717b52018-09-05 17:03:25 +0100588 {
589 return false;
590 }
591
592
593 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
594 assert(layer != nullptr);
narpra012fb804a2018-10-22 14:52:32 +0100595 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +0100596
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100597 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100598}
599
600bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
601{
602 armnn::ActivationDescriptor desc;
603 desc.m_Function = armnn::ActivationFunction::Sigmoid;
604
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100605 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100606}
607
608bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
609{
610 // Inputs:
611 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
612 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100613 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100614 if (!input.IsValid())
615 {
616 return Fail("%s: Could not read input 0: input", __func__);
617 }
618 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100619 LayerInputHandle outputStateIn = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 18, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100620 if (!outputStateIn.IsValid())
621 {
622 return Fail("%s: Could not read input 18: outputStateIn", __func__);
623 }
624 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100625 LayerInputHandle cellStateIn = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 19, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100626 if (!cellStateIn.IsValid())
627 {
628 return Fail("%s: Could not read input 19: cellStateIn", __func__);
629 }
630
631 // Get the mandatory input tensors:
632 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
633 // [num_units, input_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100634 const ConstTensorPin inputToForgetWeightsPin =
635 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 2, model, data);
636 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
637 // [num_units, input_size].
638 const ConstTensorPin inputToCellWeightsPin =
639 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 3, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100640 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
641 // [num_units, input_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100642 const ConstTensorPin inputToOutputWeightsPin =
643 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 4, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100644 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
645 // [num_units, output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100646 const ConstTensorPin recurrentToForgetWeightsPin =
647 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 6, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100648 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
649 // [num_units, output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100650 const ConstTensorPin recurrentToCellWeightsPin =
651 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 7, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100652 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
653 // [num_units, output_size].
654 const ConstTensorPin recurrentToOutputWeightsPin =
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100655 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 8, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100656 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100657 const ConstTensorPin forgetGateBiasPin =
658 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 13, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100659 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100660 const ConstTensorPin cellBiasPin =
661 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 14, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100662 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100663 const ConstTensorPin outputGateBiasPin =
664 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 15, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100665
666 if (!inputToForgetWeightsPin.IsValid() ||
667 !inputToCellWeightsPin.IsValid() ||
668 !inputToOutputWeightsPin.IsValid() ||
669 !recurrentToForgetWeightsPin.IsValid() ||
670 !recurrentToCellWeightsPin.IsValid() ||
671 !recurrentToOutputWeightsPin.IsValid() ||
672 !forgetGateBiasPin.IsValid() ||
673 !cellBiasPin.IsValid() ||
674 !outputGateBiasPin.IsValid())
675 {
676 return Fail("%s: Operation has invalid tensor inputs", __func__);
677 }
678
679 // Get the optional input tensors:
680 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
681 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100682 const ConstTensorPin inputToInputWeightsPin =
683 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
684 1,
685 model,
686 data,
687 g_DontPermute,
688 nullptr,
689 true);
690
arovir01b0717b52018-09-05 17:03:25 +0100691 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
692 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
693 // “num_units”), or the second dimension of the “projection_weights”, if defined.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100694 const ConstTensorPin recurrentToInputWeightsPin =
695 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
696 5,
697 model,
698 data,
699 g_DontPermute,
700 nullptr,
701 true);
702
arovir01b0717b52018-09-05 17:03:25 +0100703 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100704 const ConstTensorPin cellToInputWeightsPin =
705 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
706 9,
707 model,
708 data,
709 g_DontPermute,
710 nullptr,
711 true);
712
arovir01b0717b52018-09-05 17:03:25 +0100713 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100714 const ConstTensorPin cellToForgetWeightsPin =
715 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
716 10,
717 model,
718 data,
719 g_DontPermute,
720 nullptr,
721 true);
722
arovir01b0717b52018-09-05 17:03:25 +0100723 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100724 const ConstTensorPin cellToOutputWeightsPin =
725 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
726 11,
727 model,
728 data,
729 g_DontPermute,
730 nullptr,
731 true);
732
arovir01b0717b52018-09-05 17:03:25 +0100733 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100734 const ConstTensorPin inputGateBiasPin =
735 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
736 12,
737 model,
738 data,
739 g_DontPermute,
740 nullptr,
741 true);
742
arovir01b0717b52018-09-05 17:03:25 +0100743 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
744 // [output_size, num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100745 const ConstTensorPin projectionWeightsPin =
746 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
747 16,
748 model,
749 data,
750 g_DontPermute,
751 nullptr,
752 true);
753
arovir01b0717b52018-09-05 17:03:25 +0100754 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100755 const ConstTensorPin projectionBiasPin =
756 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
757 17,
758 model,
759 data,
760 g_DontPermute,
761 nullptr,
762 true);
arovir01b0717b52018-09-05 17:03:25 +0100763
764 if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
765 (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
766 (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
767 (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
768 (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
769 (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
770 (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
771 (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
772 {
773 return Fail("%s: Operation has invalid tensor inputs", __func__);
774 }
775
776 // Get the mandatory input scalars (actually 1-D tensors of size 1):
777 // 20: The activation function: A value indicating the activation function:
778 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
779 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
780 // If set to 0.0 then clipping is disabled.
781 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
782 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
783 ActivationFn activation;
784 float cellClip;
785 float projClip;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100786 if (!GetInputActivationFunctionFromTensor<hal_1_0::HalPolicy>(operation, 20, activation, model, data) ||
787 !GetInputScalar<hal_1_0::HalPolicy>(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
788 !GetInputScalar<hal_1_0::HalPolicy>(operation, 22, OperandType::FLOAT32, projClip, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100789 {
790 return Fail("%s: Operation has invalid scalar inputs", __func__);
791 }
792
793 // Outputs:
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100794 // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
795 // with CIFG, or [batch_size, num_units * 3] without CIFG.
796 const Operand* scratchBuffer = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100797 if (!scratchBuffer)
798 {
799 return Fail("%s: Could not read output 0: scratchBuffer", __func__);
800 }
801 // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100802 const Operand* outputStateOut = GetOutputOperand<hal_1_0::HalPolicy>(operation, 1, model);
arovir01b0717b52018-09-05 17:03:25 +0100803 if (!outputStateOut)
804 {
805 return Fail("%s: Could not read output 1: outputStateOut", __func__);
806 }
807 // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100808 const Operand* cellStateOut = GetOutputOperand<hal_1_0::HalPolicy>(operation, 2, model);
arovir01b0717b52018-09-05 17:03:25 +0100809 if (!cellStateOut)
810 {
811 return Fail("%s: Could not read output 2: cellStateOut", __func__);
812 }
813 // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
814 // effectively the same as the current “output state (out)” value.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100815 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 3, model);
arovir01b0717b52018-09-05 17:03:25 +0100816 if (!output)
817 {
818 return Fail("%s: Could not read output 3: output", __func__);
819 }
820
821 // set the params structure for the AddLstmLayer call
822 armnn::LstmInputParams params;
823 params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
824 params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
825 params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
826 params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
827 params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
828 params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
829 params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
830 params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
831 params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
832 params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
833 params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
834 params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
835 params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
836 params.m_CellBias = cellBiasPin.GetConstTensorPtr();
837 params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
838 params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
839 params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
840
841 // set the layer descriptor
842 armnn::LstmDescriptor desc;
843 desc.m_ActivationFunc = activation;
844 desc.m_ClippingThresCell = cellClip;
845 desc.m_ClippingThresProj = projClip;
846 desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
847 params.m_RecurrentToInputWeights == nullptr ||
848 params.m_InputGateBias == nullptr);
849 desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
850 params.m_CellToOutputWeights != nullptr);
851 desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
852
853 // validate the optional input groups
854 if (desc.m_CifgEnabled &&
855 (params.m_InputToInputWeights != nullptr ||
856 params.m_RecurrentToInputWeights != nullptr ||
857 params.m_InputGateBias != nullptr))
858 {
859 return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
860 " and input gate bias must be provided", __func__);
861 }
862
863 if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
864 {
865 return Fail("%s: projection bias should not be provided without projection weights", __func__);
866 }
867
868 if (desc.m_PeepholeEnabled &&
869 (params.m_CellToForgetWeights == nullptr ||
870 params.m_CellToOutputWeights == nullptr ||
871 (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
872 {
873 return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
874 " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
875 }
876
877 // Check if the layer is supported
878 // Inputs
879 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
880 const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
881 const armnn::TensorInfo& cellStateInInfo = cellStateIn.GetTensorInfo();
882
883 // Outputs
884 const armnn::TensorInfo& scratchBufferInfo = GetTensorInfoForOperand(*scratchBuffer);
885 const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
886 const armnn::TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
887 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
888
889 // Basic parameters
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100890 armnn::LstmInputParamsInfo paramsInfo;
891 paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
892 paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
893 paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
894 paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
895 paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
896 paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
897 paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
898 paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
899 paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100900
901 if(!desc.m_CifgEnabled)
902 {
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100903 paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
904 paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100905 if (params.m_CellToInputWeights != nullptr)
906 {
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100907 paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100908 }
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100909 paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100910 }
911
912 if(desc.m_ProjectionEnabled)
913 {
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100914 paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100915 if (params.m_ProjectionBias != nullptr)
916 {
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100917 paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100918 }
919 }
920
921 if(desc.m_PeepholeEnabled)
922 {
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100923 paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
924 paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100925 }
926
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100927 if (!IsLayerSupportedForAnyBackend(__func__,
928 armnn::IsLstmSupported,
929 data.m_Backends,
930 inputInfo,
931 outputStateInInfo,
932 cellStateInInfo,
933 scratchBufferInfo,
934 outputStateOutInfo,
935 cellStateOutInfo,
936 outputInfo,
937 desc,
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100938 paramsInfo))
arovir01b0717b52018-09-05 17:03:25 +0100939 {
940 return false;
941 }
942
943 // Add the layer
944 armnn::IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");
945
946 input.Connect(layer->GetInputSlot(0));
947 outputStateIn.Connect(layer->GetInputSlot(1));
948 cellStateIn.Connect(layer->GetInputSlot(2));
949
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100950 return (SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, 0, model, data) &&
951 SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 1, *layer, 1, model, data) &&
952 SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 2, *layer, 2, model, data) &&
953 SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 3, *layer, 3, model, data));
arovir01b0717b52018-09-05 17:03:25 +0100954}
955
956bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
957{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100958 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100959 if (!input.IsValid())
960 {
961 return Fail("%s: Operation has invalid inputs", __func__);
962 }
963
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100964 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100965 if (!output)
966 {
967 return Fail("%s: Could not read output 0", __func__);
968 }
969
970 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
971 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
972
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +0100973 if (IsDynamicOutput(outputInfo))
974 {
975 return Fail("%s: Dynamic output not supported", __func__);
976 }
977
Matteo Martincigh58f71092018-09-25 15:58:52 +0100978 armnn::L2NormalizationDescriptor desc;
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +0100979 desc.m_DataLayout = armnn::DataLayout::NHWC;
Matteo Martincigh58f71092018-09-25 15:58:52 +0100980
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100981 if (!IsLayerSupportedForAnyBackend(__func__,
982 armnn::IsL2NormalizationSupported,
983 data.m_Backends,
984 inputInfo,
985 outputInfo,
986 desc))
arovir01b0717b52018-09-05 17:03:25 +0100987 {
988 return false;
989 }
990
Matteo Martincigh58f71092018-09-25 15:58:52 +0100991 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
arovir01b0717b52018-09-05 17:03:25 +0100992 assert(layer != nullptr);
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +0100993 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +0100994
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100995 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100996}
997
998bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
999{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001000 return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001001}
1002
1003bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
1004{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001005 return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001006}
1007
1008bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
1009{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001010 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
1011 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 1, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001012
1013 if (!input0.IsValid() || !input1.IsValid())
1014 {
1015 return Fail("%s: Operation has invalid inputs", __func__);
1016 }
1017
1018 // The FuseActivation parameter is always the input index 2
1019 // and it should be optional
1020 ActivationFn activationFunction;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001021 if (!GetOptionalInputActivation<hal_1_0::HalPolicy>(operation, 2, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001022 {
1023 return Fail("%s: Operation has invalid inputs", __func__);
1024 }
1025
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001026 const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001027
1028 if (outputOperand == nullptr)
1029 {
1030 return false;
1031 }
1032
1033 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
1034
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001035 if (!IsLayerSupportedForAnyBackend(__func__,
1036 armnn::IsMultiplicationSupported,
1037 data.m_Backends,
1038 input0.GetTensorInfo(),
1039 input1.GetTensorInfo(),
1040 outInfo))
arovir01b0717b52018-09-05 17:03:25 +01001041 {
1042 return false;
1043 }
1044
1045 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
1046 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
1047
1048 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
1049 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
1050
1051 if (endLayer != nullptr)
1052 {
1053 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001054 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001055 }
1056 else
1057 {
1058 return Fail("%s: ProcessActivation failed", __func__);
1059 }
1060}
1061
1062bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
1063{
1064 armnn::ActivationDescriptor desc;
1065 desc.m_Function = armnn::ActivationFunction::ReLu;
1066
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001067 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001068}
1069
1070bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
1071{
1072 armnn::ActivationDescriptor desc;
1073 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1074 desc.m_A = 1.0f;
1075 desc.m_B = -1.0f;
1076
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001077 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001078}
1079
1080bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
1081{
1082 armnn::ActivationDescriptor desc;
1083 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1084 desc.m_A = 6.0f;
1085
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001086 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001087}
1088
1089bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
1090{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001091 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001092 if (!input.IsValid())
1093 {
1094 return Fail("%s: Operation has invalid inputs", __func__);
1095 }
1096
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001097 const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001098 if (!outputOperand)
1099 {
1100 return Fail("%s: Operation has no outputs", __func__);
1101 }
1102
Aron Virginas-Tar9adbb352019-07-11 11:00:43 +01001103 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +01001104 if (IsDynamicOutput(outputInfo))
1105 {
Aron Virginas-Tar9adbb352019-07-11 11:00:43 +01001106 ALOGD("Output shape not set, will infer from input");
1107 outputInfo.SetShape(input.GetTensorInfo().GetShape());
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +01001108 }
arovir01b0717b52018-09-05 17:03:25 +01001109
1110 armnn::SoftmaxDescriptor desc;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001111 if (!GetInputFloat32<hal_1_0::HalPolicy>(operation, 1, desc.m_Beta, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001112 {
1113 return Fail("%s: Operation has invalid inputs", __func__);
1114 }
1115
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001116 if (!IsLayerSupportedForAnyBackend(__func__,
1117 armnn::IsSoftmaxSupported,
1118 data.m_Backends,
1119 input.GetTensorInfo(),
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +01001120 outputInfo,
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001121 desc))
arovir01b0717b52018-09-05 17:03:25 +01001122 {
1123 return false;
1124 }
1125
1126 armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
1127 assert(layer != nullptr);
1128 input.Connect(layer->GetInputSlot(0));
1129
Aron Virginas-Tar9adbb352019-07-11 11:00:43 +01001130 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation,
1131 0,
1132 *layer,
1133 model,
1134 data,
1135 armnn::Optional<armnn::TensorInfo>(outputInfo));
arovir01b0717b52018-09-05 17:03:25 +01001136}
1137
Keith Davisa6bc52f2019-06-26 09:39:49 +01001138bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
1139{
1140 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
1141
1142 if (!input.IsValid() )
1143 {
1144 return Fail("%s: Operation has invalid inputs", __func__);
1145 }
1146
1147 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1148 unsigned int rank = inputInfo.GetNumDimensions();
1149
1150 if (rank != 4)
1151 {
1152 return Fail("%s: Only inputs with rank 4 are supported", __func__);
1153 }
1154
1155 armnn::SpaceToDepthDescriptor desc;
1156 bool dataLayoutCheck;
1157
1158 GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
1159
1160 if (desc.m_BlockSize <= 1)
1161 {
1162 return Fail("%s: Block size must be at least 1 in all dimensions");
1163 }
1164
1165 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
1166 if (!output)
1167 {
1168 return Fail("%s: Could not read output 0", __func__);
1169 }
1170
1171 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1172 if (!IsLayerSupportedForAnyBackend(__func__,
1173 armnn::IsSpaceToDepthSupported,
1174 data.m_Backends,
1175 inputInfo,
1176 outputInfo,
1177 desc))
1178 {
1179 return false;
1180 }
1181
1182 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
1183 assert(layer != nullptr);
1184 input.Connect(layer->GetInputSlot(0));
1185
1186 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
1187}
1188
arovir01b0717b52018-09-05 17:03:25 +01001189bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
1190{
1191 armnn::ActivationDescriptor desc;
1192 desc.m_Function = armnn::ActivationFunction::TanH;
1193 desc.m_A = 1.0f; // android nn does not support tanH parameters
1194 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1195
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001196 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001197}
1198
1199bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
1200{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001201 const Operand* inputOperand = GetInputOperand<hal_1_0::HalPolicy>(operation, 0, model);
1202 const Operand* requestedShapeOperand = GetInputOperand<hal_1_0::HalPolicy>(operation, 1, model);
1203 const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001204
1205 if (inputOperand == nullptr
1206 || requestedShapeOperand == nullptr
1207 || outputOperand == nullptr)
1208 {
1209 return Fail("%s: Operation has invalid inputs", __func__);
1210 }
1211
1212
1213 if (requestedShapeOperand->dimensions.size() != 1)
1214 {
1215 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
1216 __func__, requestedShapeOperand->dimensions.size());
1217 }
1218
1219 std::vector<int32_t> targetDimensions;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001220 if (!GetTensorInt32Values<hal_1_0::HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001221 {
1222 return Fail("%s: Could not read values of input 1", __func__);
1223 }
1224
1225 const Shape inputOperandShape = GetOperandShape(*inputOperand);
1226
1227 Shape requestedShape;
1228 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
1229 // function that resolves these values into a fully specified tensor shape.
1230 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
1231 {
1232 return Fail("%s: Failed to resolve the requested shape", __func__);
1233 }
1234
1235 const Shape outputOperandShape = GetOperandShape(*outputOperand);
1236 if (!SameShape(requestedShape, outputOperandShape))
1237 {
1238 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
1239 }
1240
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001241 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001242 if (!input.IsValid())
1243 {
1244 return Fail("%s: Could not read input 0", __func__);
1245 }
1246
arovir01b0717b52018-09-05 17:03:25 +01001247 armnn::ReshapeDescriptor reshapeDescriptor;
1248 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
1249 requestedShape.dimensions.data());
1250
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001251 if (!IsLayerSupportedForAnyBackend(__func__,
1252 armnn::IsReshapeSupported,
1253 data.m_Backends,
1254 input.GetTensorInfo(),
1255 reshapeDescriptor))
Matteo Martincigh265d1ad2019-01-08 18:14:53 +00001256 {
1257 return false;
1258 }
1259
arovir01b0717b52018-09-05 17:03:25 +01001260 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
1261 assert(layer != nullptr);
1262 input.Connect(layer->GetInputSlot(0));
1263
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001264 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001265}
1266
1267bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data)
1268{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001269 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001270 if (!input.IsValid())
1271 {
1272 return Fail("%s: Could not read input 0", __func__);
1273 }
1274
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001275 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001276 if (!output)
1277 {
1278 return Fail("%s: Could not read output 0", __func__);
1279 }
1280
1281 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1282 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1283
Aron Virginas-Tara5daf862019-07-01 19:07:20 +01001284 armnn::ResizeDescriptor desc;
1285 desc.m_Method = armnn::ResizeMethod::Bilinear;
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001286 desc.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +01001287
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001288 if (!IsLayerSupportedForAnyBackend(__func__,
Aron Virginas-Tara5daf862019-07-01 19:07:20 +01001289 armnn::IsResizeSupported,
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001290 data.m_Backends,
1291 inputInfo,
Aron Virginas-Tara5daf862019-07-01 19:07:20 +01001292 outputInfo,
1293 desc))
arovir01b0717b52018-09-05 17:03:25 +01001294 {
1295 return false;
1296 }
1297
Aron Virginas-Tar535607d2019-07-03 15:46:15 +01001298 if (!GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, desc.m_TargetWidth, model, data) ||
1299 !GetInputScalar<hal_1_0::HalPolicy>(operation, 2, OperandType::INT32, desc.m_TargetHeight, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001300 {
1301 return Fail("%s: Operation has invalid inputs", __func__);
1302 }
1303
Aron Virginas-Tara5daf862019-07-01 19:07:20 +01001304 armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(desc);
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001305
arovir01b0717b52018-09-05 17:03:25 +01001306 assert(layer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +01001307
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001308 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1309 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +01001310
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001311 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001312
1313}
1314
1315} // namespace hal_1_0
Matteo Martincigh58f71092018-09-25 15:58:52 +01001316} // namespace armnn_driver