//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "HalPolicy.hpp"

#include <armnn/Optional.hpp>

#include "FullyConnected.hpp"

namespace armnn_driver
{
namespace hal_1_0
{
17bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
18{
19 switch (operation.type)
20 {
21 case V1_0::OperationType::ADD:
22 return ConvertAdd(operation, model, data);
23 case V1_0::OperationType::AVERAGE_POOL_2D:
24 return ConvertAveragePool2d(operation, model, data);
25 case V1_0::OperationType::CONCATENATION:
26 return ConvertConcatenation(operation, model, data);
27 case V1_0::OperationType::CONV_2D:
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +010028 return ValidateConv2dParameters(operation) &&
29 ConvertConv2d<hal_1_0::HalPolicy>(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +010030 case V1_0::OperationType::DEPTHWISE_CONV_2D:
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +010031 return ValidateDepthwiseConv2dParameters(operation) &&
32 ConvertDepthwiseConv2d<hal_1_0::HalPolicy>(operation, model, data);
David Monahanacf479a2019-05-29 14:27:04 +010033 case V1_0::OperationType::DEQUANTIZE:
34 return ConvertDequantize(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +010035 case V1_0::OperationType::FLOOR:
36 return ConvertFloor(operation, model, data);
37 case V1_0::OperationType::FULLY_CONNECTED:
38 return ConvertFullyConnected(operation, model, data);
39 case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
40 return ConvertLocalResponseNormalization(operation, model, data);
41 case V1_0::OperationType::LOGISTIC:
42 return ConvertLogistic(operation, model, data);
43 case V1_0::OperationType::LSTM:
44 return ConvertLstm(operation, model, data);
45 case V1_0::OperationType::L2_NORMALIZATION:
46 return ConvertL2Normalization(operation, model, data);
47 case V1_0::OperationType::L2_POOL_2D:
48 return ConvertL2Pool2d(operation, model, data);
49 case V1_0::OperationType::MAX_POOL_2D:
50 return ConvertMaxPool2d(operation, model, data);
51 case V1_0::OperationType::MUL:
52 return ConvertMul(operation, model, data);
53 case V1_0::OperationType::RELU:
54 return ConvertReLu(operation, model, data);
55 case V1_0::OperationType::RELU1:
56 return ConvertReLu1(operation, model, data);
57 case V1_0::OperationType::RELU6:
58 return ConvertReLu6(operation, model, data);
59 case V1_0::OperationType::SOFTMAX:
60 return ConvertSoftmax(operation, model, data);
Keith Davisa6bc52f2019-06-26 09:39:49 +010061 case V1_0::OperationType::SPACE_TO_DEPTH:
62 return ConvertSpaceToDepth(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +010063 case V1_0::OperationType::TANH:
64 return ConvertTanH(operation, model, data);
65 case V1_0::OperationType::RESHAPE:
66 return ConvertReshape(operation, model, data);
67 case V1_0::OperationType::RESIZE_BILINEAR:
68 return ConvertResizeBilinear(operation, model, data);
69 default:
70 return Fail("%s: Operation type %s not supported in ArmnnDriver",
71 __func__, toString(operation.type).c_str());
72 }
73}
74
Mike Kellyb5fdf382019-06-11 16:35:25 +010075bool HalPolicy::ValidateConv2dParameters(const Operation &operation)
76{
77 if (operation.inputs.size() != 10 && operation.inputs.size() != 7)
78 {
79 return Fail("%s: Unsupported number of operation inputs", __func__);
80 }
81 return true;
82}
83
84bool HalPolicy::ValidateDepthwiseConv2dParameters(const Operation &operation)
85{
86 if (operation.inputs.size() != 11 && operation.inputs.size() != 8)
87 {
88 return Fail("%s: Unsupported number of operation inputs", __func__);
89 }
90 return true;
91}
92
arovir01b0717b52018-09-05 17:03:25 +010093bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
94{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +010095 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
96 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 1, model, data);
arovir01b0717b52018-09-05 17:03:25 +010097
98 if (!input0.IsValid() || !input1.IsValid())
99 {
100 return Fail("%s: Operation has invalid inputs", __func__);
101 }
102
103 // The FuseActivation parameter is always the input index 2
104 // and it should be optional
105 ActivationFn activationFunction;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100106 if (!GetOptionalInputActivation<hal_1_0::HalPolicy>(operation, 2, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100107 {
108 return Fail("%s: Operation has invalid inputs", __func__);
109 }
110
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100111 const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100112 if (!outputOperand)
113 {
114 return false;
115 }
116
117 const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
118
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100119 if (!IsLayerSupportedForAnyBackend(__func__,
120 armnn::IsAdditionSupported,
121 data.m_Backends,
122 input0.GetTensorInfo(),
123 input1.GetTensorInfo(),
124 outInfo))
arovir01b0717b52018-09-05 17:03:25 +0100125 {
126 return false;
127 }
128
129 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
130 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
131
132 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
133 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
134
135 if (endLayer != nullptr)
136 {
137 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100138 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100139 }
140 else
141 {
142 return Fail("%s: ProcessActivation failed", __func__);
143 }
144}
145
146bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
147{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100148 return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100149}
150
// Converts a CONCATENATION operation into an armnn Concat layer.
// The first N-1 inputs are the tensors to join; the last input is the concat
// axis (INT32, may be negative per the TensorFlow convention). Because the
// armnn concat implementation only supports a limited set of axes, inputs of
// rank < 3 are first expanded via reshape layers, and unsupported axes are
// handled by permuting (swizzling) inputs before and after the concat; any
// such reshapes/permutes are undone on the output so the tracked output slot
// matches the operand's original shape.
bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
{
    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar<hal_1_0::HalPolicy>(operation, numInputTensors, OperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    // Mutable copies: both may be rewritten below if reshapes/permutes are inserted.
    armnn::TensorInfo  outputInfo  = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    //    https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    // Track whether any input was reshaped and by how many dimensions, so the
    // concat axis and the output shape can be adjusted to match afterwards.
    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const Operand* const operand = GetInputOperand<hal_1_0::HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        LayerInputHandle operandInputHandle =
            ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, i, model, data);

        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
            );

            // Point to the reshape operation rather then the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    BOOST_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check if permutations is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
        std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute =
        CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // this is no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    SwizzleInputs(*data.m_Network, inputHandles, inputShapes, permutationPair.first);

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor =
            armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (const armnn::Exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    // Gather per-input tensor infos for the backend support query.
    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
        [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsConcatSupported,
                                       data.m_Backends,
                                       inputTensorInfos,
                                       outputInfo,
                                       concatDescriptor))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
                                                                   layer->GetOutputSlot(0),
                                                                   permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        layer = &AddReshapeLayer(
                *data.m_Network,
                layer->GetOutputSlot(0),
                afterConcatInfo
        );
    }

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}
375
David Monahanacf479a2019-05-29 14:27:04 +0100376bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
377{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100378 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
David Monahanacf479a2019-05-29 14:27:04 +0100379
380 if (!input.IsValid())
381 {
382 return Fail("%s: Operation has invalid input", __func__);
383 }
384
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100385 const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
David Monahanacf479a2019-05-29 14:27:04 +0100386 if (!outputOperand)
387 {
388 return Fail("%s: Operation has invalid outputs", __func__);
389 }
390
391 if (!IsLayerSupportedForAnyBackend(__func__,
392 armnn::IsDequantizeSupported,
393 data.m_Backends,
394 input.GetTensorInfo(),
395 GetTensorInfoForOperand(*outputOperand)))
396 {
397 return false;
398 }
399
400 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
401 assert(layer != nullptr);
402 input.Connect(layer->GetInputSlot(0));
403
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100404 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
David Monahanacf479a2019-05-29 14:27:04 +0100405}
406
arovir01b0717b52018-09-05 17:03:25 +0100407bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
408{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100409 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100410 if (!input.IsValid())
411 {
412 return Fail("%s: Operation has invalid inputs", __func__);
413 }
414
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100415 const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100416 if (!outputOperand)
417 {
418 return Fail("%s: Operation has invalid outputs", __func__);
419 }
420
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100421 if (!IsLayerSupportedForAnyBackend(__func__,
422 armnn::IsFloorSupported,
423 data.m_Backends,
424 input.GetTensorInfo(),
425 GetTensorInfoForOperand(*outputOperand)))
arovir01b0717b52018-09-05 17:03:25 +0100426 {
427 return false;
428 }
429
430 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
431 assert(layer != nullptr);
432 input.Connect(layer->GetInputSlot(0));
433
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100434 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100435}
436
// Converts a FULLY_CONNECTED operation into an armnn FullyConnected layer.
// Inputs: 0 = input tensor, 1 = constant 2-D weights, 2 = constant 1-D bias,
// 3 = fused activation function. Output 0 receives the result. Inputs of rank
// > 2 are flattened to 2-D via an inserted Reshape layer before the fully
// connected layer.
bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 1, model, data); // 2D
    ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    // Compute the flattened 2-D shape; FlattenFullyConnectedInput throws if the
    // input and weight shapes are incompatible.
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    } catch (const std::exception &e) {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<hal_1_0::HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsFullyConnectedSupported,
                                       data.m_Backends,
                                       reshapedInfo,
                                       outputInfo,
                                       weights.GetInfo(),
                                       bias.GetInfo(),
                                       desc))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            // Insert a Reshape layer so the fully connected layer sees the
            // flattened 2-D input computed above.
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
530
531bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
532 const Model& model,
533 ConversionData& data)
534{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100535 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100536 if (!input.IsValid())
537 {
538 return Fail("%s: Operation has invalid inputs", __func__);
539 }
540
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100541 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100542 if (!output)
543 {
544 return Fail("%s: Could not read output 0", __func__);
545 }
546
narpra012fb804a2018-10-22 14:52:32 +0100547 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
arovir01b0717b52018-09-05 17:03:25 +0100548 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
549
arovir01b0717b52018-09-05 17:03:25 +0100550 armnn::NormalizationDescriptor descriptor;
551
narpra012fb804a2018-10-22 14:52:32 +0100552 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +0100553 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
narpra012fb804a2018-10-22 14:52:32 +0100554 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
arovir01b0717b52018-09-05 17:03:25 +0100555
556 if (!input.IsValid() ||
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100557 !GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
558 !GetInputFloat32<hal_1_0::HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
559 !GetInputFloat32<hal_1_0::HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
560 !GetInputFloat32<hal_1_0::HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100561 {
562 return Fail("%s: Operation has invalid inputs", __func__);
563 }
564
565 // ArmNN expects normSize to be the full size of the normalization
566 // window rather than the radius as in AndroidNN.
567 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
568
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100569 if (!IsLayerSupportedForAnyBackend(__func__,
570 armnn::IsNormalizationSupported,
571 data.m_Backends,
572 inputInfo,
573 outputInfo,
574 descriptor))
arovir01b0717b52018-09-05 17:03:25 +0100575 {
576 return false;
577 }
578
579
580 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
581 assert(layer != nullptr);
narpra012fb804a2018-10-22 14:52:32 +0100582 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +0100583
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100584 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100585}
586
587bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
588{
589 armnn::ActivationDescriptor desc;
590 desc.m_Function = armnn::ActivationFunction::Sigmoid;
591
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100592 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100593}
594
595bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
596{
597 // Inputs:
598 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
599 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100600 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100601 if (!input.IsValid())
602 {
603 return Fail("%s: Could not read input 0: input", __func__);
604 }
605 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100606 LayerInputHandle outputStateIn = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 18, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100607 if (!outputStateIn.IsValid())
608 {
609 return Fail("%s: Could not read input 18: outputStateIn", __func__);
610 }
611 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100612 LayerInputHandle cellStateIn = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 19, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100613 if (!cellStateIn.IsValid())
614 {
615 return Fail("%s: Could not read input 19: cellStateIn", __func__);
616 }
617
618 // Get the mandatory input tensors:
619 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
620 // [num_units, input_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100621 const ConstTensorPin inputToForgetWeightsPin =
622 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 2, model, data);
623 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
624 // [num_units, input_size].
625 const ConstTensorPin inputToCellWeightsPin =
626 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 3, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100627 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
628 // [num_units, input_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100629 const ConstTensorPin inputToOutputWeightsPin =
630 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 4, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100631 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
632 // [num_units, output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100633 const ConstTensorPin recurrentToForgetWeightsPin =
634 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 6, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100635 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
636 // [num_units, output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100637 const ConstTensorPin recurrentToCellWeightsPin =
638 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 7, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100639 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
640 // [num_units, output_size].
641 const ConstTensorPin recurrentToOutputWeightsPin =
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100642 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 8, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100643 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100644 const ConstTensorPin forgetGateBiasPin =
645 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 13, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100646 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100647 const ConstTensorPin cellBiasPin =
648 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 14, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100649 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100650 const ConstTensorPin outputGateBiasPin =
651 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 15, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100652
653 if (!inputToForgetWeightsPin.IsValid() ||
654 !inputToCellWeightsPin.IsValid() ||
655 !inputToOutputWeightsPin.IsValid() ||
656 !recurrentToForgetWeightsPin.IsValid() ||
657 !recurrentToCellWeightsPin.IsValid() ||
658 !recurrentToOutputWeightsPin.IsValid() ||
659 !forgetGateBiasPin.IsValid() ||
660 !cellBiasPin.IsValid() ||
661 !outputGateBiasPin.IsValid())
662 {
663 return Fail("%s: Operation has invalid tensor inputs", __func__);
664 }
665
666 // Get the optional input tensors:
667 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
668 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100669 const ConstTensorPin inputToInputWeightsPin =
670 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
671 1,
672 model,
673 data,
674 g_DontPermute,
675 nullptr,
676 true);
677
arovir01b0717b52018-09-05 17:03:25 +0100678 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
679 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
680 // “num_units”), or the second dimension of the “projection_weights”, if defined.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100681 const ConstTensorPin recurrentToInputWeightsPin =
682 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
683 5,
684 model,
685 data,
686 g_DontPermute,
687 nullptr,
688 true);
689
arovir01b0717b52018-09-05 17:03:25 +0100690 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100691 const ConstTensorPin cellToInputWeightsPin =
692 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
693 9,
694 model,
695 data,
696 g_DontPermute,
697 nullptr,
698 true);
699
arovir01b0717b52018-09-05 17:03:25 +0100700 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100701 const ConstTensorPin cellToForgetWeightsPin =
702 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
703 10,
704 model,
705 data,
706 g_DontPermute,
707 nullptr,
708 true);
709
arovir01b0717b52018-09-05 17:03:25 +0100710 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100711 const ConstTensorPin cellToOutputWeightsPin =
712 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
713 11,
714 model,
715 data,
716 g_DontPermute,
717 nullptr,
718 true);
719
arovir01b0717b52018-09-05 17:03:25 +0100720 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100721 const ConstTensorPin inputGateBiasPin =
722 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
723 12,
724 model,
725 data,
726 g_DontPermute,
727 nullptr,
728 true);
729
arovir01b0717b52018-09-05 17:03:25 +0100730 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
731 // [output_size, num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100732 const ConstTensorPin projectionWeightsPin =
733 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
734 16,
735 model,
736 data,
737 g_DontPermute,
738 nullptr,
739 true);
740
arovir01b0717b52018-09-05 17:03:25 +0100741 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100742 const ConstTensorPin projectionBiasPin =
743 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
744 17,
745 model,
746 data,
747 g_DontPermute,
748 nullptr,
749 true);
arovir01b0717b52018-09-05 17:03:25 +0100750
751 if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
752 (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
753 (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
754 (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
755 (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
756 (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
757 (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
758 (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
759 {
760 return Fail("%s: Operation has invalid tensor inputs", __func__);
761 }
762
763 // Get the mandatory input scalars (actually 1-D tensors of size 1):
764 // 20: The activation function: A value indicating the activation function:
765 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
766 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
767 // If set to 0.0 then clipping is disabled.
768 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
769 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
770 ActivationFn activation;
771 float cellClip;
772 float projClip;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100773 if (!GetInputActivationFunctionFromTensor<hal_1_0::HalPolicy>(operation, 20, activation, model, data) ||
774 !GetInputScalar<hal_1_0::HalPolicy>(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
775 !GetInputScalar<hal_1_0::HalPolicy>(operation, 22, OperandType::FLOAT32, projClip, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100776 {
777 return Fail("%s: Operation has invalid scalar inputs", __func__);
778 }
779
780 // Outputs:
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100781 // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
782 // with CIFG, or [batch_size, num_units * 3] without CIFG.
783 const Operand* scratchBuffer = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100784 if (!scratchBuffer)
785 {
786 return Fail("%s: Could not read output 0: scratchBuffer", __func__);
787 }
788 // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100789 const Operand* outputStateOut = GetOutputOperand<hal_1_0::HalPolicy>(operation, 1, model);
arovir01b0717b52018-09-05 17:03:25 +0100790 if (!outputStateOut)
791 {
792 return Fail("%s: Could not read output 1: outputStateOut", __func__);
793 }
794 // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100795 const Operand* cellStateOut = GetOutputOperand<hal_1_0::HalPolicy>(operation, 2, model);
arovir01b0717b52018-09-05 17:03:25 +0100796 if (!cellStateOut)
797 {
798 return Fail("%s: Could not read output 2: cellStateOut", __func__);
799 }
800 // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
801 // effectively the same as the current “output state (out)” value.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100802 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 3, model);
arovir01b0717b52018-09-05 17:03:25 +0100803 if (!output)
804 {
805 return Fail("%s: Could not read output 3: output", __func__);
806 }
807
808 // set the params structure for the AddLstmLayer call
809 armnn::LstmInputParams params;
810 params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
811 params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
812 params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
813 params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
814 params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
815 params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
816 params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
817 params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
818 params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
819 params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
820 params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
821 params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
822 params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
823 params.m_CellBias = cellBiasPin.GetConstTensorPtr();
824 params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
825 params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
826 params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
827
828 // set the layer descriptor
829 armnn::LstmDescriptor desc;
830 desc.m_ActivationFunc = activation;
831 desc.m_ClippingThresCell = cellClip;
832 desc.m_ClippingThresProj = projClip;
833 desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
834 params.m_RecurrentToInputWeights == nullptr ||
835 params.m_InputGateBias == nullptr);
836 desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
837 params.m_CellToOutputWeights != nullptr);
838 desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
839
840 // validate the optional input groups
841 if (desc.m_CifgEnabled &&
842 (params.m_InputToInputWeights != nullptr ||
843 params.m_RecurrentToInputWeights != nullptr ||
844 params.m_InputGateBias != nullptr))
845 {
846 return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
847 " and input gate bias must be provided", __func__);
848 }
849
850 if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
851 {
852 return Fail("%s: projection bias should not be provided without projection weights", __func__);
853 }
854
855 if (desc.m_PeepholeEnabled &&
856 (params.m_CellToForgetWeights == nullptr ||
857 params.m_CellToOutputWeights == nullptr ||
858 (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
859 {
860 return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
861 " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
862 }
863
864 // Check if the layer is supported
865 // Inputs
866 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
867 const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
868 const armnn::TensorInfo& cellStateInInfo = cellStateIn.GetTensorInfo();
869
870 // Outputs
871 const armnn::TensorInfo& scratchBufferInfo = GetTensorInfoForOperand(*scratchBuffer);
872 const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
873 const armnn::TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
874 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
875
876 // Basic parameters
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100877 armnn::LstmInputParamsInfo paramsInfo;
878 paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
879 paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
880 paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
881 paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
882 paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
883 paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
884 paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
885 paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
886 paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100887
888 if(!desc.m_CifgEnabled)
889 {
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100890 paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
891 paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100892 if (params.m_CellToInputWeights != nullptr)
893 {
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100894 paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100895 }
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100896 paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100897 }
898
899 if(desc.m_ProjectionEnabled)
900 {
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100901 paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100902 if (params.m_ProjectionBias != nullptr)
903 {
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100904 paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100905 }
906 }
907
908 if(desc.m_PeepholeEnabled)
909 {
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100910 paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
911 paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
arovir01b0717b52018-09-05 17:03:25 +0100912 }
913
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100914 if (!IsLayerSupportedForAnyBackend(__func__,
915 armnn::IsLstmSupported,
916 data.m_Backends,
917 inputInfo,
918 outputStateInInfo,
919 cellStateInInfo,
920 scratchBufferInfo,
921 outputStateOutInfo,
922 cellStateOutInfo,
923 outputInfo,
924 desc,
Ferran Balaguer177fa0b2019-07-02 17:34:46 +0100925 paramsInfo))
arovir01b0717b52018-09-05 17:03:25 +0100926 {
927 return false;
928 }
929
930 // Add the layer
931 armnn::IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");
932
933 input.Connect(layer->GetInputSlot(0));
934 outputStateIn.Connect(layer->GetInputSlot(1));
935 cellStateIn.Connect(layer->GetInputSlot(2));
936
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100937 return (SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, 0, model, data) &&
938 SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 1, *layer, 1, model, data) &&
939 SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 2, *layer, 2, model, data) &&
940 SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 3, *layer, 3, model, data));
arovir01b0717b52018-09-05 17:03:25 +0100941}
942
943bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
944{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100945 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100946 if (!input.IsValid())
947 {
948 return Fail("%s: Operation has invalid inputs", __func__);
949 }
950
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100951 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100952 if (!output)
953 {
954 return Fail("%s: Could not read output 0", __func__);
955 }
956
957 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
958 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
959
Matteo Martincigh58f71092018-09-25 15:58:52 +0100960 armnn::L2NormalizationDescriptor desc;
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +0100961 desc.m_DataLayout = armnn::DataLayout::NHWC;
Matteo Martincigh58f71092018-09-25 15:58:52 +0100962
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100963 if (!IsLayerSupportedForAnyBackend(__func__,
964 armnn::IsL2NormalizationSupported,
965 data.m_Backends,
966 inputInfo,
967 outputInfo,
968 desc))
arovir01b0717b52018-09-05 17:03:25 +0100969 {
970 return false;
971 }
972
Matteo Martincigh58f71092018-09-25 15:58:52 +0100973 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
arovir01b0717b52018-09-05 17:03:25 +0100974 assert(layer != nullptr);
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +0100975 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +0100976
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100977 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100978}
979
// Converts an ANEURALNETWORKS_L2_POOL_2D operation by delegating to the shared
// 2-D pooling conversion path with the L2 pooling algorithm selected.
bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
}
984
// Converts an ANEURALNETWORKS_MAX_POOL_2D operation by delegating to the shared
// 2-D pooling conversion path with the Max pooling algorithm selected.
bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
}
989
990bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
991{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100992 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
993 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 1, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100994
995 if (!input0.IsValid() || !input1.IsValid())
996 {
997 return Fail("%s: Operation has invalid inputs", __func__);
998 }
999
1000 // The FuseActivation parameter is always the input index 2
1001 // and it should be optional
1002 ActivationFn activationFunction;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001003 if (!GetOptionalInputActivation<hal_1_0::HalPolicy>(operation, 2, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001004 {
1005 return Fail("%s: Operation has invalid inputs", __func__);
1006 }
1007
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001008 const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001009
1010 if (outputOperand == nullptr)
1011 {
1012 return false;
1013 }
1014
1015 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
1016
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001017 if (!IsLayerSupportedForAnyBackend(__func__,
1018 armnn::IsMultiplicationSupported,
1019 data.m_Backends,
1020 input0.GetTensorInfo(),
1021 input1.GetTensorInfo(),
1022 outInfo))
arovir01b0717b52018-09-05 17:03:25 +01001023 {
1024 return false;
1025 }
1026
1027 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
1028 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
1029
1030 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
1031 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
1032
1033 if (endLayer != nullptr)
1034 {
1035 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001036 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001037 }
1038 else
1039 {
1040 return Fail("%s: ProcessActivation failed", __func__);
1041 }
1042}
1043
1044bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
1045{
1046 armnn::ActivationDescriptor desc;
1047 desc.m_Function = armnn::ActivationFunction::ReLu;
1048
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001049 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001050}
1051
1052bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
1053{
1054 armnn::ActivationDescriptor desc;
1055 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1056 desc.m_A = 1.0f;
1057 desc.m_B = -1.0f;
1058
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001059 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001060}
1061
1062bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
1063{
1064 armnn::ActivationDescriptor desc;
1065 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1066 desc.m_A = 6.0f;
1067
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001068 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001069}
1070
1071bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
1072{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001073 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001074 if (!input.IsValid())
1075 {
1076 return Fail("%s: Operation has invalid inputs", __func__);
1077 }
1078
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001079 const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001080 if (!outputOperand)
1081 {
1082 return Fail("%s: Operation has no outputs", __func__);
1083 }
1084
1085 const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
1086
1087 armnn::SoftmaxDescriptor desc;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001088 if (!GetInputFloat32<hal_1_0::HalPolicy>(operation, 1, desc.m_Beta, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001089 {
1090 return Fail("%s: Operation has invalid inputs", __func__);
1091 }
1092
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001093 if (!IsLayerSupportedForAnyBackend(__func__,
1094 armnn::IsSoftmaxSupported,
1095 data.m_Backends,
1096 input.GetTensorInfo(),
1097 outInfo,
1098 desc))
arovir01b0717b52018-09-05 17:03:25 +01001099 {
1100 return false;
1101 }
1102
1103 armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
1104 assert(layer != nullptr);
1105 input.Connect(layer->GetInputSlot(0));
1106
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001107 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001108}
1109
Keith Davisa6bc52f2019-06-26 09:39:49 +01001110bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
1111{
1112 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
1113
1114 if (!input.IsValid() )
1115 {
1116 return Fail("%s: Operation has invalid inputs", __func__);
1117 }
1118
1119 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1120 unsigned int rank = inputInfo.GetNumDimensions();
1121
1122 if (rank != 4)
1123 {
1124 return Fail("%s: Only inputs with rank 4 are supported", __func__);
1125 }
1126
1127 armnn::SpaceToDepthDescriptor desc;
1128 bool dataLayoutCheck;
1129
1130 GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
1131
1132 if (desc.m_BlockSize <= 1)
1133 {
1134 return Fail("%s: Block size must be at least 1 in all dimensions");
1135 }
1136
1137 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
1138 if (!output)
1139 {
1140 return Fail("%s: Could not read output 0", __func__);
1141 }
1142
1143 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1144 if (!IsLayerSupportedForAnyBackend(__func__,
1145 armnn::IsSpaceToDepthSupported,
1146 data.m_Backends,
1147 inputInfo,
1148 outputInfo,
1149 desc))
1150 {
1151 return false;
1152 }
1153
1154 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
1155 assert(layer != nullptr);
1156 input.Connect(layer->GetInputSlot(0));
1157
1158 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
1159}
1160
arovir01b0717b52018-09-05 17:03:25 +01001161bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
1162{
1163 armnn::ActivationDescriptor desc;
1164 desc.m_Function = armnn::ActivationFunction::TanH;
1165 desc.m_A = 1.0f; // android nn does not support tanH parameters
1166 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1167
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001168 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001169}
1170
1171bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
1172{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001173 const Operand* inputOperand = GetInputOperand<hal_1_0::HalPolicy>(operation, 0, model);
1174 const Operand* requestedShapeOperand = GetInputOperand<hal_1_0::HalPolicy>(operation, 1, model);
1175 const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001176
1177 if (inputOperand == nullptr
1178 || requestedShapeOperand == nullptr
1179 || outputOperand == nullptr)
1180 {
1181 return Fail("%s: Operation has invalid inputs", __func__);
1182 }
1183
1184
1185 if (requestedShapeOperand->dimensions.size() != 1)
1186 {
1187 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
1188 __func__, requestedShapeOperand->dimensions.size());
1189 }
1190
1191 std::vector<int32_t> targetDimensions;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001192 if (!GetTensorInt32Values<hal_1_0::HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001193 {
1194 return Fail("%s: Could not read values of input 1", __func__);
1195 }
1196
1197 const Shape inputOperandShape = GetOperandShape(*inputOperand);
1198
1199 Shape requestedShape;
1200 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
1201 // function that resolves these values into a fully specified tensor shape.
1202 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
1203 {
1204 return Fail("%s: Failed to resolve the requested shape", __func__);
1205 }
1206
1207 const Shape outputOperandShape = GetOperandShape(*outputOperand);
1208 if (!SameShape(requestedShape, outputOperandShape))
1209 {
1210 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
1211 }
1212
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001213 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001214 if (!input.IsValid())
1215 {
1216 return Fail("%s: Could not read input 0", __func__);
1217 }
1218
arovir01b0717b52018-09-05 17:03:25 +01001219 armnn::ReshapeDescriptor reshapeDescriptor;
1220 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
1221 requestedShape.dimensions.data());
1222
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001223 if (!IsLayerSupportedForAnyBackend(__func__,
1224 armnn::IsReshapeSupported,
1225 data.m_Backends,
1226 input.GetTensorInfo(),
1227 reshapeDescriptor))
Matteo Martincigh265d1ad2019-01-08 18:14:53 +00001228 {
1229 return false;
1230 }
1231
arovir01b0717b52018-09-05 17:03:25 +01001232 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
1233 assert(layer != nullptr);
1234 input.Connect(layer->GetInputSlot(0));
1235
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001236 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001237}
1238
1239bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data)
1240{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001241 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001242 if (!input.IsValid())
1243 {
1244 return Fail("%s: Could not read input 0", __func__);
1245 }
1246
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001247 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001248 if (!output)
1249 {
1250 return Fail("%s: Could not read output 0", __func__);
1251 }
1252
1253 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1254 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1255
Aron Virginas-Tara5daf862019-07-01 19:07:20 +01001256 armnn::ResizeDescriptor desc;
1257 desc.m_Method = armnn::ResizeMethod::Bilinear;
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001258 desc.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +01001259
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001260 if (!IsLayerSupportedForAnyBackend(__func__,
Aron Virginas-Tara5daf862019-07-01 19:07:20 +01001261 armnn::IsResizeSupported,
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001262 data.m_Backends,
1263 inputInfo,
Aron Virginas-Tara5daf862019-07-01 19:07:20 +01001264 outputInfo,
1265 desc))
arovir01b0717b52018-09-05 17:03:25 +01001266 {
1267 return false;
1268 }
1269
Aron Virginas-Tar535607d2019-07-03 15:46:15 +01001270 if (!GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, desc.m_TargetWidth, model, data) ||
1271 !GetInputScalar<hal_1_0::HalPolicy>(operation, 2, OperandType::INT32, desc.m_TargetHeight, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001272 {
1273 return Fail("%s: Operation has invalid inputs", __func__);
1274 }
1275
Aron Virginas-Tara5daf862019-07-01 19:07:20 +01001276 armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(desc);
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001277
arovir01b0717b52018-09-05 17:03:25 +01001278 assert(layer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +01001279
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001280 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1281 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +01001282
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001283 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001284
1285}
1286
1287} // namespace hal_1_0
Matteo Martincigh58f71092018-09-25 15:58:52 +01001288} // namespace armnn_driver