blob: 13c93277b7a3cce43310a01037e548c5f6e722da [file] [log] [blame]
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
5
6#include "HalPolicy.hpp"
7
Matthew Benthamf61c2702019-04-23 16:43:27 +01008#include <armnn/Optional.hpp>
9
10#include "FullyConnected.hpp"
arovir015602b192018-10-04 16:15:02 +010011
arovir01b0717b52018-09-05 17:03:25 +010012namespace armnn_driver
13{
14namespace hal_1_0
15{
16
17bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
18{
19 switch (operation.type)
20 {
21 case V1_0::OperationType::ADD:
22 return ConvertAdd(operation, model, data);
23 case V1_0::OperationType::AVERAGE_POOL_2D:
24 return ConvertAveragePool2d(operation, model, data);
25 case V1_0::OperationType::CONCATENATION:
26 return ConvertConcatenation(operation, model, data);
27 case V1_0::OperationType::CONV_2D:
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +010028 return ValidateConv2dParameters(operation) &&
29 ConvertConv2d<hal_1_0::HalPolicy>(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +010030 case V1_0::OperationType::DEPTHWISE_CONV_2D:
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +010031 return ValidateDepthwiseConv2dParameters(operation) &&
32 ConvertDepthwiseConv2d<hal_1_0::HalPolicy>(operation, model, data);
David Monahanacf479a2019-05-29 14:27:04 +010033 case V1_0::OperationType::DEQUANTIZE:
34 return ConvertDequantize(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +010035 case V1_0::OperationType::FLOOR:
36 return ConvertFloor(operation, model, data);
37 case V1_0::OperationType::FULLY_CONNECTED:
38 return ConvertFullyConnected(operation, model, data);
39 case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
40 return ConvertLocalResponseNormalization(operation, model, data);
41 case V1_0::OperationType::LOGISTIC:
42 return ConvertLogistic(operation, model, data);
43 case V1_0::OperationType::LSTM:
44 return ConvertLstm(operation, model, data);
45 case V1_0::OperationType::L2_NORMALIZATION:
46 return ConvertL2Normalization(operation, model, data);
47 case V1_0::OperationType::L2_POOL_2D:
48 return ConvertL2Pool2d(operation, model, data);
49 case V1_0::OperationType::MAX_POOL_2D:
50 return ConvertMaxPool2d(operation, model, data);
51 case V1_0::OperationType::MUL:
52 return ConvertMul(operation, model, data);
53 case V1_0::OperationType::RELU:
54 return ConvertReLu(operation, model, data);
55 case V1_0::OperationType::RELU1:
56 return ConvertReLu1(operation, model, data);
57 case V1_0::OperationType::RELU6:
58 return ConvertReLu6(operation, model, data);
59 case V1_0::OperationType::SOFTMAX:
60 return ConvertSoftmax(operation, model, data);
Keith Davisa6bc52f2019-06-26 09:39:49 +010061 case V1_0::OperationType::SPACE_TO_DEPTH:
62 return ConvertSpaceToDepth(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +010063 case V1_0::OperationType::TANH:
64 return ConvertTanH(operation, model, data);
65 case V1_0::OperationType::RESHAPE:
66 return ConvertReshape(operation, model, data);
67 case V1_0::OperationType::RESIZE_BILINEAR:
68 return ConvertResizeBilinear(operation, model, data);
69 default:
70 return Fail("%s: Operation type %s not supported in ArmnnDriver",
71 __func__, toString(operation.type).c_str());
72 }
73}
74
Mike Kellyb5fdf382019-06-11 16:35:25 +010075bool HalPolicy::ValidateConv2dParameters(const Operation &operation)
76{
77 if (operation.inputs.size() != 10 && operation.inputs.size() != 7)
78 {
79 return Fail("%s: Unsupported number of operation inputs", __func__);
80 }
81 return true;
82}
83
84bool HalPolicy::ValidateDepthwiseConv2dParameters(const Operation &operation)
85{
86 if (operation.inputs.size() != 11 && operation.inputs.size() != 8)
87 {
88 return Fail("%s: Unsupported number of operation inputs", __func__);
89 }
90 return true;
91}
92
arovir01b0717b52018-09-05 17:03:25 +010093bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
94{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +010095 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
96 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 1, model, data);
arovir01b0717b52018-09-05 17:03:25 +010097
98 if (!input0.IsValid() || !input1.IsValid())
99 {
100 return Fail("%s: Operation has invalid inputs", __func__);
101 }
102
103 // The FuseActivation parameter is always the input index 2
104 // and it should be optional
105 ActivationFn activationFunction;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100106 if (!GetOptionalInputActivation<hal_1_0::HalPolicy>(operation, 2, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100107 {
108 return Fail("%s: Operation has invalid inputs", __func__);
109 }
110
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100111 const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100112 if (!outputOperand)
113 {
114 return false;
115 }
116
117 const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
118
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100119 if (!IsLayerSupportedForAnyBackend(__func__,
120 armnn::IsAdditionSupported,
121 data.m_Backends,
122 input0.GetTensorInfo(),
123 input1.GetTensorInfo(),
124 outInfo))
arovir01b0717b52018-09-05 17:03:25 +0100125 {
126 return false;
127 }
128
129 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
130 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
131
132 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
133 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
134
135 if (endLayer != nullptr)
136 {
137 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100138 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100139 }
140 else
141 {
142 return Fail("%s: ProcessActivation failed", __func__);
143 }
144}
145
146bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
147{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100148 return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100149}
150
// Converts a CONCATENATION operation. The pipeline is order-sensitive:
// rank-expand low-rank inputs, resolve a (possibly negative) concat axis,
// permute when the axis is unsupported by the backend, build the concat
// descriptor, connect inputs, then undo any permutation/reshape on the output.
bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
{
    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar<hal_1_0::HalPolicy>(operation, numInputTensors, OperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }


    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    // https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    // After normalization the axis must be a valid dimension index of the output.
    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    // Tracks whether any input needed a rank-expanding reshape, and by how
    // many leading dimensions all inputs (and the output) were expanded.
    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const Operand* const operand = GetInputOperand<hal_1_0::HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        LayerInputHandle operandInputHandle =
            ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, i, model, data);

        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
            );

            // Point to the reshape operation rather then the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    BOOST_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check if permutations is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
        std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute =
        CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // this is no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    SwizzleInputs(*data.m_Network, inputHandles, inputShapes, permutationPair.first);

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor =
            armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (const armnn::Exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
        [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsConcatSupported,
                                       data.m_Backends,
                                       inputTensorInfos,
                                       outputInfo,
                                       concatDescriptor))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
                                                                   layer->GetOutputSlot(0),
                                                                   permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        layer = &AddReshapeLayer(
                *data.m_Network,
                layer->GetOutputSlot(0),
                afterConcatInfo
        );
    }

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}
375
David Monahanacf479a2019-05-29 14:27:04 +0100376bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
377{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100378 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
David Monahanacf479a2019-05-29 14:27:04 +0100379
380 if (!input.IsValid())
381 {
382 return Fail("%s: Operation has invalid input", __func__);
383 }
384
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100385 const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
David Monahanacf479a2019-05-29 14:27:04 +0100386 if (!outputOperand)
387 {
388 return Fail("%s: Operation has invalid outputs", __func__);
389 }
390
391 if (!IsLayerSupportedForAnyBackend(__func__,
392 armnn::IsDequantizeSupported,
393 data.m_Backends,
394 input.GetTensorInfo(),
395 GetTensorInfoForOperand(*outputOperand)))
396 {
397 return false;
398 }
399
400 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
401 assert(layer != nullptr);
402 input.Connect(layer->GetInputSlot(0));
403
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100404 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
David Monahanacf479a2019-05-29 14:27:04 +0100405}
406
arovir01b0717b52018-09-05 17:03:25 +0100407bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
408{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100409 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100410 if (!input.IsValid())
411 {
412 return Fail("%s: Operation has invalid inputs", __func__);
413 }
414
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100415 const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100416 if (!outputOperand)
417 {
418 return Fail("%s: Operation has invalid outputs", __func__);
419 }
420
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100421 if (!IsLayerSupportedForAnyBackend(__func__,
422 armnn::IsFloorSupported,
423 data.m_Backends,
424 input.GetTensorInfo(),
425 GetTensorInfoForOperand(*outputOperand)))
arovir01b0717b52018-09-05 17:03:25 +0100426 {
427 return false;
428 }
429
430 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
431 assert(layer != nullptr);
432 input.Connect(layer->GetInputSlot(0));
433
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100434 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100435}
436
437bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
438{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100439 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100440 if (!input.IsValid())
441 {
442 return Fail("%s: Operation has invalid inputs", __func__);
443 }
444
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100445 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100446 if (!output)
447 {
448 return Fail("%s: Could not read output 0", __func__);
449 }
450
451 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
452 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
453
454 // ArmNN does not currently support non-fixed weights or bias
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100455 ConstTensorPin weightsPin =
456 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 1, model, data); // 2D
457 ConstTensorPin biasPin =
458 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 2, model, data); // 1D
arovir01b0717b52018-09-05 17:03:25 +0100459
460 if (!weightsPin.IsValid() || !biasPin.IsValid())
461 {
462 return Fail("%s: Operation has invalid inputs", __func__);
463 }
464
465 armnn::ConstTensor weights = weightsPin.GetConstTensor();
466 armnn::ConstTensor bias = biasPin.GetConstTensor();
arovir01b0717b52018-09-05 17:03:25 +0100467 armnn::TensorInfo reshapedInfo = inputInfo;
Matthew Benthamf61c2702019-04-23 16:43:27 +0100468
469 try
arovir01b0717b52018-09-05 17:03:25 +0100470 {
Matthew Benthamf61c2702019-04-23 16:43:27 +0100471 reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
472 } catch (const std::exception &e) {
473 return Fail("%s: %s", __func__, e.what());
arovir01b0717b52018-09-05 17:03:25 +0100474 }
475
476 // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
477 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);
478
479 ActivationFn activationFunction;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100480 if (!GetInputActivationFunction<hal_1_0::HalPolicy>(operation, 3, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100481 {
482 return Fail("%s: Operation has invalid inputs", __func__);
483 }
484
485 armnn::FullyConnectedDescriptor desc;
486 desc.m_TransposeWeightMatrix = true;
487 desc.m_BiasEnabled = true;
488
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100489 if (!IsLayerSupportedForAnyBackend(__func__,
490 armnn::IsFullyConnectedSupported,
491 data.m_Backends,
492 reshapedInfo,
493 outputInfo,
494 weights.GetInfo(),
495 bias.GetInfo(),
496 desc))
arovir01b0717b52018-09-05 17:03:25 +0100497 {
498 return false;
499 }
500
Matteo Martincighba01f372019-05-14 13:28:21 +0100501 armnn::IConnectableLayer* startLayer =
502 data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
arovir01b0717b52018-09-05 17:03:25 +0100503 armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
504
505 if (endLayer != nullptr)
506 {
507 if (inputInfo.GetNumDimensions() > 2U)
508 {
509 armnn::ReshapeDescriptor reshapeDescriptor;
510 reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();
511
512 armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
513 assert(reshapeLayer != nullptr);
514 input.Connect(reshapeLayer->GetInputSlot(0));
515 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
516 reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
517 }
518 else
519 {
520 input.Connect(startLayer->GetInputSlot(0));
521 }
522
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100523 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100524 }
525 else
526 {
527 return Fail("%s: ProcessActivation failed", __func__);
528 }
529}
530
531bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
532 const Model& model,
533 ConversionData& data)
534{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100535 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100536 if (!input.IsValid())
537 {
538 return Fail("%s: Operation has invalid inputs", __func__);
539 }
540
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100541 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100542 if (!output)
543 {
544 return Fail("%s: Could not read output 0", __func__);
545 }
546
narpra012fb804a2018-10-22 14:52:32 +0100547 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
arovir01b0717b52018-09-05 17:03:25 +0100548 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
549
arovir01b0717b52018-09-05 17:03:25 +0100550 armnn::NormalizationDescriptor descriptor;
551
narpra012fb804a2018-10-22 14:52:32 +0100552 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +0100553 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
narpra012fb804a2018-10-22 14:52:32 +0100554 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
arovir01b0717b52018-09-05 17:03:25 +0100555
556 if (!input.IsValid() ||
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100557 !GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
558 !GetInputFloat32<hal_1_0::HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
559 !GetInputFloat32<hal_1_0::HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
560 !GetInputFloat32<hal_1_0::HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100561 {
562 return Fail("%s: Operation has invalid inputs", __func__);
563 }
564
565 // ArmNN expects normSize to be the full size of the normalization
566 // window rather than the radius as in AndroidNN.
567 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
568
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100569 if (!IsLayerSupportedForAnyBackend(__func__,
570 armnn::IsNormalizationSupported,
571 data.m_Backends,
572 inputInfo,
573 outputInfo,
574 descriptor))
arovir01b0717b52018-09-05 17:03:25 +0100575 {
576 return false;
577 }
578
579
580 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
581 assert(layer != nullptr);
narpra012fb804a2018-10-22 14:52:32 +0100582 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +0100583
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100584 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100585}
586
587bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
588{
589 armnn::ActivationDescriptor desc;
590 desc.m_Function = armnn::ActivationFunction::Sigmoid;
591
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100592 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100593}
594
595bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
596{
597 // Inputs:
598 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
599 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100600 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100601 if (!input.IsValid())
602 {
603 return Fail("%s: Could not read input 0: input", __func__);
604 }
605 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100606 LayerInputHandle outputStateIn = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 18, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100607 if (!outputStateIn.IsValid())
608 {
609 return Fail("%s: Could not read input 18: outputStateIn", __func__);
610 }
611 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100612 LayerInputHandle cellStateIn = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 19, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100613 if (!cellStateIn.IsValid())
614 {
615 return Fail("%s: Could not read input 19: cellStateIn", __func__);
616 }
617
618 // Get the mandatory input tensors:
619 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
620 // [num_units, input_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100621 const ConstTensorPin inputToForgetWeightsPin =
622 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 2, model, data);
623 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
624 // [num_units, input_size].
625 const ConstTensorPin inputToCellWeightsPin =
626 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 3, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100627 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
628 // [num_units, input_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100629 const ConstTensorPin inputToOutputWeightsPin =
630 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 4, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100631 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
632 // [num_units, output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100633 const ConstTensorPin recurrentToForgetWeightsPin =
634 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 6, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100635 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
636 // [num_units, output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100637 const ConstTensorPin recurrentToCellWeightsPin =
638 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 7, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100639 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
640 // [num_units, output_size].
641 const ConstTensorPin recurrentToOutputWeightsPin =
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100642 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 8, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100643 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100644 const ConstTensorPin forgetGateBiasPin =
645 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 13, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100646 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100647 const ConstTensorPin cellBiasPin =
648 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 14, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100649 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100650 const ConstTensorPin outputGateBiasPin =
651 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 15, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100652
653 if (!inputToForgetWeightsPin.IsValid() ||
654 !inputToCellWeightsPin.IsValid() ||
655 !inputToOutputWeightsPin.IsValid() ||
656 !recurrentToForgetWeightsPin.IsValid() ||
657 !recurrentToCellWeightsPin.IsValid() ||
658 !recurrentToOutputWeightsPin.IsValid() ||
659 !forgetGateBiasPin.IsValid() ||
660 !cellBiasPin.IsValid() ||
661 !outputGateBiasPin.IsValid())
662 {
663 return Fail("%s: Operation has invalid tensor inputs", __func__);
664 }
665
666 // Get the optional input tensors:
667 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
668 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100669 const ConstTensorPin inputToInputWeightsPin =
670 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
671 1,
672 model,
673 data,
674 g_DontPermute,
675 nullptr,
676 true);
677
arovir01b0717b52018-09-05 17:03:25 +0100678 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
679 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
680 // “num_units”), or the second dimension of the “projection_weights”, if defined.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100681 const ConstTensorPin recurrentToInputWeightsPin =
682 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
683 5,
684 model,
685 data,
686 g_DontPermute,
687 nullptr,
688 true);
689
arovir01b0717b52018-09-05 17:03:25 +0100690 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100691 const ConstTensorPin cellToInputWeightsPin =
692 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
693 9,
694 model,
695 data,
696 g_DontPermute,
697 nullptr,
698 true);
699
arovir01b0717b52018-09-05 17:03:25 +0100700 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100701 const ConstTensorPin cellToForgetWeightsPin =
702 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
703 10,
704 model,
705 data,
706 g_DontPermute,
707 nullptr,
708 true);
709
arovir01b0717b52018-09-05 17:03:25 +0100710 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100711 const ConstTensorPin cellToOutputWeightsPin =
712 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
713 11,
714 model,
715 data,
716 g_DontPermute,
717 nullptr,
718 true);
719
arovir01b0717b52018-09-05 17:03:25 +0100720 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100721 const ConstTensorPin inputGateBiasPin =
722 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
723 12,
724 model,
725 data,
726 g_DontPermute,
727 nullptr,
728 true);
729
arovir01b0717b52018-09-05 17:03:25 +0100730 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
731 // [output_size, num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100732 const ConstTensorPin projectionWeightsPin =
733 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
734 16,
735 model,
736 data,
737 g_DontPermute,
738 nullptr,
739 true);
740
arovir01b0717b52018-09-05 17:03:25 +0100741 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100742 const ConstTensorPin projectionBiasPin =
743 ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
744 17,
745 model,
746 data,
747 g_DontPermute,
748 nullptr,
749 true);
arovir01b0717b52018-09-05 17:03:25 +0100750
751 if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
752 (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
753 (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
754 (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
755 (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
756 (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
757 (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
758 (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
759 {
760 return Fail("%s: Operation has invalid tensor inputs", __func__);
761 }
762
763 // Get the mandatory input scalars (actually 1-D tensors of size 1):
764 // 20: The activation function: A value indicating the activation function:
765 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
766 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
767 // If set to 0.0 then clipping is disabled.
768 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
769 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
770 ActivationFn activation;
771 float cellClip;
772 float projClip;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100773 if (!GetInputActivationFunctionFromTensor<hal_1_0::HalPolicy>(operation, 20, activation, model, data) ||
774 !GetInputScalar<hal_1_0::HalPolicy>(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
775 !GetInputScalar<hal_1_0::HalPolicy>(operation, 22, OperandType::FLOAT32, projClip, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100776 {
777 return Fail("%s: Operation has invalid scalar inputs", __func__);
778 }
779
780 // Outputs:
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100781 // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
782 // with CIFG, or [batch_size, num_units * 3] without CIFG.
783 const Operand* scratchBuffer = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100784 if (!scratchBuffer)
785 {
786 return Fail("%s: Could not read output 0: scratchBuffer", __func__);
787 }
788 // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100789 const Operand* outputStateOut = GetOutputOperand<hal_1_0::HalPolicy>(operation, 1, model);
arovir01b0717b52018-09-05 17:03:25 +0100790 if (!outputStateOut)
791 {
792 return Fail("%s: Could not read output 1: outputStateOut", __func__);
793 }
794 // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100795 const Operand* cellStateOut = GetOutputOperand<hal_1_0::HalPolicy>(operation, 2, model);
arovir01b0717b52018-09-05 17:03:25 +0100796 if (!cellStateOut)
797 {
798 return Fail("%s: Could not read output 2: cellStateOut", __func__);
799 }
800 // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
801 // effectively the same as the current “output state (out)” value.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100802 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 3, model);
arovir01b0717b52018-09-05 17:03:25 +0100803 if (!output)
804 {
805 return Fail("%s: Could not read output 3: output", __func__);
806 }
807
808 // set the params structure for the AddLstmLayer call
809 armnn::LstmInputParams params;
810 params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
811 params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
812 params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
813 params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
814 params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
815 params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
816 params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
817 params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
818 params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
819 params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
820 params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
821 params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
822 params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
823 params.m_CellBias = cellBiasPin.GetConstTensorPtr();
824 params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
825 params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
826 params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
827
828 // set the layer descriptor
829 armnn::LstmDescriptor desc;
830 desc.m_ActivationFunc = activation;
831 desc.m_ClippingThresCell = cellClip;
832 desc.m_ClippingThresProj = projClip;
833 desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
834 params.m_RecurrentToInputWeights == nullptr ||
835 params.m_InputGateBias == nullptr);
836 desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
837 params.m_CellToOutputWeights != nullptr);
838 desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
839
840 // validate the optional input groups
841 if (desc.m_CifgEnabled &&
842 (params.m_InputToInputWeights != nullptr ||
843 params.m_RecurrentToInputWeights != nullptr ||
844 params.m_InputGateBias != nullptr))
845 {
846 return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
847 " and input gate bias must be provided", __func__);
848 }
849
850 if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
851 {
852 return Fail("%s: projection bias should not be provided without projection weights", __func__);
853 }
854
855 if (desc.m_PeepholeEnabled &&
856 (params.m_CellToForgetWeights == nullptr ||
857 params.m_CellToOutputWeights == nullptr ||
858 (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
859 {
860 return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
861 " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
862 }
863
864 // Check if the layer is supported
865 // Inputs
866 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
867 const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
868 const armnn::TensorInfo& cellStateInInfo = cellStateIn.GetTensorInfo();
869
870 // Outputs
871 const armnn::TensorInfo& scratchBufferInfo = GetTensorInfoForOperand(*scratchBuffer);
872 const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
873 const armnn::TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
874 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
875
876 // Basic parameters
877 const armnn::TensorInfo& inputToForgetWeights = params.m_InputToForgetWeights->GetInfo();
878 const armnn::TensorInfo& inputToCellWeights = params.m_InputToCellWeights->GetInfo();
879 const armnn::TensorInfo& inputToOutputWeights = params.m_InputToOutputWeights->GetInfo();
880 const armnn::TensorInfo& recurrentToForgetWeights = params.m_RecurrentToForgetWeights->GetInfo();
881 const armnn::TensorInfo& recurrentToCellWeights = params.m_RecurrentToCellWeights->GetInfo();
882 const armnn::TensorInfo& recurrentToOutputWeights = params.m_RecurrentToOutputWeights->GetInfo();
883 const armnn::TensorInfo& forgetGateBias = params.m_ForgetGateBias->GetInfo();
884 const armnn::TensorInfo& cellBias = params.m_CellBias->GetInfo();
885 const armnn::TensorInfo& outputGateBias = params.m_OutputGateBias->GetInfo();
886
887 //Optional parameters
888 const armnn::TensorInfo* inputToInputWeights = nullptr;
889 const armnn::TensorInfo* recurrentToInputWeights = nullptr;
890 const armnn::TensorInfo* cellToInputWeights = nullptr;
891 const armnn::TensorInfo* inputGateBias = nullptr;
892 const armnn::TensorInfo* projectionWeights = nullptr;
893 const armnn::TensorInfo* projectionBias = nullptr;
894 const armnn::TensorInfo* cellToForgetWeights = nullptr;
895 const armnn::TensorInfo* cellToOutputWeights = nullptr;
896
897 if(!desc.m_CifgEnabled)
898 {
899 inputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
900 recurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
901 if (params.m_CellToInputWeights != nullptr)
902 {
903 cellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
904 }
905 inputGateBias = &(params.m_InputGateBias->GetInfo());
906 }
907
908 if(desc.m_ProjectionEnabled)
909 {
910 projectionWeights = &(params.m_ProjectionWeights->GetInfo());
911 if (params.m_ProjectionBias != nullptr)
912 {
913 projectionBias = &(params.m_ProjectionBias->GetInfo());
914 }
915 }
916
917 if(desc.m_PeepholeEnabled)
918 {
919 cellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
920 cellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
921 }
922
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100923 if (!IsLayerSupportedForAnyBackend(__func__,
924 armnn::IsLstmSupported,
925 data.m_Backends,
926 inputInfo,
927 outputStateInInfo,
928 cellStateInInfo,
929 scratchBufferInfo,
930 outputStateOutInfo,
931 cellStateOutInfo,
932 outputInfo,
933 desc,
934 inputToForgetWeights,
935 inputToCellWeights,
936 inputToOutputWeights,
937 recurrentToForgetWeights,
938 recurrentToCellWeights,
939 recurrentToOutputWeights,
940 forgetGateBias,
941 cellBias,
942 outputGateBias,
943 inputToInputWeights,
944 recurrentToInputWeights,
945 cellToInputWeights,
946 inputGateBias,
947 projectionWeights,
948 projectionBias,
949 cellToForgetWeights,
950 cellToOutputWeights))
arovir01b0717b52018-09-05 17:03:25 +0100951 {
952 return false;
953 }
954
955 // Add the layer
956 armnn::IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");
957
958 input.Connect(layer->GetInputSlot(0));
959 outputStateIn.Connect(layer->GetInputSlot(1));
960 cellStateIn.Connect(layer->GetInputSlot(2));
961
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100962 return (SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, 0, model, data) &&
963 SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 1, *layer, 1, model, data) &&
964 SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 2, *layer, 2, model, data) &&
965 SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 3, *layer, 3, model, data));
arovir01b0717b52018-09-05 17:03:25 +0100966}
967
968bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
969{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100970 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100971 if (!input.IsValid())
972 {
973 return Fail("%s: Operation has invalid inputs", __func__);
974 }
975
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100976 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100977 if (!output)
978 {
979 return Fail("%s: Could not read output 0", __func__);
980 }
981
982 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
983 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
984
Matteo Martincigh58f71092018-09-25 15:58:52 +0100985 armnn::L2NormalizationDescriptor desc;
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +0100986 desc.m_DataLayout = armnn::DataLayout::NHWC;
Matteo Martincigh58f71092018-09-25 15:58:52 +0100987
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100988 if (!IsLayerSupportedForAnyBackend(__func__,
989 armnn::IsL2NormalizationSupported,
990 data.m_Backends,
991 inputInfo,
992 outputInfo,
993 desc))
arovir01b0717b52018-09-05 17:03:25 +0100994 {
995 return false;
996 }
997
Matteo Martincigh58f71092018-09-25 15:58:52 +0100998 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
arovir01b0717b52018-09-05 17:03:25 +0100999 assert(layer != nullptr);
Matteo Martincigh5e0ed9f2018-10-01 09:26:32 +01001000 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +01001001
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001002 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001003}
1004
1005bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
1006{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001007 return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001008}
1009
1010bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
1011{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001012 return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001013}
1014
1015bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
1016{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001017 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
1018 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 1, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001019
1020 if (!input0.IsValid() || !input1.IsValid())
1021 {
1022 return Fail("%s: Operation has invalid inputs", __func__);
1023 }
1024
1025 // The FuseActivation parameter is always the input index 2
1026 // and it should be optional
1027 ActivationFn activationFunction;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001028 if (!GetOptionalInputActivation<hal_1_0::HalPolicy>(operation, 2, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001029 {
1030 return Fail("%s: Operation has invalid inputs", __func__);
1031 }
1032
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001033 const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001034
1035 if (outputOperand == nullptr)
1036 {
1037 return false;
1038 }
1039
1040 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
1041
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001042 if (!IsLayerSupportedForAnyBackend(__func__,
1043 armnn::IsMultiplicationSupported,
1044 data.m_Backends,
1045 input0.GetTensorInfo(),
1046 input1.GetTensorInfo(),
1047 outInfo))
arovir01b0717b52018-09-05 17:03:25 +01001048 {
1049 return false;
1050 }
1051
1052 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
1053 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
1054
1055 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
1056 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
1057
1058 if (endLayer != nullptr)
1059 {
1060 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001061 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001062 }
1063 else
1064 {
1065 return Fail("%s: ProcessActivation failed", __func__);
1066 }
1067}
1068
1069bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
1070{
1071 armnn::ActivationDescriptor desc;
1072 desc.m_Function = armnn::ActivationFunction::ReLu;
1073
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001074 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001075}
1076
1077bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
1078{
1079 armnn::ActivationDescriptor desc;
1080 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1081 desc.m_A = 1.0f;
1082 desc.m_B = -1.0f;
1083
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001084 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001085}
1086
1087bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
1088{
1089 armnn::ActivationDescriptor desc;
1090 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1091 desc.m_A = 6.0f;
1092
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001093 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001094}
1095
1096bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
1097{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001098 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001099 if (!input.IsValid())
1100 {
1101 return Fail("%s: Operation has invalid inputs", __func__);
1102 }
1103
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001104 const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001105 if (!outputOperand)
1106 {
1107 return Fail("%s: Operation has no outputs", __func__);
1108 }
1109
1110 const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
1111
1112 armnn::SoftmaxDescriptor desc;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001113 if (!GetInputFloat32<hal_1_0::HalPolicy>(operation, 1, desc.m_Beta, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001114 {
1115 return Fail("%s: Operation has invalid inputs", __func__);
1116 }
1117
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001118 if (!IsLayerSupportedForAnyBackend(__func__,
1119 armnn::IsSoftmaxSupported,
1120 data.m_Backends,
1121 input.GetTensorInfo(),
1122 outInfo,
1123 desc))
arovir01b0717b52018-09-05 17:03:25 +01001124 {
1125 return false;
1126 }
1127
1128 armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
1129 assert(layer != nullptr);
1130 input.Connect(layer->GetInputSlot(0));
1131
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001132 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001133}
1134
Keith Davisa6bc52f2019-06-26 09:39:49 +01001135bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
1136{
1137 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
1138
1139 if (!input.IsValid() )
1140 {
1141 return Fail("%s: Operation has invalid inputs", __func__);
1142 }
1143
1144 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1145 unsigned int rank = inputInfo.GetNumDimensions();
1146
1147 if (rank != 4)
1148 {
1149 return Fail("%s: Only inputs with rank 4 are supported", __func__);
1150 }
1151
1152 armnn::SpaceToDepthDescriptor desc;
1153 bool dataLayoutCheck;
1154
1155 GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
1156
1157 if (desc.m_BlockSize <= 1)
1158 {
1159 return Fail("%s: Block size must be at least 1 in all dimensions");
1160 }
1161
1162 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
1163 if (!output)
1164 {
1165 return Fail("%s: Could not read output 0", __func__);
1166 }
1167
1168 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1169 if (!IsLayerSupportedForAnyBackend(__func__,
1170 armnn::IsSpaceToDepthSupported,
1171 data.m_Backends,
1172 inputInfo,
1173 outputInfo,
1174 desc))
1175 {
1176 return false;
1177 }
1178
1179 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
1180 assert(layer != nullptr);
1181 input.Connect(layer->GetInputSlot(0));
1182
1183 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
1184}
1185
arovir01b0717b52018-09-05 17:03:25 +01001186bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
1187{
1188 armnn::ActivationDescriptor desc;
1189 desc.m_Function = armnn::ActivationFunction::TanH;
1190 desc.m_A = 1.0f; // android nn does not support tanH parameters
1191 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1192
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001193 return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001194}
1195
1196bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
1197{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001198 const Operand* inputOperand = GetInputOperand<hal_1_0::HalPolicy>(operation, 0, model);
1199 const Operand* requestedShapeOperand = GetInputOperand<hal_1_0::HalPolicy>(operation, 1, model);
1200 const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001201
1202 if (inputOperand == nullptr
1203 || requestedShapeOperand == nullptr
1204 || outputOperand == nullptr)
1205 {
1206 return Fail("%s: Operation has invalid inputs", __func__);
1207 }
1208
1209
1210 if (requestedShapeOperand->dimensions.size() != 1)
1211 {
1212 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
1213 __func__, requestedShapeOperand->dimensions.size());
1214 }
1215
1216 std::vector<int32_t> targetDimensions;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001217 if (!GetTensorInt32Values<hal_1_0::HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001218 {
1219 return Fail("%s: Could not read values of input 1", __func__);
1220 }
1221
1222 const Shape inputOperandShape = GetOperandShape(*inputOperand);
1223
1224 Shape requestedShape;
1225 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
1226 // function that resolves these values into a fully specified tensor shape.
1227 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
1228 {
1229 return Fail("%s: Failed to resolve the requested shape", __func__);
1230 }
1231
1232 const Shape outputOperandShape = GetOperandShape(*outputOperand);
1233 if (!SameShape(requestedShape, outputOperandShape))
1234 {
1235 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
1236 }
1237
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001238 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001239 if (!input.IsValid())
1240 {
1241 return Fail("%s: Could not read input 0", __func__);
1242 }
1243
arovir01b0717b52018-09-05 17:03:25 +01001244 armnn::ReshapeDescriptor reshapeDescriptor;
1245 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
1246 requestedShape.dimensions.data());
1247
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001248 if (!IsLayerSupportedForAnyBackend(__func__,
1249 armnn::IsReshapeSupported,
1250 data.m_Backends,
1251 input.GetTensorInfo(),
1252 reshapeDescriptor))
Matteo Martincigh265d1ad2019-01-08 18:14:53 +00001253 {
1254 return false;
1255 }
1256
arovir01b0717b52018-09-05 17:03:25 +01001257 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
1258 assert(layer != nullptr);
1259 input.Connect(layer->GetInputSlot(0));
1260
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001261 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001262}
1263
1264bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data)
1265{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001266 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001267 if (!input.IsValid())
1268 {
1269 return Fail("%s: Could not read input 0", __func__);
1270 }
1271
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001272 const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +01001273 if (!output)
1274 {
1275 return Fail("%s: Could not read output 0", __func__);
1276 }
1277
1278 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1279 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1280
Aron Virginas-Tara5daf862019-07-01 19:07:20 +01001281 armnn::ResizeDescriptor desc;
1282 desc.m_Method = armnn::ResizeMethod::Bilinear;
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001283 desc.m_DataLayout = armnn::DataLayout::NHWC;
arovir01b0717b52018-09-05 17:03:25 +01001284
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001285 if (!IsLayerSupportedForAnyBackend(__func__,
Aron Virginas-Tara5daf862019-07-01 19:07:20 +01001286 armnn::IsResizeSupported,
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +01001287 data.m_Backends,
1288 inputInfo,
Aron Virginas-Tara5daf862019-07-01 19:07:20 +01001289 outputInfo,
1290 desc))
arovir01b0717b52018-09-05 17:03:25 +01001291 {
1292 return false;
1293 }
1294
Aron Virginas-Tar535607d2019-07-03 15:46:15 +01001295 if (!GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, desc.m_TargetWidth, model, data) ||
1296 !GetInputScalar<hal_1_0::HalPolicy>(operation, 2, OperandType::INT32, desc.m_TargetHeight, model, data))
arovir01b0717b52018-09-05 17:03:25 +01001297 {
1298 return Fail("%s: Operation has invalid inputs", __func__);
1299 }
1300
Aron Virginas-Tara5daf862019-07-01 19:07:20 +01001301 armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(desc);
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001302
arovir01b0717b52018-09-05 17:03:25 +01001303 assert(layer != nullptr);
arovir01b0717b52018-09-05 17:03:25 +01001304
Mohamed Nour Abouelseoud81afa302018-10-29 14:32:55 +00001305 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1306 input.Connect(layer->GetInputSlot(0));
arovir01b0717b52018-09-05 17:03:25 +01001307
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +01001308 return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001309
1310}
1311
1312} // namespace hal_1_0
Matteo Martincigh58f71092018-09-25 15:58:52 +01001313} // namespace armnn_driver