//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "HalPolicy.hpp"

#include <armnn/Optional.hpp>

#include "FullyConnected.hpp"
#include "OutputShapeUtils.hpp"
#include "Utils.hpp"

namespace armnn_driver
{
namespace hal_1_0
{

bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
{
    switch (operation.type)
    {
        case V1_0::OperationType::ADD:
            return ConvertAdd(operation, model, data);
        case V1_0::OperationType::AVERAGE_POOL_2D:
            return ConvertAveragePool2d(operation, model, data);
        case V1_0::OperationType::CONCATENATION:
            return ConvertConcatenation(operation, model, data);
        case V1_0::OperationType::CONV_2D:
            return ConvertConv2d(operation, model, data);
        case V1_0::OperationType::DEPTHWISE_CONV_2D:
            return ConvertDepthwiseConv2d(operation, model, data);
        case V1_0::OperationType::DEQUANTIZE:
            return ConvertDequantize(operation, model, data);
        case V1_0::OperationType::FLOOR:
            return ConvertFloor(operation, model, data);
        case V1_0::OperationType::FULLY_CONNECTED:
            return ConvertFullyConnected(operation, model, data);
        case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
            return ConvertLocalResponseNormalization(operation, model, data);
        case V1_0::OperationType::LOGISTIC:
            return ConvertLogistic(operation, model, data);
        case V1_0::OperationType::LSTM:
            return ConvertLstm(operation, model, data);
        case V1_0::OperationType::L2_NORMALIZATION:
            return ConvertL2Normalization(operation, model, data);
        case V1_0::OperationType::L2_POOL_2D:
            return ConvertL2Pool2d(operation, model, data);
        case V1_0::OperationType::MAX_POOL_2D:
            return ConvertMaxPool2d(operation, model, data);
        case V1_0::OperationType::MUL:
            return ConvertMul(operation, model, data);
        case V1_0::OperationType::RELU:
            return ConvertReLu(operation, model, data);
        case V1_0::OperationType::RELU1:
            return ConvertReLu1(operation, model, data);
        case V1_0::OperationType::RELU6:
            return ConvertReLu6(operation, model, data);
        case V1_0::OperationType::SOFTMAX:
            return ConvertSoftmax(operation, model, data);
        case V1_0::OperationType::SPACE_TO_DEPTH:
            return ConvertSpaceToDepth(operation, model, data);
        case V1_0::OperationType::TANH:
            return ConvertTanH(operation, model, data);
        case V1_0::OperationType::RESHAPE:
            return ConvertReshape(operation, model, data);
        case V1_0::OperationType::RESIZE_BILINEAR:
            return ConvertResizeBilinear(operation, model, data);
        default:
            return Fail("%s: Operation type %s not supported in ArmnnDriver",
                        __func__, toString(operation.type).c_str());
    }
}

bool HalPolicy::ValidateConv2dParameters(const Operation& operation)
{
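    // In the NNAPI 1.0 scheme, CONV_2D is expected to carry either 10 inputs (explicit padding:
    // pad left/right/top/bottom, strides and fused activation) or 7 inputs (implicit padding scheme).
    // Anything else is treated as malformed.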
    if (operation.inputs.size() != 10 && operation.inputs.size() != 7)
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }
    return true;
}

bool HalPolicy::ValidateDepthwiseConv2dParameters(const Operation& operation)
{
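    // Similarly, DEPTHWISE_CONV_2D is expected to carry either 11 inputs (explicit padding) or
    // 8 inputs (implicit padding scheme); the extra input relative to CONV_2D is the depth multiplier.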
    if (operation.inputs.size() != 11 && operation.inputs.size() != 8)
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }
    return true;
}

bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertAdd()");

    LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always at input index 2 and it is optional
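    // (a FuseCode scalar: NONE/RELU/RELU1/RELU6); ProcessActivation() below appends the
    // corresponding activation layer after the addition when one is requested.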
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<hal_1_0::HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output shapes are not supported in this HAL version", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsAdditionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo0,
                               inputInfo1,
                               outputInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
        return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}

bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertAveragePool2d()");
    return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
}

bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertConcatenation()");

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar<hal_1_0::HalPolicy>(operation, numInputTensors, OperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    // https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
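    // For example, a concatDim of -1 on a 4-D output resolves to axis 3.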
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const Operand* const operand = GetInputOperand<hal_1_0::HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        LayerInputHandle operandInputHandle =
            ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, i, model, data);

        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
                );

            // Point to the reshape operation rather than the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    BOOST_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check whether a permutation is required and get the pair of permutations needed for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
        std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute =
        CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // This is a no-op for identity swizzles; otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    SwizzleInputs(*data.m_Network, inputHandles, inputShapes, permutationPair.first);

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension,
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor =
            armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (const armnn::Exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate that the output shape is correct, given the input shapes, based on the
    // only valid concat dimension, which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
                   [](const LayerInputHandle& h) -> const armnn::TensorInfo* { return &h.GetTensorInfo(); });

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConcatSupported,
                               data.m_Backends,
                               isSupported,
                               inputTensorInfos,
                               outputInfo,
                               concatDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // Connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        // Add a permutation layer and connect the output to it; the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
                                                                   layer->GetOutputSlot(0),
                                                                   permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape, knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        layer = &AddReshapeLayer(
                *data.m_Network,
                layer->GetOutputSlot(0),
                afterConcatInfo
            );
    }

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertConv2d()");
    return ValidateConv2dParameters(operation) && ::ConvertConv2d<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertDepthwiseConv2d()");
    return ValidateDepthwiseConv2dParameters(operation) &&
           ::ConvertDepthwiseConv2d<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertDequantize()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid input", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo))
    {
        ALOGD("Output shape not set, will infer from input");
        outputInfo.SetShape(input.GetTensorInfo().GetShape());
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDequantizeSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outputInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertFloor()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFloorSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               GetTensorInfoForOperand(*outputOperand));
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertFullyConnected()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        ALOGD("Output shape not set, will infer from inputs");
        outputInfo.SetShape(inputInfo.GetShape());
    }

    // ArmNN does not currently support non-fixed weights or bias
    ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 1, model, data); // 2D
    ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

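    // FlattenFullyConnectedInput collapses the input to the 2-D [batch_size, input_size] shape the 2-D
    // weights expect; for example, an input of shape [1, 4, 1, 5] with weights of shape [num_units, 20]
    // would be flattened to [1, 20]. Any failure surfaces as an exception handled below.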
    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    // Ensure that the bias quantization scale is consistent (to within 1%) with the product of the
    // input and weights scales (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<hal_1_0::HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFullyConnectedSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               outputInfo,
                               weights.GetInfo(),
                               bias.GetInfo(),
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation,
                                                                0,
                                                                *endLayer,
                                                                model,
                                                                data,
                                                                armnn::Optional<armnn::TensorInfo>(outputInfo));
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}

bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
                                                  const Model& model,
                                                  ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertLocalResponseNormalization()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::NormalizationDescriptor descriptor;

    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
    descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;

    if (!input.IsValid() ||
        !GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
        !GetInputFloat32<hal_1_0::HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
        !GetInputFloat32<hal_1_0::HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
        !GetInputFloat32<hal_1_0::HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // ArmNN expects normSize to be the full size of the normalization
    // window, whereas AndroidNN supplies the radius.
    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
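    // For example, an AndroidNN radius of 2 becomes an ArmNN window size of 5 here.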

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsNormalizationSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertLogistic()");

    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::Sigmoid;

    return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
}

bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertLstm()");

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0: input", __func__);
    }
    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    LayerInputHandle outputStateIn = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 18, model, data);
    if (!outputStateIn.IsValid())
    {
        return Fail("%s: Could not read input 18: outputStateIn", __func__);
    }
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    LayerInputHandle cellStateIn = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 19, model, data);
    if (!cellStateIn.IsValid())
    {
        return Fail("%s: Could not read input 19: cellStateIn", __func__);
    }

    // Get the mandatory input tensors:
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 2, model, data);
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToCellWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 3, model, data);
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 4, model, data);
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 6, model, data);
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToCellWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 7, model, data);
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 8, model, data);
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin forgetGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 13, model, data);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 14, model, data);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin outputGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 15, model, data);

    if (!inputToForgetWeightsPin.IsValid() ||
        !inputToCellWeightsPin.IsValid() ||
        !inputToOutputWeightsPin.IsValid() ||
        !recurrentToForgetWeightsPin.IsValid() ||
        !recurrentToCellWeightsPin.IsValid() ||
        !recurrentToOutputWeightsPin.IsValid() ||
        !forgetGateBiasPin.IsValid() ||
        !cellBiasPin.IsValid() ||
        !outputGateBiasPin.IsValid())
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the optional input tensors:
    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    const ConstTensorPin inputToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    const ConstTensorPin recurrentToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  5,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  9,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  10,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  11,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin inputGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  12,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    const ConstTensorPin projectionWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  16,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    const ConstTensorPin projectionBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  17,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
        (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
        (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
        (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
        (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
        (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
        (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
        (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the mandatory input scalars (actually 1-D tensors of size 1):
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    ActivationFn activation;
    float cellClip;
    float projClip;
    if (!GetInputActivationFunctionFromTensor<hal_1_0::HalPolicy>(operation, 20, activation, model, data) ||
        !GetInputScalar<hal_1_0::HalPolicy>(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
        !GetInputScalar<hal_1_0::HalPolicy>(operation, 22, OperandType::FLOAT32, projClip, model, data))
    {
        return Fail("%s: Operation has invalid scalar inputs", __func__);
    }

    // Outputs:
    // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
    //     with CIFG, or [batch_size, num_units * 3] without CIFG.
    const Operand* scratchBuffer = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!scratchBuffer)
    {
        return Fail("%s: Could not read output 0: scratchBuffer", __func__);
    }
    // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    const Operand* outputStateOut = GetOutputOperand<hal_1_0::HalPolicy>(operation, 1, model);
    if (!outputStateOut)
    {
        return Fail("%s: Could not read output 1: outputStateOut", __func__);
    }
    // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    const Operand* cellStateOut = GetOutputOperand<hal_1_0::HalPolicy>(operation, 2, model);
    if (!cellStateOut)
    {
        return Fail("%s: Could not read output 2: cellStateOut", __func__);
    }
    // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //     effectively the same as the current “output state (out)” value.
    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 3, model);
    if (!output)
    {
        return Fail("%s: Could not read output 3: output", __func__);
    }

    // set the params structure for the AddLstmLayer call
    armnn::LstmInputParams params;
    params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
    params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
    params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
    params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
    params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
    params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
    params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
    params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
    params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
    params.m_CellBias = cellBiasPin.GetConstTensorPtr();
    params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
    params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
    params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();

    // set the layer descriptor
    armnn::LstmDescriptor desc;
    desc.m_ActivationFunc = activation;
    desc.m_ClippingThresCell = cellClip;
    desc.m_ClippingThresProj = projClip;
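    // The optional tensors that were supplied determine which LSTM variants are enabled: CIFG (coupled
    // input and forget gate, used when the input gate weights/bias are absent), peephole connections,
    // and the output projection.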
    desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
                          params.m_RecurrentToInputWeights == nullptr ||
                          params.m_InputGateBias == nullptr);
    desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
                              params.m_CellToOutputWeights != nullptr);
    desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);

    // validate the optional input groups
    if (desc.m_CifgEnabled &&
        (params.m_InputToInputWeights != nullptr ||
         params.m_RecurrentToInputWeights != nullptr ||
         params.m_InputGateBias != nullptr))
    {
        return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
                    " and input gate bias must be provided", __func__);
    }

    if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
    {
        return Fail("%s: projection bias should not be provided without projection weights", __func__);
    }

    if (desc.m_PeepholeEnabled &&
        (params.m_CellToForgetWeights == nullptr ||
         params.m_CellToOutputWeights == nullptr ||
         (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
    {
        return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
                    " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
    }

    // Check if the layer is supported
    // Inputs
    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
    const armnn::TensorInfo& cellStateInInfo = cellStateIn.GetTensorInfo();

    // Outputs
    const armnn::TensorInfo& scratchBufferInfo = GetTensorInfoForOperand(*scratchBuffer);
    const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
    const armnn::TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Basic parameters
    armnn::LstmInputParamsInfo paramsInfo;
    paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
    paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
    paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
    paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
    paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
    paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
    paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
    paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
    paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());

    // Optional parameters
    if (!desc.m_CifgEnabled)
    {
        paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
        paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
        if (params.m_CellToInputWeights != nullptr)
        {
            paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
        }
        paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
    }

    if (desc.m_ProjectionEnabled)
    {
        paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
        if (params.m_ProjectionBias != nullptr)
        {
            paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
        }
    }

    if (desc.m_PeepholeEnabled)
    {
        paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
        paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsLstmSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputStateInInfo,
                               cellStateInInfo,
                               scratchBufferInfo,
                               outputStateOutInfo,
                               cellStateOutInfo,
                               outputInfo,
                               desc,
                               paramsInfo);
    if (!isSupported)
    {
        return false;
    }

    // Add the layer
    armnn::IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");

    input.Connect(layer->GetInputSlot(0));
    outputStateIn.Connect(layer->GetInputSlot(1));
    cellStateIn.Connect(layer->GetInputSlot(2));

    return (SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, 0, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 1, *layer, 1, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 2, *layer, 2, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 3, *layer, 3, model, data));
}

bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertL2Normalization()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        ALOGD("Output shape not set, will infer from inputs");
        outputInfo.SetShape(inputInfo.GetShape());
    }

    armnn::L2NormalizationDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsL2NormalizationSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertL2Pool2d()");
    return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
}

bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertMaxPool2d()");
    return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
}

bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertMul()");

    LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always at input index 2 and it is optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<hal_1_0::HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (outputOperand == nullptr)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMultiplicationSupported,
                               data.m_Backends,
                               isSupported,
                               input0.GetTensorInfo(),
                               input1.GetTensorInfo(),
                               outInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
        return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
1145
1146bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
1147{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001148 ALOGV("hal_1_0::HalPolicy::ConvertReLu()");
Sadik Armagan61113162019-07-25 09:09:40 +01001149 return ::ConvertReLu<hal_1_0::HalPolicy>(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001150}
1151
1152bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
1153{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001154 ALOGV("hal_1_0::HalPolicy::ConvertReLu1()");
Sadik Armagan61113162019-07-25 09:09:40 +01001155 return ::ConvertReLu1<hal_1_0::HalPolicy>(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001156}
1157
1158bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
1159{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001160 ALOGV("hal_1_0::HalPolicy::ConvertReLu6()");
Sadik Armagan61113162019-07-25 09:09:40 +01001161 return ::ConvertReLu6<hal_1_0::HalPolicy>(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +01001162}

bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertSoftmax()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

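    // Softmax normalises along the innermost dimension and leaves the tensor shape unchanged, so if
    // the model left the output shape dynamic it is safe to infer it from the input shape below.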
    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo))
    {
        ALOGD("Output shape not set, will infer from input");
        outputInfo.SetShape(input.GetTensorInfo().GetShape());
    }

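    // NNAPI defines SOFTMAX as exp(beta * x_i) / sum_j exp(beta * x_j), so input 1 carries the
    // scalar beta (commonly 1.0f); larger values sharpen the output distribution. Illustrative
    // note only.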
    armnn::SoftmaxDescriptor desc;
    if (!GetInputFloat32<hal_1_0::HalPolicy>(operation, 1, desc.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsSoftmaxSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertSpaceToDepth()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }

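    // SPACE_TO_DEPTH rearranges spatial blocks into channels: an NHWC input of shape [N, H, W, C]
    // with block size b produces [N, H/b, W/b, C*b*b]. For example (illustrative only), a
    // [1, 4, 4, 1] tensor with b = 2 becomes [1, 2, 2, 4].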
    armnn::SpaceToDepthDescriptor desc;

    if (!GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (desc.m_BlockSize <= 1)
    {
        return Fail("%s: Block size must be greater than 1", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsSpaceToDepthSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertTanH()");
    return ::ConvertTanH<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertReshape()");

    const Operand* inputOperand = GetInputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    const Operand* requestedShapeOperand = GetInputOperand<hal_1_0::HalPolicy>(operation, 1, model);
    const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);

    if (inputOperand == nullptr
        || requestedShapeOperand == nullptr
        || outputOperand == nullptr)
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (requestedShapeOperand->dimensions.size() != 1)
    {
        return Fail("%s: Input 1 expected to be one-dimensional (found %zu dimensions)",
                    __func__, requestedShapeOperand->dimensions.size());
    }

    std::vector<int32_t> targetDimensions;
    if (!GetTensorInt32Values<hal_1_0::HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
    {
        return Fail("%s: Could not read values of input 1", __func__);
    }

    const Shape inputOperandShape = GetOperandShape(*inputOperand);

    Shape requestedShape;
    // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
    // function that resolves these values into a fully specified tensor shape.
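    // For example (illustrative only), an input of shape [2, 3, 4] with targetDimensions { -1, 6 }
    // resolves to a requested shape of [4, 6]; at most one target dimension may be -1.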
    if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
    {
        return Fail("%s: Failed to resolve the requested shape", __func__);
    }

    const Shape outputOperandShape = GetOperandShape(*outputOperand);
    if (!SameShape(requestedShape, outputOperandShape))
    {
        return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
    }

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
                                                         requestedShape.dimensions.data());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertResizeBilinear()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

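    // HAL 1.0 RESIZE_BILINEAR operates on NHWC tensors and only rescales the spatial dimensions;
    // the new width and height come from scalar inputs 1 and 2. The descriptor is fully populated
    // before the backend support query below so the backends see the real target size.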
    armnn::ResizeDescriptor desc;
    desc.m_Method     = armnn::ResizeMethod::Bilinear;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    if (!GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, desc.m_TargetWidth, model, data) ||
        !GetInputScalar<hal_1_0::HalPolicy>(operation, 2, OperandType::INT32, desc.m_TargetHeight, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsResizeSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(desc);
    assert(layer != nullptr);

    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

} // namespace hal_1_0
} // namespace armnn_driver