//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "HalPolicy.hpp"

#include <armnn/Optional.hpp>

#include "FullyConnected.hpp"
#include "Utils.hpp"

namespace armnn_driver
{
namespace hal_1_0
{

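// ConvertOperation() dispatches each HAL 1.0 operation type to the dedicated
// Convert* method below; operation types without a converter are reported via Fail().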
18bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
19{
20 switch (operation.type)
21 {
22 case V1_0::OperationType::ADD:
23 return ConvertAdd(operation, model, data);
24 case V1_0::OperationType::AVERAGE_POOL_2D:
25 return ConvertAveragePool2d(operation, model, data);
26 case V1_0::OperationType::CONCATENATION:
27 return ConvertConcatenation(operation, model, data);
28 case V1_0::OperationType::CONV_2D:
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +010029 return ConvertConv2d(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +010030 case V1_0::OperationType::DEPTHWISE_CONV_2D:
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +010031 return ConvertDepthwiseConv2d(operation, model, data);
David Monahanacf479a2019-05-29 14:27:04 +010032 case V1_0::OperationType::DEQUANTIZE:
33 return ConvertDequantize(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +010034 case V1_0::OperationType::FLOOR:
35 return ConvertFloor(operation, model, data);
36 case V1_0::OperationType::FULLY_CONNECTED:
37 return ConvertFullyConnected(operation, model, data);
38 case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
39 return ConvertLocalResponseNormalization(operation, model, data);
40 case V1_0::OperationType::LOGISTIC:
41 return ConvertLogistic(operation, model, data);
42 case V1_0::OperationType::LSTM:
43 return ConvertLstm(operation, model, data);
44 case V1_0::OperationType::L2_NORMALIZATION:
45 return ConvertL2Normalization(operation, model, data);
46 case V1_0::OperationType::L2_POOL_2D:
47 return ConvertL2Pool2d(operation, model, data);
48 case V1_0::OperationType::MAX_POOL_2D:
49 return ConvertMaxPool2d(operation, model, data);
50 case V1_0::OperationType::MUL:
51 return ConvertMul(operation, model, data);
52 case V1_0::OperationType::RELU:
53 return ConvertReLu(operation, model, data);
54 case V1_0::OperationType::RELU1:
55 return ConvertReLu1(operation, model, data);
56 case V1_0::OperationType::RELU6:
57 return ConvertReLu6(operation, model, data);
58 case V1_0::OperationType::SOFTMAX:
59 return ConvertSoftmax(operation, model, data);
Keith Davisa6bc52f2019-06-26 09:39:49 +010060 case V1_0::OperationType::SPACE_TO_DEPTH:
61 return ConvertSpaceToDepth(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +010062 case V1_0::OperationType::TANH:
63 return ConvertTanH(operation, model, data);
64 case V1_0::OperationType::RESHAPE:
65 return ConvertReshape(operation, model, data);
66 case V1_0::OperationType::RESIZE_BILINEAR:
67 return ConvertResizeBilinear(operation, model, data);
68 default:
69 return Fail("%s: Operation type %s not supported in ArmnnDriver",
70 __func__, toString(operation.type).c_str());
71 }
72}
73
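// Most converters below follow the same pattern: read and validate the operation's
// inputs and output operand, query backend support through FORWARD_LAYER_SUPPORT_FUNC,
// add the corresponding ArmNN layer to the network, connect its input slots, and
// finally register the output slot with SetupAndTrackLayerOutputSlot.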
bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertAdd()");

    LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always input index 2, and it is optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<hal_1_0::HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsAdditionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo0,
                               inputInfo1,
                               outputInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
    armnn::IConnectableLayer* const endLayer   = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
        return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}

bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertAveragePool2d()");
    return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
}

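// Concatenation is converted in several steps: low-rank inputs are first expanded to
// 3-D (see RequiresReshape), a permutation is applied when the requested axis is not
// one ArmNN can concatenate over directly, and any reshape/permutation is undone on
// the output so the result matches the shape the model expects.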
bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertConcatenation()");

    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar<hal_1_0::HalPolicy>(operation, numInputTensors, OperandType::INT32, concatDim, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    armnn::TensorInfo  outputInfo  = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    //    https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle>   inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    bool inputsHaveBeenReshaped      = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const Operand* const operand = GetInputOperand<hal_1_0::HalPolicy>(operation, i, model);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        LayerInputHandle operandInputHandle =
            ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, i, model, data);

        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                    *data.m_Network,
                    operandInputHandle,
                    reshapeInfo
                );

            // Point to the reshape operation rather than the input operation
            operandShape       = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    BOOST_ASSERT(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0]});
        }
    }

    // Check if a permutation is required and get the pair of permutations required for the concatenation.
    // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
        std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    bool needPermute =
        CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    if (needPermute)
    {
        outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
    }

    outputInfo.SetShape(outputShape);

    // this is a no-op for identity swizzles; otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    SwizzleInputs(*data.m_Network, inputHandles, inputShapes, permutationPair.first);

    // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor concatDescriptor;

    try
    {
        // The concat descriptor is always created across the only supported concat dimension
        // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
        concatDescriptor =
            armnn::CreateDescriptorForConcatenation(inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (const armnn::Exception& error)
    {
        return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
                   [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConcatSupported,
                               data.m_Backends,
                               isSupported,
                               inputTensorInfos,
                               outputInfo,
                               concatDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (needPermute)
    {
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*data.m_Network,
                                                                   layer->GetOutputSlot(0),
                                                                   permutationPair.second);
        layer = &deswizzleLayer;
    }

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2] }));
        }

        layer = &AddReshapeLayer(
                *data.m_Network,
                layer->GetOutputSlot(0),
                afterConcatInfo
            );
    }

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

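// Convolution and depthwise convolution are delegated to the shared template helpers
// (::ConvertConv2d / ::ConvertDepthwiseConv2d) defined in the driver's common
// conversion utilities.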
bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertConv2d()");
    return ::ConvertConv2d<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertDepthwiseConv2d()");
    return ::ConvertDepthwiseConv2d<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertDequantize()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid input", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDequantizeSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               GetTensorInfoForOperand(*outputOperand));
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertFloor()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFloorSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outputInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

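// FULLY_CONNECTED accepts inputs of rank greater than 2; the input is flattened to the
// 2-D shape expected by the ArmNN layer (FlattenFullyConnectedInput) and, when needed,
// an explicit reshape layer is inserted in front of the fully connected layer.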
bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertFullyConnected()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 1, model, data); // 2D
    ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 2, model, data); // 1D

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias    = biasPin.GetConstTensor();
    armnn::TensorInfo reshapedInfo = inputInfo;

    try
    {
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weights.GetInfo().GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction<hal_1_0::HalPolicy>(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled           = true;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsFullyConnectedSupported,
                               data.m_Backends,
                               isSupported,
                               reshapedInfo,
                               outputInfo,
                               weights.GetInfo(),
                               bias.GetInfo(),
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddFullyConnectedLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}

bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
                                                  const Model& model,
                                                  ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertLocalResponseNormalization()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::NormalizationDescriptor descriptor;
    descriptor.m_DataLayout      = armnn::DataLayout::NHWC;
    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
    descriptor.m_NormMethodType  = armnn::NormalizationAlgorithmMethod::LocalBrightness;

    if (!input.IsValid() ||
        !GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
        !GetInputFloat32<hal_1_0::HalPolicy>(operation, 2, descriptor.m_K, model, data) ||
        !GetInputFloat32<hal_1_0::HalPolicy>(operation, 3, descriptor.m_Alpha, model, data) ||
        !GetInputFloat32<hal_1_0::HalPolicy>(operation, 4, descriptor.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // ArmNN expects normSize to be the full size of the normalization
    // window rather than the radius as in AndroidNN.
    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsNormalizationSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertLogistic()");

    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::Sigmoid;

    return ConvertToActivation<hal_1_0::HalPolicy>(operation, __func__, desc, model, data);
}

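// LSTM uses 23 inputs and 4 outputs as defined by the NN API. Inputs 1, 5, 9-12, 16 and
// 17 are optional; whether CIFG, peephole connections and projection are enabled is
// inferred from which of those optional tensors are present.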
bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertLstm()");

    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0: input", __func__);
    }
    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    LayerInputHandle outputStateIn = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 18, model, data);
    if (!outputStateIn.IsValid())
    {
        return Fail("%s: Could not read input 18: outputStateIn", __func__);
    }
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    LayerInputHandle cellStateIn = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 19, model, data);
    if (!cellStateIn.IsValid())
    {
        return Fail("%s: Could not read input 19: cellStateIn", __func__);
    }

    // Get the mandatory input tensors:
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 2, model, data);
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToCellWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 3, model, data);
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 4, model, data);
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 6, model, data);
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToCellWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 7, model, data);
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 8, model, data);
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin forgetGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 13, model, data);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 14, model, data);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin outputGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation, 15, model, data);

    if (!inputToForgetWeightsPin.IsValid() ||
        !inputToCellWeightsPin.IsValid() ||
        !inputToOutputWeightsPin.IsValid() ||
        !recurrentToForgetWeightsPin.IsValid() ||
        !recurrentToCellWeightsPin.IsValid() ||
        !recurrentToOutputWeightsPin.IsValid() ||
        !forgetGateBiasPin.IsValid() ||
        !cellBiasPin.IsValid() ||
        !outputGateBiasPin.IsValid())
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the optional input tensors:
    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    const ConstTensorPin inputToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    const ConstTensorPin recurrentToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  5,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  9,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  10,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  11,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin inputGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  12,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    const ConstTensorPin projectionWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  16,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    const ConstTensorPin projectionBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_0::HalPolicy>(operation,
                                                                  17,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
        (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
        (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
        (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
        (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
        (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
        (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
        (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the mandatory input scalars (actually 1-D tensors of size 1):
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    ActivationFn activation;
    float cellClip;
    float projClip;
    if (!GetInputActivationFunctionFromTensor<hal_1_0::HalPolicy>(operation, 20, activation, model, data) ||
        !GetInputScalar<hal_1_0::HalPolicy>(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
        !GetInputScalar<hal_1_0::HalPolicy>(operation, 22, OperandType::FLOAT32, projClip, model, data))
    {
        return Fail("%s: Operation has invalid scalar inputs", __func__);
    }

    // Outputs:
    // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
    //     with CIFG, or [batch_size, num_units * 3] without CIFG.
    const Operand* scratchBuffer = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!scratchBuffer)
    {
        return Fail("%s: Could not read output 0: scratchBuffer", __func__);
    }
    // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    const Operand* outputStateOut = GetOutputOperand<hal_1_0::HalPolicy>(operation, 1, model);
    if (!outputStateOut)
    {
        return Fail("%s: Could not read output 1: outputStateOut", __func__);
    }
    // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    const Operand* cellStateOut = GetOutputOperand<hal_1_0::HalPolicy>(operation, 2, model);
    if (!cellStateOut)
    {
        return Fail("%s: Could not read output 2: cellStateOut", __func__);
    }
    // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //     effectively the same as the current “output state (out)” value.
    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 3, model);
    if (!output)
    {
        return Fail("%s: Could not read output 3: output", __func__);
    }

    // set the params structure for the AddLstmLayer call
    armnn::LstmInputParams params;
    params.m_InputToInputWeights      = inputToInputWeightsPin.GetConstTensorPtr();
    params.m_InputToForgetWeights     = inputToForgetWeightsPin.GetConstTensorPtr();
    params.m_InputToCellWeights       = inputToCellWeightsPin.GetConstTensorPtr();
    params.m_InputToOutputWeights     = inputToOutputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToInputWeights  = recurrentToInputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToCellWeights   = recurrentToCellWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
    params.m_CellToInputWeights       = cellToInputWeightsPin.GetConstTensorPtr();
    params.m_CellToForgetWeights      = cellToForgetWeightsPin.GetConstTensorPtr();
    params.m_CellToOutputWeights      = cellToOutputWeightsPin.GetConstTensorPtr();
    params.m_InputGateBias            = inputGateBiasPin.GetConstTensorPtr();
    params.m_ForgetGateBias           = forgetGateBiasPin.GetConstTensorPtr();
    params.m_CellBias                 = cellBiasPin.GetConstTensorPtr();
    params.m_OutputGateBias           = outputGateBiasPin.GetConstTensorPtr();
    params.m_ProjectionWeights        = projectionWeightsPin.GetConstTensorPtr();
    params.m_ProjectionBias           = projectionBiasPin.GetConstTensorPtr();

    // set the layer descriptor
    armnn::LstmDescriptor desc;
    desc.m_ActivationFunc    = activation;
    desc.m_ClippingThresCell = cellClip;
    desc.m_ClippingThresProj = projClip;
    desc.m_CifgEnabled       = (params.m_InputToInputWeights == nullptr ||
                                params.m_RecurrentToInputWeights == nullptr ||
                                params.m_InputGateBias == nullptr);
    desc.m_PeepholeEnabled   = (params.m_CellToForgetWeights != nullptr ||
                                params.m_CellToOutputWeights != nullptr);
    desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);

    // validate the optional input groups
    if (desc.m_CifgEnabled &&
        (params.m_InputToInputWeights != nullptr ||
         params.m_RecurrentToInputWeights != nullptr ||
         params.m_InputGateBias != nullptr))
    {
        return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
                    " and input gate bias must be provided", __func__);
    }

    if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
    {
        return Fail("%s: projection bias should not be provided without projection weights", __func__);
    }

    if (desc.m_PeepholeEnabled &&
        (params.m_CellToForgetWeights == nullptr ||
         params.m_CellToOutputWeights == nullptr ||
         (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
    {
        return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
                    " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
    }

    // Check if the layer is supported
    // Inputs
    const armnn::TensorInfo& inputInfo         = input.GetTensorInfo();
    const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
    const armnn::TensorInfo& cellStateInInfo   = cellStateIn.GetTensorInfo();

    // Outputs
    const armnn::TensorInfo& scratchBufferInfo  = GetTensorInfoForOperand(*scratchBuffer);
    const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
    const armnn::TensorInfo& cellStateOutInfo   = GetTensorInfoForOperand(*cellStateOut);
    const armnn::TensorInfo& outputInfo         = GetTensorInfoForOperand(*output);

    // Basic parameters
    armnn::LstmInputParamsInfo paramsInfo;
    paramsInfo.m_InputToForgetWeights     = &(params.m_InputToForgetWeights->GetInfo());
    paramsInfo.m_InputToCellWeights       = &(params.m_InputToCellWeights->GetInfo());
    paramsInfo.m_InputToOutputWeights     = &(params.m_InputToOutputWeights->GetInfo());
    paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
    paramsInfo.m_RecurrentToCellWeights   = &(params.m_RecurrentToCellWeights->GetInfo());
    paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
    paramsInfo.m_ForgetGateBias           = &(params.m_ForgetGateBias->GetInfo());
    paramsInfo.m_CellBias                 = &(params.m_CellBias->GetInfo());
    paramsInfo.m_OutputGateBias           = &(params.m_OutputGateBias->GetInfo());

    // Optional parameters
    if (!desc.m_CifgEnabled)
    {
        paramsInfo.m_InputToInputWeights     = &(params.m_InputToInputWeights->GetInfo());
        paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
        if (params.m_CellToInputWeights != nullptr)
        {
            paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
        }
        paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
    }

    if (desc.m_ProjectionEnabled)
    {
        paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
        if (params.m_ProjectionBias != nullptr)
        {
            paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
        }
    }

    if (desc.m_PeepholeEnabled)
    {
        paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
        paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsLstmSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputStateInInfo,
                               cellStateInInfo,
                               scratchBufferInfo,
                               outputStateOutInfo,
                               cellStateOutInfo,
                               outputInfo,
                               desc,
                               paramsInfo);
    if (!isSupported)
    {
        return false;
    }

    // Add the layer
    armnn::IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");

    input.Connect(layer->GetInputSlot(0));
    outputStateIn.Connect(layer->GetInputSlot(1));
    cellStateIn.Connect(layer->GetInputSlot(2));

    return (SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, 0, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 1, *layer, 1, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 2, *layer, 2, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 3, *layer, 3, model, data));
}

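// L2_NORMALIZATION in HAL 1.0 operates on 4-D NHWC tensors, so the descriptor's data
// layout is fixed to NHWC here.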
bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertL2Normalization()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::L2NormalizationDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsL2NormalizationSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertL2Pool2d()");
    return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
}

bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertMaxPool2d()");
    return ConvertPooling2d<hal_1_0::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
}

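// MUL follows the same pattern as ADD above, including broadcasting of mismatched
// input shapes via BroadcastTensor and fusing the optional activation through
// ProcessActivation.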
bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertMul()");

    LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always input index 2, and it is optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation<hal_1_0::HalPolicy>(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);

    if (outputOperand == nullptr)
    {
        return false;
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMultiplicationSupported,
                               data.m_Backends,
                               isSupported,
                               input0.GetTensorInfo(),
                               input1.GetTensorInfo(),
                               outputInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
    armnn::IConnectableLayer* const endLayer   = ProcessActivation(outputInfo, activationFunction, startLayer, data);

    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();

    if (endLayer != nullptr)
    {
        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
        return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *endLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}

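// The ReLU variants are delegated to the shared activation helpers (::ConvertReLu,
// ::ConvertReLu1, ::ConvertReLu6), which set up the appropriate ArmNN activation
// descriptor.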
bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertReLu()");
    return ::ConvertReLu<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertReLu1()");
    return ::ConvertReLu1<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertReLu6()");
    return ::ConvertReLu6<hal_1_0::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertSoftmax()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::SoftmaxDescriptor desc;
    if (!GetInputFloat32<hal_1_0::HalPolicy>(operation, 1, desc.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsSoftmaxSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

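// SPACE_TO_DEPTH rearranges blocks of spatial data into the channel dimension:
// a 1x4x4x1 input with block size 2 becomes 1x2x2x4. Input 0 is a rank-4 tensor
// and input 1 is the scalar block size.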
bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertSpaceToDepth()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }

    armnn::SpaceToDepthDescriptor desc;

    if (!GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (desc.m_BlockSize <= 1)
    {
        return Fail("%s: Block size must be greater than 1", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsSpaceToDepthSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

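// TANH is likewise delegated to the shared, HAL-version-agnostic ::ConvertTanH helper.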
bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertTanH()");
    return ::ConvertTanH<hal_1_0::HalPolicy>(operation, model, data);
}

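// RESHAPE: input 1 is a 1-D tensor holding the requested shape, which may use -1
// as a wildcard for a single inferred dimension (e.g. reshaping a 1x2x3x4 tensor
// with {-1, 12} yields 2x12). The resolved shape must match the declared output.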
bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertReshape()");

    const Operand* inputOperand = GetInputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    const Operand* requestedShapeOperand = GetInputOperand<hal_1_0::HalPolicy>(operation, 1, model);
    const Operand* outputOperand = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);

    if (inputOperand == nullptr
        || requestedShapeOperand == nullptr
        || outputOperand == nullptr)
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (requestedShapeOperand->dimensions.size() != 1)
    {
        return Fail("%s: Input 1 expected to be one-dimensional (found %zu dimensions)",
                    __func__, requestedShapeOperand->dimensions.size());
    }

    std::vector<int32_t> targetDimensions;
    if (!GetTensorInt32Values<hal_1_0::HalPolicy>(*requestedShapeOperand, targetDimensions, model, data))
    {
        return Fail("%s: Could not read values of input 1", __func__);
    }

    const Shape inputOperandShape = GetOperandShape(*inputOperand);

    Shape requestedShape;
    // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
    // function that resolves these values into a fully specified tensor shape.
    if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
    {
        return Fail("%s: Failed to resolve the requested shape", __func__);
    }

    const Shape outputOperandShape = GetOperandShape(*outputOperand);
    if (!SameShape(requestedShape, outputOperandShape))
    {
        return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
    }

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
                                                         requestedShape.dimensions.data());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               reshapeDescriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

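// RESIZE_BILINEAR: inputs 1 and 2 are INT32 scalars giving the output width and
// height; this converter assumes the NHWC data layout.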
bool HalPolicy::ConvertResizeBilinear(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_0::HalPolicy::ConvertResizeBilinear()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_0::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_0::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::ResizeDescriptor desc;
    desc.m_Method = armnn::ResizeMethod::Bilinear;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Read the requested output width and height before querying backend support,
    // so that the capability check sees the fully populated descriptor.
    if (!GetInputScalar<hal_1_0::HalPolicy>(operation, 1, OperandType::INT32, desc.m_TargetWidth, model, data) ||
        !GetInputScalar<hal_1_0::HalPolicy>(operation, 2, OperandType::INT32, desc.m_TargetHeight, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsResizeSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(desc);
    assert(layer != nullptr);

    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_0::HalPolicy>(operation, 0, *layer, model, data);
}

} // namespace hal_1_0
} // namespace armnn_driver