blob: 8502640ce8f7cd1553d3c72b154b9472751c8497 [file] [log] [blame]
Mike Kellyb5fdf382019-06-11 16:35:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "HalPolicy.hpp"
7
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +01008#include "Utils.hpp"
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01009
Mike Kellyb5fdf382019-06-11 16:35:25 +010010#include "../1.0/HalPolicy.hpp"
11#include "../1.1/HalPolicy.hpp"
12
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010013#include <DataLayoutIndexed.hpp>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +010014#include <Half.hpp>
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010015
16#include <cmath>
17
Mike Kellyb5fdf382019-06-11 16:35:25 +010018namespace armnn_driver
19{
20namespace hal_1_2
21{
22
23bool HandledByV1_0(V1_2::OperationType operationType)
24{
25 switch (static_cast<V1_0::OperationType>(operationType))
26 {
27 case V1_0::OperationType::ADD:
Mike Kellyb5fdf382019-06-11 16:35:25 +010028 case V1_0::OperationType::CONCATENATION:
29 case V1_0::OperationType::DEPTH_TO_SPACE:
30 case V1_0::OperationType::DEQUANTIZE:
31 case V1_0::OperationType::EMBEDDING_LOOKUP:
32 case V1_0::OperationType::FLOOR:
33 case V1_0::OperationType::FULLY_CONNECTED:
34 case V1_0::OperationType::HASHTABLE_LOOKUP:
35 case V1_0::OperationType::L2_NORMALIZATION:
Mike Kellyb5fdf382019-06-11 16:35:25 +010036 case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
37 case V1_0::OperationType::LOGISTIC:
38 case V1_0::OperationType::LSH_PROJECTION:
Mike Kellyb5fdf382019-06-11 16:35:25 +010039 case V1_0::OperationType::MUL:
Mike Kellyb5fdf382019-06-11 16:35:25 +010040 case V1_0::OperationType::RESHAPE:
Mike Kellyb5fdf382019-06-11 16:35:25 +010041 case V1_0::OperationType::RNN:
Mike Kellyb5fdf382019-06-11 16:35:25 +010042 case V1_0::OperationType::SVDF:
Mike Kellyb5fdf382019-06-11 16:35:25 +010043 case V1_0::OperationType::OEM_OPERATION:
44 return true;
45 default:
46 return false;
47 }
48}
49
50bool HandledByV1_1(V1_2::OperationType operationType)
51{
52 if (HandledByV1_0(operationType))
53 {
54 return true;
55 }
56 switch (static_cast<V1_1::OperationType>(operationType))
57 {
58 case V1_1::OperationType::BATCH_TO_SPACE_ND:
59 case V1_1::OperationType::DIV:
60 case V1_1::OperationType::MEAN:
Mike Kellyb5fdf382019-06-11 16:35:25 +010061 case V1_1::OperationType::SPACE_TO_BATCH_ND:
62 case V1_1::OperationType::SQUEEZE:
63 case V1_1::OperationType::STRIDED_SLICE:
Mike Kellyb5fdf382019-06-11 16:35:25 +010064 case V1_1::OperationType::TRANSPOSE:
65 return true;
66 default:
67 return false;
68 }
69}
70
// Convenience overload: checks the operation's type against the HAL 1.0 set.
bool HandledByV1_0(const V1_2::Operation& operation)
{
    return HandledByV1_0(operation.type);
}
75
// Convenience overload: checks the operation's type against the HAL 1.1 set
// (which includes the HAL 1.0 set).
bool HandledByV1_1(const V1_2::Operation& operation)
{
    return HandledByV1_1(operation.type);
}
80
// Unchecked numeric cast from a 1.2 to a 1.0 operation type. Only meaningful
// when HandledByV1_0() has confirmed the type exists in HAL 1.0.
V1_0::OperationType CastToV1_0(V1_2::OperationType type)
{
    return static_cast<V1_0::OperationType>(type);
}
85
// Unchecked numeric cast from a 1.2 to a 1.1 operation type. Only meaningful
// when HandledByV1_1() has confirmed the type exists in HAL 1.1.
V1_1::OperationType CastToV1_1(V1_2::OperationType type)
{
    return static_cast<V1_1::OperationType>(type);
}
90
91V1_0::Operation ConvertToV1_0(const V1_2::Operation& operation)
92{
93 V1_0::Operation op;
94 op.type = CastToV1_0(operation.type);
95 op.inputs = operation.inputs;
96 op.outputs = operation.outputs;
97 return op;
98}
99
100V1_1::Operation ConvertToV1_1(const V1_2::Operation& operation)
101{
102 V1_1::Operation op;
103 op.type = CastToV1_1(operation.type);
104 op.inputs = operation.inputs;
105 op.outputs = operation.outputs;
106 return op;
107}
108
// Entry point for converting one HAL 1.2 operation into ArmNN layers.
// Operations whose type already existed in an earlier HAL version are
// delegated to the 1.0/1.1 policies — but only if the *whole* model can be
// expressed in that HAL version (compliantWithV1_x) — otherwise the
// 1.2-specific Convert* helpers below are used.
// Returns true on success; false (via Fail(), which also logs) otherwise.
bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
{
    // Prefer the oldest policy that can handle this operation: 1.0 first...
    if (HandledByV1_0(operation) && compliantWithV1_0(model))
    {
        hal_1_0::HalPolicy::Operation v10Operation = ConvertToV1_0(operation);
        hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);

        return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
    }

    // ...then 1.1 (which subsumes the 1.0 operation set).
    if (HandledByV1_1(operation) && compliantWithV1_1(model))
    {
        hal_1_1::HalPolicy::Operation v11Operation = ConvertToV1_1(operation);
        hal_1_1::HalPolicy::Model v11Model = convertToV1_1(model);

        return hal_1_1::HalPolicy::ConvertOperation(v11Operation, v11Model, data);
    }

    // Dispatch to the HAL 1.2-specific converters.
    switch (operation.type)
    {
        case V1_2::OperationType::AVERAGE_POOL_2D:
            return ConvertAveragePool2d(operation, model, data);
        case V1_2::OperationType::CONV_2D:
            return ConvertConv2d(operation, model, data);
        case V1_2::OperationType::DEPTHWISE_CONV_2D:
            return ConvertDepthwiseConv2d(operation, model, data);
        case V1_2::OperationType::L2_POOL_2D:
            return ConvertL2Pool2d(operation, model, data);
        case V1_2::OperationType::MAX_POOL_2D:
            return ConvertMaxPool2d(operation, model, data);
        case V1_2::OperationType::MAXIMUM:
            return ConvertMaximum(operation, model, data);
        case V1_2::OperationType::MINIMUM:
            return ConvertMinimum(operation, model, data);
        case V1_2::OperationType::PAD:
            return ConvertPad(operation, model, data);
        case V1_2::OperationType::PAD_V2:
            return ConvertPadV2(operation, model, data);
        case V1_2::OperationType::PRELU:
            return ConvertPrelu(operation, model, data);
        case V1_2::OperationType::RELU:
            return ConvertReLu(operation, model, data);
        case V1_2::OperationType::RELU1:
            return ConvertReLu1(operation, model, data);
        case V1_2::OperationType::RELU6:
            return ConvertReLu6(operation, model, data);
        case V1_2::OperationType::RESIZE_BILINEAR:
            return ConvertResize(operation, model, data, armnn::ResizeMethod::Bilinear);
        case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR:
            return ConvertResize(operation, model, data, armnn::ResizeMethod::NearestNeighbor);
        case V1_2::OperationType::SOFTMAX:
            return ConvertSoftmax(operation, model, data);
        case V1_2::OperationType::SPACE_TO_DEPTH:
            return ConvertSpaceToDepth(operation, model, data);
        case V1_2::OperationType::SUB:
            return ConvertSub(operation, model, data);
        case V1_2::OperationType::TANH:
            return ConvertTanH(operation, model, data);
        case V1_2::OperationType::LSTM:
            return ConvertLstm(operation, model, data);
        default:
            return Fail("%s: Operation type %s not supported in ArmnnDriver",
                        __func__, toString(operation.type).c_str());
    }
}
174
// AVERAGE_POOL_2D: thin wrapper over the shared ConvertPooling2d template,
// selecting the Average pooling algorithm.
bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertAveragePool2d()");
    return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
}
180
// CONV_2D: converts an NNAPI 2D convolution into an ArmNN Convolution2d layer
// (plus an optional fused activation layer produced by ProcessActivation).
// Handles both the implicit-padding (padding scheme) and explicit-padding
// operand layouts, and the optional data-layout / dilation operands.
// Returns true on success; false (via Fail(), which logs) otherwise.
bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertConv2d()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // The output shape must be known at conversion time.
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit.
    // Implicit padding: 7 inputs, or >= 8 where input 7 is the BOOL data-layout
    // flag rather than an explicit padding scalar.
    bool implicitPadding = operation.inputs.size() == 7 ||
        (operation.inputs.size() >= 8 &&
        GetInputOperand<hal_1_2::HalPolicy>(operation, 7, model)->type == OperandType::BOOL);

    // The optional data-layout flag sits at index 7 (implicit) or 10 (explicit).
    if (implicitPadding)
    {
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 7, model, data);
    }
    else if (operation.inputs.size() >= 10)
    {
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 10, model, data);
    }

    const armnn::PermutationVector OHWIToOIHW = {0, 2, 3, 1};

    // ArmNN does not currently support non-fixed weights or bias
    // The NNAPI filter is always OHWI [depth_out, filter_height, filter_width, depth_in] but ArmNN expects the
    // filter's height and width indices to match the input's height and width indices so we permute it to OIHW if
    // the DataLayout is NCHW
    const ConstTensorPin weightsPin = (desc.m_DataLayout == armnn::DataLayout::NCHW) ?
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data, OHWIToOIHW) :
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
    // Bias is input 2.
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input/weight scales if needed.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        // Implicit layout: 3 = padding scheme, 4/5 = strides, 6 = activation,
        // 8+ = optional dilation parameters.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 6, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 8, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        // Derive the pad amounts from input/kernel extents and the scheme.
        armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
        unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
        unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
        const uint32_t kernelX = weights.GetShape()[widthIndex];
        const uint32_t kernelY = weights.GetShape()[heightIndex];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);

    }
    else if (operation.inputs.size() >= 10)
    {
        // Explicit layout: 3-6 = pad left/right/top/bottom, 7/8 = strides,
        // 9 = activation, 11+ = optional dilation parameters.
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 9, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 11, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Check backend support before mutating the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer produces the final output.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}
332
// DEPTHWISE_CONV_2D: converts an NNAPI depthwise convolution into an ArmNN
// DepthwiseConvolution2d layer (plus optional fused activation). The NNAPI
// weight tensor [1, H, W, I * M] is reinterpreted as [H, W, I, M] and then
// permuted to ArmNN's expected [M, I, H, W] ordering.
// Returns true on success; false (via Fail(), which logs) otherwise.
bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertDepthwiseConv2d()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // The output shape must be known at conversion time.
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const Operand* weightsOperand = GetInputOperand<hal_1_2::HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit.
    // Implicit padding: 8 inputs, or >= 9 where input 8 is the BOOL data-layout
    // flag rather than an explicit padding scalar.
    bool implicitPadding = operation.inputs.size() == 8 ||
        (operation.inputs.size() >= 9 &&
        GetInputOperand<hal_1_2::HalPolicy>(operation, 8, model)->type == OperandType::BOOL);

    // Look ahead to find the optional DataLayout, if present
    const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
    desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, dataLayoutFlagIndex, model, data);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();

    // Reinterpret weight data as [ H, W, I, M ]
    // (M is recovered as depth_out / input channels).
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[channelsIndex],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  HWIMToMIHW,
                                                                  &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input/weight scales if needed.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        // Implicit layout: 3 = padding scheme, 4/5 = strides, 7 = activation,
        // 9+ = optional dilation parameters.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 7, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        // Weights are now [M, I, H, W], so W is index 3 and H is index 2.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 11)
    {
        // explicit padding
        // 3-6 = pad left/right/top/bottom, 7/8 = strides, 10 = activation,
        // 12+ = optional dilation parameters.
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 10, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Check backend support before mutating the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer produces the final output.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}
498
// L2_POOL_2D: thin wrapper over the shared ConvertPooling2d template,
// selecting the L2 pooling algorithm.
bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertL2Pool2d()");
    return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
}
504
// MAX_POOL_2D: thin wrapper over the shared ConvertPooling2d template,
// selecting the Max pooling algorithm.
bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertMaxPool2d()");
    return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
}
510
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100511bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
512{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100513 ALOGV("hal_1_2::HalPolicy::ConvertMaximum()");
514
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100515 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
516 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
517
518 if (!input0.IsValid() || !input1.IsValid())
519 {
520 return Fail("%s: Operation has invalid inputs", __func__);
521 }
522
523 const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
524 if (!outputOperand)
525 {
526 return Fail("%s: Could not read output", __func__);
527 }
528
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100529 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100530 if (IsDynamicTensor(outInfo))
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100531 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100532 return Fail("%s: Dynamic output tensors are not supported", __func__);
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100533 }
534
Aron Virginas-Tard7593232019-07-16 13:17:06 +0100535 bool isSupported = false;
536 FORWARD_LAYER_SUPPORT_FUNC(__func__,
537 IsMaximumSupported,
538 data.m_Backends,
539 isSupported,
540 input0.GetTensorInfo(),
541 input1.GetTensorInfo(),
542 outInfo);
543
544 if (!isSupported)
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100545 {
546 return false;
547 }
548
549 armnn::IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
550 assert(layer != nullptr);
551 BroadcastTensor(input0, input1, layer, *data.m_Network);
552
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100553 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100554}
555
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100556bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
557{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100558 ALOGV("hal_1_2::HalPolicy::ConvertMinimum()");
559
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100560 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
561 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
562
563 if (!input0.IsValid() || !input1.IsValid())
564 {
565 return Fail("%s: Operation has invalid inputs", __func__);
566 }
567
568 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
569 if (!output)
570 {
571 return Fail("%s: Could not read output 0", __func__);
572 }
573
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100574 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100575 if (IsDynamicTensor(outputInfo))
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100576 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100577 return Fail("%s: Dynamic output tensors are not supported", __func__);
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100578 }
579
580 bool isSupported = false;
581 FORWARD_LAYER_SUPPORT_FUNC(__func__,
582 IsMinimumSupported,
583 data.m_Backends,
584 isSupported,
585 input0.GetTensorInfo(),
586 input1.GetTensorInfo(),
587 outputInfo);
588
589 if (!isSupported)
590 {
591 return false;
592 }
593
594 armnn::IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
595 assert(layer != nullptr);
596 BroadcastTensor(input0, input1, layer, *data.m_Network);
597
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100598 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100599}
600
// PAD: thin wrapper forwarding to the shared ::ConvertPad template
// (zero/default pad value, as opposed to PAD_V2's explicit value).
bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertPad()");
    return ::ConvertPad<hal_1_2::HalPolicy>(operation, model, data);
}
606
// PAD_V2: converts a pad-with-explicit-value operation into an ArmNN Pad
// layer. Inputs: 0 = tensor to pad, 1 = paddings (handled by ConvertPaddings),
// 2 = scalar pad value, whose operand type must match the input tensor type
// (FLOAT16/FLOAT32/INT32 for TENSOR_FLOAT16/TENSOR_FLOAT32/TENSOR_QUANT8_ASYMM).
// Returns true on success; false (via Fail(), which logs) otherwise.
bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertPadV2()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    // Fill the per-dimension pad amounts from input 1.
    armnn::PadDescriptor descriptor;
    if (!ConvertPaddings<hal_1_2::HalPolicy>(operation, model, data, rank, descriptor))
    {
        return Fail("%s: Could not convert paddings", __func__);
    }

    // The output shape must be known at conversion time.
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Determine type of padding value
    OperandType operandType0;
    OperandType operandType2;

    if (!GetOperandType<hal_1_2::HalPolicy>(operation, 0, model, operandType0) ||
        !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // Read value to use for padding
    if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
    {
        armnn::Half f16PadValue;
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 2, operandType2, f16PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
        }

        descriptor.m_PadValue = f16PadValue;
    }
    else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
    {
        if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, descriptor.m_PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
        }
    }
    else if (operandType0 == OperandType::TENSOR_QUANT8_ASYMM && operandType2 == OperandType::INT32)
    {
        // Quantized input: pad value arrives as a raw INT32 and is stored
        // into the (float) descriptor field.
        int32_t intPadValue = 0;
        if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, intPadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (INT32)", __func__);
        }
        descriptor.m_PadValue = intPadValue;
    }
    else
    {
        return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
    }

    // Check backend support before mutating the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPadSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}
700
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100701bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
702{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100703 ALOGV("hal_1_2::HalPolicy::ConvertPrelu()");
704
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100705 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
706 LayerInputHandle alpha = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
707
708 if (!input.IsValid() || !alpha.IsValid())
709 {
710 return Fail("%s: Operation has invalid inputs", __func__);
711 }
712
713 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
714
715 if (!output)
716 {
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100717 return Fail("%s: Could not read output", __func__);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100718 }
719
720 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
721 const armnn::TensorInfo& alphaInfo = alpha.GetTensorInfo();
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100722 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100723
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100724 if (IsDynamicTensor(outputInfo))
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100725 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100726 return Fail("%s: Dynamic output tensors are not supported", __func__);
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100727 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100728
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100729 bool isSupported = false;
730 FORWARD_LAYER_SUPPORT_FUNC(__func__,
731 IsPreluSupported,
732 data.m_Backends,
733 isSupported,
734 inputInfo,
735 alphaInfo,
736 outputInfo);
737 if (!isSupported)
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100738 {
739 return false;
740 }
741
742 armnn::IConnectableLayer* const layer = data.m_Network->AddPreluLayer();
743
744 if (!layer)
745 {
746 return Fail("%s: AddPreluLayer failed", __func__);
747 }
748
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100749 BroadcastTensor(input, alpha, layer, *data.m_Network);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100750
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100751 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100752}
753
// RELU: logs the call and delegates to the HAL-version-agnostic ::ConvertReLu
// template, instantiated for the 1.2 HAL policy.
bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu()");
    return ::ConvertReLu<hal_1_2::HalPolicy>(operation, model, data);
}
759
// RELU1: logs the call and delegates to the HAL-version-agnostic ::ConvertReLu1
// template, instantiated for the 1.2 HAL policy.
bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu1()");
    return ::ConvertReLu1<hal_1_2::HalPolicy>(operation, model, data);
}
765
// RELU6: logs the call and delegates to the HAL-version-agnostic ::ConvertReLu6
// template, instantiated for the 1.2 HAL policy.
bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu6()");
    return ::ConvertReLu6<hal_1_2::HalPolicy>(operation, model, data);
}
771
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100772bool HalPolicy::ConvertResize(const Operation& operation,
773 const Model& model,
774 ConversionData& data,
775 armnn::ResizeMethod resizeMethod)
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100776{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100777 ALOGV("hal_1_2::HalPolicy::ConvertResize()");
778
779 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100780 if (!input.IsValid())
781 {
782 return Fail("%s: Could not read input 0", __func__);
783 }
784
785 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
786 if (!output)
787 {
788 return Fail("%s: Could not read output 0", __func__);
789 }
790
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100791 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
792 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
793
794 if (IsDynamicTensor(outputInfo))
795 {
796 return Fail("%s: Dynamic output tensors are not supported", __func__);
797 }
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100798
799 armnn::ResizeDescriptor descriptor;
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100800 descriptor.m_Method = resizeMethod;
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100801 descriptor.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 3, model, data);
802
803 OperandType operandType1;
804 OperandType operandType2;
805
806 if (!GetOperandType<hal_1_2::HalPolicy>(operation, 1, model, operandType1) ||
807 !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
808 {
809 return Fail("%s: Operation has invalid inputs", __func__);
810 }
811
812 if (operandType1 != operandType2)
813 {
814 return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
815 }
816
817 if (operandType1 == OperandType::INT32)
818 {
819 // Case 1: resizing by shape
820 int32_t targetWidth = 0;
821 int32_t targetHeight = 0;
822
823 if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 1, targetWidth, model, data) ||
824 !GetInputInt32<hal_1_2::HalPolicy>(operation, 2, targetHeight, model, data))
825 {
826 return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
827 }
828
829 if (targetWidth < 0 || targetHeight < 0)
830 {
831 return Fail("%s: Operation has invalid inputs for resizing by shape. "
832 "Target width/height cannot be < 0", __func__);
833 }
834
835 descriptor.m_TargetWidth = static_cast<uint32_t>(targetWidth);
Teresa Charlin9843c012019-07-19 12:18:35 +0100836 descriptor.m_TargetHeight = static_cast<uint32_t>(targetHeight);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100837 }
838 else if (operandType1 == OperandType::FLOAT32)
839 {
840 // Case 2: resizing by scale
841 float widthScale = 1.0f;
842 float heightScale = 1.0f;
843
844 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, widthScale, model, data) ||
845 !GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, heightScale, model, data))
846 {
847 return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
848 }
849
850 const armnn::TensorShape& inputShape = inputInfo.GetShape();
851 armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
852
853 float width = inputShape[dataLayoutIndexed.GetWidthIndex()];
854 float height = inputShape[dataLayoutIndexed.GetHeightIndex()];
855
856 descriptor.m_TargetWidth = std::floor(width * widthScale);
857 descriptor.m_TargetHeight = std::floor(height * heightScale);
858 }
859 else
860 {
861 // NOTE: FLOAT16 scales are not supported
862 return false;
863 }
864
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100865 bool isSupported = false;
866 FORWARD_LAYER_SUPPORT_FUNC(__func__,
867 IsResizeSupported,
868 data.m_Backends,
869 isSupported,
870 inputInfo,
871 outputInfo,
872 descriptor);
Aron Virginas-Tarbe5d3562019-07-16 11:32:29 +0100873
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100874 if (!isSupported)
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100875 {
876 return false;
877 }
878
879 armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);
880
881 assert(layer != nullptr);
882
883 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
884 input.Connect(layer->GetInputSlot(0));
885
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100886 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100887}
888
Keith Davisa6bc52f2019-06-26 09:39:49 +0100889bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
890{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100891 ALOGV("hal_1_2::HalPolicy::ConvertSpaceToDepth()");
Keith Davisa6bc52f2019-06-26 09:39:49 +0100892
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100893 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
Keith Davisa6bc52f2019-06-26 09:39:49 +0100894 if (!input.IsValid() )
895 {
896 return Fail("%s: Operation has invalid inputs", __func__);
897 }
898
899 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
900 unsigned int rank = inputInfo.GetNumDimensions();
Keith Davisa6bc52f2019-06-26 09:39:49 +0100901 if (rank != 4)
902 {
903 return Fail("%s: Only inputs with rank 4 are supported", __func__);
904 }
905
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100906 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
907 if (!output)
908 {
909 return Fail("%s: Could not read output 0", __func__);
910 }
911
912 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
913 if (IsDynamicTensor(outputInfo))
914 {
915 return Fail("%s: Dynamic output tensors are not supported", __func__);
916 }
917
Keith Davisa6bc52f2019-06-26 09:39:49 +0100918 armnn::SpaceToDepthDescriptor desc;
919
920 GetInputScalar<hal_1_2::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
921
922 if (desc.m_BlockSize <= 1)
923 {
924 return Fail("%s: Block size must be at least 1 in all dimensions");
925 }
926
927 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 2, model, data);
928
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100929 bool isSupported = false;
930 FORWARD_LAYER_SUPPORT_FUNC(__func__,
931 IsSpaceToDepthSupported,
932 data.m_Backends,
933 isSupported,
934 inputInfo,
935 outputInfo,
936 desc);
937 if (!isSupported)
Keith Davisa6bc52f2019-06-26 09:39:49 +0100938 {
939 return false;
940 }
941
942 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
943 assert(layer != nullptr);
944 input.Connect(layer->GetInputSlot(0));
945
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100946 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Keith Davisa6bc52f2019-06-26 09:39:49 +0100947}
948
Francis Murtagh074c25a2019-07-22 16:40:57 +0100949bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
950{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100951 ALOGV("hal_1_2::HalPolicy::ConvertSoftmax()");
952
Francis Murtagh074c25a2019-07-22 16:40:57 +0100953 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
954 if (!input.IsValid())
955 {
956 return Fail("%s: Operation has invalid inputs", __func__);
957 }
958
959 const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
960 if (!outputOperand)
961 {
962 return Fail("%s: Operation has no outputs", __func__);
963 }
964
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100965 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100966 if (IsDynamicTensor(outputInfo))
Francis Murtagh074c25a2019-07-22 16:40:57 +0100967 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100968 return Fail("%s: Dynamic output tensors are not supported", __func__);
Francis Murtagh074c25a2019-07-22 16:40:57 +0100969 }
970
971 armnn::SoftmaxDescriptor desc;
972 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, desc.m_Beta, model, data))
973 {
974 return Fail("%s: Operation has invalid inputs", __func__);
975 }
976
977 if (operation.inputs.size() > 2 && !GetInputScalar<hal_1_2::HalPolicy>(operation,
978 2,
979 HalPolicy::OperandType::INT32,
980 desc.m_Axis,
981 model,
982 data))
983 {
984 return Fail("%s: Operation has invalid inputs", __func__);
985 }
986
987 bool isSupported = false;
988 FORWARD_LAYER_SUPPORT_FUNC(__func__,
989 IsSoftmaxSupported,
990 data.m_Backends,
991 isSupported,
992 input.GetTensorInfo(),
993 outputInfo,
994 desc);
995 if (!isSupported)
996 {
997 return false;
998 }
999
1000 armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
1001 assert(layer != nullptr);
1002 input.Connect(layer->GetInputSlot(0));
1003
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001004 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Francis Murtagh074c25a2019-07-22 16:40:57 +01001005}
1006
// SUB: logs the call and delegates to the HAL-version-agnostic ::ConvertSub
// template, instantiated for the 1.2 HAL policy.
bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertSub()");
    return ::ConvertSub<hal_1_2::HalPolicy>(operation, model, data);
}
1012
// TANH: logs the call and delegates to the HAL-version-agnostic ::ConvertTanH
// template, instantiated for the 1.2 HAL policy.
bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertTanH()");
    return ::ConvertTanH<hal_1_2::HalPolicy>(operation, model, data);
}
1018
// Converts an LSTM operation into an armnn LstmLayer.
//
// The operation has 23-27 inputs (3 runtime tensors, the rest constant
// weights/biases read as ConstTensorPins) and 4 outputs. Which optional
// tensors are present determines the layer configuration: CIFG, peephole,
// projection and layer normalization are all inferred below from null/non-null
// pins rather than from explicit flags. The operand ordering is fixed by the
// NNAPI LSTM definition, so the numbered indices used throughout must not be
// changed.
bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
{
    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0: input", __func__);
    }
    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    LayerInputHandle outputStateIn = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 18, model, data);
    if (!outputStateIn.IsValid())
    {
        return Fail("%s: Could not read input 18: outputStateIn", __func__);
    }
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    LayerInputHandle cellStateIn = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 19, model, data);
    if (!cellStateIn.IsValid())
    {
        return Fail("%s: Could not read input 19: cellStateIn", __func__);
    }

    // Get the mandatory input tensors:
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToCellWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 3, model, data);
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 4, model, data);
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 6, model, data);
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToCellWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 7, model, data);
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 8, model, data);
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin forgetGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 13, model, data);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 14, model, data);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin outputGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 15, model, data);

    if (!inputToForgetWeightsPin.IsValid() ||
        !inputToCellWeightsPin.IsValid() ||
        !inputToOutputWeightsPin.IsValid() ||
        !recurrentToForgetWeightsPin.IsValid() ||
        !recurrentToCellWeightsPin.IsValid() ||
        !recurrentToOutputWeightsPin.IsValid() ||
        !forgetGateBiasPin.IsValid() ||
        !cellBiasPin.IsValid() ||
        !outputGateBiasPin.IsValid())
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the optional input tensors:
    // (the trailing 'true' argument marks each pin as optional, so an absent
    // operand yields a valid-but-null pin instead of a conversion failure)
    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    const ConstTensorPin inputToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    const ConstTensorPin recurrentToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  5,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  9,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  10,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  11,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin inputGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  12,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    const ConstTensorPin projectionWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  16,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    const ConstTensorPin projectionBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  17,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // An optional pin is acceptable either as valid (operand present) or as
    // flagged optional (operand absent); anything else is a malformed model.
    if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
        (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
        (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
        (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
        (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
        (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
        (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
        (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the mandatory input scalars (actually 1-D tensors of size 1):
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    ActivationFn activation;
    float cellClip;
    float projClip;
    if (!GetInputActivationFunctionFromTensor<hal_1_2::HalPolicy>(operation, 20, activation, model, data) ||
        !GetInputScalar<hal_1_2::HalPolicy>(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
        !GetInputScalar<hal_1_2::HalPolicy>(operation, 22, OperandType::FLOAT32, projClip, model, data))
    {
        return Fail("%s: Operation has invalid scalar inputs", __func__);
    }

    // Get the normalization tensors
    // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at input gate.
    const ConstTensorPin inputLayerNormWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  23,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at forget gate.
    const ConstTensorPin forgetLayerNormWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  24,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at cell gate.
    const ConstTensorPin cellLayerNormWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  25,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at output gate.
    const ConstTensorPin outputLayerNormWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  26,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // Outputs:
    // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
    //     with CIFG, or [batch_size, num_units * 3] without CIFG.
    const Operand* scratchBuffer = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!scratchBuffer)
    {
        return Fail("%s: Could not read output 0: scratchBuffer", __func__);
    }
    // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    const Operand* outputStateOut = GetOutputOperand<hal_1_2::HalPolicy>(operation, 1, model);
    if (!outputStateOut)
    {
        return Fail("%s: Could not read output 1: outputStateOut", __func__);
    }
    // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    const Operand* cellStateOut = GetOutputOperand<hal_1_2::HalPolicy>(operation, 2, model);
    if (!cellStateOut)
    {
        return Fail("%s: Could not read output 2: cellStateOut", __func__);
    }
    // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //     effectively the same as the current “output state (out)” value.
    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 3, model);
    if (!output)
    {
        return Fail("%s: Could not read output 3: output", __func__);
    }

    // set the params structure for the AddLstmLayer call
    // (GetConstTensorPtr() returns nullptr for absent optional tensors)
    armnn::LstmInputParams params;
    params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
    params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
    params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
    params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
    params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
    params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
    params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
    params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
    params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
    params.m_CellBias = cellBiasPin.GetConstTensorPtr();
    params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
    params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
    params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
    params.m_InputLayerNormWeights = inputLayerNormWeightsPin.GetConstTensorPtr();
    params.m_ForgetLayerNormWeights = forgetLayerNormWeightsPin.GetConstTensorPtr();
    params.m_CellLayerNormWeights = cellLayerNormWeightsPin.GetConstTensorPtr();
    params.m_OutputLayerNormWeights = outputLayerNormWeightsPin.GetConstTensorPtr();

    // set the layer descriptor
    // (feature flags are inferred from which optional tensors were supplied)
    armnn::LstmDescriptor desc;
    desc.m_ActivationFunc = activation;
    desc.m_ClippingThresCell = cellClip;
    desc.m_ClippingThresProj = projClip;
    desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
                          params.m_RecurrentToInputWeights == nullptr ||
                          params.m_InputGateBias == nullptr);
    desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
                              params.m_CellToOutputWeights != nullptr);
    desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
    desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr ||
                               params.m_ForgetLayerNormWeights != nullptr ||
                               params.m_CellLayerNormWeights != nullptr ||
                               params.m_OutputLayerNormWeights != nullptr);

    // validate the optional input groups
    if (desc.m_CifgEnabled &&
        (params.m_InputToInputWeights != nullptr ||
         params.m_RecurrentToInputWeights != nullptr ||
         params.m_InputGateBias != nullptr))
    {
        return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
                    " and input gate bias must be provided", __func__);
    }

    if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
    {
        return Fail("%s: projection bias should not be provided without projection weights", __func__);
    }

    if (desc.m_PeepholeEnabled &&
        (params.m_CellToForgetWeights == nullptr ||
         params.m_CellToOutputWeights == nullptr ||
         (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
    {
        return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
                    " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
    }

    if (desc.m_LayerNormEnabled &&
        (params.m_ForgetLayerNormWeights == nullptr ||
         params.m_CellLayerNormWeights == nullptr ||
         params.m_OutputLayerNormWeights == nullptr ||
         (!desc.m_CifgEnabled && params.m_InputLayerNormWeights == nullptr)))
    {
        return Fail("%s: All, or none, of forget-norm weights, cell-norm weights and output-norm weights must be"
                    " provided and, if CIFG is not enabled, input-norm weights must also be provided", __func__);
    }

    // Check if the layer is supported
    // Inputs
    const armnn::TensorInfo& inputInfo         = input.GetTensorInfo();
    const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
    const armnn::TensorInfo& cellStateInInfo   = cellStateIn.GetTensorInfo();

    // Outputs
    const armnn::TensorInfo& scratchBufferInfo  = GetTensorInfoForOperand(*scratchBuffer);
    const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
    const armnn::TensorInfo& cellStateOutInfo   = GetTensorInfoForOperand(*cellStateOut);
    const armnn::TensorInfo& outputInfo         = GetTensorInfoForOperand(*output);

    // Basic parameters
    // (paramsInfo mirrors params but carries TensorInfos for the backend support query)
    armnn::LstmInputParamsInfo paramsInfo;
    paramsInfo.m_InputToForgetWeights     = &(params.m_InputToForgetWeights->GetInfo());
    paramsInfo.m_InputToCellWeights       = &(params.m_InputToCellWeights->GetInfo());
    paramsInfo.m_InputToOutputWeights     = &(params.m_InputToOutputWeights->GetInfo());
    paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
    paramsInfo.m_RecurrentToCellWeights   = &(params.m_RecurrentToCellWeights->GetInfo());
    paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
    paramsInfo.m_ForgetGateBias           = &(params.m_ForgetGateBias->GetInfo());
    paramsInfo.m_CellBias                 = &(params.m_CellBias->GetInfo());
    paramsInfo.m_OutputGateBias           = &(params.m_OutputGateBias->GetInfo());

    // Optional parameters
    if(!desc.m_CifgEnabled)
    {
        paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
        paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
        if (params.m_CellToInputWeights != nullptr)
        {
            paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
        }
        paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
    }

    if(desc.m_ProjectionEnabled)
    {
        paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
        if (params.m_ProjectionBias != nullptr)
        {
            paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
        }
    }

    if(desc.m_PeepholeEnabled)
    {
        paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
        paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
    }

    if (desc.m_LayerNormEnabled)
    {
        if(!desc.m_CifgEnabled)
        {
            paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
        }
        paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
        paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
        paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsLstmSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputStateInInfo,
                               cellStateInInfo,
                               scratchBufferInfo,
                               outputStateOutInfo,
                               cellStateOutInfo,
                               outputInfo,
                               desc,
                               paramsInfo);
    if (!isSupported)
    {
        return false;
    }

    // Add the layer
    armnn::IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");

    input.Connect(layer->GetInputSlot(0));
    outputStateIn.Connect(layer->GetInputSlot(1));
    cellStateIn.Connect(layer->GetInputSlot(2));

    // Track all four outputs: scratch buffer, output state (out), cell state (out) and output.
    return (SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, 0, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 1, *layer, 1, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 2, *layer, 2, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 3, *layer, 3, model, data));
}
1444
Mike Kellyb5fdf382019-06-11 16:35:25 +01001445} // namespace hal_1_2
Matteo Martincigh17ffff32019-06-27 14:12:55 +01001446} // namespace armnn_driver