blob: 8dbfd89732ab135d369218ba8946ccbadcedc71b [file] [log] [blame]
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

6#include "HalPolicy.hpp"
7
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +01008#include "Utils.hpp"
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01009
Mike Kellyb5fdf382019-06-11 16:35:25 +010010#include "../1.0/HalPolicy.hpp"
11#include "../1.1/HalPolicy.hpp"
12
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010013#include <DataLayoutIndexed.hpp>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +010014#include <Half.hpp>
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010015
16#include <cmath>
17
Mike Kellyb5fdf382019-06-11 16:35:25 +010018namespace armnn_driver
19{
20namespace hal_1_2
21{
22
23bool HandledByV1_0(V1_2::OperationType operationType)
24{
25 switch (static_cast<V1_0::OperationType>(operationType))
26 {
27 case V1_0::OperationType::ADD:
Mike Kellyb5fdf382019-06-11 16:35:25 +010028 case V1_0::OperationType::CONCATENATION:
29 case V1_0::OperationType::DEPTH_TO_SPACE:
30 case V1_0::OperationType::DEQUANTIZE:
31 case V1_0::OperationType::EMBEDDING_LOOKUP:
32 case V1_0::OperationType::FLOOR:
33 case V1_0::OperationType::FULLY_CONNECTED:
34 case V1_0::OperationType::HASHTABLE_LOOKUP:
35 case V1_0::OperationType::L2_NORMALIZATION:
Mike Kellyb5fdf382019-06-11 16:35:25 +010036 case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
37 case V1_0::OperationType::LOGISTIC:
38 case V1_0::OperationType::LSH_PROJECTION:
Mike Kellyb5fdf382019-06-11 16:35:25 +010039 case V1_0::OperationType::MUL:
Mike Kellyb5fdf382019-06-11 16:35:25 +010040 case V1_0::OperationType::RESHAPE:
Mike Kellyb5fdf382019-06-11 16:35:25 +010041 case V1_0::OperationType::RNN:
Mike Kellyb5fdf382019-06-11 16:35:25 +010042 case V1_0::OperationType::SVDF:
Mike Kellyb5fdf382019-06-11 16:35:25 +010043 case V1_0::OperationType::OEM_OPERATION:
44 return true;
45 default:
46 return false;
47 }
48}
49
50bool HandledByV1_1(V1_2::OperationType operationType)
51{
52 if (HandledByV1_0(operationType))
53 {
54 return true;
55 }
56 switch (static_cast<V1_1::OperationType>(operationType))
57 {
58 case V1_1::OperationType::BATCH_TO_SPACE_ND:
59 case V1_1::OperationType::DIV:
60 case V1_1::OperationType::MEAN:
Mike Kellyb5fdf382019-06-11 16:35:25 +010061 case V1_1::OperationType::SPACE_TO_BATCH_ND:
62 case V1_1::OperationType::SQUEEZE:
63 case V1_1::OperationType::STRIDED_SLICE:
64 case V1_1::OperationType::SUB:
65 case V1_1::OperationType::TRANSPOSE:
66 return true;
67 default:
68 return false;
69 }
70}
71
72bool HandledByV1_0(const V1_2::Operation& operation)
73{
74 return HandledByV1_0(operation.type);
75}
76
77bool HandledByV1_1(const V1_2::Operation& operation)
78{
79 return HandledByV1_1(operation.type);
80}
81
// Narrows a V1_2 operation type to its V1_0 equivalent. Only meaningful for
// values that HandledByV1_0() accepted; a plain value cast suffices because
// operation type enumerator values are shared across HAL versions.
V1_0::OperationType CastToV1_0(V1_2::OperationType type)
{
    return static_cast<V1_0::OperationType>(type);
}
86
// Narrows a V1_2 operation type to its V1_1 equivalent. Only meaningful for
// values that HandledByV1_1() accepted; a plain value cast suffices because
// operation type enumerator values are shared across HAL versions.
V1_1::OperationType CastToV1_1(V1_2::OperationType type)
{
    return static_cast<V1_1::OperationType>(type);
}
91
92V1_0::Operation ConvertToV1_0(const V1_2::Operation& operation)
93{
94 V1_0::Operation op;
95 op.type = CastToV1_0(operation.type);
96 op.inputs = operation.inputs;
97 op.outputs = operation.outputs;
98 return op;
99}
100
101V1_1::Operation ConvertToV1_1(const V1_2::Operation& operation)
102{
103 V1_1::Operation op;
104 op.type = CastToV1_1(operation.type);
105 op.inputs = operation.inputs;
106 op.outputs = operation.outputs;
107 return op;
108}
109
110bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
111{
112 if (HandledByV1_0(operation) && compliantWithV1_0(model))
113 {
114 hal_1_0::HalPolicy::Operation v10Operation = ConvertToV1_0(operation);
115 hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);
116
117 return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
118 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100119
120 if (HandledByV1_1(operation) && compliantWithV1_1(model))
Mike Kellyb5fdf382019-06-11 16:35:25 +0100121 {
122 hal_1_1::HalPolicy::Operation v11Operation = ConvertToV1_1(operation);
123 hal_1_1::HalPolicy::Model v11Model = convertToV1_1(model);
124
125 return hal_1_1::HalPolicy::ConvertOperation(v11Operation, v11Model, data);
126 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100127
Mike Kellyb5fdf382019-06-11 16:35:25 +0100128 switch (operation.type)
129 {
Sadik Armagan15d63e22019-07-26 16:59:35 +0100130 case V1_2::OperationType::AVERAGE_POOL_2D:
131 return ConvertAveragePool2d(operation, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +0100132 case V1_2::OperationType::CONV_2D:
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100133 return ConvertConv2d(operation, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +0100134 case V1_2::OperationType::DEPTHWISE_CONV_2D:
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100135 return ConvertDepthwiseConv2d(operation, model, data);
Sadik Armagan15d63e22019-07-26 16:59:35 +0100136 case V1_2::OperationType::L2_POOL_2D:
137 return ConvertL2Pool2d(operation, model, data);
138 case V1_2::OperationType::MAX_POOL_2D:
139 return ConvertMaxPool2d(operation, model, data);
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100140 case V1_2::OperationType::MAXIMUM:
141 return ConvertMaximum(operation, model, data);
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100142 case V1_2::OperationType::MINIMUM:
143 return ConvertMinimum(operation, model, data);
Mike Kelly3c673942019-07-25 09:26:06 +0100144 case V1_2::OperationType::PAD:
Aron Virginas-Tarc921f6b2019-07-25 10:14:33 +0100145 return ConvertPad(operation, model, data);
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100146 case V1_2::OperationType::PAD_V2:
147 return ConvertPadV2(operation, model, data);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100148 case V1_2::OperationType::PRELU:
149 return ConvertPrelu(operation, model, data);
Sadik Armagan61113162019-07-25 09:09:40 +0100150 case V1_2::OperationType::RELU:
151 return ConvertReLu(operation, model, data);
152 case V1_2::OperationType::RELU1:
153 return ConvertReLu1(operation, model, data);
154 case V1_2::OperationType::RELU6:
155 return ConvertReLu6(operation, model, data);
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100156 case V1_2::OperationType::RESIZE_BILINEAR:
157 return ConvertResize(operation, model, data, armnn::ResizeMethod::Bilinear);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100158 case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR:
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100159 return ConvertResize(operation, model, data, armnn::ResizeMethod::NearestNeighbor);
Francis Murtagh074c25a2019-07-22 16:40:57 +0100160 case V1_2::OperationType::SOFTMAX:
161 return ConvertSoftmax(operation, model, data);
Aron Virginas-Tarad1ab532019-07-25 11:24:42 +0100162 case V1_2::OperationType::SPACE_TO_DEPTH:
163 return ConvertSpaceToDepth(operation, model, data);
Sadik Armagan61113162019-07-25 09:09:40 +0100164 case V1_2::OperationType::TANH:
165 return ConvertTanH(operation, model, data);
Ferran Balaguerb2397fd2019-07-25 12:12:39 +0100166 case V1_2::OperationType::LSTM:
167 return ConvertLstm(operation, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +0100168 default:
169 return Fail("%s: Operation type %s not supported in ArmnnDriver",
170 __func__, toString(operation.type).c_str());
171 }
172}
173
// AVERAGE_POOL_2D: forwards to the shared pooling converter, parameterised on
// the pooling algorithm.
bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertAveragePool2d()");
    return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
}
179
// CONV_2D: converts an NNAPI 1.2 convolution into an ArmNN Convolution2dLayer
// (plus a fused activation layer, if any). Weights and bias must be constant
// operands; dynamic output shapes are rejected.
bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertConv2d()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit.
    // 7 inputs can only be the implicit-padding form; with 8+ inputs the two
    // forms are disambiguated by input 7, which is the BOOL NCHW-layout flag
    // in the implicit form and an INT32 stride in the explicit form.
    bool implicitPadding = operation.inputs.size() == 7 ||
        (operation.inputs.size() >= 8 &&
        GetInputOperand<hal_1_2::HalPolicy>(operation, 7, model)->type == OperandType::BOOL);

    // The optional data-layout flag sits at a different index in each form.
    if (implicitPadding)
    {
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 7, model, data);
    }
    else if (operation.inputs.size() >= 10)
    {
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 10, model, data);
    }

    const armnn::PermutationVector OHWIToOIHW = {0, 2, 3, 1};

    // ArmNN does not currently support non-fixed weights or bias
    // The NNAPI filter is always OHWI [depth_out, filter_height, filter_width, depth_in] but ArmNN expects the
    // filter's height and width indices to match the input's height and width indices so we permute it to OIHW if
    // the DataLayout is NCHW
    const ConstTensorPin weightsPin = (desc.m_DataLayout == armnn::DataLayout::NCHW) ?
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data, OHWIToOIHW) :
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Keep the bias quantization scale consistent with input_scale * weight_scale.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        // Implicit form: padding scheme + strides + activation (+ optional dilation from index 8).
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 6, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 8, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        // Derive explicit padding amounts from the scheme, kernel size,
        // stride and dilation, using layout-aware H/W indices.
        armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
        unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
        unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
        const uint32_t kernelX = weights.GetShape()[widthIndex];
        const uint32_t kernelY = weights.GetShape()[heightIndex];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);

    }
    else if (operation.inputs.size() >= 10)
    {
        // explicit padding: four pad values + strides + activation (+ optional dilation from index 11).
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 9, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 11, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Ask the configured backends whether they support this convolution.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer == startLayer when activation is NONE.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}
331
// DEPTHWISE_CONV_2D: converts an NNAPI 1.2 depthwise convolution into an ArmNN
// DepthwiseConvolution2dLayer (plus a fused activation layer, if any). The
// NNAPI weight tensor is reshaped and permuted into ArmNN's expected layout.
bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertDepthwiseConv2d()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const Operand* weightsOperand = GetInputOperand<hal_1_2::HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit.
    // 8 inputs can only be the implicit-padding form; with 9+ inputs the forms
    // are disambiguated by input 8, which is the BOOL NCHW-layout flag in the
    // implicit form and an INT32 stride in the explicit form.
    bool implicitPadding = operation.inputs.size() == 8 ||
        (operation.inputs.size() >= 9 &&
        GetInputOperand<hal_1_2::HalPolicy>(operation, 8, model)->type == OperandType::BOOL);

    // Look ahead to find the optional DataLayout, if present
    const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
    desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, dataLayoutFlagIndex, model, data);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();

    // Reinterpret weight data as [ H, W, I, M ]
    // (M, the channel multiplier, is recovered as (I * M) / I from the input's channel count)
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[channelsIndex],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  HWIMToMIHW,
                                                                  &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Keep the bias quantization scale consistent with input_scale * weight_scale.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        // Implicit form: padding scheme + strides + activation (+ optional dilation from index 9).
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 7, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        // After the MIHW swizzle above, the weight shape is [ M, I, H, W ]:
        // index 3 is the kernel width and index 2 the kernel height.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 11)
    {
        // explicit padding: four pad values + strides + activation (+ optional dilation from index 12).
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 10, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Ask the configured backends whether they support this depthwise convolution.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer == startLayer when activation is NONE.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}
497
// L2_POOL_2D: forwards to the shared pooling converter, parameterised on the
// pooling algorithm.
bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertL2Pool2d()");
    return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
}
503
// MAX_POOL_2D: forwards to the shared pooling converter, parameterised on the
// pooling algorithm.
bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertMaxPool2d()");
    return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
}
509
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100510bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
511{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100512 ALOGV("hal_1_2::HalPolicy::ConvertMaximum()");
513
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100514 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
515 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
516
517 if (!input0.IsValid() || !input1.IsValid())
518 {
519 return Fail("%s: Operation has invalid inputs", __func__);
520 }
521
522 const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
523 if (!outputOperand)
524 {
525 return Fail("%s: Could not read output", __func__);
526 }
527
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100528 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100529 if (IsDynamicTensor(outInfo))
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100530 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100531 return Fail("%s: Dynamic output tensors are not supported", __func__);
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100532 }
533
Aron Virginas-Tard7593232019-07-16 13:17:06 +0100534 bool isSupported = false;
535 FORWARD_LAYER_SUPPORT_FUNC(__func__,
536 IsMaximumSupported,
537 data.m_Backends,
538 isSupported,
539 input0.GetTensorInfo(),
540 input1.GetTensorInfo(),
541 outInfo);
542
543 if (!isSupported)
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100544 {
545 return false;
546 }
547
548 armnn::IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
549 assert(layer != nullptr);
550 BroadcastTensor(input0, input1, layer, *data.m_Network);
551
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100552 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100553}
554
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100555bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
556{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100557 ALOGV("hal_1_2::HalPolicy::ConvertMinimum()");
558
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100559 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
560 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
561
562 if (!input0.IsValid() || !input1.IsValid())
563 {
564 return Fail("%s: Operation has invalid inputs", __func__);
565 }
566
567 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
568 if (!output)
569 {
570 return Fail("%s: Could not read output 0", __func__);
571 }
572
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100573 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100574 if (IsDynamicTensor(outputInfo))
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100575 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100576 return Fail("%s: Dynamic output tensors are not supported", __func__);
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100577 }
578
579 bool isSupported = false;
580 FORWARD_LAYER_SUPPORT_FUNC(__func__,
581 IsMinimumSupported,
582 data.m_Backends,
583 isSupported,
584 input0.GetTensorInfo(),
585 input1.GetTensorInfo(),
586 outputInfo);
587
588 if (!isSupported)
589 {
590 return false;
591 }
592
593 armnn::IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
594 assert(layer != nullptr);
595 BroadcastTensor(input0, input1, layer, *data.m_Network);
596
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100597 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100598}
599
// PAD: forwards to the shared (global-scope) template PAD converter.
bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertPad()");
    return ::ConvertPad<hal_1_2::HalPolicy>(operation, model, data);
}
605
// PAD_V2: pads the input tensor with an explicit pad value (input 2). The pad
// value's operand type must match the input tensor's element type: FLOAT16
// for TENSOR_FLOAT16, FLOAT32 for TENSOR_FLOAT32, INT32 for
// TENSOR_QUANT8_ASYMM. Dynamic output shapes are rejected.
bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertPadV2()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    // Input 1 holds the per-dimension (before, after) padding amounts.
    armnn::PadDescriptor descriptor;
    if (!ConvertPaddings<hal_1_2::HalPolicy>(operation, model, data, rank, descriptor))
    {
        return Fail("%s: Could not convert paddings", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Determine type of padding value
    OperandType operandType0;
    OperandType operandType2;

    if (!GetOperandType<hal_1_2::HalPolicy>(operation, 0, model, operandType0) ||
        !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // Read value to use for padding
    if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
    {
        armnn::Half f16PadValue;
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 2, operandType2, f16PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
        }

        // m_PadValue is a float; the half value is converted on assignment.
        descriptor.m_PadValue = f16PadValue;
    }
    else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
    {
        if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, descriptor.m_PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
        }
    }
    else if (operandType0 == OperandType::TENSOR_QUANT8_ASYMM && operandType2 == OperandType::INT32)
    {
        // NOTE(review): the raw INT32 value is stored as-is — presumably it is
        // already expressed in the input's quantized space; confirm against the
        // common ConvertPaddings/Pad handling.
        int32_t intPadValue = 0;
        if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, intPadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (INT32)", __func__);
        }
        descriptor.m_PadValue = intPadValue;
    }
    else
    {
        return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
    }

    // Check backend support before adding anything to the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPadSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}
699
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100700bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
701{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100702 ALOGV("hal_1_2::HalPolicy::ConvertPrelu()");
703
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100704 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
705 LayerInputHandle alpha = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
706
707 if (!input.IsValid() || !alpha.IsValid())
708 {
709 return Fail("%s: Operation has invalid inputs", __func__);
710 }
711
712 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
713
714 if (!output)
715 {
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100716 return Fail("%s: Could not read output", __func__);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100717 }
718
719 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
720 const armnn::TensorInfo& alphaInfo = alpha.GetTensorInfo();
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100721 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100722
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100723 if (IsDynamicTensor(outputInfo))
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100724 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100725 return Fail("%s: Dynamic output tensors are not supported", __func__);
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100726 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100727
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100728 bool isSupported = false;
729 FORWARD_LAYER_SUPPORT_FUNC(__func__,
730 IsPreluSupported,
731 data.m_Backends,
732 isSupported,
733 inputInfo,
734 alphaInfo,
735 outputInfo);
736 if (!isSupported)
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100737 {
738 return false;
739 }
740
741 armnn::IConnectableLayer* const layer = data.m_Network->AddPreluLayer();
742
743 if (!layer)
744 {
745 return Fail("%s: AddPreluLayer failed", __func__);
746 }
747
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100748 BroadcastTensor(input, alpha, layer, *data.m_Network);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100749
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100750 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100751}
752
// Converts a HAL 1.2 RELU operation by delegating to the version-agnostic
// ::ConvertReLu template shared across HAL policies.
bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu()");
    return ::ConvertReLu<hal_1_2::HalPolicy>(operation, model, data);
}
758
// Converts a HAL 1.2 RELU1 operation (clamps to [-1, 1]) by delegating to the
// version-agnostic ::ConvertReLu1 template shared across HAL policies.
bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu1()");
    return ::ConvertReLu1<hal_1_2::HalPolicy>(operation, model, data);
}
764
// Converts a HAL 1.2 RELU6 operation (clamps to [0, 6]) by delegating to the
// version-agnostic ::ConvertReLu6 template shared across HAL policies.
bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu6()");
    return ::ConvertReLu6<hal_1_2::HalPolicy>(operation, model, data);
}
770
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100771bool HalPolicy::ConvertResize(const Operation& operation,
772 const Model& model,
773 ConversionData& data,
774 armnn::ResizeMethod resizeMethod)
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100775{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100776 ALOGV("hal_1_2::HalPolicy::ConvertResize()");
777
778 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100779 if (!input.IsValid())
780 {
781 return Fail("%s: Could not read input 0", __func__);
782 }
783
784 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
785 if (!output)
786 {
787 return Fail("%s: Could not read output 0", __func__);
788 }
789
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100790 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
791 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
792
793 if (IsDynamicTensor(outputInfo))
794 {
795 return Fail("%s: Dynamic output tensors are not supported", __func__);
796 }
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100797
798 armnn::ResizeDescriptor descriptor;
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100799 descriptor.m_Method = resizeMethod;
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100800 descriptor.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 3, model, data);
801
802 OperandType operandType1;
803 OperandType operandType2;
804
805 if (!GetOperandType<hal_1_2::HalPolicy>(operation, 1, model, operandType1) ||
806 !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
807 {
808 return Fail("%s: Operation has invalid inputs", __func__);
809 }
810
811 if (operandType1 != operandType2)
812 {
813 return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
814 }
815
816 if (operandType1 == OperandType::INT32)
817 {
818 // Case 1: resizing by shape
819 int32_t targetWidth = 0;
820 int32_t targetHeight = 0;
821
822 if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 1, targetWidth, model, data) ||
823 !GetInputInt32<hal_1_2::HalPolicy>(operation, 2, targetHeight, model, data))
824 {
825 return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
826 }
827
828 if (targetWidth < 0 || targetHeight < 0)
829 {
830 return Fail("%s: Operation has invalid inputs for resizing by shape. "
831 "Target width/height cannot be < 0", __func__);
832 }
833
834 descriptor.m_TargetWidth = static_cast<uint32_t>(targetWidth);
Teresa Charlin9843c012019-07-19 12:18:35 +0100835 descriptor.m_TargetHeight = static_cast<uint32_t>(targetHeight);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100836 }
837 else if (operandType1 == OperandType::FLOAT32)
838 {
839 // Case 2: resizing by scale
840 float widthScale = 1.0f;
841 float heightScale = 1.0f;
842
843 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, widthScale, model, data) ||
844 !GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, heightScale, model, data))
845 {
846 return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
847 }
848
849 const armnn::TensorShape& inputShape = inputInfo.GetShape();
850 armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
851
852 float width = inputShape[dataLayoutIndexed.GetWidthIndex()];
853 float height = inputShape[dataLayoutIndexed.GetHeightIndex()];
854
855 descriptor.m_TargetWidth = std::floor(width * widthScale);
856 descriptor.m_TargetHeight = std::floor(height * heightScale);
857 }
858 else
859 {
860 // NOTE: FLOAT16 scales are not supported
861 return false;
862 }
863
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100864 bool isSupported = false;
865 FORWARD_LAYER_SUPPORT_FUNC(__func__,
866 IsResizeSupported,
867 data.m_Backends,
868 isSupported,
869 inputInfo,
870 outputInfo,
871 descriptor);
Aron Virginas-Tarbe5d3562019-07-16 11:32:29 +0100872
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100873 if (!isSupported)
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100874 {
875 return false;
876 }
877
878 armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);
879
880 assert(layer != nullptr);
881
882 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
883 input.Connect(layer->GetInputSlot(0));
884
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100885 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100886}
887
Keith Davisa6bc52f2019-06-26 09:39:49 +0100888bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
889{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100890 ALOGV("hal_1_2::HalPolicy::ConvertSpaceToDepth()");
Keith Davisa6bc52f2019-06-26 09:39:49 +0100891
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100892 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
Keith Davisa6bc52f2019-06-26 09:39:49 +0100893 if (!input.IsValid() )
894 {
895 return Fail("%s: Operation has invalid inputs", __func__);
896 }
897
898 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
899 unsigned int rank = inputInfo.GetNumDimensions();
Keith Davisa6bc52f2019-06-26 09:39:49 +0100900 if (rank != 4)
901 {
902 return Fail("%s: Only inputs with rank 4 are supported", __func__);
903 }
904
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100905 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
906 if (!output)
907 {
908 return Fail("%s: Could not read output 0", __func__);
909 }
910
911 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
912 if (IsDynamicTensor(outputInfo))
913 {
914 return Fail("%s: Dynamic output tensors are not supported", __func__);
915 }
916
Keith Davisa6bc52f2019-06-26 09:39:49 +0100917 armnn::SpaceToDepthDescriptor desc;
918
919 GetInputScalar<hal_1_2::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
920
921 if (desc.m_BlockSize <= 1)
922 {
923 return Fail("%s: Block size must be at least 1 in all dimensions");
924 }
925
926 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 2, model, data);
927
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100928 bool isSupported = false;
929 FORWARD_LAYER_SUPPORT_FUNC(__func__,
930 IsSpaceToDepthSupported,
931 data.m_Backends,
932 isSupported,
933 inputInfo,
934 outputInfo,
935 desc);
936 if (!isSupported)
Keith Davisa6bc52f2019-06-26 09:39:49 +0100937 {
938 return false;
939 }
940
941 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
942 assert(layer != nullptr);
943 input.Connect(layer->GetInputSlot(0));
944
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100945 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Keith Davisa6bc52f2019-06-26 09:39:49 +0100946}
947
Francis Murtagh074c25a2019-07-22 16:40:57 +0100948bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
949{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100950 ALOGV("hal_1_2::HalPolicy::ConvertSoftmax()");
951
Francis Murtagh074c25a2019-07-22 16:40:57 +0100952 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
953 if (!input.IsValid())
954 {
955 return Fail("%s: Operation has invalid inputs", __func__);
956 }
957
958 const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
959 if (!outputOperand)
960 {
961 return Fail("%s: Operation has no outputs", __func__);
962 }
963
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100964 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100965 if (IsDynamicTensor(outputInfo))
Francis Murtagh074c25a2019-07-22 16:40:57 +0100966 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100967 return Fail("%s: Dynamic output tensors are not supported", __func__);
Francis Murtagh074c25a2019-07-22 16:40:57 +0100968 }
969
970 armnn::SoftmaxDescriptor desc;
971 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, desc.m_Beta, model, data))
972 {
973 return Fail("%s: Operation has invalid inputs", __func__);
974 }
975
976 if (operation.inputs.size() > 2 && !GetInputScalar<hal_1_2::HalPolicy>(operation,
977 2,
978 HalPolicy::OperandType::INT32,
979 desc.m_Axis,
980 model,
981 data))
982 {
983 return Fail("%s: Operation has invalid inputs", __func__);
984 }
985
986 bool isSupported = false;
987 FORWARD_LAYER_SUPPORT_FUNC(__func__,
988 IsSoftmaxSupported,
989 data.m_Backends,
990 isSupported,
991 input.GetTensorInfo(),
992 outputInfo,
993 desc);
994 if (!isSupported)
995 {
996 return false;
997 }
998
999 armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
1000 assert(layer != nullptr);
1001 input.Connect(layer->GetInputSlot(0));
1002
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001003 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Francis Murtagh074c25a2019-07-22 16:40:57 +01001004}
1005
// Converts a HAL 1.2 TANH operation by delegating to the version-agnostic
// ::ConvertTanH template shared across HAL policies.
bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertTanH()");
    return ::ConvertTanH<hal_1_2::HalPolicy>(operation, model, data);
}
1011
// Converts a HAL 1.2 LSTM operation into an armnn::LstmLayer.
// The NNAPI LSTM carries 23+ inputs (weights, biases, state, scalars and the
// HAL 1.2 layer-normalization weights) and 4 outputs (scratch buffer, output
// state, cell state, output). Optional parameter groups — CIFG, peephole,
// projection, layer normalization — are detected from which optional tensors
// are present and are cross-validated before the layer is added.
bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
{
    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0: input", __func__);
    }
    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    LayerInputHandle outputStateIn = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 18, model, data);
    if (!outputStateIn.IsValid())
    {
        return Fail("%s: Could not read input 18: outputStateIn", __func__);
    }
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    LayerInputHandle cellStateIn = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 19, model, data);
    if (!cellStateIn.IsValid())
    {
        return Fail("%s: Could not read input 19: cellStateIn", __func__);
    }

    // Get the mandatory input tensors:
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToCellWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 3, model, data);
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 4, model, data);
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 6, model, data);
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToCellWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 7, model, data);
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 8, model, data);
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin forgetGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 13, model, data);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 14, model, data);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin outputGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 15, model, data);

    if (!inputToForgetWeightsPin.IsValid() ||
        !inputToCellWeightsPin.IsValid() ||
        !inputToOutputWeightsPin.IsValid() ||
        !recurrentToForgetWeightsPin.IsValid() ||
        !recurrentToCellWeightsPin.IsValid() ||
        !recurrentToOutputWeightsPin.IsValid() ||
        !forgetGateBiasPin.IsValid() ||
        !cellBiasPin.IsValid() ||
        !outputGateBiasPin.IsValid())
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the optional input tensors:
    // NOTE: the trailing 'true' argument marks each pin as optional; an absent
    // operand then yields an invalid-but-optional pin (see IsOptional() checks below).
    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    const ConstTensorPin inputToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    const ConstTensorPin recurrentToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  5,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  9,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  10,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  11,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin inputGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  12,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    const ConstTensorPin projectionWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  16,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    const ConstTensorPin projectionBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  17,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // An optional pin is only a failure if it is present-but-unreadable
    // (invalid AND not flagged optional).
    if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
        (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
        (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
        (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
        (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
        (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
        (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
        (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the mandatory input scalars (actually 1-D tensors of size 1):
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    ActivationFn activation;
    float cellClip;
    float projClip;
    if (!GetInputActivationFunctionFromTensor<hal_1_2::HalPolicy>(operation, 20, activation, model, data) ||
        !GetInputScalar<hal_1_2::HalPolicy>(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
        !GetInputScalar<hal_1_2::HalPolicy>(operation, 22, OperandType::FLOAT32, projClip, model, data))
    {
        return Fail("%s: Operation has invalid scalar inputs", __func__);
    }

    // Get the normalization tensors (new in HAL 1.2, all optional):
    // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at input gate.
    const ConstTensorPin inputLayerNormWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  23,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at forget gate.
    const ConstTensorPin forgetLayerNormWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  24,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at cell gate.
    const ConstTensorPin cellLayerNormWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  25,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at output gate.
    const ConstTensorPin outputLayerNormWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  26,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // Outputs:
    // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
    //     with CIFG, or [batch_size, num_units * 3] without CIFG.
    const Operand* scratchBuffer = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!scratchBuffer)
    {
        return Fail("%s: Could not read output 0: scratchBuffer", __func__);
    }
    // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    const Operand* outputStateOut = GetOutputOperand<hal_1_2::HalPolicy>(operation, 1, model);
    if (!outputStateOut)
    {
        return Fail("%s: Could not read output 1: outputStateOut", __func__);
    }
    // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    const Operand* cellStateOut = GetOutputOperand<hal_1_2::HalPolicy>(operation, 2, model);
    if (!cellStateOut)
    {
        return Fail("%s: Could not read output 2: cellStateOut", __func__);
    }
    // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //     effectively the same as the current “output state (out)” value.
    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 3, model);
    if (!output)
    {
        return Fail("%s: Could not read output 3: output", __func__);
    }

    // set the params structure for the AddLstmLayer call
    // (GetConstTensorPtr() on an absent optional pin yields nullptr; those nullptrs
    // drive the feature-group detection below)
    armnn::LstmInputParams params;
    params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
    params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
    params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
    params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
    params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
    params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
    params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
    params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
    params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
    params.m_CellBias = cellBiasPin.GetConstTensorPtr();
    params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
    params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
    params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
    params.m_InputLayerNormWeights = inputLayerNormWeightsPin.GetConstTensorPtr();
    params.m_ForgetLayerNormWeights = forgetLayerNormWeightsPin.GetConstTensorPtr();
    params.m_CellLayerNormWeights = cellLayerNormWeightsPin.GetConstTensorPtr();
    params.m_OutputLayerNormWeights = outputLayerNormWeightsPin.GetConstTensorPtr();

    // set the layer descriptor
    // CIFG = Coupled Input and Forget Gate: enabled when the input-gate tensors are absent.
    armnn::LstmDescriptor desc;
    desc.m_ActivationFunc = activation;
    desc.m_ClippingThresCell = cellClip;
    desc.m_ClippingThresProj = projClip;
    desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
                          params.m_RecurrentToInputWeights == nullptr ||
                          params.m_InputGateBias == nullptr);
    desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
                              params.m_CellToOutputWeights != nullptr);
    desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
    desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr ||
                               params.m_ForgetLayerNormWeights != nullptr ||
                               params.m_CellLayerNormWeights != nullptr ||
                               params.m_OutputLayerNormWeights != nullptr);

    // validate the optional input groups
    if (desc.m_CifgEnabled &&
        (params.m_InputToInputWeights != nullptr ||
         params.m_RecurrentToInputWeights != nullptr ||
         params.m_InputGateBias != nullptr))
    {
        return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
                    " and input gate bias must be provided", __func__);
    }

    if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
    {
        return Fail("%s: projection bias should not be provided without projection weights", __func__);
    }

    if (desc.m_PeepholeEnabled &&
        (params.m_CellToForgetWeights == nullptr ||
         params.m_CellToOutputWeights == nullptr ||
         (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
    {
        return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
                    " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
    }

    if (desc.m_LayerNormEnabled &&
        (params.m_ForgetLayerNormWeights == nullptr ||
         params.m_CellLayerNormWeights == nullptr ||
         params.m_OutputLayerNormWeights == nullptr ||
         (!desc.m_CifgEnabled && params.m_InputLayerNormWeights == nullptr)))
    {
        return Fail("%s: All, or none, of forget-norm weights, cell-norm weights and output-norm weights must be"
                    " provided and, if CIFG is not enabled, input-norm weights must also be provided", __func__);
    }

    // Check if the layer is supported
    // Inputs
    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
    const armnn::TensorInfo& cellStateInInfo = cellStateIn.GetTensorInfo();

    // Outputs
    const armnn::TensorInfo& scratchBufferInfo = GetTensorInfoForOperand(*scratchBuffer);
    const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
    const armnn::TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Basic parameters
    armnn::LstmInputParamsInfo paramsInfo;
    paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
    paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
    paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
    paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
    paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
    paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
    paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
    paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
    paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());

    // Optional parameters
    // Only tensors belonging to enabled feature groups are forwarded; the
    // group-consistency checks above guarantee the dereferences are safe.
    if(!desc.m_CifgEnabled)
    {
        paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
        paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
        if (params.m_CellToInputWeights != nullptr)
        {
            paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
        }
        paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
    }

    if(desc.m_ProjectionEnabled)
    {
        paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
        if (params.m_ProjectionBias != nullptr)
        {
            paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
        }
    }

    if(desc.m_PeepholeEnabled)
    {
        paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
        paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
    }

    if (desc.m_LayerNormEnabled)
    {
        if(!desc.m_CifgEnabled)
        {
            paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
        }
        paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
        paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
        paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsLstmSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputStateInInfo,
                               cellStateInInfo,
                               scratchBufferInfo,
                               outputStateOutInfo,
                               cellStateOutInfo,
                               outputInfo,
                               desc,
                               paramsInfo);
    if (!isSupported)
    {
        return false;
    }

    // Add the layer
    armnn::IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");

    input.Connect(layer->GetInputSlot(0));
    outputStateIn.Connect(layer->GetInputSlot(1));
    cellStateIn.Connect(layer->GetInputSlot(2));

    // The four outputs map 1:1 to the layer's output slots (scratch, state out,
    // cell state out, output).
    return (SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, 0, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 1, *layer, 1, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 2, *layer, 2, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 3, *layer, 3, model, data));
}
1437
Mike Kellyb5fdf382019-06-11 16:35:25 +01001438} // namespace hal_1_2
Matteo Martincigh17ffff32019-06-27 14:12:55 +01001439} // namespace armnn_driver