blob: dee2175d11f3f35ebc7dce4d1cd505c275687058 [file] [log] [blame]
Mike Kellyb5fdf382019-06-11 16:35:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "HalPolicy.hpp"
7
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +01008#include "Utils.hpp"
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01009
Mike Kellyb5fdf382019-06-11 16:35:25 +010010#include "../1.0/HalPolicy.hpp"
11#include "../1.1/HalPolicy.hpp"
12
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010013#include <DataLayoutIndexed.hpp>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +010014#include <Half.hpp>
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010015
16#include <cmath>
17
Mike Kellyb5fdf382019-06-11 16:35:25 +010018namespace armnn_driver
19{
20namespace hal_1_2
21{
22
23bool HandledByV1_0(V1_2::OperationType operationType)
24{
25 switch (static_cast<V1_0::OperationType>(operationType))
26 {
27 case V1_0::OperationType::ADD:
Mike Kellyb5fdf382019-06-11 16:35:25 +010028 case V1_0::OperationType::CONCATENATION:
29 case V1_0::OperationType::DEPTH_TO_SPACE:
30 case V1_0::OperationType::DEQUANTIZE:
31 case V1_0::OperationType::EMBEDDING_LOOKUP:
32 case V1_0::OperationType::FLOOR:
33 case V1_0::OperationType::FULLY_CONNECTED:
34 case V1_0::OperationType::HASHTABLE_LOOKUP:
35 case V1_0::OperationType::L2_NORMALIZATION:
Mike Kellyb5fdf382019-06-11 16:35:25 +010036 case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
37 case V1_0::OperationType::LOGISTIC:
38 case V1_0::OperationType::LSH_PROJECTION:
Mike Kellyb5fdf382019-06-11 16:35:25 +010039 case V1_0::OperationType::MUL:
Mike Kellyb5fdf382019-06-11 16:35:25 +010040 case V1_0::OperationType::RESHAPE:
Mike Kellyb5fdf382019-06-11 16:35:25 +010041 case V1_0::OperationType::RNN:
Mike Kellyb5fdf382019-06-11 16:35:25 +010042 case V1_0::OperationType::SVDF:
Mike Kellyb5fdf382019-06-11 16:35:25 +010043 case V1_0::OperationType::OEM_OPERATION:
44 return true;
45 default:
46 return false;
47 }
48}
49
50bool HandledByV1_1(V1_2::OperationType operationType)
51{
52 if (HandledByV1_0(operationType))
53 {
54 return true;
55 }
56 switch (static_cast<V1_1::OperationType>(operationType))
57 {
58 case V1_1::OperationType::BATCH_TO_SPACE_ND:
59 case V1_1::OperationType::DIV:
60 case V1_1::OperationType::MEAN:
Mike Kellyb5fdf382019-06-11 16:35:25 +010061 case V1_1::OperationType::SPACE_TO_BATCH_ND:
62 case V1_1::OperationType::SQUEEZE:
63 case V1_1::OperationType::STRIDED_SLICE:
Mike Kellyb5fdf382019-06-11 16:35:25 +010064 case V1_1::OperationType::TRANSPOSE:
65 return true;
66 default:
67 return false;
68 }
69}
70
71bool HandledByV1_0(const V1_2::Operation& operation)
72{
73 return HandledByV1_0(operation.type);
74}
75
76bool HandledByV1_1(const V1_2::Operation& operation)
77{
78 return HandledByV1_1(operation.type);
79}
80
81V1_0::OperationType CastToV1_0(V1_2::OperationType type)
82{
83 return static_cast<V1_0::OperationType>(type);
84}
85
86V1_1::OperationType CastToV1_1(V1_2::OperationType type)
87{
88 return static_cast<V1_1::OperationType>(type);
89}
90
91V1_0::Operation ConvertToV1_0(const V1_2::Operation& operation)
92{
93 V1_0::Operation op;
94 op.type = CastToV1_0(operation.type);
95 op.inputs = operation.inputs;
96 op.outputs = operation.outputs;
97 return op;
98}
99
100V1_1::Operation ConvertToV1_1(const V1_2::Operation& operation)
101{
102 V1_1::Operation op;
103 op.type = CastToV1_1(operation.type);
104 op.inputs = operation.inputs;
105 op.outputs = operation.outputs;
106 return op;
107}
108
// Entry point for converting a single HAL 1.2 operation into ArmNN layers.
// Operations that already existed in HAL 1.0/1.1 are down-converted and
// delegated to the corresponding older HalPolicy; only operations that are
// new to (or redefined in) HAL 1.2 are handled by this class directly.
// Returns true on successful conversion, false (via Fail) otherwise.
bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
{
    // Delegate when both the operation type and the whole model are
    // expressible in HAL 1.0 terms.
    if (HandledByV1_0(operation) && compliantWithV1_0(model))
    {
        hal_1_0::HalPolicy::Operation v10Operation = ConvertToV1_0(operation);
        hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);

        return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
    }

    // Likewise for HAL 1.1.
    if (HandledByV1_1(operation) && compliantWithV1_1(model))
    {
        hal_1_1::HalPolicy::Operation v11Operation = ConvertToV1_1(operation);
        hal_1_1::HalPolicy::Model v11Model = convertToV1_1(model);

        return hal_1_1::HalPolicy::ConvertOperation(v11Operation, v11Model, data);
    }

    // Operations handled natively at the 1.2 level.
    switch (operation.type)
    {
        case V1_2::OperationType::AVERAGE_POOL_2D:
            return ConvertAveragePool2d(operation, model, data);
        case V1_2::OperationType::CONV_2D:
            return ConvertConv2d(operation, model, data);
        case V1_2::OperationType::DEPTHWISE_CONV_2D:
            return ConvertDepthwiseConv2d(operation, model, data);
        case V1_2::OperationType::L2_POOL_2D:
            return ConvertL2Pool2d(operation, model, data);
        case V1_2::OperationType::MAX_POOL_2D:
            return ConvertMaxPool2d(operation, model, data);
        case V1_2::OperationType::MAXIMUM:
            return ConvertMaximum(operation, model, data);
        case V1_2::OperationType::MINIMUM:
            return ConvertMinimum(operation, model, data);
        case V1_2::OperationType::PAD:
            return ConvertPad(operation, model, data);
        case V1_2::OperationType::PAD_V2:
            return ConvertPadV2(operation, model, data);
        case V1_2::OperationType::PRELU:
            return ConvertPrelu(operation, model, data);
        case V1_2::OperationType::QUANTIZE:
            return ConvertQuantize(operation, model, data);
        case V1_2::OperationType::RELU:
            return ConvertReLu(operation, model, data);
        case V1_2::OperationType::RELU1:
            return ConvertReLu1(operation, model, data);
        case V1_2::OperationType::RELU6:
            return ConvertReLu6(operation, model, data);
        // Both resize flavours share one converter, parameterised on method.
        case V1_2::OperationType::RESIZE_BILINEAR:
            return ConvertResize(operation, model, data, armnn::ResizeMethod::Bilinear);
        case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR:
            return ConvertResize(operation, model, data, armnn::ResizeMethod::NearestNeighbor);
        case V1_2::OperationType::SOFTMAX:
            return ConvertSoftmax(operation, model, data);
        case V1_2::OperationType::SPACE_TO_DEPTH:
            return ConvertSpaceToDepth(operation, model, data);
        case V1_2::OperationType::SUB:
            return ConvertSub(operation, model, data);
        case V1_2::OperationType::TANH:
            return ConvertTanH(operation, model, data);
        case V1_2::OperationType::LSTM:
            return ConvertLstm(operation, model, data);
        default:
            // Anything else is unsupported by this driver.
            return Fail("%s: Operation type %s not supported in ArmnnDriver",
                        __func__, toString(operation.type).c_str());
    }
}
176
Sadik Armagan15d63e22019-07-26 16:59:35 +0100177bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
178{
179 ALOGV("hal_1_2::HalPolicy::ConvertAveragePool2d()");
180 return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
181}
182
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100183bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
184{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100185 ALOGV("hal_1_2::HalPolicy::ConvertConv2d()");
186
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100187 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
188 if (!input.IsValid())
189 {
190 return Fail("%s: Operation has invalid inputs", __func__);
191 }
192
193 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
194 if (!output)
195 {
196 return Fail("%s: Could not read output 0", __func__);
197 }
198
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100199 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
200 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
201
202 if (IsDynamicTensor(outputInfo))
203 {
204 return Fail("%s: Dynamic output tensors are not supported", __func__);
205 }
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +0100206
Mike Kellye1d60bb2019-07-11 11:44:52 +0100207 armnn::Convolution2dDescriptor desc;
208 desc.m_DataLayout = armnn::DataLayout::NHWC;
209
210 // Determine whether padding is implicit or explicit
211 bool implicitPadding = operation.inputs.size() == 7 ||
212 (operation.inputs.size() >= 8 &&
213 GetInputOperand<hal_1_2::HalPolicy>(operation, 7, model)->type == OperandType::BOOL);
214
215 if (implicitPadding)
216 {
217 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 7, model, data);
218 }
219 else if (operation.inputs.size() >= 10)
220 {
221 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 10, model, data);
222 }
223
224 const armnn::PermutationVector OHWIToOIHW = {0, 2, 3, 1};
225
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100226 // ArmNN does not currently support non-fixed weights or bias
Mike Kellye1d60bb2019-07-11 11:44:52 +0100227 // The NNAPI filter is always OHWI [depth_out, filter_height, filter_width, depth_in] but ArmNN expects the
228 // filter's height and width indices to match the input's height and width indices so we permute it to OIHW if
229 // the DataLayout is NCHW
230 const ConstTensorPin weightsPin = (desc.m_DataLayout == armnn::DataLayout::NCHW) ?
231 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data, OHWIToOIHW) :
232 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100233 const ConstTensorPin biasPin =
Mike Kellye1d60bb2019-07-11 11:44:52 +0100234 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100235
236 if (!weightsPin.IsValid())
237 {
238 return Fail("%s: Operation has invalid weights", __func__);
239 }
240
241 if (!biasPin.IsValid())
242 {
243 return Fail("%s: Operation has invalid biases", __func__);
244 }
245
246 armnn::ConstTensor weights = weightsPin.GetConstTensor();
247 armnn::ConstTensor bias = biasPin.GetConstTensor();
248 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
249
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100250 ActivationFn activation;
251
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100252 if (implicitPadding)
253 {
254 android::nn::PaddingScheme paddingScheme;
255 if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
256 !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
257 !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
258 !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 6, activation, model, data) ||
259 !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 8, desc, model, data))
260 {
261 return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
262 }
263
Mike Kellye1d60bb2019-07-11 11:44:52 +0100264 armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
265 unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
266 unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
267 const uint32_t kernelX = weights.GetShape()[widthIndex];
268 const uint32_t kernelY = weights.GetShape()[heightIndex];
269 const uint32_t inputX = inputInfo.GetShape()[widthIndex];
270 const uint32_t inputY = inputInfo.GetShape()[heightIndex];
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100271
Mike Kelly86b36d42019-07-12 16:39:33 +0100272 CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
273 CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100274
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100275 }
276 else if (operation.inputs.size() >= 10)
277 {
278 // explicit padding
279 if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
280 !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
281 !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
282 !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
283 !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
284 !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
285 !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 9, activation, model, data) ||
286 !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 11, desc, model, data))
287 {
288 return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
289 }
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100290 }
291 else
292 {
293 return Fail("%s: Unsupported number of operation inputs", __func__);
294 }
295
296 desc.m_BiasEnabled = true;
297 armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
298
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100299 bool isSupported = false;
300 FORWARD_LAYER_SUPPORT_FUNC(__func__,
301 IsConvolution2dSupported,
302 data.m_Backends,
303 isSupported,
304 inputInfo,
305 outputInfo,
306 desc,
307 weights.GetInfo(),
308 biases);
Aron Virginas-Tar2b173122019-07-15 14:29:09 +0100309
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100310 if (!isSupported)
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100311 {
312 return false;
313 }
314
315 armnn::IConnectableLayer* startLayer =
316 data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
317
318 if (!startLayer)
319 {
320 return Fail("%s: AddConvolution2dLayer failed", __func__);
321 }
322
323 armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
324
325 if (!endLayer)
326 {
327 return Fail("%s: ProcessActivation failed", __func__);
328 }
329
330 input.Connect(startLayer->GetInputSlot(0));
331
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100332 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100333}
334
// Converts an NNAPI DEPTHWISE_CONV_2D operation into an ArmNN
// DepthwiseConvolution2d layer. Handles implicit and explicit padding
// operand layouts, optional dilation and data-layout flag, and reshapes
// the NNAPI [1, H, W, I*M] filter into ArmNN's expected layout.
bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertDepthwiseConv2d()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Output shape must be known up front; dynamic shapes are rejected.
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const Operand* weightsOperand = GetInputOperand<hal_1_2::HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit
    // Implicit: 8 inputs, or >=9 where input 8 is the BOOL data-layout flag
    // (explicit padding puts an INT32 stride at index 8 instead).
    bool implicitPadding = operation.inputs.size() == 8 ||
        (operation.inputs.size() >= 9 &&
        GetInputOperand<hal_1_2::HalPolicy>(operation, 8, model)->type == OperandType::BOOL);

    // Look ahead to find the optional DataLayout, if present
    const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
    desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, dataLayoutFlagIndex, model, data);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();

    // Reinterpret weight data as [ H, W, I, M ]
    // M (depth multiplier) is recovered as (I*M) / I from the input channels.
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[channelsIndex],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  HWIMToMIHW,
                                                                  &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input/weight scales where needed.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        // Implicit layout: 3=padding scheme, 4/5=strides, 7=fused activation,
        // 9+=optional dilation params.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 7, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        // After the HWIM -> MIHW swizzle the kernel width is at index 3 and
        // the kernel height at index 2 of the weights shape.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 11)
    {
        // explicit padding
        // Explicit layout: 3-6=pad left/right/top/bottom, 7/8=strides,
        // 10=fused activation, 12+=optional dilation params.
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 10, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Ask the configured backends whether this depthwise conv is supported.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}
500
Sadik Armagan15d63e22019-07-26 16:59:35 +0100501bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
502{
503 ALOGV("hal_1_2::HalPolicy::ConvertL2Pool2d()");
504 return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
505}
506
507bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
508{
509 ALOGV("hal_1_2::HalPolicy::ConvertMaxPool2d()");
510 return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
511}
512
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100513bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
514{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100515 ALOGV("hal_1_2::HalPolicy::ConvertMaximum()");
516
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100517 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
518 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
519
520 if (!input0.IsValid() || !input1.IsValid())
521 {
522 return Fail("%s: Operation has invalid inputs", __func__);
523 }
524
525 const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
526 if (!outputOperand)
527 {
528 return Fail("%s: Could not read output", __func__);
529 }
530
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100531 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100532 if (IsDynamicTensor(outInfo))
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100533 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100534 return Fail("%s: Dynamic output tensors are not supported", __func__);
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100535 }
536
Aron Virginas-Tard7593232019-07-16 13:17:06 +0100537 bool isSupported = false;
538 FORWARD_LAYER_SUPPORT_FUNC(__func__,
539 IsMaximumSupported,
540 data.m_Backends,
541 isSupported,
542 input0.GetTensorInfo(),
543 input1.GetTensorInfo(),
544 outInfo);
545
546 if (!isSupported)
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100547 {
548 return false;
549 }
550
551 armnn::IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
552 assert(layer != nullptr);
553 BroadcastTensor(input0, input1, layer, *data.m_Network);
554
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100555 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100556}
557
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100558bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
559{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100560 ALOGV("hal_1_2::HalPolicy::ConvertMinimum()");
561
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100562 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
563 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
564
565 if (!input0.IsValid() || !input1.IsValid())
566 {
567 return Fail("%s: Operation has invalid inputs", __func__);
568 }
569
570 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
571 if (!output)
572 {
573 return Fail("%s: Could not read output 0", __func__);
574 }
575
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100576 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100577 if (IsDynamicTensor(outputInfo))
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100578 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100579 return Fail("%s: Dynamic output tensors are not supported", __func__);
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100580 }
581
582 bool isSupported = false;
583 FORWARD_LAYER_SUPPORT_FUNC(__func__,
584 IsMinimumSupported,
585 data.m_Backends,
586 isSupported,
587 input0.GetTensorInfo(),
588 input1.GetTensorInfo(),
589 outputInfo);
590
591 if (!isSupported)
592 {
593 return false;
594 }
595
596 armnn::IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
597 assert(layer != nullptr);
598 BroadcastTensor(input0, input1, layer, *data.m_Network);
599
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100600 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100601}
602
Aron Virginas-Tarc921f6b2019-07-25 10:14:33 +0100603bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
604{
605 ALOGV("hal_1_2::HalPolicy::ConvertPad()");
606 return ::ConvertPad<hal_1_2::HalPolicy>(operation, model, data);
607}
608
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100609bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
610{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100611 ALOGV("hal_1_2::HalPolicy::ConvertPadV2()");
612
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100613 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
614 if (!input.IsValid())
615 {
616 return Fail("%s: Could not read input 0", __func__);
617 }
618
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +0100619 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
620 if (!output)
621 {
622 return Fail("%s: Could not read output", __func__);
623 }
624
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100625 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
626 unsigned int rank = inputInfo.GetNumDimensions();
627
628 armnn::PadDescriptor descriptor;
629 if (!ConvertPaddings<hal_1_2::HalPolicy>(operation, model, data, rank, descriptor))
630 {
631 return Fail("%s: Could not convert paddings", __func__);
632 }
633
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100634 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100635 if (IsDynamicTensor(outputInfo))
Sadik Armagan310d8ff2019-07-11 10:53:38 +0100636 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100637 return Fail("%s: Dynamic output tensors are not supported", __func__);
Sadik Armagan310d8ff2019-07-11 10:53:38 +0100638 }
639
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100640 // Determine type of padding value
641 OperandType operandType0;
642 OperandType operandType2;
643
644 if (!GetOperandType<hal_1_2::HalPolicy>(operation, 0, model, operandType0) ||
645 !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
646 {
647 return Fail("%s: Operation has invalid inputs", __func__);
648 }
649
650 // Read value to use for padding
651 if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
652 {
653 armnn::Half f16PadValue;
654 if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 2, operandType2, f16PadValue, model, data))
655 {
656 return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
657 }
658
659 descriptor.m_PadValue = f16PadValue;
660 }
661 else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
662 {
663 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, descriptor.m_PadValue, model, data))
664 {
665 return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
666 }
667 }
668 else if (operandType0 == OperandType::TENSOR_QUANT8_ASYMM && operandType2 == OperandType::INT32)
669 {
Mike Kelly3c673942019-07-25 09:26:06 +0100670 int32_t intPadValue = 0;
671 if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, intPadValue, model, data))
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100672 {
673 return Fail("%s: Could not read input 2 (INT32)", __func__);
674 }
Mike Kelly3c673942019-07-25 09:26:06 +0100675 descriptor.m_PadValue = intPadValue;
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100676 }
677 else
678 {
679 return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
680 }
681
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100682 bool isSupported = false;
683 FORWARD_LAYER_SUPPORT_FUNC(__func__,
684 IsPadSupported,
685 data.m_Backends,
686 isSupported,
687 inputInfo,
688 outputInfo,
689 descriptor);
690 if (!isSupported)
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100691 {
692 return false;
693 }
694
695 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
696 assert(layer != nullptr);
697 input.Connect(layer->GetInputSlot(0));
698 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
699
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100700 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100701}
702
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100703bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
704{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100705 ALOGV("hal_1_2::HalPolicy::ConvertPrelu()");
706
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100707 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
708 LayerInputHandle alpha = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
709
710 if (!input.IsValid() || !alpha.IsValid())
711 {
712 return Fail("%s: Operation has invalid inputs", __func__);
713 }
714
715 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
716
717 if (!output)
718 {
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100719 return Fail("%s: Could not read output", __func__);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100720 }
721
722 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
723 const armnn::TensorInfo& alphaInfo = alpha.GetTensorInfo();
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100724 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100725
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100726 if (IsDynamicTensor(outputInfo))
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100727 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100728 return Fail("%s: Dynamic output tensors are not supported", __func__);
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100729 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100730
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100731 bool isSupported = false;
732 FORWARD_LAYER_SUPPORT_FUNC(__func__,
733 IsPreluSupported,
734 data.m_Backends,
735 isSupported,
736 inputInfo,
737 alphaInfo,
738 outputInfo);
739 if (!isSupported)
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100740 {
741 return false;
742 }
743
744 armnn::IConnectableLayer* const layer = data.m_Network->AddPreluLayer();
745
746 if (!layer)
747 {
748 return Fail("%s: AddPreluLayer failed", __func__);
749 }
750
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100751 BroadcastTensor(input, alpha, layer, *data.m_Network);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100752
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100753 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100754}
755
Sadik Armagan5a476a82019-07-30 09:43:18 +0100756bool HalPolicy::ConvertQuantize(const Operation& operation, const Model& model, ConversionData& data)
757{
758 ALOGV("hal_1_2::HalPolicy::ConvertQuantize()");
759
760 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
761 if (!input.IsValid())
762 {
763 return Fail("%s: Operation has invalid input", __func__);
764 }
765
766 const Operand* const outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
767 if (!outputOperand)
768 {
769 return Fail("%s: Operation has invalid outputs", __func__);
770 }
771
772 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
773 if (IsDynamicTensor(outputInfo))
774 {
775 return Fail("%s: Dynamic output tensors are not supported", __func__);
776 }
777
778 bool isSupported = false;
779 FORWARD_LAYER_SUPPORT_FUNC(__func__,
780 IsQuantizeSupported,
781 data.m_Backends,
782 isSupported,
783 input.GetTensorInfo(),
784 outputInfo);
785 if (!isSupported)
786 {
787 return false;
788 }
789
790 armnn::IConnectableLayer* const layer = data.m_Network->AddQuantizeLayer();
791 assert(layer != nullptr);
792 input.Connect(layer->GetInputSlot(0));
793
794 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
795}
796
Sadik Armagan61113162019-07-25 09:09:40 +0100797bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
798{
799 ALOGV("hal_1_2::HalPolicy::ConvertReLu()");
800 return ::ConvertReLu<hal_1_2::HalPolicy>(operation, model, data);
801}
802
// Handles ANEURALNETWORKS_RELU1 by delegating to the shared, HAL-version-agnostic
// ConvertReLu1 template, instantiated for the 1.2 policy.
bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu1()");
    return ::ConvertReLu1<hal_1_2::HalPolicy>(operation, model, data);
}
808
// Handles ANEURALNETWORKS_RELU6 by delegating to the shared, HAL-version-agnostic
// ConvertReLu6 template, instantiated for the 1.2 policy.
bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu6()");
    return ::ConvertReLu6<hal_1_2::HalPolicy>(operation, model, data);
}
814
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100815bool HalPolicy::ConvertResize(const Operation& operation,
816 const Model& model,
817 ConversionData& data,
818 armnn::ResizeMethod resizeMethod)
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100819{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100820 ALOGV("hal_1_2::HalPolicy::ConvertResize()");
821
822 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100823 if (!input.IsValid())
824 {
825 return Fail("%s: Could not read input 0", __func__);
826 }
827
828 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
829 if (!output)
830 {
831 return Fail("%s: Could not read output 0", __func__);
832 }
833
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100834 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
835 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
836
837 if (IsDynamicTensor(outputInfo))
838 {
839 return Fail("%s: Dynamic output tensors are not supported", __func__);
840 }
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100841
842 armnn::ResizeDescriptor descriptor;
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100843 descriptor.m_Method = resizeMethod;
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100844 descriptor.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 3, model, data);
845
846 OperandType operandType1;
847 OperandType operandType2;
848
849 if (!GetOperandType<hal_1_2::HalPolicy>(operation, 1, model, operandType1) ||
850 !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
851 {
852 return Fail("%s: Operation has invalid inputs", __func__);
853 }
854
855 if (operandType1 != operandType2)
856 {
857 return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
858 }
859
860 if (operandType1 == OperandType::INT32)
861 {
862 // Case 1: resizing by shape
863 int32_t targetWidth = 0;
864 int32_t targetHeight = 0;
865
866 if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 1, targetWidth, model, data) ||
867 !GetInputInt32<hal_1_2::HalPolicy>(operation, 2, targetHeight, model, data))
868 {
869 return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
870 }
871
872 if (targetWidth < 0 || targetHeight < 0)
873 {
874 return Fail("%s: Operation has invalid inputs for resizing by shape. "
875 "Target width/height cannot be < 0", __func__);
876 }
877
878 descriptor.m_TargetWidth = static_cast<uint32_t>(targetWidth);
Teresa Charlin9843c012019-07-19 12:18:35 +0100879 descriptor.m_TargetHeight = static_cast<uint32_t>(targetHeight);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100880 }
881 else if (operandType1 == OperandType::FLOAT32)
882 {
883 // Case 2: resizing by scale
884 float widthScale = 1.0f;
885 float heightScale = 1.0f;
886
887 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, widthScale, model, data) ||
888 !GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, heightScale, model, data))
889 {
890 return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
891 }
892
893 const armnn::TensorShape& inputShape = inputInfo.GetShape();
894 armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
895
896 float width = inputShape[dataLayoutIndexed.GetWidthIndex()];
897 float height = inputShape[dataLayoutIndexed.GetHeightIndex()];
898
899 descriptor.m_TargetWidth = std::floor(width * widthScale);
900 descriptor.m_TargetHeight = std::floor(height * heightScale);
901 }
902 else
903 {
904 // NOTE: FLOAT16 scales are not supported
905 return false;
906 }
907
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100908 bool isSupported = false;
909 FORWARD_LAYER_SUPPORT_FUNC(__func__,
910 IsResizeSupported,
911 data.m_Backends,
912 isSupported,
913 inputInfo,
914 outputInfo,
915 descriptor);
Aron Virginas-Tarbe5d3562019-07-16 11:32:29 +0100916
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100917 if (!isSupported)
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100918 {
919 return false;
920 }
921
922 armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);
923
924 assert(layer != nullptr);
925
926 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
927 input.Connect(layer->GetInputSlot(0));
928
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100929 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100930}
931
Keith Davisa6bc52f2019-06-26 09:39:49 +0100932bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
933{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100934 ALOGV("hal_1_2::HalPolicy::ConvertSpaceToDepth()");
Keith Davisa6bc52f2019-06-26 09:39:49 +0100935
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100936 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
Keith Davisa6bc52f2019-06-26 09:39:49 +0100937 if (!input.IsValid() )
938 {
939 return Fail("%s: Operation has invalid inputs", __func__);
940 }
941
942 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
943 unsigned int rank = inputInfo.GetNumDimensions();
Keith Davisa6bc52f2019-06-26 09:39:49 +0100944 if (rank != 4)
945 {
946 return Fail("%s: Only inputs with rank 4 are supported", __func__);
947 }
948
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100949 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
950 if (!output)
951 {
952 return Fail("%s: Could not read output 0", __func__);
953 }
954
955 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
956 if (IsDynamicTensor(outputInfo))
957 {
958 return Fail("%s: Dynamic output tensors are not supported", __func__);
959 }
960
Keith Davisa6bc52f2019-06-26 09:39:49 +0100961 armnn::SpaceToDepthDescriptor desc;
962
963 GetInputScalar<hal_1_2::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
964
965 if (desc.m_BlockSize <= 1)
966 {
967 return Fail("%s: Block size must be at least 1 in all dimensions");
968 }
969
970 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 2, model, data);
971
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100972 bool isSupported = false;
973 FORWARD_LAYER_SUPPORT_FUNC(__func__,
974 IsSpaceToDepthSupported,
975 data.m_Backends,
976 isSupported,
977 inputInfo,
978 outputInfo,
979 desc);
980 if (!isSupported)
Keith Davisa6bc52f2019-06-26 09:39:49 +0100981 {
982 return false;
983 }
984
985 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
986 assert(layer != nullptr);
987 input.Connect(layer->GetInputSlot(0));
988
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100989 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Keith Davisa6bc52f2019-06-26 09:39:49 +0100990}
991
Francis Murtagh074c25a2019-07-22 16:40:57 +0100992bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
993{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100994 ALOGV("hal_1_2::HalPolicy::ConvertSoftmax()");
995
Francis Murtagh074c25a2019-07-22 16:40:57 +0100996 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
997 if (!input.IsValid())
998 {
999 return Fail("%s: Operation has invalid inputs", __func__);
1000 }
1001
1002 const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
1003 if (!outputOperand)
1004 {
1005 return Fail("%s: Operation has no outputs", __func__);
1006 }
1007
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001008 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +01001009 if (IsDynamicTensor(outputInfo))
Francis Murtagh074c25a2019-07-22 16:40:57 +01001010 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001011 return Fail("%s: Dynamic output tensors are not supported", __func__);
Francis Murtagh074c25a2019-07-22 16:40:57 +01001012 }
1013
1014 armnn::SoftmaxDescriptor desc;
1015 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, desc.m_Beta, model, data))
1016 {
1017 return Fail("%s: Operation has invalid inputs", __func__);
1018 }
1019
1020 if (operation.inputs.size() > 2 && !GetInputScalar<hal_1_2::HalPolicy>(operation,
1021 2,
1022 HalPolicy::OperandType::INT32,
1023 desc.m_Axis,
1024 model,
1025 data))
1026 {
1027 return Fail("%s: Operation has invalid inputs", __func__);
1028 }
1029
1030 bool isSupported = false;
1031 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1032 IsSoftmaxSupported,
1033 data.m_Backends,
1034 isSupported,
1035 input.GetTensorInfo(),
1036 outputInfo,
1037 desc);
1038 if (!isSupported)
1039 {
1040 return false;
1041 }
1042
1043 armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
1044 assert(layer != nullptr);
1045 input.Connect(layer->GetInputSlot(0));
1046
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001047 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Francis Murtagh074c25a2019-07-22 16:40:57 +01001048}
1049
Mike Kelly0a879362019-07-29 16:56:31 +01001050bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
1051{
1052 ALOGV("hal_1_2::HalPolicy::ConvertSub()");
1053 return ::ConvertSub<hal_1_2::HalPolicy>(operation, model, data);
1054}
1055
Sadik Armagan61113162019-07-25 09:09:40 +01001056bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
1057{
1058 ALOGV("hal_1_2::HalPolicy::ConvertTanH()");
1059 return ::ConvertTanH<hal_1_2::HalPolicy>(operation, model, data);
1060}
1061
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001062bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
1063{
1064 // Inputs:
1065 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
1066 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
1067 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
1068 if (!input.IsValid())
1069 {
1070 return Fail("%s: Could not read input 0: input", __func__);
1071 }
1072 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
1073 LayerInputHandle outputStateIn = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 18, model, data);
1074 if (!outputStateIn.IsValid())
1075 {
1076 return Fail("%s: Could not read input 18: outputStateIn", __func__);
1077 }
1078 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
1079 LayerInputHandle cellStateIn = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 19, model, data);
1080 if (!cellStateIn.IsValid())
1081 {
1082 return Fail("%s: Could not read input 19: cellStateIn", __func__);
1083 }
1084
1085 // Get the mandatory input tensors:
1086 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1087 // [num_units, input_size].
1088 const ConstTensorPin inputToForgetWeightsPin =
1089 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);
1090 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1091 // [num_units, input_size].
1092 const ConstTensorPin inputToCellWeightsPin =
1093 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 3, model, data);
1094 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1095 // [num_units, input_size].
1096 const ConstTensorPin inputToOutputWeightsPin =
1097 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 4, model, data);
1098 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1099 // [num_units, output_size].
1100 const ConstTensorPin recurrentToForgetWeightsPin =
1101 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 6, model, data);
1102 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1103 // [num_units, output_size].
1104 const ConstTensorPin recurrentToCellWeightsPin =
1105 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 7, model, data);
1106 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1107 // [num_units, output_size].
1108 const ConstTensorPin recurrentToOutputWeightsPin =
1109 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 8, model, data);
1110 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1111 const ConstTensorPin forgetGateBiasPin =
1112 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 13, model, data);
1113 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1114 const ConstTensorPin cellBiasPin =
1115 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 14, model, data);
1116 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1117 const ConstTensorPin outputGateBiasPin =
1118 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 15, model, data);
1119
1120 if (!inputToForgetWeightsPin.IsValid() ||
1121 !inputToCellWeightsPin.IsValid() ||
1122 !inputToOutputWeightsPin.IsValid() ||
1123 !recurrentToForgetWeightsPin.IsValid() ||
1124 !recurrentToCellWeightsPin.IsValid() ||
1125 !recurrentToOutputWeightsPin.IsValid() ||
1126 !forgetGateBiasPin.IsValid() ||
1127 !cellBiasPin.IsValid() ||
1128 !outputGateBiasPin.IsValid())
1129 {
1130 return Fail("%s: Operation has invalid tensor inputs", __func__);
1131 }
1132
1133 // Get the optional input tensors:
1134 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1135 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
1136 const ConstTensorPin inputToInputWeightsPin =
1137 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1138 1,
1139 model,
1140 data,
1141 g_DontPermute,
1142 nullptr,
1143 true);
1144
1145 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1146 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
1147 // “num_units”), or the second dimension of the “projection_weights”, if defined.
1148 const ConstTensorPin recurrentToInputWeightsPin =
1149 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1150 5,
1151 model,
1152 data,
1153 g_DontPermute,
1154 nullptr,
1155 true);
1156
1157 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1158 const ConstTensorPin cellToInputWeightsPin =
1159 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1160 9,
1161 model,
1162 data,
1163 g_DontPermute,
1164 nullptr,
1165 true);
1166
1167 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1168 const ConstTensorPin cellToForgetWeightsPin =
1169 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1170 10,
1171 model,
1172 data,
1173 g_DontPermute,
1174 nullptr,
1175 true);
1176
1177 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1178 const ConstTensorPin cellToOutputWeightsPin =
1179 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1180 11,
1181 model,
1182 data,
1183 g_DontPermute,
1184 nullptr,
1185 true);
1186
1187 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1188 const ConstTensorPin inputGateBiasPin =
1189 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1190 12,
1191 model,
1192 data,
1193 g_DontPermute,
1194 nullptr,
1195 true);
1196
1197 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1198 // [output_size, num_units].
1199 const ConstTensorPin projectionWeightsPin =
1200 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1201 16,
1202 model,
1203 data,
1204 g_DontPermute,
1205 nullptr,
1206 true);
1207
1208 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
1209 const ConstTensorPin projectionBiasPin =
1210 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1211 17,
1212 model,
1213 data,
1214 g_DontPermute,
1215 nullptr,
1216 true);
1217
1218 if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
1219 (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
1220 (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
1221 (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
1222 (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
1223 (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
1224 (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
1225 (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
1226 {
1227 return Fail("%s: Operation has invalid tensor inputs", __func__);
1228 }
1229
1230 // Get the mandatory input scalars (actually 1-D tensors of size 1):
1231 // 20: The activation function: A value indicating the activation function:
1232 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
1233 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
1234 // If set to 0.0 then clipping is disabled.
1235 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
1236 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
1237 ActivationFn activation;
1238 float cellClip;
1239 float projClip;
1240 if (!GetInputActivationFunctionFromTensor<hal_1_2::HalPolicy>(operation, 20, activation, model, data) ||
1241 !GetInputScalar<hal_1_2::HalPolicy>(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
1242 !GetInputScalar<hal_1_2::HalPolicy>(operation, 22, OperandType::FLOAT32, projClip, model, data))
1243 {
1244 return Fail("%s: Operation has invalid scalar inputs", __func__);
1245 }
1246
1247 // Get the normalization tensors
1248 // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
1249 // Used to rescale normalized inputs to activation at input gate.
1250 const ConstTensorPin inputLayerNormWeightsPin =
1251 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1252 23,
1253 model,
1254 data,
1255 g_DontPermute,
1256 nullptr,
1257 true);
1258
1259 // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
1260 // Used to rescale normalized inputs to activation at forget gate.
1261 const ConstTensorPin forgetLayerNormWeightsPin =
1262 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1263 24,
1264 model,
1265 data,
1266 g_DontPermute,
1267 nullptr,
1268 true);
1269
1270 // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
1271 // Used to rescale normalized inputs to activation at cell gate.
1272 const ConstTensorPin cellLayerNormWeightsPin =
1273 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1274 25,
1275 model,
1276 data,
1277 g_DontPermute,
1278 nullptr,
1279 true);
1280
1281 // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
1282 // Used to rescale normalized inputs to activation at output gate.
1283 const ConstTensorPin outputLayerNormWeightsPin =
1284 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1285 26,
1286 model,
1287 data,
1288 g_DontPermute,
1289 nullptr,
1290 true);
1291
1292 // Outputs:
1293 // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
1294 // with CIFG, or [batch_size, num_units * 3] without CIFG.
1295 const Operand* scratchBuffer = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
1296 if (!scratchBuffer)
1297 {
1298 return Fail("%s: Could not read output 0: scratchBuffer", __func__);
1299 }
1300 // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
1301 const Operand* outputStateOut = GetOutputOperand<hal_1_2::HalPolicy>(operation, 1, model);
1302 if (!outputStateOut)
1303 {
1304 return Fail("%s: Could not read output 1: outputStateOut", __func__);
1305 }
1306 // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
1307 const Operand* cellStateOut = GetOutputOperand<hal_1_2::HalPolicy>(operation, 2, model);
1308 if (!cellStateOut)
1309 {
1310 return Fail("%s: Could not read output 2: cellStateOut", __func__);
1311 }
1312 // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
1313 // effectively the same as the current “output state (out)” value.
1314 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 3, model);
1315 if (!output)
1316 {
1317 return Fail("%s: Could not read output 3: output", __func__);
1318 }
1319
1320 // set the params structure for the AddLstmLayer call
1321 armnn::LstmInputParams params;
1322 params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
1323 params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
1324 params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
1325 params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
1326 params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
1327 params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
1328 params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
1329 params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
1330 params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
1331 params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
1332 params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
1333 params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
1334 params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
1335 params.m_CellBias = cellBiasPin.GetConstTensorPtr();
1336 params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
1337 params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
1338 params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
1339 params.m_InputLayerNormWeights = inputLayerNormWeightsPin.GetConstTensorPtr();
1340 params.m_ForgetLayerNormWeights = forgetLayerNormWeightsPin.GetConstTensorPtr();
1341 params.m_CellLayerNormWeights = cellLayerNormWeightsPin.GetConstTensorPtr();
1342 params.m_OutputLayerNormWeights = outputLayerNormWeightsPin.GetConstTensorPtr();
1343
1344 // set the layer descriptor
1345 armnn::LstmDescriptor desc;
1346 desc.m_ActivationFunc = activation;
1347 desc.m_ClippingThresCell = cellClip;
1348 desc.m_ClippingThresProj = projClip;
1349 desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
1350 params.m_RecurrentToInputWeights == nullptr ||
1351 params.m_InputGateBias == nullptr);
1352 desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
1353 params.m_CellToOutputWeights != nullptr);
1354 desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
1355 desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr ||
1356 params.m_ForgetLayerNormWeights != nullptr ||
1357 params.m_CellLayerNormWeights != nullptr ||
1358 params.m_OutputLayerNormWeights != nullptr);
1359
1360 // validate the optional input groups
1361 if (desc.m_CifgEnabled &&
1362 (params.m_InputToInputWeights != nullptr ||
1363 params.m_RecurrentToInputWeights != nullptr ||
1364 params.m_InputGateBias != nullptr))
1365 {
1366 return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
1367 " and input gate bias must be provided", __func__);
1368 }
1369
1370 if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
1371 {
1372 return Fail("%s: projection bias should not be provided without projection weights", __func__);
1373 }
1374
1375 if (desc.m_PeepholeEnabled &&
1376 (params.m_CellToForgetWeights == nullptr ||
1377 params.m_CellToOutputWeights == nullptr ||
1378 (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
1379 {
1380 return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
1381 " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
1382 }
1383
1384 if (desc.m_LayerNormEnabled &&
1385 (params.m_ForgetLayerNormWeights == nullptr ||
1386 params.m_CellLayerNormWeights == nullptr ||
1387 params.m_OutputLayerNormWeights == nullptr ||
1388 (!desc.m_CifgEnabled && params.m_InputLayerNormWeights == nullptr)))
1389 {
1390 return Fail("%s: All, or none, of forget-norm weights, cell-norm weights and output-norm weights must be"
1391 " provided and, if CIFG is not enabled, input-norm weights must also be provided", __func__);
1392 }
1393
1394 // Check if the layer is supported
1395 // Inputs
1396 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1397 const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
1398 const armnn::TensorInfo& cellStateInInfo = cellStateIn.GetTensorInfo();
1399
1400 // Outputs
1401 const armnn::TensorInfo& scratchBufferInfo = GetTensorInfoForOperand(*scratchBuffer);
1402 const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
1403 const armnn::TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
1404 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1405
1406 // Basic parameters
1407 armnn::LstmInputParamsInfo paramsInfo;
1408 paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
1409 paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
1410 paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
1411 paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
1412 paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
1413 paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
1414 paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
1415 paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
1416 paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
1417
1418 // Optional parameters
1419 if(!desc.m_CifgEnabled)
1420 {
1421 paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
1422 paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
1423 if (params.m_CellToInputWeights != nullptr)
1424 {
1425 paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
1426 }
1427 paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
1428 }
1429
1430 if(desc.m_ProjectionEnabled)
1431 {
1432 paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
1433 if (params.m_ProjectionBias != nullptr)
1434 {
1435 paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
1436 }
1437 }
1438
1439 if(desc.m_PeepholeEnabled)
1440 {
1441 paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
1442 paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
1443 }
1444
1445 if (desc.m_LayerNormEnabled)
1446 {
1447 if(!desc.m_CifgEnabled)
1448 {
1449 paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
1450 }
1451 paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
1452 paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
1453 paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
1454 }
1455
1456 bool isSupported = false;
1457 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1458 IsLstmSupported,
1459 data.m_Backends,
1460 isSupported,
1461 inputInfo,
1462 outputStateInInfo,
1463 cellStateInInfo,
1464 scratchBufferInfo,
1465 outputStateOutInfo,
1466 cellStateOutInfo,
1467 outputInfo,
1468 desc,
1469 paramsInfo);
1470 if (!isSupported)
1471 {
1472 return false;
1473 }
1474
1475 // Add the layer
1476 armnn::IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");
1477
1478 input.Connect(layer->GetInputSlot(0));
1479 outputStateIn.Connect(layer->GetInputSlot(1));
1480 cellStateIn.Connect(layer->GetInputSlot(2));
1481
1482 return (SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, 0, model, data) &&
1483 SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 1, *layer, 1, model, data) &&
1484 SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 2, *layer, 2, model, data) &&
1485 SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 3, *layer, 3, model, data));
1486}
1487
Mike Kellyb5fdf382019-06-11 16:35:25 +01001488} // namespace hal_1_2
Matteo Martincigh17ffff32019-06-27 14:12:55 +01001489} // namespace armnn_driver