blob: af310c931b71523d6c7bc15204f9142fe8baae41 [file] [log] [blame]
Mike Kellyb5fdf382019-06-11 16:35:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "HalPolicy.hpp"
7
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +01008#include "Utils.hpp"
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01009
Mike Kellyb5fdf382019-06-11 16:35:25 +010010#include "../1.0/HalPolicy.hpp"
11#include "../1.1/HalPolicy.hpp"
12
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010013#include <DataLayoutIndexed.hpp>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +010014#include <Half.hpp>
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010015
16#include <cmath>
17
Mike Kellyb5fdf382019-06-11 16:35:25 +010018namespace armnn_driver
19{
20namespace hal_1_2
21{
22
23bool HandledByV1_0(V1_2::OperationType operationType)
24{
25 switch (static_cast<V1_0::OperationType>(operationType))
26 {
27 case V1_0::OperationType::ADD:
Mike Kellyb5fdf382019-06-11 16:35:25 +010028 case V1_0::OperationType::CONCATENATION:
29 case V1_0::OperationType::DEPTH_TO_SPACE:
30 case V1_0::OperationType::DEQUANTIZE:
31 case V1_0::OperationType::EMBEDDING_LOOKUP:
32 case V1_0::OperationType::FLOOR:
33 case V1_0::OperationType::FULLY_CONNECTED:
34 case V1_0::OperationType::HASHTABLE_LOOKUP:
35 case V1_0::OperationType::L2_NORMALIZATION:
Mike Kellyb5fdf382019-06-11 16:35:25 +010036 case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
37 case V1_0::OperationType::LOGISTIC:
38 case V1_0::OperationType::LSH_PROJECTION:
Mike Kellyb5fdf382019-06-11 16:35:25 +010039 case V1_0::OperationType::MUL:
Mike Kellyb5fdf382019-06-11 16:35:25 +010040 case V1_0::OperationType::RESHAPE:
Mike Kellyb5fdf382019-06-11 16:35:25 +010041 case V1_0::OperationType::RNN:
Mike Kellyb5fdf382019-06-11 16:35:25 +010042 case V1_0::OperationType::SVDF:
Mike Kellyb5fdf382019-06-11 16:35:25 +010043 case V1_0::OperationType::OEM_OPERATION:
44 return true;
45 default:
46 return false;
47 }
48}
49
50bool HandledByV1_1(V1_2::OperationType operationType)
51{
52 if (HandledByV1_0(operationType))
53 {
54 return true;
55 }
56 switch (static_cast<V1_1::OperationType>(operationType))
57 {
Mike Kellyb5fdf382019-06-11 16:35:25 +010058 case V1_1::OperationType::DIV:
59 case V1_1::OperationType::MEAN:
Mike Kellyb5fdf382019-06-11 16:35:25 +010060 case V1_1::OperationType::SQUEEZE:
61 case V1_1::OperationType::STRIDED_SLICE:
Mike Kellyb5fdf382019-06-11 16:35:25 +010062 case V1_1::OperationType::TRANSPOSE:
63 return true;
64 default:
65 return false;
66 }
67}
68
69bool HandledByV1_0(const V1_2::Operation& operation)
70{
71 return HandledByV1_0(operation.type);
72}
73
74bool HandledByV1_1(const V1_2::Operation& operation)
75{
76 return HandledByV1_1(operation.type);
77}
78
79V1_0::OperationType CastToV1_0(V1_2::OperationType type)
80{
81 return static_cast<V1_0::OperationType>(type);
82}
83
84V1_1::OperationType CastToV1_1(V1_2::OperationType type)
85{
86 return static_cast<V1_1::OperationType>(type);
87}
88
89V1_0::Operation ConvertToV1_0(const V1_2::Operation& operation)
90{
91 V1_0::Operation op;
92 op.type = CastToV1_0(operation.type);
93 op.inputs = operation.inputs;
94 op.outputs = operation.outputs;
95 return op;
96}
97
98V1_1::Operation ConvertToV1_1(const V1_2::Operation& operation)
99{
100 V1_1::Operation op;
101 op.type = CastToV1_1(operation.type);
102 op.inputs = operation.inputs;
103 op.outputs = operation.outputs;
104 return op;
105}
106
// Entry point for converting a single HAL 1.2 operation into the ArmNN network
// being built in 'data'. Operations (and models) that are expressible in an
// earlier HAL version are downgraded and delegated to the 1.0/1.1 policies;
// everything else is dispatched to the 1.2-specific Convert* helpers below.
// Returns true on success, false (via Fail) otherwise.
bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
{
    // Prefer the oldest policy that can handle both the operation and the model.
    if (HandledByV1_0(operation) && compliantWithV1_0(model))
    {
        hal_1_0::HalPolicy::Operation v10Operation = ConvertToV1_0(operation);
        hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);

        return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
    }

    if (HandledByV1_1(operation) && compliantWithV1_1(model))
    {
        hal_1_1::HalPolicy::Operation v11Operation = ConvertToV1_1(operation);
        hal_1_1::HalPolicy::Model v11Model = convertToV1_1(model);

        return hal_1_1::HalPolicy::ConvertOperation(v11Operation, v11Model, data);
    }

    // HAL 1.2-specific conversion path.
    switch (operation.type)
    {
        case V1_2::OperationType::AVERAGE_POOL_2D:
            return ConvertAveragePool2d(operation, model, data);
        case V1_2::OperationType::BATCH_TO_SPACE_ND:
            return ConvertBatchToSpaceNd(operation, model, data);
        case V1_2::OperationType::CONV_2D:
            return ConvertConv2d(operation, model, data);
        case V1_2::OperationType::DEPTHWISE_CONV_2D:
            return ConvertDepthwiseConv2d(operation, model, data);
        case V1_2::OperationType::L2_POOL_2D:
            return ConvertL2Pool2d(operation, model, data);
        case V1_2::OperationType::MAX_POOL_2D:
            return ConvertMaxPool2d(operation, model, data);
        case V1_2::OperationType::MAXIMUM:
            return ConvertMaximum(operation, model, data);
        case V1_2::OperationType::MINIMUM:
            return ConvertMinimum(operation, model, data);
        case V1_2::OperationType::PAD:
            return ConvertPad(operation, model, data);
        case V1_2::OperationType::PAD_V2:
            return ConvertPadV2(operation, model, data);
        case V1_2::OperationType::PRELU:
            return ConvertPrelu(operation, model, data);
        case V1_2::OperationType::QUANTIZE:
            return ConvertQuantize(operation, model, data);
        case V1_2::OperationType::RELU:
            return ConvertReLu(operation, model, data);
        case V1_2::OperationType::RELU1:
            return ConvertReLu1(operation, model, data);
        case V1_2::OperationType::RELU6:
            return ConvertReLu6(operation, model, data);
        // Both resize flavours share one converter, parameterized by the resize method.
        case V1_2::OperationType::RESIZE_BILINEAR:
            return ConvertResize(operation, model, data, armnn::ResizeMethod::Bilinear);
        case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR:
            return ConvertResize(operation, model, data, armnn::ResizeMethod::NearestNeighbor);
        case V1_2::OperationType::TRANSPOSE_CONV_2D:
            return ConvertTransposeConv2d(operation, model, data);
        case V1_2::OperationType::SOFTMAX:
            return ConvertSoftmax(operation, model, data);
        case V1_2::OperationType::SPACE_TO_BATCH_ND :
            return ConvertSpaceToBatchNd(operation, model, data);
        case V1_2::OperationType::SPACE_TO_DEPTH:
            return ConvertSpaceToDepth(operation, model, data);
        case V1_2::OperationType::SUB:
            return ConvertSub(operation, model, data);
        case V1_2::OperationType::TANH:
            return ConvertTanH(operation, model, data);
        case V1_2::OperationType::LSTM:
            return ConvertLstm(operation, model, data);
        default:
            return Fail("%s: Operation type %s not supported in ArmnnDriver",
                        __func__, toString(operation.type).c_str());
    }
}
180
Sadik Armagan15d63e22019-07-26 16:59:35 +0100181bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
182{
183 ALOGV("hal_1_2::HalPolicy::ConvertAveragePool2d()");
184 return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
185}
186
Finn Williams23b87b32019-07-30 11:44:05 +0100187bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data)
188{
189 ALOGV("hal_1_2::HalPolicy::ConvertBatchToSpaceNd()");
190 return ::ConvertBatchToSpaceNd<hal_1_2::HalPolicy>(operation, model, data);
191}
192
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100193bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
194{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100195 ALOGV("hal_1_2::HalPolicy::ConvertConv2d()");
196
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100197 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
198 if (!input.IsValid())
199 {
200 return Fail("%s: Operation has invalid inputs", __func__);
201 }
202
203 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
204 if (!output)
205 {
206 return Fail("%s: Could not read output 0", __func__);
207 }
208
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100209 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
210 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
211
212 if (IsDynamicTensor(outputInfo))
213 {
214 return Fail("%s: Dynamic output tensors are not supported", __func__);
215 }
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +0100216
Mike Kellye1d60bb2019-07-11 11:44:52 +0100217 armnn::Convolution2dDescriptor desc;
218 desc.m_DataLayout = armnn::DataLayout::NHWC;
219
220 // Determine whether padding is implicit or explicit
221 bool implicitPadding = operation.inputs.size() == 7 ||
222 (operation.inputs.size() >= 8 &&
223 GetInputOperand<hal_1_2::HalPolicy>(operation, 7, model)->type == OperandType::BOOL);
224
225 if (implicitPadding)
226 {
227 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 7, model, data);
228 }
229 else if (operation.inputs.size() >= 10)
230 {
231 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 10, model, data);
232 }
233
234 const armnn::PermutationVector OHWIToOIHW = {0, 2, 3, 1};
235
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100236 // ArmNN does not currently support non-fixed weights or bias
Mike Kellye1d60bb2019-07-11 11:44:52 +0100237 // The NNAPI filter is always OHWI [depth_out, filter_height, filter_width, depth_in] but ArmNN expects the
238 // filter's height and width indices to match the input's height and width indices so we permute it to OIHW if
239 // the DataLayout is NCHW
240 const ConstTensorPin weightsPin = (desc.m_DataLayout == armnn::DataLayout::NCHW) ?
241 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data, OHWIToOIHW) :
242 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100243 const ConstTensorPin biasPin =
Mike Kellye1d60bb2019-07-11 11:44:52 +0100244 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100245
246 if (!weightsPin.IsValid())
247 {
248 return Fail("%s: Operation has invalid weights", __func__);
249 }
250
251 if (!biasPin.IsValid())
252 {
253 return Fail("%s: Operation has invalid biases", __func__);
254 }
255
256 armnn::ConstTensor weights = weightsPin.GetConstTensor();
257 armnn::ConstTensor bias = biasPin.GetConstTensor();
258 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
259
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100260 ActivationFn activation;
261
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100262 if (implicitPadding)
263 {
264 android::nn::PaddingScheme paddingScheme;
265 if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
266 !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
267 !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
268 !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 6, activation, model, data) ||
269 !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 8, desc, model, data))
270 {
271 return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
272 }
273
Mike Kellye1d60bb2019-07-11 11:44:52 +0100274 armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
275 unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
276 unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
277 const uint32_t kernelX = weights.GetShape()[widthIndex];
278 const uint32_t kernelY = weights.GetShape()[heightIndex];
279 const uint32_t inputX = inputInfo.GetShape()[widthIndex];
280 const uint32_t inputY = inputInfo.GetShape()[heightIndex];
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100281
Mike Kelly86b36d42019-07-12 16:39:33 +0100282 CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
283 CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100284
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100285 }
286 else if (operation.inputs.size() >= 10)
287 {
288 // explicit padding
289 if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
290 !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
291 !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
292 !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
293 !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
294 !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
295 !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 9, activation, model, data) ||
296 !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 11, desc, model, data))
297 {
298 return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
299 }
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100300 }
301 else
302 {
303 return Fail("%s: Unsupported number of operation inputs", __func__);
304 }
305
306 desc.m_BiasEnabled = true;
307 armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
308
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100309 bool isSupported = false;
310 FORWARD_LAYER_SUPPORT_FUNC(__func__,
311 IsConvolution2dSupported,
312 data.m_Backends,
313 isSupported,
314 inputInfo,
315 outputInfo,
316 desc,
317 weights.GetInfo(),
318 biases);
Aron Virginas-Tar2b173122019-07-15 14:29:09 +0100319
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100320 if (!isSupported)
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100321 {
322 return false;
323 }
324
325 armnn::IConnectableLayer* startLayer =
326 data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
327
328 if (!startLayer)
329 {
330 return Fail("%s: AddConvolution2dLayer failed", __func__);
331 }
332
333 armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
334
335 if (!endLayer)
336 {
337 return Fail("%s: ProcessActivation failed", __func__);
338 }
339
340 input.Connect(startLayer->GetInputSlot(0));
341
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100342 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100343}
344
// Converts a HAL 1.2 DEPTHWISE_CONV_2D operation into an ArmNN
// DepthwiseConvolution2d layer. Handles implicit (8/9-input) and explicit
// (11+-input) padding signatures, the optional data-layout flag, optional
// dilation parameters, and the weight-tensor reshape/permute that ArmNN needs.
// Returns true on success; false (via Fail) on any validation/conversion error.
bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertDepthwiseConv2d()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const Operand* weightsOperand = GetInputOperand<hal_1_2::HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit:
    // 8 inputs is the short implicit form; with >= 9 inputs a BOOL at index 8
    // (the data-layout flag) also indicates the implicit form.
    bool implicitPadding = operation.inputs.size() == 8 ||
        (operation.inputs.size() >= 9 &&
        GetInputOperand<hal_1_2::HalPolicy>(operation, 8, model)->type == OperandType::BOOL);

    // Look ahead to find the optional DataLayout, if present
    const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
    desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, dataLayoutFlagIndex, model, data);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();

    // Reinterpret weight data as [ H, W, I, M ]
    // (M, the channel multiplier, is recovered as (I * M) / I.)
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[channelsIndex],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  HWIMToMIHW,
                                                                  &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input_scale * weights_scale if needed.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        // Implicit form: padding scheme at 3, strides at 4/5, activation at 7,
        // optional dilation starting at 9.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 7, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        // Weights were swizzled to [ M, I, H, W ] above, so index 3 is the
        // kernel width and index 2 the kernel height.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 11)
    {
        // explicit padding: pads at 3-6, strides at 7/8, activation at 10,
        // optional dilation starting at 12.
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 10, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Ask the configured backends whether they can execute this depthwise convolution.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution layer.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}
510
Sadik Armagan15d63e22019-07-26 16:59:35 +0100511bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
512{
513 ALOGV("hal_1_2::HalPolicy::ConvertL2Pool2d()");
514 return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
515}
516
517bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
518{
519 ALOGV("hal_1_2::HalPolicy::ConvertMaxPool2d()");
520 return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
521}
522
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100523bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
524{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100525 ALOGV("hal_1_2::HalPolicy::ConvertMaximum()");
526
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100527 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
528 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
529
530 if (!input0.IsValid() || !input1.IsValid())
531 {
532 return Fail("%s: Operation has invalid inputs", __func__);
533 }
534
535 const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
536 if (!outputOperand)
537 {
538 return Fail("%s: Could not read output", __func__);
539 }
540
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100541 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100542 if (IsDynamicTensor(outInfo))
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100543 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100544 return Fail("%s: Dynamic output tensors are not supported", __func__);
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100545 }
546
Aron Virginas-Tard7593232019-07-16 13:17:06 +0100547 bool isSupported = false;
548 FORWARD_LAYER_SUPPORT_FUNC(__func__,
549 IsMaximumSupported,
550 data.m_Backends,
551 isSupported,
552 input0.GetTensorInfo(),
553 input1.GetTensorInfo(),
554 outInfo);
555
556 if (!isSupported)
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100557 {
558 return false;
559 }
560
561 armnn::IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
562 assert(layer != nullptr);
563 BroadcastTensor(input0, input1, layer, *data.m_Network);
564
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100565 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100566}
567
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100568bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
569{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100570 ALOGV("hal_1_2::HalPolicy::ConvertMinimum()");
571
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100572 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
573 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
574
575 if (!input0.IsValid() || !input1.IsValid())
576 {
577 return Fail("%s: Operation has invalid inputs", __func__);
578 }
579
580 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
581 if (!output)
582 {
583 return Fail("%s: Could not read output 0", __func__);
584 }
585
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100586 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100587 if (IsDynamicTensor(outputInfo))
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100588 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100589 return Fail("%s: Dynamic output tensors are not supported", __func__);
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100590 }
591
592 bool isSupported = false;
593 FORWARD_LAYER_SUPPORT_FUNC(__func__,
594 IsMinimumSupported,
595 data.m_Backends,
596 isSupported,
597 input0.GetTensorInfo(),
598 input1.GetTensorInfo(),
599 outputInfo);
600
601 if (!isSupported)
602 {
603 return false;
604 }
605
606 armnn::IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
607 assert(layer != nullptr);
608 BroadcastTensor(input0, input1, layer, *data.m_Network);
609
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100610 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100611}
612
Aron Virginas-Tarc921f6b2019-07-25 10:14:33 +0100613bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
614{
615 ALOGV("hal_1_2::HalPolicy::ConvertPad()");
616 return ::ConvertPad<hal_1_2::HalPolicy>(operation, model, data);
617}
618
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100619bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
620{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100621 ALOGV("hal_1_2::HalPolicy::ConvertPadV2()");
622
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100623 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
624 if (!input.IsValid())
625 {
626 return Fail("%s: Could not read input 0", __func__);
627 }
628
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +0100629 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
630 if (!output)
631 {
632 return Fail("%s: Could not read output", __func__);
633 }
634
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100635 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
636 unsigned int rank = inputInfo.GetNumDimensions();
637
638 armnn::PadDescriptor descriptor;
639 if (!ConvertPaddings<hal_1_2::HalPolicy>(operation, model, data, rank, descriptor))
640 {
641 return Fail("%s: Could not convert paddings", __func__);
642 }
643
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100644 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100645 if (IsDynamicTensor(outputInfo))
Sadik Armagan310d8ff2019-07-11 10:53:38 +0100646 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100647 return Fail("%s: Dynamic output tensors are not supported", __func__);
Sadik Armagan310d8ff2019-07-11 10:53:38 +0100648 }
649
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100650 // Determine type of padding value
651 OperandType operandType0;
652 OperandType operandType2;
653
654 if (!GetOperandType<hal_1_2::HalPolicy>(operation, 0, model, operandType0) ||
655 !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
656 {
657 return Fail("%s: Operation has invalid inputs", __func__);
658 }
659
660 // Read value to use for padding
661 if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
662 {
663 armnn::Half f16PadValue;
664 if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 2, operandType2, f16PadValue, model, data))
665 {
666 return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
667 }
668
669 descriptor.m_PadValue = f16PadValue;
670 }
671 else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
672 {
673 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, descriptor.m_PadValue, model, data))
674 {
675 return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
676 }
677 }
678 else if (operandType0 == OperandType::TENSOR_QUANT8_ASYMM && operandType2 == OperandType::INT32)
679 {
Mike Kelly3c673942019-07-25 09:26:06 +0100680 int32_t intPadValue = 0;
681 if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, intPadValue, model, data))
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100682 {
683 return Fail("%s: Could not read input 2 (INT32)", __func__);
684 }
Mike Kelly3c673942019-07-25 09:26:06 +0100685 descriptor.m_PadValue = intPadValue;
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100686 }
687 else
688 {
689 return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
690 }
691
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100692 bool isSupported = false;
693 FORWARD_LAYER_SUPPORT_FUNC(__func__,
694 IsPadSupported,
695 data.m_Backends,
696 isSupported,
697 inputInfo,
698 outputInfo,
699 descriptor);
700 if (!isSupported)
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100701 {
702 return false;
703 }
704
705 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
706 assert(layer != nullptr);
707 input.Connect(layer->GetInputSlot(0));
708 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
709
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100710 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100711}
712
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100713bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
714{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100715 ALOGV("hal_1_2::HalPolicy::ConvertPrelu()");
716
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100717 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
718 LayerInputHandle alpha = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
719
720 if (!input.IsValid() || !alpha.IsValid())
721 {
722 return Fail("%s: Operation has invalid inputs", __func__);
723 }
724
725 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
726
727 if (!output)
728 {
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100729 return Fail("%s: Could not read output", __func__);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100730 }
731
732 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
733 const armnn::TensorInfo& alphaInfo = alpha.GetTensorInfo();
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100734 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100735
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100736 if (IsDynamicTensor(outputInfo))
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100737 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100738 return Fail("%s: Dynamic output tensors are not supported", __func__);
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100739 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100740
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100741 bool isSupported = false;
742 FORWARD_LAYER_SUPPORT_FUNC(__func__,
743 IsPreluSupported,
744 data.m_Backends,
745 isSupported,
746 inputInfo,
747 alphaInfo,
748 outputInfo);
749 if (!isSupported)
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100750 {
751 return false;
752 }
753
754 armnn::IConnectableLayer* const layer = data.m_Network->AddPreluLayer();
755
756 if (!layer)
757 {
758 return Fail("%s: AddPreluLayer failed", __func__);
759 }
760
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100761 BroadcastTensor(input, alpha, layer, *data.m_Network);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100762
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100763 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100764}
765
Sadik Armagan5a476a82019-07-30 09:43:18 +0100766bool HalPolicy::ConvertQuantize(const Operation& operation, const Model& model, ConversionData& data)
767{
768 ALOGV("hal_1_2::HalPolicy::ConvertQuantize()");
769
770 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
771 if (!input.IsValid())
772 {
773 return Fail("%s: Operation has invalid input", __func__);
774 }
775
776 const Operand* const outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
777 if (!outputOperand)
778 {
779 return Fail("%s: Operation has invalid outputs", __func__);
780 }
781
782 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
783 if (IsDynamicTensor(outputInfo))
784 {
785 return Fail("%s: Dynamic output tensors are not supported", __func__);
786 }
787
788 bool isSupported = false;
789 FORWARD_LAYER_SUPPORT_FUNC(__func__,
790 IsQuantizeSupported,
791 data.m_Backends,
792 isSupported,
793 input.GetTensorInfo(),
794 outputInfo);
795 if (!isSupported)
796 {
797 return false;
798 }
799
800 armnn::IConnectableLayer* const layer = data.m_Network->AddQuantizeLayer();
801 assert(layer != nullptr);
802 input.Connect(layer->GetInputSlot(0));
803
804 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
805}
806
// Converts an NNAPI RELU operation by delegating to the HAL-version-agnostic
// ::ConvertReLu template, instantiated for the 1.2 HAL policy.
bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu()");
    return ::ConvertReLu<hal_1_2::HalPolicy>(operation, model, data);
}
812
// Converts an NNAPI RELU1 operation by delegating to the HAL-version-agnostic
// ::ConvertReLu1 template, instantiated for the 1.2 HAL policy.
bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu1()");
    return ::ConvertReLu1<hal_1_2::HalPolicy>(operation, model, data);
}
818
// Converts an NNAPI RELU6 operation by delegating to the HAL-version-agnostic
// ::ConvertReLu6 template, instantiated for the 1.2 HAL policy.
bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu6()");
    return ::ConvertReLu6<hal_1_2::HalPolicy>(operation, model, data);
}
824
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100825bool HalPolicy::ConvertResize(const Operation& operation,
826 const Model& model,
827 ConversionData& data,
828 armnn::ResizeMethod resizeMethod)
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100829{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100830 ALOGV("hal_1_2::HalPolicy::ConvertResize()");
831
832 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100833 if (!input.IsValid())
834 {
835 return Fail("%s: Could not read input 0", __func__);
836 }
837
838 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
839 if (!output)
840 {
841 return Fail("%s: Could not read output 0", __func__);
842 }
843
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100844 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
845 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
846
847 if (IsDynamicTensor(outputInfo))
848 {
849 return Fail("%s: Dynamic output tensors are not supported", __func__);
850 }
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100851
852 armnn::ResizeDescriptor descriptor;
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100853 descriptor.m_Method = resizeMethod;
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100854 descriptor.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 3, model, data);
855
856 OperandType operandType1;
857 OperandType operandType2;
858
859 if (!GetOperandType<hal_1_2::HalPolicy>(operation, 1, model, operandType1) ||
860 !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
861 {
862 return Fail("%s: Operation has invalid inputs", __func__);
863 }
864
865 if (operandType1 != operandType2)
866 {
867 return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
868 }
869
870 if (operandType1 == OperandType::INT32)
871 {
872 // Case 1: resizing by shape
873 int32_t targetWidth = 0;
874 int32_t targetHeight = 0;
875
876 if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 1, targetWidth, model, data) ||
877 !GetInputInt32<hal_1_2::HalPolicy>(operation, 2, targetHeight, model, data))
878 {
879 return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
880 }
881
882 if (targetWidth < 0 || targetHeight < 0)
883 {
884 return Fail("%s: Operation has invalid inputs for resizing by shape. "
885 "Target width/height cannot be < 0", __func__);
886 }
887
888 descriptor.m_TargetWidth = static_cast<uint32_t>(targetWidth);
Teresa Charlin9843c012019-07-19 12:18:35 +0100889 descriptor.m_TargetHeight = static_cast<uint32_t>(targetHeight);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100890 }
891 else if (operandType1 == OperandType::FLOAT32)
892 {
893 // Case 2: resizing by scale
894 float widthScale = 1.0f;
895 float heightScale = 1.0f;
896
897 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, widthScale, model, data) ||
898 !GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, heightScale, model, data))
899 {
900 return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
901 }
902
903 const armnn::TensorShape& inputShape = inputInfo.GetShape();
904 armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
905
906 float width = inputShape[dataLayoutIndexed.GetWidthIndex()];
907 float height = inputShape[dataLayoutIndexed.GetHeightIndex()];
908
909 descriptor.m_TargetWidth = std::floor(width * widthScale);
910 descriptor.m_TargetHeight = std::floor(height * heightScale);
911 }
912 else
913 {
914 // NOTE: FLOAT16 scales are not supported
915 return false;
916 }
917
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100918 bool isSupported = false;
919 FORWARD_LAYER_SUPPORT_FUNC(__func__,
920 IsResizeSupported,
921 data.m_Backends,
922 isSupported,
923 inputInfo,
924 outputInfo,
925 descriptor);
Aron Virginas-Tarbe5d3562019-07-16 11:32:29 +0100926
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100927 if (!isSupported)
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100928 {
929 return false;
930 }
931
932 armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);
933
934 assert(layer != nullptr);
935
936 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
937 input.Connect(layer->GetInputSlot(0));
938
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100939 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100940}
941
// Converts an NNAPI SPACE_TO_BATCH_ND operation by delegating to the
// HAL-version-agnostic ::ConvertSpaceToBatchNd template, instantiated for the
// 1.2 HAL policy.
bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertSpaceToBatchNd()");
    return ::ConvertSpaceToBatchNd<hal_1_2::HalPolicy>(operation, model, data);
}
947
Keith Davisa6bc52f2019-06-26 09:39:49 +0100948bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
949{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100950 ALOGV("hal_1_2::HalPolicy::ConvertSpaceToDepth()");
Keith Davisa6bc52f2019-06-26 09:39:49 +0100951
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100952 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
Keith Davisa6bc52f2019-06-26 09:39:49 +0100953 if (!input.IsValid() )
954 {
955 return Fail("%s: Operation has invalid inputs", __func__);
956 }
957
958 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
959 unsigned int rank = inputInfo.GetNumDimensions();
Keith Davisa6bc52f2019-06-26 09:39:49 +0100960 if (rank != 4)
961 {
962 return Fail("%s: Only inputs with rank 4 are supported", __func__);
963 }
964
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100965 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
966 if (!output)
967 {
968 return Fail("%s: Could not read output 0", __func__);
969 }
970
971 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
972 if (IsDynamicTensor(outputInfo))
973 {
974 return Fail("%s: Dynamic output tensors are not supported", __func__);
975 }
976
Keith Davisa6bc52f2019-06-26 09:39:49 +0100977 armnn::SpaceToDepthDescriptor desc;
978
979 GetInputScalar<hal_1_2::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
980
981 if (desc.m_BlockSize <= 1)
982 {
983 return Fail("%s: Block size must be at least 1 in all dimensions");
984 }
985
986 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 2, model, data);
987
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100988 bool isSupported = false;
989 FORWARD_LAYER_SUPPORT_FUNC(__func__,
990 IsSpaceToDepthSupported,
991 data.m_Backends,
992 isSupported,
993 inputInfo,
994 outputInfo,
995 desc);
996 if (!isSupported)
Keith Davisa6bc52f2019-06-26 09:39:49 +0100997 {
998 return false;
999 }
1000
1001 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
1002 assert(layer != nullptr);
1003 input.Connect(layer->GetInputSlot(0));
1004
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001005 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Keith Davisa6bc52f2019-06-26 09:39:49 +01001006}
1007
Francis Murtagh074c25a2019-07-22 16:40:57 +01001008bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
1009{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001010 ALOGV("hal_1_2::HalPolicy::ConvertSoftmax()");
1011
Francis Murtagh074c25a2019-07-22 16:40:57 +01001012 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
1013 if (!input.IsValid())
1014 {
1015 return Fail("%s: Operation has invalid inputs", __func__);
1016 }
1017
1018 const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
1019 if (!outputOperand)
1020 {
1021 return Fail("%s: Operation has no outputs", __func__);
1022 }
1023
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001024 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +01001025 if (IsDynamicTensor(outputInfo))
Francis Murtagh074c25a2019-07-22 16:40:57 +01001026 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001027 return Fail("%s: Dynamic output tensors are not supported", __func__);
Francis Murtagh074c25a2019-07-22 16:40:57 +01001028 }
1029
1030 armnn::SoftmaxDescriptor desc;
1031 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, desc.m_Beta, model, data))
1032 {
1033 return Fail("%s: Operation has invalid inputs", __func__);
1034 }
1035
1036 if (operation.inputs.size() > 2 && !GetInputScalar<hal_1_2::HalPolicy>(operation,
1037 2,
1038 HalPolicy::OperandType::INT32,
1039 desc.m_Axis,
1040 model,
1041 data))
1042 {
1043 return Fail("%s: Operation has invalid inputs", __func__);
1044 }
1045
1046 bool isSupported = false;
1047 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1048 IsSoftmaxSupported,
1049 data.m_Backends,
1050 isSupported,
1051 input.GetTensorInfo(),
1052 outputInfo,
1053 desc);
1054 if (!isSupported)
1055 {
1056 return false;
1057 }
1058
1059 armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
1060 assert(layer != nullptr);
1061 input.Connect(layer->GetInputSlot(0));
1062
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001063 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Francis Murtagh074c25a2019-07-22 16:40:57 +01001064}
1065
// Converts an NNAPI SUB operation by delegating to the HAL-version-agnostic
// ::ConvertSub template, instantiated for the 1.2 HAL policy.
bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertSub()");
    return ::ConvertSub<hal_1_2::HalPolicy>(operation, model, data);
}
1071
// Converts an NNAPI TANH operation by delegating to the HAL-version-agnostic
// ::ConvertTanH template, instantiated for the 1.2 HAL policy.
bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertTanH()");
    return ::ConvertTanH<hal_1_2::HalPolicy>(operation, model, data);
}
1077
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001078bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
1079{
1080 // Inputs:
1081 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
1082 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
1083 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
1084 if (!input.IsValid())
1085 {
1086 return Fail("%s: Could not read input 0: input", __func__);
1087 }
1088 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
1089 LayerInputHandle outputStateIn = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 18, model, data);
1090 if (!outputStateIn.IsValid())
1091 {
1092 return Fail("%s: Could not read input 18: outputStateIn", __func__);
1093 }
1094 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
1095 LayerInputHandle cellStateIn = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 19, model, data);
1096 if (!cellStateIn.IsValid())
1097 {
1098 return Fail("%s: Could not read input 19: cellStateIn", __func__);
1099 }
1100
1101 // Get the mandatory input tensors:
1102 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1103 // [num_units, input_size].
1104 const ConstTensorPin inputToForgetWeightsPin =
1105 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);
1106 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1107 // [num_units, input_size].
1108 const ConstTensorPin inputToCellWeightsPin =
1109 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 3, model, data);
1110 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1111 // [num_units, input_size].
1112 const ConstTensorPin inputToOutputWeightsPin =
1113 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 4, model, data);
1114 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1115 // [num_units, output_size].
1116 const ConstTensorPin recurrentToForgetWeightsPin =
1117 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 6, model, data);
1118 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1119 // [num_units, output_size].
1120 const ConstTensorPin recurrentToCellWeightsPin =
1121 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 7, model, data);
1122 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1123 // [num_units, output_size].
1124 const ConstTensorPin recurrentToOutputWeightsPin =
1125 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 8, model, data);
1126 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1127 const ConstTensorPin forgetGateBiasPin =
1128 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 13, model, data);
1129 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1130 const ConstTensorPin cellBiasPin =
1131 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 14, model, data);
1132 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1133 const ConstTensorPin outputGateBiasPin =
1134 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 15, model, data);
1135
1136 if (!inputToForgetWeightsPin.IsValid() ||
1137 !inputToCellWeightsPin.IsValid() ||
1138 !inputToOutputWeightsPin.IsValid() ||
1139 !recurrentToForgetWeightsPin.IsValid() ||
1140 !recurrentToCellWeightsPin.IsValid() ||
1141 !recurrentToOutputWeightsPin.IsValid() ||
1142 !forgetGateBiasPin.IsValid() ||
1143 !cellBiasPin.IsValid() ||
1144 !outputGateBiasPin.IsValid())
1145 {
1146 return Fail("%s: Operation has invalid tensor inputs", __func__);
1147 }
1148
1149 // Get the optional input tensors:
1150 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1151 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
1152 const ConstTensorPin inputToInputWeightsPin =
1153 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1154 1,
1155 model,
1156 data,
1157 g_DontPermute,
1158 nullptr,
1159 true);
1160
1161 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1162 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
1163 // “num_units”), or the second dimension of the “projection_weights”, if defined.
1164 const ConstTensorPin recurrentToInputWeightsPin =
1165 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1166 5,
1167 model,
1168 data,
1169 g_DontPermute,
1170 nullptr,
1171 true);
1172
1173 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1174 const ConstTensorPin cellToInputWeightsPin =
1175 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1176 9,
1177 model,
1178 data,
1179 g_DontPermute,
1180 nullptr,
1181 true);
1182
1183 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1184 const ConstTensorPin cellToForgetWeightsPin =
1185 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1186 10,
1187 model,
1188 data,
1189 g_DontPermute,
1190 nullptr,
1191 true);
1192
1193 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1194 const ConstTensorPin cellToOutputWeightsPin =
1195 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1196 11,
1197 model,
1198 data,
1199 g_DontPermute,
1200 nullptr,
1201 true);
1202
1203 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1204 const ConstTensorPin inputGateBiasPin =
1205 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1206 12,
1207 model,
1208 data,
1209 g_DontPermute,
1210 nullptr,
1211 true);
1212
1213 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1214 // [output_size, num_units].
1215 const ConstTensorPin projectionWeightsPin =
1216 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1217 16,
1218 model,
1219 data,
1220 g_DontPermute,
1221 nullptr,
1222 true);
1223
1224 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
1225 const ConstTensorPin projectionBiasPin =
1226 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1227 17,
1228 model,
1229 data,
1230 g_DontPermute,
1231 nullptr,
1232 true);
1233
1234 if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
1235 (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
1236 (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
1237 (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
1238 (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
1239 (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
1240 (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
1241 (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
1242 {
1243 return Fail("%s: Operation has invalid tensor inputs", __func__);
1244 }
1245
1246 // Get the mandatory input scalars (actually 1-D tensors of size 1):
1247 // 20: The activation function: A value indicating the activation function:
1248 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
1249 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
1250 // If set to 0.0 then clipping is disabled.
1251 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
1252 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
1253 ActivationFn activation;
1254 float cellClip;
1255 float projClip;
1256 if (!GetInputActivationFunctionFromTensor<hal_1_2::HalPolicy>(operation, 20, activation, model, data) ||
1257 !GetInputScalar<hal_1_2::HalPolicy>(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
1258 !GetInputScalar<hal_1_2::HalPolicy>(operation, 22, OperandType::FLOAT32, projClip, model, data))
1259 {
1260 return Fail("%s: Operation has invalid scalar inputs", __func__);
1261 }
1262
1263 // Get the normalization tensors
1264 // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
1265 // Used to rescale normalized inputs to activation at input gate.
1266 const ConstTensorPin inputLayerNormWeightsPin =
1267 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1268 23,
1269 model,
1270 data,
1271 g_DontPermute,
1272 nullptr,
1273 true);
1274
1275 // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
1276 // Used to rescale normalized inputs to activation at forget gate.
1277 const ConstTensorPin forgetLayerNormWeightsPin =
1278 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1279 24,
1280 model,
1281 data,
1282 g_DontPermute,
1283 nullptr,
1284 true);
1285
1286 // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
1287 // Used to rescale normalized inputs to activation at cell gate.
1288 const ConstTensorPin cellLayerNormWeightsPin =
1289 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1290 25,
1291 model,
1292 data,
1293 g_DontPermute,
1294 nullptr,
1295 true);
1296
1297 // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
1298 // Used to rescale normalized inputs to activation at output gate.
1299 const ConstTensorPin outputLayerNormWeightsPin =
1300 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1301 26,
1302 model,
1303 data,
1304 g_DontPermute,
1305 nullptr,
1306 true);
1307
1308 // Outputs:
1309 // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
1310 // with CIFG, or [batch_size, num_units * 3] without CIFG.
1311 const Operand* scratchBuffer = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
1312 if (!scratchBuffer)
1313 {
1314 return Fail("%s: Could not read output 0: scratchBuffer", __func__);
1315 }
1316 // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
1317 const Operand* outputStateOut = GetOutputOperand<hal_1_2::HalPolicy>(operation, 1, model);
1318 if (!outputStateOut)
1319 {
1320 return Fail("%s: Could not read output 1: outputStateOut", __func__);
1321 }
1322 // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
1323 const Operand* cellStateOut = GetOutputOperand<hal_1_2::HalPolicy>(operation, 2, model);
1324 if (!cellStateOut)
1325 {
1326 return Fail("%s: Could not read output 2: cellStateOut", __func__);
1327 }
1328 // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
1329 // effectively the same as the current “output state (out)” value.
1330 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 3, model);
1331 if (!output)
1332 {
1333 return Fail("%s: Could not read output 3: output", __func__);
1334 }
1335
1336 // set the params structure for the AddLstmLayer call
1337 armnn::LstmInputParams params;
1338 params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
1339 params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
1340 params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
1341 params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
1342 params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
1343 params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
1344 params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
1345 params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
1346 params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
1347 params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
1348 params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
1349 params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
1350 params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
1351 params.m_CellBias = cellBiasPin.GetConstTensorPtr();
1352 params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
1353 params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
1354 params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
1355 params.m_InputLayerNormWeights = inputLayerNormWeightsPin.GetConstTensorPtr();
1356 params.m_ForgetLayerNormWeights = forgetLayerNormWeightsPin.GetConstTensorPtr();
1357 params.m_CellLayerNormWeights = cellLayerNormWeightsPin.GetConstTensorPtr();
1358 params.m_OutputLayerNormWeights = outputLayerNormWeightsPin.GetConstTensorPtr();
1359
1360 // set the layer descriptor
1361 armnn::LstmDescriptor desc;
1362 desc.m_ActivationFunc = activation;
1363 desc.m_ClippingThresCell = cellClip;
1364 desc.m_ClippingThresProj = projClip;
1365 desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
1366 params.m_RecurrentToInputWeights == nullptr ||
1367 params.m_InputGateBias == nullptr);
1368 desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
1369 params.m_CellToOutputWeights != nullptr);
1370 desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
1371 desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr ||
1372 params.m_ForgetLayerNormWeights != nullptr ||
1373 params.m_CellLayerNormWeights != nullptr ||
1374 params.m_OutputLayerNormWeights != nullptr);
1375
1376 // validate the optional input groups
1377 if (desc.m_CifgEnabled &&
1378 (params.m_InputToInputWeights != nullptr ||
1379 params.m_RecurrentToInputWeights != nullptr ||
1380 params.m_InputGateBias != nullptr))
1381 {
1382 return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
1383 " and input gate bias must be provided", __func__);
1384 }
1385
1386 if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
1387 {
1388 return Fail("%s: projection bias should not be provided without projection weights", __func__);
1389 }
1390
1391 if (desc.m_PeepholeEnabled &&
1392 (params.m_CellToForgetWeights == nullptr ||
1393 params.m_CellToOutputWeights == nullptr ||
1394 (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
1395 {
1396 return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
1397 " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
1398 }
1399
1400 if (desc.m_LayerNormEnabled &&
1401 (params.m_ForgetLayerNormWeights == nullptr ||
1402 params.m_CellLayerNormWeights == nullptr ||
1403 params.m_OutputLayerNormWeights == nullptr ||
1404 (!desc.m_CifgEnabled && params.m_InputLayerNormWeights == nullptr)))
1405 {
1406 return Fail("%s: All, or none, of forget-norm weights, cell-norm weights and output-norm weights must be"
1407 " provided and, if CIFG is not enabled, input-norm weights must also be provided", __func__);
1408 }
1409
1410 // Check if the layer is supported
1411 // Inputs
1412 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1413 const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
1414 const armnn::TensorInfo& cellStateInInfo = cellStateIn.GetTensorInfo();
1415
1416 // Outputs
1417 const armnn::TensorInfo& scratchBufferInfo = GetTensorInfoForOperand(*scratchBuffer);
1418 const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
1419 const armnn::TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
1420 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1421
Ferran Balaguera4a629a2019-07-30 10:16:13 +01001422 if (IsDynamicTensor(scratchBufferInfo) ||
1423 IsDynamicTensor(outputStateOutInfo) ||
1424 IsDynamicTensor(cellStateOutInfo) ||
1425 IsDynamicTensor(outputInfo))
1426 {
1427 return Fail("%s: Dynamic output tensors are not supported", __func__);
1428 }
1429
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001430 // Basic parameters
1431 armnn::LstmInputParamsInfo paramsInfo;
1432 paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
1433 paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
1434 paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
1435 paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
1436 paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
1437 paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
1438 paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
1439 paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
1440 paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
1441
1442 // Optional parameters
1443 if(!desc.m_CifgEnabled)
1444 {
1445 paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
1446 paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
1447 if (params.m_CellToInputWeights != nullptr)
1448 {
1449 paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
1450 }
1451 paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
1452 }
1453
1454 if(desc.m_ProjectionEnabled)
1455 {
1456 paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
1457 if (params.m_ProjectionBias != nullptr)
1458 {
1459 paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
1460 }
1461 }
1462
1463 if(desc.m_PeepholeEnabled)
1464 {
1465 paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
1466 paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
1467 }
1468
1469 if (desc.m_LayerNormEnabled)
1470 {
1471 if(!desc.m_CifgEnabled)
1472 {
1473 paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
1474 }
1475 paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
1476 paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
1477 paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
1478 }
1479
1480 bool isSupported = false;
1481 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1482 IsLstmSupported,
1483 data.m_Backends,
1484 isSupported,
1485 inputInfo,
1486 outputStateInInfo,
1487 cellStateInInfo,
1488 scratchBufferInfo,
1489 outputStateOutInfo,
1490 cellStateOutInfo,
1491 outputInfo,
1492 desc,
1493 paramsInfo);
1494 if (!isSupported)
1495 {
1496 return false;
1497 }
1498
1499 // Add the layer
1500 armnn::IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");
1501
1502 input.Connect(layer->GetInputSlot(0));
1503 outputStateIn.Connect(layer->GetInputSlot(1));
1504 cellStateIn.Connect(layer->GetInputSlot(2));
1505
1506 return (SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, 0, model, data) &&
1507 SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 1, *layer, 1, model, data) &&
1508 SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 2, *layer, 2, model, data) &&
1509 SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 3, *layer, 3, model, data));
1510}
1511
Aron Virginas-Tar8b991682019-07-31 12:54:59 +01001512bool HalPolicy::ConvertTransposeConv2d(const Operation& operation, const Model& model, ConversionData& data)
David Monahan613b49c2019-06-27 11:37:47 +01001513{
1514 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
1515
1516 if (!input.IsValid())
1517 {
1518 return Fail("%s: Operation has invalid inputs", __func__);
1519 }
1520
1521 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
1522
1523 if (!output)
1524 {
1525 return Fail("%s: Could not read output 0", __func__);
1526 }
1527
1528 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1529 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1530 if (IsDynamicTensor(outputInfo))
1531 {
1532 return Fail("%s: Dynamic output tensors are not supported", __func__);
1533 }
1534
1535 // ArmNN does not currently support non-fixed weights or bias
1536 // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
1537 const Operand* weightsOperand = GetInputOperand<hal_1_2::HalPolicy>(operation, 1, model);
1538
1539 if (weightsOperand == nullptr)
1540 {
1541 return Fail("%s: Operand is invalid", __func__);
1542 }
1543 armnn::TransposeConvolution2dDescriptor desc;
1544 desc.m_DataLayout = armnn::DataLayout::NHWC;
1545
1546 // Determine whether padding is implicit or explicit
1547 bool implicitPadding = operation.inputs.size() == 9;
1548
1549 if (implicitPadding )
1550 {
1551 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 8, model, data);
1552 }
1553 else
1554 {
1555 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 10, model, data);
1556 }
1557
1558 armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
1559 unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
1560 unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
1561
1562 const armnn::PermutationVector OHWIToOIHW = {0, 2, 3, 1};
1563
1564 // The shape of the weight is [depth_out, filter_height, filter_width, depth_in].
1565 // We have to permute it to OIHW if the data layout is NCHW.
1566 const ConstTensorPin weightsPin = (desc.m_DataLayout == armnn::DataLayout::NCHW) ?
1567 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data, OHWIToOIHW) :
1568 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
1569
1570 // Bias is a 1D tensor
1571 const ConstTensorPin biasPin =
1572 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);
1573
1574 if (!weightsPin.IsValid())
1575 {
1576 return Fail("%s: Operation has invalid weights", __func__);
1577 }
1578
1579 if (!biasPin.IsValid())
1580 {
1581 return Fail("%s: Operation has invalid biases", __func__);
1582 }
1583
1584 armnn::ConstTensor weights = weightsPin.GetConstTensor();
1585 armnn::ConstTensor bias = biasPin.GetConstTensor();
1586 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
1587
1588 ActivationFn activation;
1589
1590 if (implicitPadding)
1591 {
1592 android::nn::PaddingScheme paddingScheme;
1593 if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 4, paddingScheme, model, data) ||
1594 !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideX, model, data) ||
1595 !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_StrideY, model, data) ||
1596 !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 7, activation, model, data))
1597 {
1598 return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
1599 }
1600
1601 const uint32_t kernelX = weights.GetShape()[widthIndex];
1602 const uint32_t kernelY = weights.GetShape()[heightIndex];
1603 const uint32_t inputX = inputInfo.GetShape()[widthIndex];
1604 const uint32_t inputY = inputInfo.GetShape()[heightIndex];
1605
1606 CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
1607 CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
1608 }
1609 else if (operation.inputs.size() == 11)
1610 {
1611 // explicit padding
1612 if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
1613 !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
1614 !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
1615 !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
1616 !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
1617 !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
1618 !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 9, activation, model, data))
1619 {
1620 return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
1621 }
1622 }
1623 else
1624 {
1625 return Fail("%s: Unsupported number of operation inputs", __func__);
1626 }
1627
1628 desc.m_BiasEnabled = true;
1629 armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
1630
1631 bool isSupported = false;
1632 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1633 IsTransposeConvolution2dSupported,
1634 data.m_Backends,
1635 isSupported,
1636 inputInfo,
1637 outputInfo,
1638 desc,
1639 weights.GetInfo(),
1640 biases);
1641 if (!isSupported)
1642 {
1643 return false;
1644 }
1645
1646 armnn::IConnectableLayer* startLayer =
1647 data.m_Network->AddTransposeConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
1648 if (!startLayer)
1649 {
1650 return Fail("%s: AddTransposeConvolution2dLayer failed", __func__);
1651 }
1652
1653 armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
1654 if (!endLayer)
1655 {
1656 return Fail("%s: ProcessActivation failed", __func__);
1657 }
1658
1659 input.Connect(startLayer->GetInputSlot(0));
1660
1661 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
1662}
1663
Mike Kellyb5fdf382019-06-11 16:35:25 +01001664} // namespace hal_1_2
Matteo Martincigh17ffff32019-06-27 14:12:55 +01001665} // namespace armnn_driver