//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "HalPolicy.hpp"

#include "Utils.hpp"

#include <DataLayoutIndexed.hpp>
#include <Half.hpp>
#include <TensorUtils.hpp>

#include <cmath>

namespace armnn_driver
{
namespace hal_1_2
{

bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
{
    switch (operation.type)
    {
        case V1_2::OperationType::ABS:
            return ConvertAbs(operation, model, data);
        case V1_2::OperationType::ADD:
            return ConvertAdd(operation, model, data);
        case V1_2::OperationType::AVERAGE_POOL_2D:
            return ConvertAveragePool2d(operation, model, data);
        case V1_2::OperationType::BATCH_TO_SPACE_ND:
            return ConvertBatchToSpaceNd(operation, model, data);
        case V1_2::OperationType::CONCATENATION:
            return ConvertConcatenation(operation, model, data);
        case V1_2::OperationType::CONV_2D:
            return ConvertConv2d(operation, model, data);
        case V1_2::OperationType::DEPTH_TO_SPACE:
            return ConvertDepthToSpace(operation, model, data);
        case V1_2::OperationType::DEPTHWISE_CONV_2D:
            return ConvertDepthwiseConv2d(operation, model, data);
        case V1_2::OperationType::DEQUANTIZE:
            return ConvertDequantize(operation, model, data);
        case V1_2::OperationType::DIV:
            return ConvertDiv(operation, model, data);
        case V1_2::OperationType::EXPAND_DIMS:
            return ConvertExpandDims(operation, model, data);
        case V1_2::OperationType::FLOOR:
            return ConvertFloor(operation, model, data);
        case V1_2::OperationType::FULLY_CONNECTED:
            return ConvertFullyConnected(operation, model, data);
        case V1_2::OperationType::L2_NORMALIZATION:
            return ConvertL2Normalization(operation, model, data);
        case V1_2::OperationType::L2_POOL_2D:
            return ConvertL2Pool2d(operation, model, data);
        case V1_2::OperationType::LOCAL_RESPONSE_NORMALIZATION:
            return ConvertLocalResponseNormalization(operation, model, data);
        case V1_2::OperationType::LOGISTIC:
            return ConvertLogistic(operation, model, data);
        case V1_2::OperationType::LSTM:
            return ConvertLstm(operation, model, data);
        case V1_2::OperationType::MAX_POOL_2D:
            return ConvertMaxPool2d(operation, model, data);
        case V1_2::OperationType::MAXIMUM:
            return ConvertMaximum(operation, model, data);
        case V1_2::OperationType::MEAN:
            return ConvertMean(operation, model, data);
        case V1_2::OperationType::MINIMUM:
            return ConvertMinimum(operation, model, data);
        case V1_2::OperationType::MUL:
            return ConvertMul(operation, model, data);
        case V1_2::OperationType::PAD:
            return ConvertPad(operation, model, data);
        case V1_2::OperationType::PAD_V2:
            return ConvertPadV2(operation, model, data);
        case V1_2::OperationType::PRELU:
            return ConvertPrelu(operation, model, data);
        case V1_2::OperationType::QUANTIZE:
            return ConvertQuantize(operation, model, data);
        case V1_2::OperationType::QUANTIZED_16BIT_LSTM:
            return ConvertQuantizedLstm(operation, model, data);
        case V1_2::OperationType::RELU:
            return ConvertReLu(operation, model, data);
        case V1_2::OperationType::RELU1:
            return ConvertReLu1(operation, model, data);
        case V1_2::OperationType::RELU6:
            return ConvertReLu6(operation, model, data);
        case V1_2::OperationType::RESHAPE:
            return ConvertReshape(operation, model, data);
        case V1_2::OperationType::RESIZE_BILINEAR:
            return ConvertResize(operation, model, data, armnn::ResizeMethod::Bilinear);
        case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR:
            return ConvertResize(operation, model, data, armnn::ResizeMethod::NearestNeighbor);
        case V1_2::OperationType::RSQRT:
            return ConvertRsqrt(operation, model, data);
        case V1_2::OperationType::SQRT:
            return ConvertSqrt(operation, model, data);
        case V1_2::OperationType::SQUEEZE:
            return ConvertSqueeze(operation, model, data);
        case V1_2::OperationType::STRIDED_SLICE:
            return ConvertStridedSlice(operation, model, data);
        case V1_2::OperationType::TRANSPOSE:
            return ConvertTranspose(operation, model, data);
        case V1_2::OperationType::TRANSPOSE_CONV_2D:
            return ConvertTransposeConv2d(operation, model, data);
        case V1_2::OperationType::SOFTMAX:
            return ConvertSoftmax(operation, model, data);
        case V1_2::OperationType::SPACE_TO_BATCH_ND:
            return ConvertSpaceToBatchNd(operation, model, data);
        case V1_2::OperationType::SPACE_TO_DEPTH:
            return ConvertSpaceToDepth(operation, model, data);
        case V1_2::OperationType::SUB:
            return ConvertSub(operation, model, data);
        case V1_2::OperationType::TANH:
            return ConvertTanH(operation, model, data);
        default:
            return Fail("%s: Operation type %s not supported in ArmnnDriver",
                        __func__, toString(operation.type).c_str());
    }
}

bool HalPolicy::ConvertAbs(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertAbs()");
    return ::ConvertAbs<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertAdd()");
    return ::ConvertAdd<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertAveragePool2d()");
    return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
}

bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertBatchToSpaceNd()");
    return ::ConvertBatchToSpaceNd<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertConcatenation()");
    return ::ConvertConcatenation<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertConv2d()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit
    bool implicitPadding = operation.inputs.size() == 7 ||
                           (operation.inputs.size() >= 8 &&
                            GetInputOperand<hal_1_2::HalPolicy>(operation, 7, model)->type == OperandType::BOOL);

    if (implicitPadding)
    {
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 7, model, data);
    }
    else if (operation.inputs.size() >= 10)
    {
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 10, model, data);
    }

    const armnn::PermutationVector OHWIToOIHW = {0, 2, 3, 1};

    // ArmNN does not currently support non-fixed weights or bias
    // The NNAPI filter is always OHWI [depth_out, filter_height, filter_width, depth_in] but ArmNN expects the
    // filter's height and width indices to match the input's height and width indices so we permute it to OIHW if
    // the DataLayout is NCHW
    const ConstTensorPin weightsPin = (desc.m_DataLayout == armnn::DataLayout::NCHW) ?
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data, OHWIToOIHW) :
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias    = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 6, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 8, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
        unsigned int widthIndex  = dataLayoutIndexed.GetWidthIndex();
        unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
        const uint32_t kernelX = weights.GetShape()[widthIndex];
        const uint32_t kernelY = weights.GetShape()[heightIndex];
        const uint32_t inputX  = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY  = inputInfo.GetShape()[heightIndex];

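        // For the SAME/VALID schemes, derive the explicit pad values from the input size, kernel size,
        // stride and dilation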
        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 10)
    {
        // explicit padding
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 9, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 11, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}

bool HalPolicy::ConvertDepthToSpace(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertDepthToSpace()");
    return ::ConvertDepthToSpace<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertDepthwiseConv2d()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const Operand* weightsOperand = GetInputOperand<hal_1_2::HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit
    bool implicitPadding = operation.inputs.size() == 8 ||
                           (operation.inputs.size() >= 9 &&
                            GetInputOperand<hal_1_2::HalPolicy>(operation, 8, model)->type == OperandType::BOOL);

    // Look ahead to find the optional DataLayout, if present
    const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
    desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, dataLayoutFlagIndex, model, data);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    unsigned int widthIndex    = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex   = dataLayoutIndexed.GetHeightIndex();

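    // The last dimension of the NNAPI weights, I * M, encodes both the input channel count I and the depth
    // multiplier M; dividing it by the input's channel count recovers M for the reshape below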
    // Reinterpret weight data as [ H, W, I, M ]
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[channelsIndex],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  HWIMToMIHW,
                                                                  &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias    = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 7, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX  = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY  = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 11)
    {
        // explicit padding
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 10, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}

bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertDequantize()");
    return ::ConvertDequantize<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertDiv()");
    return ::ConvertDiv<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertExpandDims(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertExpandDims()");

    LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid input", __func__);
    }

    const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Operation has invalid output", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    int32_t axis;
    if (!GetInputScalar<HalPolicy>(operation, 1, OperandType::INT32, axis, model, data))
    {
        return Fail("%s: failed to get axis input value", __func__);
    }

    armnn::TensorShape targetShape;

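    // The axis may be negative (counting from the back, as NNAPI allows); armnnUtils::ExpandDims is expected
    // to resolve it and to throw on an out-of-range value, which is reported as a conversion failure below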
    try
    {
        targetShape = armnnUtils::ExpandDims(input.GetTensorInfo().GetShape(), axis);
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    if (targetShape != outputInfo.GetShape())
    {
        return Fail("%s: Shape of the output operand does not match the resolved expanded shape", __func__);
    }

    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = targetShape;

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsReshapeSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               reshapeDescriptor);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertFloor()");
    return ::ConvertFloor<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertFullyConnected()");
    return ::ConvertFullyConnected<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertL2Normalization()");
    return ::ConvertL2Normalization<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertL2Pool2d()");
    return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
}

bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
                                                  const Model& model,
                                                  ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertLocalResponseNormalization()");
    return ::ConvertLocalResponseNormalization<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertLogistic()");
    return ::ConvertLogistic<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertMaxPool2d()");
    return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
}

bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertMaximum()");

    LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMaximumSupported,
                               data.m_Backends,
                               isSupported,
                               input0.GetTensorInfo(),
                               input1.GetTensorInfo(),
                               outInfo);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
    assert(layer != nullptr);
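    // BroadcastTensor equalises the ranks of the two inputs, inserting a Reshape for the lower-rank input
    // where needed, so the backend sees broadcast-compatible shapes; it fails if that Reshape is unsupported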
    bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
    if (!isReshapeSupported)
    {
        return false;
    }

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertMean()");
    return ::ConvertMean<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertMinimum()");

    LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMinimumSupported,
                               data.m_Backends,
                               isSupported,
                               input0.GetTensorInfo(),
                               input1.GetTensorInfo(),
                               outputInfo);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
    assert(layer != nullptr);
    bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
    if (!isReshapeSupported)
    {
        return false;
    }

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertMul()");
    return ::ConvertMul<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertPad()");
    return ::ConvertPad<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertPadV2()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    armnn::PadDescriptor descriptor;
    if (!ConvertPaddings<hal_1_2::HalPolicy>(operation, model, data, rank, descriptor))
    {
        return Fail("%s: Could not convert paddings", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Determine type of padding value
    OperandType operandType0;
    OperandType operandType2;

    if (!GetOperandType<hal_1_2::HalPolicy>(operation, 0, model, operandType0) ||
        !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // Read value to use for padding
    if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
    {
        armnn::Half f16PadValue;
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 2, operandType2, f16PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
        }

        descriptor.m_PadValue = f16PadValue;
    }
    else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
    {
        if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, descriptor.m_PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
        }
    }
    else if (operandType0 == OperandType::TENSOR_QUANT8_ASYMM && operandType2 == OperandType::INT32)
    {
        int32_t intPadValue = 0;
        if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, intPadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (INT32)", __func__);
        }
        descriptor.m_PadValue = intPadValue;
    }
    else
    {
        return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPadSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertPrelu()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    LayerInputHandle alpha = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);

    if (!input.IsValid() || !alpha.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& alphaInfo  = alpha.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPreluSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               alphaInfo,
                               outputInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPreluLayer();

    if (!layer)
    {
        return Fail("%s: AddPreluLayer failed", __func__);
    }

    bool isReshapeSupported = BroadcastTensor(input, alpha, layer, data);
    if (!isReshapeSupported)
    {
        return false;
    }

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertQuantize(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertQuantize()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid input", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsQuantizeSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outputInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddQuantizeLayer();
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertQuantizedLstm(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertQuantizedLstm()");

    // Inputs:
    // 0: The input: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, inputSize]
    //    specifying the input to the LSTM cell. Tensor is quantized with a fixed quantization range of -1, 127/128.
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0: input", __func__);
    }

    // 13: The previous cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape
    //     [numBatches, outputSize] specifying the cell state from the previous time step of the LSTM cell.
    //     It is quantized using a quantization range of -2^4, 2^4 * 32767/32768.
    LayerInputHandle previousCellStateIn = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 13, model, data);
    if (!previousCellStateIn.IsValid())
    {
        return Fail("%s: Could not read input 13: previousCellStateIn", __func__);
    }

    // 14: The previous output state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //     [numBatches, outputSize] specifying the output of the LSTM cell from the previous time step. Tensor
    //     is quantized with a fixed quantization range of -1, 127/128.
    LayerInputHandle previousOutputIn = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 14, model, data);
    if (!previousOutputIn.IsValid())
    {
        return Fail("%s: Could not read input 14: previousOutputIn", __func__);
    }

    // Get the input tensors:
    // 1: The input-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-input part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    const ConstTensorPin inputToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);

    // 2: The input-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-forget part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    const ConstTensorPin inputToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    // 3: The input-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-cell part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    const ConstTensorPin inputToCellWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 3, model, data);

    // 4: The input-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-output part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    const ConstTensorPin inputToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 4, model, data);

    // 5: The recurrent-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-input part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    const ConstTensorPin recurrentToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 5, model, data);

    // 6: The recurrent-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-forget part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    const ConstTensorPin recurrentToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 6, model, data);

    // 7: The recurrent-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-cell part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    const ConstTensorPin recurrentToCellWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 7, model, data);

    // 8: The recurrent-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-output part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    const ConstTensorPin recurrentToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 8, model, data);

    // 9: The input gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
    //    the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
    //    of input and weights scales and zeroPoint equal to 0.
    const ConstTensorPin inputGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 9, model, data);

    // 10: The forget gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
    //     the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
    //     of input and weights scales and zeroPoint equal to 0.
    const ConstTensorPin forgetGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 10, model, data);

    // 11: The cell bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the
    //     bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product of
    //     input and weights scales and zeroPoint equal to 0.
    const ConstTensorPin cellBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 11, model, data);

    // 12: The output gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
    //     the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
    //     of input and weights scales and zeroPoint equal to 0.
    const ConstTensorPin outputGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 12, model, data);

    if (!inputToInputWeightsPin.IsValid() ||
        !inputToForgetWeightsPin.IsValid() ||
        !inputToCellWeightsPin.IsValid() ||
        !inputToOutputWeightsPin.IsValid() ||
        !recurrentToInputWeightsPin.IsValid() ||
        !recurrentToForgetWeightsPin.IsValid() ||
        !recurrentToCellWeightsPin.IsValid() ||
        !recurrentToOutputWeightsPin.IsValid() ||
        !inputGateBiasPin.IsValid() ||
        !forgetGateBiasPin.IsValid() ||
        !cellBiasPin.IsValid() ||
        !outputGateBiasPin.IsValid())
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Outputs:
    // 0: The cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape [numBatches, outputSize]
    //    which contains a cell state from the current time step. Tensor is quantized using a quantization range
    //    of -2^4, 2^4 * 32767/32768.
    const Operand* cellStateOut = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!cellStateOut)
    {
        return Fail("%s: Could not read output 0: cellStateOut", __func__);
    }

    // 1: The output: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, outputSize]
    //    which contains the output value. Tensor is quantized with a fixed quantization range of -1, 127/128.
    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 1, model);
    if (!output)
    {
        return Fail("%s: Could not read output 1: output", __func__);
    }

    // Inputs
    const armnn::TensorInfo& inputInfo               = input.GetTensorInfo();
    const armnn::TensorInfo& previousCellStateInInfo = previousCellStateIn.GetTensorInfo();
    const armnn::TensorInfo& previousOutputInInfo    = previousOutputIn.GetTensorInfo();

    // Outputs
    const armnn::TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
    const armnn::TensorInfo& outputInfo       = GetTensorInfoForOperand(*output);

    // Dynamic tensors currently not supported
    if (IsDynamicTensor(cellStateOutInfo) || IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::QuantizedLstmInputParams params;

    params.m_InputToInputWeights      = inputToInputWeightsPin.GetConstTensorPtr();
    params.m_InputToForgetWeights     = inputToForgetWeightsPin.GetConstTensorPtr();
    params.m_InputToCellWeights       = inputToCellWeightsPin.GetConstTensorPtr();
    params.m_InputToOutputWeights     = inputToOutputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToInputWeights  = recurrentToInputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToCellWeights   = recurrentToCellWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
    params.m_InputGateBias            = inputGateBiasPin.GetConstTensorPtr();
    params.m_ForgetGateBias           = forgetGateBiasPin.GetConstTensorPtr();
    params.m_CellBias                 = cellBiasPin.GetConstTensorPtr();
    params.m_OutputGateBias           = outputGateBiasPin.GetConstTensorPtr();

    armnn::QuantizedLstmInputParamsInfo paramsInfo;
    paramsInfo.m_InputToInputWeights      = &(params.m_InputToInputWeights->GetInfo());
    paramsInfo.m_InputToForgetWeights     = &(params.m_InputToForgetWeights->GetInfo());
    paramsInfo.m_InputToCellWeights       = &(params.m_InputToCellWeights->GetInfo());
    paramsInfo.m_InputToOutputWeights     = &(params.m_InputToOutputWeights->GetInfo());
    paramsInfo.m_RecurrentToInputWeights  = &(params.m_RecurrentToInputWeights->GetInfo());
    paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
    paramsInfo.m_RecurrentToCellWeights   = &(params.m_RecurrentToCellWeights->GetInfo());
    paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
    paramsInfo.m_InputGateBias            = &(params.m_InputGateBias->GetInfo());
    paramsInfo.m_ForgetGateBias           = &(params.m_ForgetGateBias->GetInfo());
    paramsInfo.m_CellBias                 = &(params.m_CellBias->GetInfo());
    paramsInfo.m_OutputGateBias           = &(params.m_OutputGateBias->GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsQuantizedLstmSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               previousCellStateInInfo,
                               previousOutputInInfo,
                               cellStateOutInfo,
                               outputInfo,
                               paramsInfo);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddQuantizedLstmLayer(params, "QuantizedLstm");
    input.Connect(layer->GetInputSlot(0));
    previousCellStateIn.Connect(layer->GetInputSlot(1));
    previousOutputIn.Connect(layer->GetInputSlot(2));

    return (SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, 0, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 1, *layer, 1, model, data));
}

bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu()");
    return ::ConvertReLu<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu1()");
    return ::ConvertReLu1<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu6()");
    return ::ConvertReLu6<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReshape()");
    return ::ConvertReshape<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertResize(const Operation& operation,
                              const Model& model,
                              ConversionData& data,
                              armnn::ResizeMethod resizeMethod)
{
    ALOGV("hal_1_2::HalPolicy::ConvertResize()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::ResizeDescriptor descriptor;
    descriptor.m_Method     = resizeMethod;
    descriptor.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 3, model, data);

    OperandType operandType1;
    OperandType operandType2;

    if (!GetOperandType<hal_1_2::HalPolicy>(operation, 1, model, operandType1) ||
        !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (operandType1 != operandType2)
    {
        return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
    }

1184 if (operandType1 == OperandType::INT32)
1185 {
1186 // Case 1: resizing by shape
1187 int32_t targetWidth = 0;
1188 int32_t targetHeight = 0;
1189
1190 if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 1, targetWidth, model, data) ||
1191 !GetInputInt32<hal_1_2::HalPolicy>(operation, 2, targetHeight, model, data))
1192 {
1193 return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
1194 }
1195
1196 if (targetWidth < 0 || targetHeight < 0)
1197 {
1198 return Fail("%s: Operation has invalid inputs for resizing by shape. "
1199 "Target width/height cannot be < 0", __func__);
1200 }
1201
1202 descriptor.m_TargetWidth = static_cast<uint32_t>(targetWidth);
Teresa Charlin9843c012019-07-19 12:18:35 +01001203 descriptor.m_TargetHeight = static_cast<uint32_t>(targetHeight);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +01001204 }
1205 else if (operandType1 == OperandType::FLOAT32)
1206 {
1207 // Case 2: resizing by scale
1208 float widthScale = 1.0f;
1209 float heightScale = 1.0f;
1210
1211 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, widthScale, model, data) ||
1212 !GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, heightScale, model, data))
1213 {
1214 return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
1215 }
1216
1217 const armnn::TensorShape& inputShape = inputInfo.GetShape();
1218 armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
1219
1220 float width = inputShape[dataLayoutIndexed.GetWidthIndex()];
1221 float height = inputShape[dataLayoutIndexed.GetHeightIndex()];
1222
1223 descriptor.m_TargetWidth = static_cast<uint32_t>(std::floor(width * widthScale));
1224 descriptor.m_TargetHeight = static_cast<uint32_t>(std::floor(height * heightScale));
1225 }
1226 else
1227 {
1228 // NOTE: FLOAT16 scales are not supported
1229 return false;
1230 }
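    // Illustrative example (values assumed, not taken from the model): an NHWC
    // input of shape [1, 224, 224, 3] with widthScale = heightScale = 0.5f gives
    // m_TargetWidth = std::floor(224 * 0.5f) = 112, and likewise m_TargetHeight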
1231
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001232 bool isSupported = false;
1233 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1234 IsResizeSupported,
1235 data.m_Backends,
1236 isSupported,
1237 inputInfo,
1238 outputInfo,
1239 descriptor);
Aron Virginas-Tarbe5d3562019-07-16 11:32:29 +01001240
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001241 if (!isSupported)
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +01001242 {
1243 return false;
1244 }
1245
1246 armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);
1247
1248 assert(layer != nullptr);
1249
1250 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1251 input.Connect(layer->GetInputSlot(0));
1252
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001253 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +01001254}
1255
Aron Virginas-Tarfa6544e2019-09-10 14:42:22 +01001256bool HalPolicy::ConvertRsqrt(const Operation& operation, const Model& model, ConversionData& data)
1257{
1258 ALOGV("hal_1_2::HalPolicy::ConvertRsqrt()");
1259
1260 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
1261 if (!input.IsValid())
1262 {
1263 return Fail("%s: Operation has invalid input", __func__);
1264 }
1265
1266 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
1267 if (!output)
1268 {
1269 return Fail("%s: Could not read output 0", __func__);
1270 }
1271
1272 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1273 if (IsDynamicTensor(outputInfo))
1274 {
1275 return Fail("%s: Dynamic output tensors are not supported", __func__);
1276 }
1277
1278 bool isSupported = false;
1279 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1280 IsRsqrtSupported,
1281 data.m_Backends,
1282 isSupported,
1283 input.GetTensorInfo(),
1284 outputInfo);
1285
1286 if (!isSupported)
1287 {
1288 return false;
1289 }
1290
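    // Rsqrt computes the element-wise reciprocal square root, 1 / sqrt(x)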
1291 armnn::IConnectableLayer* const layer = data.m_Network->AddRsqrtLayer();
1292 assert(layer != nullptr);
1293 input.Connect(layer->GetInputSlot(0));
1294
1295 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
1296}
1297
Finn Williamsd74c5052019-07-30 17:06:00 +01001298bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
1299{
1300 ALOGV("hal_1_2::HalPolicy::ConvertSpaceToBatchNd()");
1301 return ::ConvertSpaceToBatchNd<hal_1_2::HalPolicy>(operation, model, data);
1302}
1303
Keith Davisa6bc52f2019-06-26 09:39:49 +01001304bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
1305{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001306 ALOGV("hal_1_2::HalPolicy::ConvertSpaceToDepth()");
Keith Davisa6bc52f2019-06-26 09:39:49 +01001307
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001308 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
Keith Davisa6bc52f2019-06-26 09:39:49 +01001309 if (!input.IsValid())
1310 {
1311 return Fail("%s: Operation has invalid inputs", __func__);
1312 }
1313
1314 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1315 unsigned int rank = inputInfo.GetNumDimensions();
Keith Davisa6bc52f2019-06-26 09:39:49 +01001316 if (rank != 4)
1317 {
1318 return Fail("%s: Only inputs with rank 4 are supported", __func__);
1319 }
1320
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001321 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
1322 if (!output)
1323 {
1324 return Fail("%s: Could not read output 0", __func__);
1325 }
1326
1327 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1328 if (IsDynamicTensor(outputInfo))
1329 {
1330 return Fail("%s: Dynamic output tensors are not supported", __func__);
1331 }
1332
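    // SPACE_TO_DEPTH with block size B rearranges spatial data into depth: an
    // NHWC input of shape [N, H, W, C] becomes [N, H/B, W/B, C*B*B], so both H
    // and W must be divisible by B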
Keith Davisa6bc52f2019-06-26 09:39:49 +01001333 armnn::SpaceToDepthDescriptor desc;
1334
1335 if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }
1336
1337 if (desc.m_BlockSize <= 1)
1338 {
1339 return Fail("%s: Block size must be at least 1 in all dimensions");
1340 }
1341
1342 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 2, model, data);
1343
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001344 bool isSupported = false;
1345 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1346 IsSpaceToDepthSupported,
1347 data.m_Backends,
1348 isSupported,
1349 inputInfo,
1350 outputInfo,
1351 desc);
1352 if (!isSupported)
Keith Davisa6bc52f2019-06-26 09:39:49 +01001353 {
1354 return false;
1355 }
1356
1357 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
1358 assert(layer != nullptr);
1359 input.Connect(layer->GetInputSlot(0));
1360
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001361 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Keith Davisa6bc52f2019-06-26 09:39:49 +01001362}
1363
Francis Murtagh074c25a2019-07-22 16:40:57 +01001364bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
1365{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001366 ALOGV("hal_1_2::HalPolicy::ConvertSoftmax()");
1367
Francis Murtagh074c25a2019-07-22 16:40:57 +01001368 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
1369 if (!input.IsValid())
1370 {
1371 return Fail("%s: Operation has invalid inputs", __func__);
1372 }
1373
1374 const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
1375 if (!outputOperand)
1376 {
1377 return Fail("%s: Operation has no outputs", __func__);
1378 }
1379
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001380 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +01001381 if (IsDynamicTensor(outputInfo))
Francis Murtagh074c25a2019-07-22 16:40:57 +01001382 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001383 return Fail("%s: Dynamic output tensors are not supported", __func__);
Francis Murtagh074c25a2019-07-22 16:40:57 +01001384 }
1385
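    // Softmax computes exp(beta * x_i) / sum_j(exp(beta * x_j)) along the chosen
    // axis; beta comes from input 1 and the optional axis from input 2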
1386 armnn::SoftmaxDescriptor desc;
1387 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, desc.m_Beta, model, data))
1388 {
1389 return Fail("%s: Operation has invalid inputs", __func__);
1390 }
1391
1392 if (operation.inputs.size() > 2 && !GetInputScalar<hal_1_2::HalPolicy>(operation,
1393 2,
1394 HalPolicy::OperandType::INT32,
1395 desc.m_Axis,
1396 model,
1397 data))
1398 {
1399 return Fail("%s: Operation has invalid inputs", __func__);
1400 }
1401
Narumol Prangnawarat52dc5272019-08-06 17:34:26 +01001402 if (input.GetTensorInfo().GetNumDimensions() > 2 ||
1403 !(desc.m_Axis == 1 ||
1404 (desc.m_Axis < 0 && static_cast<int>(input.GetTensorInfo().GetNumDimensions()) + desc.m_Axis == 1)))
1405 {
1406 return Fail("%s: Unsupported input greater than 2D or axis != 1", __func__);
1407 }
1408
Francis Murtagh074c25a2019-07-22 16:40:57 +01001409 bool isSupported = false;
1410 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1411 IsSoftmaxSupported,
1412 data.m_Backends,
1413 isSupported,
1414 input.GetTensorInfo(),
1415 outputInfo,
1416 desc);
1417 if (!isSupported)
1418 {
1419 return false;
1420 }
1421
1422 armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
1423 assert(layer != nullptr);
1424 input.Connect(layer->GetInputSlot(0));
1425
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001426 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Francis Murtagh074c25a2019-07-22 16:40:57 +01001427}
1428
Mike Kelly0a879362019-07-29 16:56:31 +01001429bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
1430{
1431 ALOGV("hal_1_2::HalPolicy::ConvertSub()");
1432 return ::ConvertSub<hal_1_2::HalPolicy>(operation, model, data);
1433}
1434
Sadik Armagan61113162019-07-25 09:09:40 +01001435bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
1436{
1437 ALOGV("hal_1_2::HalPolicy::ConvertTanH()");
1438 return ::ConvertTanH<hal_1_2::HalPolicy>(operation, model, data);
1439}
1440
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001441bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
1442{
    ALOGV("hal_1_2::HalPolicy::ConvertLstm()");

1443 // Inputs:
1444 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
1445 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
1446 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
1447 if (!input.IsValid())
1448 {
1449 return Fail("%s: Could not read input 0: input", __func__);
1450 }
1451 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
1452 LayerInputHandle outputStateIn = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 18, model, data);
1453 if (!outputStateIn.IsValid())
1454 {
1455 return Fail("%s: Could not read input 18: outputStateIn", __func__);
1456 }
1457 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
1458 LayerInputHandle cellStateIn = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 19, model, data);
1459 if (!cellStateIn.IsValid())
1460 {
1461 return Fail("%s: Could not read input 19: cellStateIn", __func__);
1462 }
1463
1464 // Get the mandatory input tensors:
1465 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1466 // [num_units, input_size].
1467 const ConstTensorPin inputToForgetWeightsPin =
1468 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);
1469 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1470 // [num_units, input_size].
1471 const ConstTensorPin inputToCellWeightsPin =
1472 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 3, model, data);
1473 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1474 // [num_units, input_size].
1475 const ConstTensorPin inputToOutputWeightsPin =
1476 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 4, model, data);
1477 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1478 // [num_units, output_size].
1479 const ConstTensorPin recurrentToForgetWeightsPin =
1480 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 6, model, data);
1481 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1482 // [num_units, output_size].
1483 const ConstTensorPin recurrentToCellWeightsPin =
1484 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 7, model, data);
1485 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1486 // [num_units, output_size].
1487 const ConstTensorPin recurrentToOutputWeightsPin =
1488 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 8, model, data);
1489 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1490 const ConstTensorPin forgetGateBiasPin =
1491 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 13, model, data);
1492 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1493 const ConstTensorPin cellBiasPin =
1494 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 14, model, data);
1495 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1496 const ConstTensorPin outputGateBiasPin =
1497 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 15, model, data);
1498
1499 if (!inputToForgetWeightsPin.IsValid() ||
1500 !inputToCellWeightsPin.IsValid() ||
1501 !inputToOutputWeightsPin.IsValid() ||
1502 !recurrentToForgetWeightsPin.IsValid() ||
1503 !recurrentToCellWeightsPin.IsValid() ||
1504 !recurrentToOutputWeightsPin.IsValid() ||
1505 !forgetGateBiasPin.IsValid() ||
1506 !cellBiasPin.IsValid() ||
1507 !outputGateBiasPin.IsValid())
1508 {
1509 return Fail("%s: Operation has invalid tensor inputs", __func__);
1510 }
1511
1512 // Get the optional input tensors:
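    // (the trailing 'true' argument marks each of these pins as optional, so an
    // absent operand produces an invalid-but-optional pin rather than a failure)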
1513 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1514 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
1515 const ConstTensorPin inputToInputWeightsPin =
1516 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1517 1,
1518 model,
1519 data,
1520 g_DontPermute,
1521 nullptr,
1522 true);
1523
1524 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1525 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
1526 // “num_units”), or the second dimension of the “projection_weights”, if defined.
1527 const ConstTensorPin recurrentToInputWeightsPin =
1528 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1529 5,
1530 model,
1531 data,
1532 g_DontPermute,
1533 nullptr,
1534 true);
1535
1536 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1537 const ConstTensorPin cellToInputWeightsPin =
1538 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1539 9,
1540 model,
1541 data,
1542 g_DontPermute,
1543 nullptr,
1544 true);
1545
1546 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1547 const ConstTensorPin cellToForgetWeightsPin =
1548 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1549 10,
1550 model,
1551 data,
1552 g_DontPermute,
1553 nullptr,
1554 true);
1555
1556 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1557 const ConstTensorPin cellToOutputWeightsPin =
1558 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1559 11,
1560 model,
1561 data,
1562 g_DontPermute,
1563 nullptr,
1564 true);
1565
1566 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1567 const ConstTensorPin inputGateBiasPin =
1568 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1569 12,
1570 model,
1571 data,
1572 g_DontPermute,
1573 nullptr,
1574 true);
1575
1576 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1577 // [output_size, num_units].
1578 const ConstTensorPin projectionWeightsPin =
1579 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1580 16,
1581 model,
1582 data,
1583 g_DontPermute,
1584 nullptr,
1585 true);
1586
1587 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
1588 const ConstTensorPin projectionBiasPin =
1589 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1590 17,
1591 model,
1592 data,
1593 g_DontPermute,
1594 nullptr,
1595 true);
1596
1597 if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
1598 (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
1599 (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
1600 (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
1601 (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
1602 (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
1603 (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
1604 (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
1605 {
1606 return Fail("%s: Operation has invalid tensor inputs", __func__);
1607 }
1608
1609 // Get the mandatory input scalars (actually 1-D tensors of size 1):
1610 // 20: The activation function: A value indicating the activation function:
1611 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
1612 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
1613 // If set to 0.0 then clipping is disabled.
1614 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
1615 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
1616 ActivationFn activation;
1617 float cellClip;
1618 float projClip;
1619 if (!GetInputActivationFunctionFromTensor<hal_1_2::HalPolicy>(operation, 20, activation, model, data) ||
1620 !GetInputScalar<hal_1_2::HalPolicy>(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
1621 !GetInputScalar<hal_1_2::HalPolicy>(operation, 22, OperandType::FLOAT32, projClip, model, data))
1622 {
1623 return Fail("%s: Operation has invalid scalar inputs", __func__);
1624 }
1625
1626 // Get the normalization tensors
1627 // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
1628 // Used to rescale normalized inputs to activation at input gate.
1629 const ConstTensorPin inputLayerNormWeightsPin =
1630 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1631 23,
1632 model,
1633 data,
1634 g_DontPermute,
1635 nullptr,
1636 true);
1637
1638 // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
1639 // Used to rescale normalized inputs to activation at forget gate.
1640 const ConstTensorPin forgetLayerNormWeightsPin =
1641 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1642 24,
1643 model,
1644 data,
1645 g_DontPermute,
1646 nullptr,
1647 true);
1648
1649 // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
1650 // Used to rescale normalized inputs to activation at cell gate.
1651 const ConstTensorPin cellLayerNormWeightsPin =
1652 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1653 25,
1654 model,
1655 data,
1656 g_DontPermute,
1657 nullptr,
1658 true);
1659
1660 // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
1661 // Used to rescale normalized inputs to activation at output gate.
1662 const ConstTensorPin outputLayerNormWeightsPin =
1663 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1664 26,
1665 model,
1666 data,
1667 g_DontPermute,
1668 nullptr,
1669 true);
1670
1671 // Outputs:
1672 // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
1673 // with CIFG, or [batch_size, num_units * 3] without CIFG.
1674 const Operand* scratchBuffer = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
1675 if (!scratchBuffer)
1676 {
1677 return Fail("%s: Could not read output 0: scratchBuffer", __func__);
1678 }
1679 // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
1680 const Operand* outputStateOut = GetOutputOperand<hal_1_2::HalPolicy>(operation, 1, model);
1681 if (!outputStateOut)
1682 {
1683 return Fail("%s: Could not read output 1: outputStateOut", __func__);
1684 }
1685 // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
1686 const Operand* cellStateOut = GetOutputOperand<hal_1_2::HalPolicy>(operation, 2, model);
1687 if (!cellStateOut)
1688 {
1689 return Fail("%s: Could not read output 2: cellStateOut", __func__);
1690 }
1691 // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
1692 // effectively the same as the current “output state (out)” value.
1693 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 3, model);
1694 if (!output)
1695 {
1696 return Fail("%s: Could not read output 3: output", __func__);
1697 }
1698
1699 // set the params structure for the AddLstmLayer call
1700 armnn::LstmInputParams params;
1701 params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
1702 params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
1703 params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
1704 params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
1705 params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
1706 params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
1707 params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
1708 params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
1709 params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
1710 params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
1711 params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
1712 params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
1713 params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
1714 params.m_CellBias = cellBiasPin.GetConstTensorPtr();
1715 params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
1716 params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
1717 params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
1718 params.m_InputLayerNormWeights = inputLayerNormWeightsPin.GetConstTensorPtr();
1719 params.m_ForgetLayerNormWeights = forgetLayerNormWeightsPin.GetConstTensorPtr();
1720 params.m_CellLayerNormWeights = cellLayerNormWeightsPin.GetConstTensorPtr();
1721 params.m_OutputLayerNormWeights = outputLayerNormWeightsPin.GetConstTensorPtr();
1722
1723 // set the layer descriptor
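    // (CIFG, peephole, projection and layer normalisation are enabled purely
    // according to which of the optional tensors were supplied above)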
1724 armnn::LstmDescriptor desc;
1725 desc.m_ActivationFunc = activation;
1726 desc.m_ClippingThresCell = cellClip;
1727 desc.m_ClippingThresProj = projClip;
1728 desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
1729 params.m_RecurrentToInputWeights == nullptr ||
1730 params.m_InputGateBias == nullptr);
1731 desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
1732 params.m_CellToOutputWeights != nullptr);
1733 desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
1734 desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr ||
1735 params.m_ForgetLayerNormWeights != nullptr ||
1736 params.m_CellLayerNormWeights != nullptr ||
1737 params.m_OutputLayerNormWeights != nullptr);
1738
1739 // validate the optional input groups
1740 if (desc.m_CifgEnabled &&
1741 (params.m_InputToInputWeights != nullptr ||
1742 params.m_RecurrentToInputWeights != nullptr ||
1743 params.m_InputGateBias != nullptr))
1744 {
1745 return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
1746 " and input gate bias must be provided", __func__);
1747 }
1748
1749 if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
1750 {
1751 return Fail("%s: projection bias should not be provided without projection weights", __func__);
1752 }
1753
1754 if (desc.m_PeepholeEnabled &&
1755 (params.m_CellToForgetWeights == nullptr ||
1756 params.m_CellToOutputWeights == nullptr ||
1757 (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
1758 {
1759 return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
1760 " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
1761 }
1762
1763 if (desc.m_LayerNormEnabled &&
1764 (params.m_ForgetLayerNormWeights == nullptr ||
1765 params.m_CellLayerNormWeights == nullptr ||
1766 params.m_OutputLayerNormWeights == nullptr ||
1767 (!desc.m_CifgEnabled && params.m_InputLayerNormWeights == nullptr)))
1768 {
1769 return Fail("%s: All, or none, of forget-norm weights, cell-norm weights and output-norm weights must be"
1770 " provided and, if CIFG is not enabled, input-norm weights must also be provided", __func__);
1771 }
1772
1773 // Check if the layer is supported
1774 // Inputs
1775 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1776 const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
1777 const armnn::TensorInfo& cellStateInInfo = cellStateIn.GetTensorInfo();
1778
1779 // Outputs
1780 const armnn::TensorInfo& scratchBufferInfo = GetTensorInfoForOperand(*scratchBuffer);
1781 const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
1782 const armnn::TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
1783 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1784
Ferran Balaguera4a629a2019-07-30 10:16:13 +01001785 if (IsDynamicTensor(scratchBufferInfo) ||
1786 IsDynamicTensor(outputStateOutInfo) ||
1787 IsDynamicTensor(cellStateOutInfo) ||
1788 IsDynamicTensor(outputInfo))
1789 {
1790 return Fail("%s: Dynamic output tensors are not supported", __func__);
1791 }
1792
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001793 // Basic parameters
1794 armnn::LstmInputParamsInfo paramsInfo;
1795 paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
1796 paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
1797 paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
1798 paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
1799 paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
1800 paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
1801 paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
1802 paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
1803 paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
1804
1805 // Optional parameters
1806 if (!desc.m_CifgEnabled)
1807 {
1808 paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
1809 paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
1810 if (params.m_CellToInputWeights != nullptr)
1811 {
1812 paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
1813 }
1814 paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
1815 }
1816
1817 if (desc.m_ProjectionEnabled)
1818 {
1819 paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
1820 if (params.m_ProjectionBias != nullptr)
1821 {
1822 paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
1823 }
1824 }
1825
1826 if (desc.m_PeepholeEnabled)
1827 {
1828 paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
1829 paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
1830 }
1831
1832 if (desc.m_LayerNormEnabled)
1833 {
1834 if (!desc.m_CifgEnabled)
1835 {
1836 paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
1837 }
1838 paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
1839 paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
1840 paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
1841 }
1842
1843 bool isSupported = false;
1844 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1845 IsLstmSupported,
1846 data.m_Backends,
1847 isSupported,
1848 inputInfo,
1849 outputStateInInfo,
1850 cellStateInInfo,
1851 scratchBufferInfo,
1852 outputStateOutInfo,
1853 cellStateOutInfo,
1854 outputInfo,
1855 desc,
1856 paramsInfo);
1857 if (!isSupported)
1858 {
1859 return false;
1860 }
1861
1862 // Add the layer
1863 armnn::IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");
1864
1865 input.Connect(layer->GetInputSlot(0));
1866 outputStateIn.Connect(layer->GetInputSlot(1));
1867 cellStateIn.Connect(layer->GetInputSlot(2));
1868
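    // Operation outputs 0-3 (scratch buffer, output state out, cell state out,
    // output) map one-to-one onto the Lstm layer's four output slots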
1869 return (SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, 0, model, data) &&
1870 SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 1, *layer, 1, model, data) &&
1871 SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 2, *layer, 2, model, data) &&
1872 SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 3, *layer, 3, model, data));
1873}
1874
Sadik Armagan701d9a02019-09-04 15:16:18 +01001875bool HalPolicy::ConvertSqrt(const Operation& operation, const Model& model, ConversionData& data)
1876{
1877 ALOGV("hal_1_2::HalPolicy::ConvertSqrt()");
1878 armnn::ActivationDescriptor desc;
1879 desc.m_Function = armnn::ActivationFunction::Sqrt;
1880
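    // There is no dedicated Sqrt layer; the operation is lowered onto an
    // activation layer configured with the Sqrt function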
1881 return ::ConvertToActivation<hal_1_2::HalPolicy>(operation, __func__, desc, model, data);
1882}
1883
Mike Kelly46272802019-08-14 17:00:48 +01001884bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
1885{
Sadik Armagan701d9a02019-09-04 15:16:18 +01001886 ALOGV("hal_1_2::HalPolicy::ConvertSqueeze()");
Mike Kelly46272802019-08-14 17:00:48 +01001887 return ::ConvertSqueeze<hal_1_2::HalPolicy>(operation, model, data);
1888}
1889
1890bool HalPolicy::ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
1891{
Sadik Armagan701d9a02019-09-04 15:16:18 +01001892 ALOGV("hal_1_2::HalPolicy::ConvertStridedSlice()");
Mike Kelly46272802019-08-14 17:00:48 +01001893 return ::ConvertStridedSlice<hal_1_2::HalPolicy>(operation, model, data);
1894}
1895
1896bool HalPolicy::ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
1897{
Sadik Armagan701d9a02019-09-04 15:16:18 +01001898 ALOGV("hal_1_2::HalPolicy::ConvertTranspose()");
Mike Kelly46272802019-08-14 17:00:48 +01001899 return ::ConvertTranspose<hal_1_2::HalPolicy>(operation, model, data);
1900}
1901
Aron Virginas-Tar8b991682019-07-31 12:54:59 +01001902bool HalPolicy::ConvertTransposeConv2d(const Operation& operation, const Model& model, ConversionData& data)
David Monahan613b49c2019-06-27 11:37:47 +01001903{
    ALOGV("hal_1_2::HalPolicy::ConvertTransposeConv2d()");

1904 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
1905
1906 if (!input.IsValid())
1907 {
1908 return Fail("%s: Operation has invalid inputs", __func__);
1909 }
1910
1911 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
1912
1913 if (!output)
1914 {
1915 return Fail("%s: Could not read output 0", __func__);
1916 }
1917
1918 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1919 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1920 if (IsDynamicTensor(outputInfo))
1921 {
1922 return Fail("%s: Dynamic output tensors are not supported", __func__);
1923 }
1924
1925 // ArmNN does not currently support non-fixed weights or bias
1926 // Find the shape of the weights tensor. In AndroidNN this will be [ depth_out, filter_height, filter_width, depth_in ]
1927 const Operand* weightsOperand = GetInputOperand<hal_1_2::HalPolicy>(operation, 1, model);
1928
1929 if (weightsOperand == nullptr)
1930 {
1931 return Fail("%s: Operand is invalid", __func__);
1932 }
1933 armnn::TransposeConvolution2dDescriptor desc;
1934 desc.m_DataLayout = armnn::DataLayout::NHWC;
1935
1936 // Determine whether padding is implicit or explicit
1937 bool implicitPadding = operation.inputs.size() == 9;
1938
1939 if (implicitPadding)
1940 {
1941 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 8, model, data);
1942 }
1943 else
1944 {
1945 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 10, model, data);
1946 }
1947
1948 armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
1949 unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
1950 unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
1951
1952 const armnn::PermutationVector OHWIToOIHW = {0, 2, 3, 1};
1953
1954 // The shape of the weight is [depth_out, filter_height, filter_width, depth_in].
1955 // We have to permute it to OIHW if the data layout is NCHW.
1956 const ConstTensorPin weightsPin = (desc.m_DataLayout == armnn::DataLayout::NCHW) ?
1957 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data, OHWIToOIHW) :
1958 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
1959
1960 // Bias is a 1D tensor
1961 const ConstTensorPin biasPin =
1962 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);
1963
1964 if (!weightsPin.IsValid())
1965 {
1966 return Fail("%s: Operation has invalid weights", __func__);
1967 }
1968
1969 if (!biasPin.IsValid())
1970 {
1971 return Fail("%s: Operation has invalid biases", __func__);
1972 }
1973
1974 armnn::ConstTensor weights = weightsPin.GetConstTensor();
1975 armnn::ConstTensor bias = biasPin.GetConstTensor();
1976 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
1977
1978 ActivationFn activation;
1979
1980 if (implicitPadding)
1981 {
Sadik Armagan3e3003e2019-08-13 12:54:34 +01001982 int32_t strideX{0};
1983 int32_t strideY{0};
1984 int32_t padLeft{0};
1985 int32_t padRight{0};
1986 int32_t padTop{0};
1987 int32_t padBottom{0};
1988
David Monahan613b49c2019-06-27 11:37:47 +01001989 android::nn::PaddingScheme paddingScheme;
1990 if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 4, paddingScheme, model, data) ||
Sadik Armagan3e3003e2019-08-13 12:54:34 +01001991 !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, strideX, model, data) ||
1992 !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, strideY, model, data) ||
David Monahan613b49c2019-06-27 11:37:47 +01001993 !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 7, activation, model, data))
1994 {
1995 return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
1996 }
1997
1998 const uint32_t kernelX = weights.GetShape()[widthIndex];
1999 const uint32_t kernelY = weights.GetShape()[heightIndex];
Narumol Prangnawaratc8bdb392019-08-01 15:51:44 +01002000 const uint32_t outputX = outputInfo.GetShape()[widthIndex];
2001 const uint32_t outputY = outputInfo.GetShape()[heightIndex];
David Monahan613b49c2019-06-27 11:37:47 +01002002
    // Use the strides read from the model: desc.m_StrideX/Y are not populated
    // until further down, so passing them here would compute padding with stride 0
Narumol Prangnawaratc8bdb392019-08-01 15:51:44 +01002003 CalcPaddingTransposeConv(outputX, kernelX, boost::numeric_cast<uint32_t>(strideX), padLeft, padRight, paddingScheme);
2004 CalcPaddingTransposeConv(outputY, kernelY, boost::numeric_cast<uint32_t>(strideY), padTop, padBottom, paddingScheme);
2005
2006 // NOTE: The Android NN API allows for negative padding values in TransposeConv2d,
2007 // but Arm NN only supports values >= 0
2008 if (padLeft < 0 || padRight < 0 || padTop < 0 || padBottom < 0)
2009 {
2010 return Fail("%s: Negative padding values are not supported", __func__);
2011 }
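    // Under the usual SAME-padding convention for transpose convolution
    // (outputSize = inputSize * stride), outputSize = (inputSize - 1) * stride
    // + kernelSize - (padHead + padTail), so the total padding works out to
    // kernelSize - stride, e.g. 1 for a 3x3 kernel with stride 2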
2012
Sadik Armagan3e3003e2019-08-13 12:54:34 +01002013 desc.m_StrideX = boost::numeric_cast<uint32_t>(strideX);
2014 desc.m_StrideY = boost::numeric_cast<uint32_t>(strideY);
Narumol Prangnawaratc8bdb392019-08-01 15:51:44 +01002015 desc.m_PadLeft = boost::numeric_cast<uint32_t>(padLeft);
2016 desc.m_PadRight = boost::numeric_cast<uint32_t>(padRight);
2017 desc.m_PadTop = boost::numeric_cast<uint32_t>(padTop);
2018 desc.m_PadBottom = boost::numeric_cast<uint32_t>(padBottom);
David Monahan613b49c2019-06-27 11:37:47 +01002019 }
2020 else if (operation.inputs.size() == 11)
2021 {
2022 // explicit padding
2023 if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
2024 !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
2025 !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
2026 !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
2027 !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
2028 !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
2029 !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 9, activation, model, data))
2030 {
2031 return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
2032 }
2033 }
2034 else
2035 {
2036 return Fail("%s: Unsupported number of operation inputs", __func__);
2037 }
2038
2039 desc.m_BiasEnabled = true;
2040 armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
2041
2042 bool isSupported = false;
2043 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2044 IsTransposeConvolution2dSupported,
2045 data.m_Backends,
2046 isSupported,
2047 inputInfo,
2048 outputInfo,
2049 desc,
2050 weights.GetInfo(),
2051 biases);
2052 if (!isSupported)
2053 {
2054 return false;
2055 }
2056
2057 armnn::IConnectableLayer* startLayer =
2058 data.m_Network->AddTransposeConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
2059 if (!startLayer)
2060 {
2061 return Fail("%s: AddTransposeConvolution2dLayer failed", __func__);
2062 }
2063
2064 armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
2065 if (!endLayer)
2066 {
2067 return Fail("%s: ProcessActivation failed", __func__);
2068 }
2069
2070 input.Connect(startLayer->GetInputSlot(0));
2071
2072 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
2073}
2074
Mike Kellyb5fdf382019-06-11 16:35:25 +01002075} // namespace hal_1_2
Matteo Martincigh17ffff32019-06-27 14:12:55 +01002076} // namespace armnn_driver