blob: 7aa6967a1105d6fece8ddc8b661c7edc3623faa6 [file] [log] [blame]
Mike Kellyb5fdf382019-06-11 16:35:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "HalPolicy.hpp"
7
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +01008#include "Utils.hpp"
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01009
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010010#include <DataLayoutIndexed.hpp>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +010011#include <Half.hpp>
Narumol Prangnawarat85f96542019-09-12 16:26:29 +010012#include <TensorUtils.hpp>
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010013
14#include <cmath>
15
Mike Kellyb5fdf382019-06-11 16:35:25 +010016namespace armnn_driver
17{
18namespace hal_1_2
19{
20
Mike Kellyb5fdf382019-06-11 16:35:25 +010021bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
22{
Mike Kellyb5fdf382019-06-11 16:35:25 +010023 switch (operation.type)
24 {
Kevin May407718f2019-09-09 14:46:41 +010025 case V1_2::OperationType::ABS:
26 return ConvertAbs(operation, model, data);
Mike Kelly46272802019-08-14 17:00:48 +010027 case V1_2::OperationType::ADD:
28 return ConvertAdd(operation, model, data);
Sadik Armagan15d63e22019-07-26 16:59:35 +010029 case V1_2::OperationType::AVERAGE_POOL_2D:
30 return ConvertAveragePool2d(operation, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +010031 case V1_2::OperationType::BATCH_TO_SPACE_ND:
32 return ConvertBatchToSpaceNd(operation, model, data);
Mike Kellyb8805202019-07-31 17:25:43 +010033 case V1_2::OperationType::CONCATENATION:
34 return ConvertConcatenation(operation, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +010035 case V1_2::OperationType::CONV_2D:
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +010036 return ConvertConv2d(operation, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +010037 case V1_2::OperationType::DEPTHWISE_CONV_2D:
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +010038 return ConvertDepthwiseConv2d(operation, model, data);
Mike Kelly46272802019-08-14 17:00:48 +010039 case V1_2::OperationType::DEQUANTIZE:
40 return ConvertDequantize(operation, model, data);
41 case V1_2::OperationType::DIV:
42 return ConvertDiv(operation, model, data);
Narumol Prangnawarat85f96542019-09-12 16:26:29 +010043 case V1_2::OperationType::EXPAND_DIMS:
44 return ConvertExpandDims(operation, model, data);
Mike Kelly46272802019-08-14 17:00:48 +010045 case V1_2::OperationType::FLOOR:
46 return ConvertFloor(operation, model, data);
47 case V1_2::OperationType::FULLY_CONNECTED:
48 return ConvertFullyConnected(operation, model, data);
49 case V1_2::OperationType::L2_NORMALIZATION:
50 return ConvertL2Normalization(operation, model, data);
Sadik Armagan15d63e22019-07-26 16:59:35 +010051 case V1_2::OperationType::L2_POOL_2D:
52 return ConvertL2Pool2d(operation, model, data);
Mike Kelly46272802019-08-14 17:00:48 +010053 case V1_2::OperationType::LOCAL_RESPONSE_NORMALIZATION:
54 return ConvertLocalResponseNormalization(operation, model, data);
55 case V1_2::OperationType::LOGISTIC:
56 return ConvertLogistic(operation, model, data);
57 case V1_2::OperationType::LSTM:
58 return ConvertLstm(operation, model, data);
Sadik Armagan15d63e22019-07-26 16:59:35 +010059 case V1_2::OperationType::MAX_POOL_2D:
60 return ConvertMaxPool2d(operation, model, data);
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +010061 case V1_2::OperationType::MAXIMUM:
62 return ConvertMaximum(operation, model, data);
Mike Kelly46272802019-08-14 17:00:48 +010063 case V1_2::OperationType::MEAN:
64 return ConvertMean(operation, model, data);
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +010065 case V1_2::OperationType::MINIMUM:
66 return ConvertMinimum(operation, model, data);
Mike Kelly46272802019-08-14 17:00:48 +010067 case V1_2::OperationType::MUL:
68 return ConvertMul(operation, model, data);
Mike Kelly3c673942019-07-25 09:26:06 +010069 case V1_2::OperationType::PAD:
Aron Virginas-Tarc921f6b2019-07-25 10:14:33 +010070 return ConvertPad(operation, model, data);
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +010071 case V1_2::OperationType::PAD_V2:
72 return ConvertPadV2(operation, model, data);
Matteo Martincigh17ffff32019-06-27 14:12:55 +010073 case V1_2::OperationType::PRELU:
74 return ConvertPrelu(operation, model, data);
Sadik Armagan5a476a82019-07-30 09:43:18 +010075 case V1_2::OperationType::QUANTIZE:
76 return ConvertQuantize(operation, model, data);
Ellen Norris-Thompson7efb46d2019-07-24 17:39:19 +010077 case V1_2::OperationType::QUANTIZED_16BIT_LSTM:
78 return ConvertQuantizedLstm(operation, model, data);
Sadik Armagan61113162019-07-25 09:09:40 +010079 case V1_2::OperationType::RELU:
80 return ConvertReLu(operation, model, data);
81 case V1_2::OperationType::RELU1:
82 return ConvertReLu1(operation, model, data);
83 case V1_2::OperationType::RELU6:
84 return ConvertReLu6(operation, model, data);
Mike Kelly46272802019-08-14 17:00:48 +010085 case V1_2::OperationType::RESHAPE:
86 return ConvertReshape(operation, model, data);
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +010087 case V1_2::OperationType::RESIZE_BILINEAR:
88 return ConvertResize(operation, model, data, armnn::ResizeMethod::Bilinear);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010089 case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR:
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +010090 return ConvertResize(operation, model, data, armnn::ResizeMethod::NearestNeighbor);
Aron Virginas-Tarfa6544e2019-09-10 14:42:22 +010091 case V1_2::OperationType::RSQRT:
92 return ConvertRsqrt(operation, model, data);
Sadik Armagan701d9a02019-09-04 15:16:18 +010093 case V1_2::OperationType::SQRT:
94 return ConvertSqrt(operation, model, data);
Mike Kelly46272802019-08-14 17:00:48 +010095 case V1_2::OperationType::SQUEEZE:
96 return ConvertSqueeze(operation, model, data);
97 case V1_2::OperationType::STRIDED_SLICE:
98 return ConvertStridedSlice(operation, model, data);
99 case V1_2::OperationType::TRANSPOSE:
100 return ConvertTranspose(operation, model, data);
David Monahan613b49c2019-06-27 11:37:47 +0100101 case V1_2::OperationType::TRANSPOSE_CONV_2D:
Aron Virginas-Tar8b991682019-07-31 12:54:59 +0100102 return ConvertTransposeConv2d(operation, model, data);
Francis Murtagh074c25a2019-07-22 16:40:57 +0100103 case V1_2::OperationType::SOFTMAX:
104 return ConvertSoftmax(operation, model, data);
Finn Williamsd74c5052019-07-30 17:06:00 +0100105 case V1_2::OperationType::SPACE_TO_BATCH_ND :
106 return ConvertSpaceToBatchNd(operation, model, data);
Aron Virginas-Tarad1ab532019-07-25 11:24:42 +0100107 case V1_2::OperationType::SPACE_TO_DEPTH:
108 return ConvertSpaceToDepth(operation, model, data);
Mike Kelly0a879362019-07-29 16:56:31 +0100109 case V1_2::OperationType::SUB:
110 return ConvertSub(operation, model, data);
Sadik Armagan61113162019-07-25 09:09:40 +0100111 case V1_2::OperationType::TANH:
112 return ConvertTanH(operation, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +0100113 default:
114 return Fail("%s: Operation type %s not supported in ArmnnDriver",
115 __func__, toString(operation.type).c_str());
116 }
117}
118
Kevin May407718f2019-09-09 14:46:41 +0100119bool HalPolicy::ConvertAbs(const Operation& operation, const Model& model, ConversionData& data)
120{
121 ALOGV("hal_1_2::HalPolicy::ConvertAbs()");
122 return ::ConvertAbs<hal_1_2::HalPolicy>(operation, model, data);
123}
124
Mike Kelly46272802019-08-14 17:00:48 +0100125bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
126{
127 ALOGV("hal_1_2::HalPolicy::ConvertAdd()");
128 return ::ConvertAdd<hal_1_2::HalPolicy>(operation, model, data);
129}
130
Sadik Armagan15d63e22019-07-26 16:59:35 +0100131bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
132{
133 ALOGV("hal_1_2::HalPolicy::ConvertAveragePool2d()");
134 return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
135}
136
Finn Williams23b87b32019-07-30 11:44:05 +0100137bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data)
138{
139 ALOGV("hal_1_2::HalPolicy::ConvertBatchToSpaceNd()");
140 return ::ConvertBatchToSpaceNd<hal_1_2::HalPolicy>(operation, model, data);
141}
142
Mike Kellyb8805202019-07-31 17:25:43 +0100143bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
144{
145 ALOGV("hal_1_2::HalPolicy::ConvertConcatenation()");
146 return ::ConvertConcatenation<hal_1_2::HalPolicy>(operation, model, data);
147}
148
// Converts an NN HAL 1.2 CONV_2D operation into an ArmNN Convolution2d layer.
// Handles both operand layouts defined by NNAPI: the implicit-padding form
// (padding scheme + strides, 7+ inputs) and the explicit-padding form
// (four pad values + strides, 10+ inputs), each with optional dilation
// parameters and an optional NCHW data-layout flag.
// Returns true and registers the output slot on success; false (via Fail) otherwise.
bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertConv2d()");

    // Input 0 is the tensor to be convolved.
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Output shape must be fully known up front; dynamic shapes are rejected.
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit.
    // With implicit padding the 8th input (index 7), when present, is the BOOL
    // data-layout flag; with explicit padding that position holds an INT32 stride.
    bool implicitPadding = operation.inputs.size() == 7 ||
        (operation.inputs.size() >= 8 &&
        GetInputOperand<hal_1_2::HalPolicy>(operation, 7, model)->type == OperandType::BOOL);

    // The optional data-layout flag sits at a different index in each variant.
    if (implicitPadding)
    {
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 7, model, data);
    }
    else if (operation.inputs.size() >= 10)
    {
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 10, model, data);
    }

    const armnn::PermutationVector OHWIToOIHW = {0, 2, 3, 1};

    // ArmNN does not currently support non-fixed weights or bias
    // The NNAPI filter is always OHWI [depth_out, filter_height, filter_width, depth_in] but ArmNN expects the
    // filter's height and width indices to match the input's height and width indices so we permute it to OIHW if
    // the DataLayout is NCHW
    const ConstTensorPin weightsPin = (desc.m_DataLayout == armnn::DataLayout::NCHW) ?
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data, OHWIToOIHW) :
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
    // Input 2 is the 1-D bias tensor.
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Force the bias quantization scale to match input_scale * weights_scale, as NNAPI requires.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        // Implicit layout: 3 = padding scheme, 4/5 = strides, 6 = fused activation,
        // 8+ = optional dilation parameters.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 6, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 8, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        // Compute the pad values from kernel/input extents and the padding scheme.
        // Note: the weights shape is indexed with the (possibly permuted) data layout's
        // width/height indices, matching the permutation applied to weightsPin above.
        armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
        unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
        unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
        const uint32_t kernelX = weights.GetShape()[widthIndex];
        const uint32_t kernelY = weights.GetShape()[heightIndex];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);

    }
    else if (operation.inputs.size() >= 10)
    {
        // explicit padding: 3-6 = pad left/right/top/bottom, 7/8 = strides,
        // 9 = fused activation, 11+ = optional dilation parameters.
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 9, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 11, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Ask the configured backends whether they can execute this convolution.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer == startLayer when activation is none.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}
300
// Converts an NN HAL 1.2 DEPTHWISE_CONV_2D operation into an ArmNN
// DepthwiseConvolution2d layer. Handles both NNAPI operand layouts: the
// implicit-padding form (8+ inputs) and the explicit-padding form (11+ inputs),
// each with optional dilation parameters and an optional data-layout flag.
// Returns true and registers the output slot on success; false (via Fail) otherwise.
bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertDepthwiseConv2d()");

    // Input 0 is the tensor to be convolved.
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Output shape must be fully known up front; dynamic shapes are rejected.
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const Operand* weightsOperand = GetInputOperand<hal_1_2::HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit.
    // With implicit padding the 9th input (index 8), when present, is the BOOL
    // data-layout flag; with explicit padding that position holds an INT32 stride.
    bool implicitPadding = operation.inputs.size() == 8 ||
        (operation.inputs.size() >= 9 &&
        GetInputOperand<hal_1_2::HalPolicy>(operation, 8, model)->type == OperandType::BOOL);

    // Look ahead to find the optional DataLayout, if present
    const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
    desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, dataLayoutFlagIndex, model, data);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();

    // Reinterpret weight data as [ H, W, I, M ]
    // (I = input channels, M = depth multiplier, recovered from the flattened I*M axis).
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[channelsIndex],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  HWIMToMIHW,
                                                                  &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Force the bias quantization scale to match input_scale * weights_scale, as NNAPI requires.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        // Implicit layout: 3 = padding scheme, 4/5 = strides, 7 = fused activation,
        // 9+ = optional dilation parameters.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 7, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        // After the HWIM -> MIHW swizzle above, the weights shape is [ M, I, H, W ]:
        // index 3 is the kernel width, index 2 the kernel height.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 11)
    {
        // explicit padding: 3-6 = pad left/right/top/bottom, 7/8 = strides,
        // 10 = fused activation, 12+ = optional dilation parameters.
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 10, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Ask the configured backends whether they can execute this depthwise convolution.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer == startLayer when activation is none.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}
466
Mike Kelly46272802019-08-14 17:00:48 +0100467bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
468{
469 ALOGV("hal_1_2::HalPolicy::ConvertDequantize()");
470 return ::ConvertDequantize<hal_1_2::HalPolicy>(operation, model, data);
471}
472
473bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
474{
475 ALOGV("hal_1_2::HalPolicy::ConvertDiv()");
476 return ::ConvertDiv<hal_1_2::HalPolicy>(operation, model, data);
477}
478
Narumol Prangnawarat85f96542019-09-12 16:26:29 +0100479bool HalPolicy::ConvertExpandDims(const Operation& operation, const Model& model, ConversionData& data)
480{
481 ALOGV("hal_1_2::HalPolicy::ConvertExpandDims()");
482
483 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
484
485 if (!input.IsValid())
486 {
487 return Fail("%s: Operation has invalid input", __func__);
488 }
489
490 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
491 if (!output)
492 {
493 return Fail("%s: Operation has invalid output", __func__);
494 }
495
496 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
497 if (IsDynamicTensor(outputInfo))
498 {
499 return Fail("%s: Dynamic output tensors are not supported", __func__);
500 }
501
502 int32_t axis;
503 if (!GetInputScalar<HalPolicy>(operation, 1, OperandType::INT32, axis, model, data))
504 {
505 return Fail("%s: failed to get axis input value", __func__);
506 }
507
508 armnn::TensorShape targetShape;
509
510 try
511 {
512 targetShape = armnnUtils::ExpandDims(input.GetTensorInfo().GetShape(), axis);
513 }
514 catch (const std::exception &e)
515 {
516 return Fail("%s: %s", __func__, e.what());
517 }
518
519 if (targetShape != outputInfo.GetShape())
520 {
521 return Fail("%s: Shape of the output operand does not match the resolved expanded shape", __func__);
522 }
523
524 armnn::ReshapeDescriptor reshapeDescriptor;
525 reshapeDescriptor.m_TargetShape = targetShape;
526
527 bool isSupported = false;
528 FORWARD_LAYER_SUPPORT_FUNC(__func__,
529 IsReshapeSupported,
530 data.m_Backends,
531 isSupported,
532 input.GetTensorInfo(),
533 reshapeDescriptor);
534
535 if (!isSupported)
536 {
537 return false;
538 }
539
540 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
541 assert(layer != nullptr);
542 input.Connect(layer->GetInputSlot(0));
543
544 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
545}
546
Mike Kelly46272802019-08-14 17:00:48 +0100547bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
548{
549 ALOGV("hal_1_2::HalPolicy::ConvertFloor()");
550 return ::ConvertFloor<hal_1_2::HalPolicy>(operation, model, data);
551}
552
553bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
554{
555 ALOGV("hal_1_2::HalPolicy::ConvertFullyConnected()");
556 return ::ConvertFullyConnected<hal_1_2::HalPolicy>(operation, model, data);
557}
558
559bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
560{
561 ALOGV("hal_1_2::HalPolicy::ConvertL2Normalization()");
562 return ::ConvertL2Normalization<hal_1_2::HalPolicy>(operation, model, data);
563}
564
Sadik Armagan15d63e22019-07-26 16:59:35 +0100565bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
566{
567 ALOGV("hal_1_2::HalPolicy::ConvertL2Pool2d()");
568 return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
569}
570
Mike Kelly46272802019-08-14 17:00:48 +0100571bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
572 const Model& model,
573 ConversionData& data)
574{
575 ALOGV("hal_1_2::HalPolicy::ConvertLocalResponseNormalization()");
576 return ::ConvertLocalResponseNormalization<hal_1_2::HalPolicy>(operation, model, data);
577}
578
579bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
580{
581 ALOGV("hal_1_2::HalPolicy::ConvertLogistic()");
582 return ::ConvertLogistic<hal_1_2::HalPolicy>(operation, model, data);
583}
584
Sadik Armagan15d63e22019-07-26 16:59:35 +0100585bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
586{
587 ALOGV("hal_1_2::HalPolicy::ConvertMaxPool2d()");
588 return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
589}
590
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100591bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
592{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100593 ALOGV("hal_1_2::HalPolicy::ConvertMaximum()");
594
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100595 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
596 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
597
598 if (!input0.IsValid() || !input1.IsValid())
599 {
600 return Fail("%s: Operation has invalid inputs", __func__);
601 }
602
603 const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
604 if (!outputOperand)
605 {
606 return Fail("%s: Could not read output", __func__);
607 }
608
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100609 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100610 if (IsDynamicTensor(outInfo))
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100611 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100612 return Fail("%s: Dynamic output tensors are not supported", __func__);
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100613 }
614
Aron Virginas-Tard7593232019-07-16 13:17:06 +0100615 bool isSupported = false;
616 FORWARD_LAYER_SUPPORT_FUNC(__func__,
617 IsMaximumSupported,
618 data.m_Backends,
619 isSupported,
620 input0.GetTensorInfo(),
621 input1.GetTensorInfo(),
622 outInfo);
623
624 if (!isSupported)
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100625 {
626 return false;
627 }
628
629 armnn::IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
630 assert(layer != nullptr);
Sadik Armagan64b19b52019-08-19 09:49:58 +0100631 bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
632 if (!isReshapeSupported)
633 {
634 return false;
635 }
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100636
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100637 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100638}
639
Mike Kelly46272802019-08-14 17:00:48 +0100640bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
641{
642 ALOGV("hal_1_2::HalPolicy::ConvertMean()");
643 return ::ConvertMean<hal_1_2::HalPolicy>(operation, model, data);
644}
645
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100646bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
647{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100648 ALOGV("hal_1_2::HalPolicy::ConvertMinimum()");
649
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100650 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
651 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
652
653 if (!input0.IsValid() || !input1.IsValid())
654 {
655 return Fail("%s: Operation has invalid inputs", __func__);
656 }
657
658 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
659 if (!output)
660 {
661 return Fail("%s: Could not read output 0", __func__);
662 }
663
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100664 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100665 if (IsDynamicTensor(outputInfo))
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100666 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100667 return Fail("%s: Dynamic output tensors are not supported", __func__);
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100668 }
669
670 bool isSupported = false;
671 FORWARD_LAYER_SUPPORT_FUNC(__func__,
672 IsMinimumSupported,
673 data.m_Backends,
674 isSupported,
675 input0.GetTensorInfo(),
676 input1.GetTensorInfo(),
677 outputInfo);
678
679 if (!isSupported)
680 {
681 return false;
682 }
683
684 armnn::IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
685 assert(layer != nullptr);
Sadik Armagan64b19b52019-08-19 09:49:58 +0100686 bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
687 if (!isReshapeSupported)
688 {
689 return false;
690 }
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100691
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100692 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100693}
694
// MUL: thin wrapper that forwards to the shared template implementation
// ::ConvertMul, instantiated for the 1.2 policy.
bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertMul()");
    return ::ConvertMul<hal_1_2::HalPolicy>(operation, model, data);
}
700
// PAD: thin wrapper that forwards to the shared template implementation
// ::ConvertPad, instantiated for the 1.2 policy.
bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertPad()");
    return ::ConvertPad<hal_1_2::HalPolicy>(operation, model, data);
}
706
// PAD_V2: pads input 0 with an explicit padding value (input 2), using the
// per-dimension padding amounts given by input 1. The padding value's scalar
// type must match the tensor type of input 0 (FLOAT16/FLOAT32/INT32 for
// TENSOR_FLOAT16/TENSOR_FLOAT32/TENSOR_QUANT8_ASYMM respectively).
bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertPadV2()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    // Input 1 holds the padding amounts; ConvertPaddings validates them
    // against the input rank and fills in the descriptor's pad list.
    armnn::PadDescriptor descriptor;
    if (!ConvertPaddings<hal_1_2::HalPolicy>(operation, model, data, rank, descriptor))
    {
        return Fail("%s: Could not convert paddings", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Determine type of padding value: the scalar type of input 2 must pair
    // correctly with the tensor type of input 0.
    OperandType operandType0;
    OperandType operandType2;

    if (!GetOperandType<hal_1_2::HalPolicy>(operation, 0, model, operandType0) ||
        !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // Read value to use for padding
    if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
    {
        armnn::Half f16PadValue;
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 2, operandType2, f16PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
        }

        // Half converts implicitly to the float m_PadValue.
        descriptor.m_PadValue = f16PadValue;
    }
    else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
    {
        if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, descriptor.m_PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
        }
    }
    else if (operandType0 == OperandType::TENSOR_QUANT8_ASYMM && operandType2 == OperandType::INT32)
    {
        // NOTE(review): the raw INT32 quantized value is stored directly into
        // the float m_PadValue with no dequantization here — presumably the
        // backend interprets it in the quantized domain; confirm downstream.
        int32_t intPadValue = 0;
        if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, intPadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (INT32)", __func__);
        }
        descriptor.m_PadValue = intPadValue;
    }
    else
    {
        return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPadSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}
800
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100801bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
802{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100803 ALOGV("hal_1_2::HalPolicy::ConvertPrelu()");
804
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100805 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
806 LayerInputHandle alpha = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
807
808 if (!input.IsValid() || !alpha.IsValid())
809 {
810 return Fail("%s: Operation has invalid inputs", __func__);
811 }
812
813 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
814
815 if (!output)
816 {
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100817 return Fail("%s: Could not read output", __func__);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100818 }
819
820 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
821 const armnn::TensorInfo& alphaInfo = alpha.GetTensorInfo();
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100822 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100823
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100824 if (IsDynamicTensor(outputInfo))
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100825 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100826 return Fail("%s: Dynamic output tensors are not supported", __func__);
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100827 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100828
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100829 bool isSupported = false;
830 FORWARD_LAYER_SUPPORT_FUNC(__func__,
831 IsPreluSupported,
832 data.m_Backends,
833 isSupported,
834 inputInfo,
835 alphaInfo,
836 outputInfo);
837 if (!isSupported)
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100838 {
839 return false;
840 }
841
842 armnn::IConnectableLayer* const layer = data.m_Network->AddPreluLayer();
843
844 if (!layer)
845 {
846 return Fail("%s: AddPreluLayer failed", __func__);
847 }
848
Sadik Armagan64b19b52019-08-19 09:49:58 +0100849 bool isReshapeSupported = BroadcastTensor(input, alpha, layer, data);
850 if (!isReshapeSupported)
851 {
852 return false;
853 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100854
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100855 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100856}
857
Sadik Armagan5a476a82019-07-30 09:43:18 +0100858bool HalPolicy::ConvertQuantize(const Operation& operation, const Model& model, ConversionData& data)
859{
860 ALOGV("hal_1_2::HalPolicy::ConvertQuantize()");
861
862 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
863 if (!input.IsValid())
864 {
865 return Fail("%s: Operation has invalid input", __func__);
866 }
867
868 const Operand* const outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
869 if (!outputOperand)
870 {
871 return Fail("%s: Operation has invalid outputs", __func__);
872 }
873
874 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
875 if (IsDynamicTensor(outputInfo))
876 {
877 return Fail("%s: Dynamic output tensors are not supported", __func__);
878 }
879
880 bool isSupported = false;
881 FORWARD_LAYER_SUPPORT_FUNC(__func__,
882 IsQuantizeSupported,
883 data.m_Backends,
884 isSupported,
885 input.GetTensorInfo(),
886 outputInfo);
887 if (!isSupported)
888 {
889 return false;
890 }
891
892 armnn::IConnectableLayer* const layer = data.m_Network->AddQuantizeLayer();
893 assert(layer != nullptr);
894 input.Connect(layer->GetInputSlot(0));
895
896 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
897}
898
// QUANTIZED_16BIT_LSTM: builds an armnn QuantizedLstm layer from the 15 HAL
// inputs (activation input, 12 constant weight/bias tensors, previous cell
// state, previous output) and wires up the two outputs (cell state, output).
// All weights and biases must resolve to constant tensors.
bool HalPolicy::ConvertQuantizedLstm(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertQuantizedLstm()");

    //Inputs:
    // 0: The input: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, inputSize]
    //    specifying the input to the LSTM cell. Tensor is quantized with a fixed quantization range of -1, 127/128.
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0: input", __func__);
    }

    //13: The previous cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape
    //    [numBatches, outputSize] specifying the cell state from the previous time step of the LSTM cell.
    //    It is quantized using a quantization range of -2^4, 2^4 * 32767/32768.
    LayerInputHandle previousCellStateIn = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 13, model, data);
    if (!previousCellStateIn.IsValid())
    {
        return Fail("%s: Could not read input 13: previousCellStateIn", __func__);
    }

    // 14: The previous output state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //     [numBathes, outputSize] specifying the output of the LSTM cell from previous time-step. Tensor
    //     is quantized with a fixed quantization range of -1, 127/128.
    LayerInputHandle previousOutputIn = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 14, model, data);
    if (!previousOutputIn.IsValid())
    {
        return Fail("%s: Could not read input 14: previousOutputIn", __func__);
    }

    // Get the input tensors:
    // 1: The input-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-input part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    const ConstTensorPin inputToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);

    // 2: The input-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-forget part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    const ConstTensorPin inputToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    // 3: The input-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-cell part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    const ConstTensorPin inputToCellWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 3, model, data);

    // 4: The input-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-output part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    const ConstTensorPin inputToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 4, model, data);

    // 5: The recurrent-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-input part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    const ConstTensorPin recurrentToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 5, model, data);

    // 6: The recurrent-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-forget part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    const ConstTensorPin recurrentToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 6, model, data);

    // 7: The recurrent-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-cell part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    const ConstTensorPin recurrentToCellWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 7, model, data);

    // 8: The recurrent-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-output part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    const ConstTensorPin recurrentToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 8, model, data);

    // 9: The input gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the
    //    bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
    //    of input and weights scales and zeroPoint equal to 0.
    const ConstTensorPin inputGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 9, model, data);

    // 10: The forget gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
    //     the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
    //     of input and weights scales and zeroPoint equal to 0.
    const ConstTensorPin forgetGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 10, model, data);

    // 11:The cell bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the bias
    //    for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product of input
    //    and weights scales and zeroPoint equal to 0.
    const ConstTensorPin cellBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 11, model, data);

    // 12:The output gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
    //    the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
    //    of input and weights scales and zeroPoint equal to 0.
    const ConstTensorPin outputGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 12, model, data);

    // All 12 weight/bias operands must have resolved to constant tensors.
    if (!inputToInputWeightsPin.IsValid() ||
        !inputToForgetWeightsPin.IsValid() ||
        !inputToCellWeightsPin.IsValid() ||
        !inputToOutputWeightsPin.IsValid() ||
        !recurrentToInputWeightsPin.IsValid() ||
        !recurrentToForgetWeightsPin.IsValid() ||
        !recurrentToCellWeightsPin.IsValid() ||
        !recurrentToOutputWeightsPin.IsValid() ||
        !inputGateBiasPin.IsValid() ||
        !forgetGateBiasPin.IsValid() ||
        !cellBiasPin.IsValid() ||
        !outputGateBiasPin.IsValid())
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Outputs:
    // 0: The cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape [numBatches, outputSize]
    //    which contains a cell state from the current time step. Tensor is quantized using a quantization range
    //    of -2^4, 2^4 * 32767/32768.
    const Operand* cellStateOut = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!cellStateOut)
    {
        return Fail("%s: Could not read output 0: cellStateOut", __func__);
    }

    // 1: The output: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBathes, outputSize] which
    //    contains the output value. Tensor is quantized with a fixed quantization range of -1, 127/128.
    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 1, model);
    if (!output)
    {
        return Fail("%s: Could not read output 1: output", __func__);
    }

    // Inputs
    const armnn::TensorInfo& inputInfo               = input.GetTensorInfo();
    const armnn::TensorInfo& previousCellStateInInfo = previousCellStateIn.GetTensorInfo();
    const armnn::TensorInfo& previousOutputInInfo    = previousOutputIn.GetTensorInfo();

    // Outputs
    const armnn::TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
    const armnn::TensorInfo& outputInfo       = GetTensorInfoForOperand(*output);

    // Dynamic tensors currently not supported
    if (IsDynamicTensor(cellStateOutInfo) || IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Bundle the constant tensors into the params struct consumed by armnn.
    armnn::QuantizedLstmInputParams params;

    params.m_InputToInputWeights      = inputToInputWeightsPin.GetConstTensorPtr();
    params.m_InputToForgetWeights     = inputToForgetWeightsPin.GetConstTensorPtr();
    params.m_InputToCellWeights       = inputToCellWeightsPin.GetConstTensorPtr();
    params.m_InputToOutputWeights     = inputToOutputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToInputWeights  = recurrentToInputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToCellWeights   = recurrentToCellWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
    params.m_InputGateBias            = inputGateBiasPin.GetConstTensorPtr();
    params.m_ForgetGateBias           = forgetGateBiasPin.GetConstTensorPtr();
    params.m_CellBias                 = cellBiasPin.GetConstTensorPtr();
    params.m_OutputGateBias           = outputGateBiasPin.GetConstTensorPtr();

    // Mirror the params as TensorInfo pointers for the support query.
    armnn::QuantizedLstmInputParamsInfo paramsInfo;
    paramsInfo.m_InputToInputWeights      = &(params.m_InputToInputWeights->GetInfo());
    paramsInfo.m_InputToForgetWeights     = &(params.m_InputToForgetWeights->GetInfo());
    paramsInfo.m_InputToCellWeights       = &(params.m_InputToCellWeights->GetInfo());
    paramsInfo.m_InputToOutputWeights     = &(params.m_InputToOutputWeights->GetInfo());
    paramsInfo.m_RecurrentToInputWeights  = &(params.m_RecurrentToInputWeights->GetInfo());
    paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
    paramsInfo.m_RecurrentToCellWeights   = &(params.m_RecurrentToCellWeights->GetInfo());
    paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
    paramsInfo.m_InputGateBias            = &(params.m_InputGateBias->GetInfo());
    paramsInfo.m_ForgetGateBias           = &(params.m_ForgetGateBias->GetInfo());
    paramsInfo.m_CellBias                 = &(params.m_CellBias->GetInfo());
    paramsInfo.m_OutputGateBias           = &(params.m_OutputGateBias->GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsQuantizedLstmSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               previousCellStateInInfo,
                               previousOutputInInfo,
                               cellStateOutInfo,
                               outputInfo,
                               paramsInfo);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddQuantizedLstmLayer(params, "QuantizedLstm");
    input.Connect(layer->GetInputSlot(0));
    previousCellStateIn.Connect(layer->GetInputSlot(1));
    previousOutputIn.Connect(layer->GetInputSlot(2));

    // Track both layer outputs: slot 0 -> cell state, slot 1 -> output.
    return (SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, 0, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 1, *layer, 1, model, data));
}
1106
// RELU: thin wrapper that forwards to the shared template implementation
// ::ConvertReLu, instantiated for the 1.2 policy.
bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu()");
    return ::ConvertReLu<hal_1_2::HalPolicy>(operation, model, data);
}
1112
// RELU1: thin wrapper that forwards to the shared template implementation
// ::ConvertReLu1, instantiated for the 1.2 policy.
bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu1()");
    return ::ConvertReLu1<hal_1_2::HalPolicy>(operation, model, data);
}
1118
// RELU6: thin wrapper that forwards to the shared template implementation
// ::ConvertReLu6, instantiated for the 1.2 policy.
bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu6()");
    return ::ConvertReLu6<hal_1_2::HalPolicy>(operation, model, data);
}
1124
// RESHAPE: thin wrapper that forwards to the shared template implementation
// ::ConvertReshape, instantiated for the 1.2 policy.
bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReshape()");
    return ::ConvertReshape<hal_1_2::HalPolicy>(operation, model, data);
}
1130
// RESIZE_BILINEAR / RESIZE_NEAREST_NEIGHBOR: shared implementation, with the
// resize method chosen by the caller. Inputs 1 and 2 give either an explicit
// target width/height (INT32) or width/height scale factors (FLOAT32); both
// must use the same scalar type. Optional input 3 selects the data layout.
bool HalPolicy::ConvertResize(const Operation& operation,
                              const Model& model,
                              ConversionData& data,
                              armnn::ResizeMethod resizeMethod)
{
    ALOGV("hal_1_2::HalPolicy::ConvertResize()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::ResizeDescriptor descriptor;
    descriptor.m_Method = resizeMethod;
    descriptor.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 3, model, data);

    // Inputs 1 and 2 select between the two resize modes by their scalar type.
    OperandType operandType1;
    OperandType operandType2;

    if (!GetOperandType<hal_1_2::HalPolicy>(operation, 1, model, operandType1) ||
        !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (operandType1 != operandType2)
    {
        return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
    }

    if (operandType1 == OperandType::INT32)
    {
        // Case 1: resizing by shape
        int32_t targetWidth = 0;
        int32_t targetHeight = 0;

        if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 1, targetWidth, model, data) ||
            !GetInputInt32<hal_1_2::HalPolicy>(operation, 2, targetHeight, model, data))
        {
            return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
        }

        if (targetWidth < 0 || targetHeight < 0)
        {
            return Fail("%s: Operation has invalid inputs for resizing by shape. "
                        "Target width/height cannot be < 0", __func__);
        }

        descriptor.m_TargetWidth = static_cast<uint32_t>(targetWidth);
        descriptor.m_TargetHeight = static_cast<uint32_t>(targetHeight);
    }
    else if (operandType1 == OperandType::FLOAT32)
    {
        // Case 2: resizing by scale
        float widthScale = 1.0f;
        float heightScale = 1.0f;

        if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, widthScale, model, data) ||
            !GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, heightScale, model, data))
        {
            return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
        }

        const armnn::TensorShape& inputShape = inputInfo.GetShape();
        armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);

        // Spatial extents are taken from the input according to the layout.
        float width = inputShape[dataLayoutIndexed.GetWidthIndex()];
        float height = inputShape[dataLayoutIndexed.GetHeightIndex()];

        // Target dimensions are the scaled input dimensions, rounded down.
        descriptor.m_TargetWidth = std::floor(width * widthScale);
        descriptor.m_TargetHeight = std::floor(height * heightScale);
    }
    else
    {
        // NOTE: FLOAT16 scales are not supported
        return false;
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsResizeSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);

    assert(layer != nullptr);

    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}
1247
Aron Virginas-Tarfa6544e2019-09-10 14:42:22 +01001248bool HalPolicy::ConvertRsqrt(const Operation& operation, const Model& model, ConversionData& data)
1249{
1250 ALOGV("hal_1_2::HalPolicy::ConvertRsqrt()");
1251
1252 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
1253 if (!input.IsValid())
1254 {
1255 return Fail("%s: Operation has invalid input", __func__);
1256 }
1257
1258 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
1259 if (!output)
1260 {
1261 return Fail("%s: Could not read output 0", __func__);
1262 }
1263
1264 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1265 if (IsDynamicTensor(outputInfo))
1266 {
1267 return Fail("%s: Dynamic output tensors are not supported", __func__);
1268 }
1269
1270 bool isSupported = false;
1271 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1272 IsRsqrtSupported,
1273 data.m_Backends,
1274 isSupported,
1275 input.GetTensorInfo(),
1276 outputInfo);
1277
1278 if (!isSupported)
1279 {
1280 return false;
1281 }
1282
1283 armnn::IConnectableLayer* const layer = data.m_Network->AddRsqrtLayer();
1284 assert(layer != nullptr);
1285 input.Connect(layer->GetInputSlot(0));
1286
1287 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
1288}
1289
// SPACE_TO_BATCH_ND: thin wrapper that forwards to the shared template
// implementation ::ConvertSpaceToBatchNd, instantiated for the 1.2 policy.
bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertSpaceToBatchNd()");
    return ::ConvertSpaceToBatchNd<hal_1_2::HalPolicy>(operation, model, data);
}
1295
Keith Davisa6bc52f2019-06-26 09:39:49 +01001296bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
1297{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001298 ALOGV("hal_1_2::HalPolicy::ConvertSpaceToDepth()");
Keith Davisa6bc52f2019-06-26 09:39:49 +01001299
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001300 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
Keith Davisa6bc52f2019-06-26 09:39:49 +01001301 if (!input.IsValid() )
1302 {
1303 return Fail("%s: Operation has invalid inputs", __func__);
1304 }
1305
1306 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1307 unsigned int rank = inputInfo.GetNumDimensions();
Keith Davisa6bc52f2019-06-26 09:39:49 +01001308 if (rank != 4)
1309 {
1310 return Fail("%s: Only inputs with rank 4 are supported", __func__);
1311 }
1312
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001313 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
1314 if (!output)
1315 {
1316 return Fail("%s: Could not read output 0", __func__);
1317 }
1318
1319 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1320 if (IsDynamicTensor(outputInfo))
1321 {
1322 return Fail("%s: Dynamic output tensors are not supported", __func__);
1323 }
1324
Keith Davisa6bc52f2019-06-26 09:39:49 +01001325 armnn::SpaceToDepthDescriptor desc;
1326
1327 GetInputScalar<hal_1_2::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
1328
1329 if (desc.m_BlockSize <= 1)
1330 {
1331 return Fail("%s: Block size must be at least 1 in all dimensions");
1332 }
1333
1334 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 2, model, data);
1335
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001336 bool isSupported = false;
1337 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1338 IsSpaceToDepthSupported,
1339 data.m_Backends,
1340 isSupported,
1341 inputInfo,
1342 outputInfo,
1343 desc);
1344 if (!isSupported)
Keith Davisa6bc52f2019-06-26 09:39:49 +01001345 {
1346 return false;
1347 }
1348
1349 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
1350 assert(layer != nullptr);
1351 input.Connect(layer->GetInputSlot(0));
1352
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001353 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Keith Davisa6bc52f2019-06-26 09:39:49 +01001354}
1355
Francis Murtagh074c25a2019-07-22 16:40:57 +01001356bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
1357{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001358 ALOGV("hal_1_2::HalPolicy::ConvertSoftmax()");
1359
Francis Murtagh074c25a2019-07-22 16:40:57 +01001360 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
1361 if (!input.IsValid())
1362 {
1363 return Fail("%s: Operation has invalid inputs", __func__);
1364 }
1365
1366 const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
1367 if (!outputOperand)
1368 {
1369 return Fail("%s: Operation has no outputs", __func__);
1370 }
1371
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001372 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +01001373 if (IsDynamicTensor(outputInfo))
Francis Murtagh074c25a2019-07-22 16:40:57 +01001374 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001375 return Fail("%s: Dynamic output tensors are not supported", __func__);
Francis Murtagh074c25a2019-07-22 16:40:57 +01001376 }
1377
1378 armnn::SoftmaxDescriptor desc;
1379 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, desc.m_Beta, model, data))
1380 {
1381 return Fail("%s: Operation has invalid inputs", __func__);
1382 }
1383
1384 if (operation.inputs.size() > 2 && !GetInputScalar<hal_1_2::HalPolicy>(operation,
1385 2,
1386 HalPolicy::OperandType::INT32,
1387 desc.m_Axis,
1388 model,
1389 data))
1390 {
1391 return Fail("%s: Operation has invalid inputs", __func__);
1392 }
1393
Narumol Prangnawarat52dc5272019-08-06 17:34:26 +01001394 if (input.GetTensorInfo().GetNumDimensions() > 2 ||
1395 !(desc.m_Axis == 1 ||
1396 (desc.m_Axis < 0 && static_cast<int>(input.GetTensorInfo().GetNumDimensions()) + desc.m_Axis == 1)))
1397 {
1398 return Fail("%s: Unsupported input greater than 2D or axis != 1", __func__);
1399 }
1400
Francis Murtagh074c25a2019-07-22 16:40:57 +01001401 bool isSupported = false;
1402 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1403 IsSoftmaxSupported,
1404 data.m_Backends,
1405 isSupported,
1406 input.GetTensorInfo(),
1407 outputInfo,
1408 desc);
1409 if (!isSupported)
1410 {
1411 return false;
1412 }
1413
1414 armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
1415 assert(layer != nullptr);
1416 input.Connect(layer->GetInputSlot(0));
1417
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001418 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Francis Murtagh074c25a2019-07-22 16:40:57 +01001419}
1420
// Converts an Android NN SUB operation by delegating to the shared,
// HAL-version-agnostic implementation, instantiated for the 1.2 policy.
bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertSub()");
    return ::ConvertSub<hal_1_2::HalPolicy>(operation, model, data);
}
1426
// Converts an Android NN TANH operation by delegating to the shared,
// HAL-version-agnostic implementation, instantiated for the 1.2 policy.
bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertTanH()");
    return ::ConvertTanH<hal_1_2::HalPolicy>(operation, model, data);
}
1432
// Converts an Android NN LSTM operation (HAL 1.2, 27 inputs / 4 outputs) into an ArmNN LSTM layer.
// Reads the three run-time inputs (input, output state, cell state), the mandatory and optional
// constant weight/bias tensors, the scalar parameters, and the HAL 1.2 layer-normalization
// weights; derives the descriptor flags (CIFG / peephole / projection / layer norm) from which
// optional tensors are present; validates the optional tensor groups; checks backend support;
// and finally adds the layer and wires up its four outputs.
bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
{
    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0: input", __func__);
    }
    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    LayerInputHandle outputStateIn = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 18, model, data);
    if (!outputStateIn.IsValid())
    {
        return Fail("%s: Could not read input 18: outputStateIn", __func__);
    }
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    LayerInputHandle cellStateIn = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 19, model, data);
    if (!cellStateIn.IsValid())
    {
        return Fail("%s: Could not read input 19: cellStateIn", __func__);
    }

    // Get the mandatory input tensors:
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToCellWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 3, model, data);
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 4, model, data);
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 6, model, data);
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToCellWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 7, model, data);
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 8, model, data);
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin forgetGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 13, model, data);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 14, model, data);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin outputGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 15, model, data);

    if (!inputToForgetWeightsPin.IsValid() ||
        !inputToCellWeightsPin.IsValid() ||
        !inputToOutputWeightsPin.IsValid() ||
        !recurrentToForgetWeightsPin.IsValid() ||
        !recurrentToCellWeightsPin.IsValid() ||
        !recurrentToOutputWeightsPin.IsValid() ||
        !forgetGateBiasPin.IsValid() ||
        !cellBiasPin.IsValid() ||
        !outputGateBiasPin.IsValid())
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the optional input tensors:
    // (the trailing 'true' marks each pin as optional: a missing operand yields a
    // valid-but-empty pin whose GetConstTensorPtr() is nullptr)
    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    const ConstTensorPin inputToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    const ConstTensorPin recurrentToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  5,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToInputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  9,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  10,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  11,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin inputGateBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  12,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    const ConstTensorPin projectionWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  16,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    const ConstTensorPin projectionBiasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  17,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
        (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
        (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
        (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
        (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
        (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
        (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
        (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the mandatory input scalars (actually 1-D tensors of size 1):
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    ActivationFn activation;
    float cellClip;
    float projClip;
    if (!GetInputActivationFunctionFromTensor<hal_1_2::HalPolicy>(operation, 20, activation, model, data) ||
        !GetInputScalar<hal_1_2::HalPolicy>(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
        !GetInputScalar<hal_1_2::HalPolicy>(operation, 22, OperandType::FLOAT32, projClip, model, data))
    {
        return Fail("%s: Operation has invalid scalar inputs", __func__);
    }

    // Get the normalization tensors (optional, new in HAL 1.2)
    // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at input gate.
    const ConstTensorPin inputLayerNormWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  23,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at forget gate.
    const ConstTensorPin forgetLayerNormWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  24,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at cell gate.
    const ConstTensorPin cellLayerNormWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  25,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
    //     Used to rescale normalized inputs to activation at output gate.
    const ConstTensorPin outputLayerNormWeightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  26,
                                                                  model,
                                                                  data,
                                                                  g_DontPermute,
                                                                  nullptr,
                                                                  true);

    // Outputs:
    // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
    //     with CIFG, or [batch_size, num_units * 3] without CIFG.
    const Operand* scratchBuffer = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!scratchBuffer)
    {
        return Fail("%s: Could not read output 0: scratchBuffer", __func__);
    }
    // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    const Operand* outputStateOut = GetOutputOperand<hal_1_2::HalPolicy>(operation, 1, model);
    if (!outputStateOut)
    {
        return Fail("%s: Could not read output 1: outputStateOut", __func__);
    }
    // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    const Operand* cellStateOut = GetOutputOperand<hal_1_2::HalPolicy>(operation, 2, model);
    if (!cellStateOut)
    {
        return Fail("%s: Could not read output 2: cellStateOut", __func__);
    }
    // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //     effectively the same as the current “output state (out)” value.
    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 3, model);
    if (!output)
    {
        return Fail("%s: Could not read output 3: output", __func__);
    }

    // set the params structure for the AddLstmLayer call
    // (pointers for optional tensors that were not provided are nullptr)
    armnn::LstmInputParams params;
    params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
    params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
    params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
    params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
    params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
    params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
    params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
    params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
    params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
    params.m_CellBias = cellBiasPin.GetConstTensorPtr();
    params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
    params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
    params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
    params.m_InputLayerNormWeights = inputLayerNormWeightsPin.GetConstTensorPtr();
    params.m_ForgetLayerNormWeights = forgetLayerNormWeightsPin.GetConstTensorPtr();
    params.m_CellLayerNormWeights = cellLayerNormWeightsPin.GetConstTensorPtr();
    params.m_OutputLayerNormWeights = outputLayerNormWeightsPin.GetConstTensorPtr();

    // set the layer descriptor
    // the feature flags are derived from which optional tensors were supplied:
    // CIFG is enabled when any of the input-gate tensors is absent
    armnn::LstmDescriptor desc;
    desc.m_ActivationFunc = activation;
    desc.m_ClippingThresCell = cellClip;
    desc.m_ClippingThresProj = projClip;
    desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
                          params.m_RecurrentToInputWeights == nullptr ||
                          params.m_InputGateBias == nullptr);
    desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
                              params.m_CellToOutputWeights != nullptr);
    desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
    desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr ||
                               params.m_ForgetLayerNormWeights != nullptr ||
                               params.m_CellLayerNormWeights != nullptr ||
                               params.m_OutputLayerNormWeights != nullptr);

    // validate the optional input groups
    if (desc.m_CifgEnabled &&
        (params.m_InputToInputWeights != nullptr ||
         params.m_RecurrentToInputWeights != nullptr ||
         params.m_InputGateBias != nullptr))
    {
        return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
                    " and input gate bias must be provided", __func__);
    }

    if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
    {
        return Fail("%s: projection bias should not be provided without projection weights", __func__);
    }

    if (desc.m_PeepholeEnabled &&
        (params.m_CellToForgetWeights == nullptr ||
         params.m_CellToOutputWeights == nullptr ||
         (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
    {
        return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
                    " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
    }

    if (desc.m_LayerNormEnabled &&
        (params.m_ForgetLayerNormWeights == nullptr ||
         params.m_CellLayerNormWeights == nullptr ||
         params.m_OutputLayerNormWeights == nullptr ||
         (!desc.m_CifgEnabled && params.m_InputLayerNormWeights == nullptr)))
    {
        return Fail("%s: All, or none, of forget-norm weights, cell-norm weights and output-norm weights must be"
                    " provided and, if CIFG is not enabled, input-norm weights must also be provided", __func__);
    }

    // Check if the layer is supported
    // Inputs
    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
    const armnn::TensorInfo& cellStateInInfo = cellStateIn.GetTensorInfo();

    // Outputs
    const armnn::TensorInfo& scratchBufferInfo = GetTensorInfoForOperand(*scratchBuffer);
    const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
    const armnn::TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(scratchBufferInfo) ||
        IsDynamicTensor(outputStateOutInfo) ||
        IsDynamicTensor(cellStateOutInfo) ||
        IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Basic parameters
    // (the mandatory pins were already validated above, so these pointers are non-null)
    armnn::LstmInputParamsInfo paramsInfo;
    paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
    paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
    paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
    paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
    paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
    paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
    paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
    paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
    paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());

    // Optional parameters
    if(!desc.m_CifgEnabled)
    {
        paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
        paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
        if (params.m_CellToInputWeights != nullptr)
        {
            paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
        }
        paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
    }

    if(desc.m_ProjectionEnabled)
    {
        paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
        if (params.m_ProjectionBias != nullptr)
        {
            paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
        }
    }

    if(desc.m_PeepholeEnabled)
    {
        paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
        paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
    }

    if (desc.m_LayerNormEnabled)
    {
        if(!desc.m_CifgEnabled)
        {
            paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
        }
        paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
        paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
        paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsLstmSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputStateInInfo,
                               cellStateInInfo,
                               scratchBufferInfo,
                               outputStateOutInfo,
                               cellStateOutInfo,
                               outputInfo,
                               desc,
                               paramsInfo);
    if (!isSupported)
    {
        return false;
    }

    // Add the layer
    armnn::IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");

    input.Connect(layer->GetInputSlot(0));
    outputStateIn.Connect(layer->GetInputSlot(1));
    cellStateIn.Connect(layer->GetInputSlot(2));

    // Track all four outputs: scratch buffer, output state (out), cell state (out) and output.
    return (SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, 0, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 1, *layer, 1, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 2, *layer, 2, model, data) &&
            SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 3, *layer, 3, model, data));
}
1866
Sadik Armagan701d9a02019-09-04 15:16:18 +01001867bool HalPolicy::ConvertSqrt(const Operation& operation, const Model& model, ConversionData& data)
1868{
1869 ALOGV("hal_1_2::HalPolicy::ConvertSqrt()");
1870 armnn::ActivationDescriptor desc;
1871 desc.m_Function = armnn::ActivationFunction::Sqrt;
1872
1873 return ::ConvertToActivation<hal_1_2::HalPolicy>(operation, __func__, desc, model, data);
1874}
1875
// Converts an Android NN SQUEEZE operation by delegating to the shared,
// HAL-version-agnostic implementation, instantiated for the 1.2 policy.
bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertSqueeze()");
    return ::ConvertSqueeze<hal_1_2::HalPolicy>(operation, model, data);
}
1881
// Converts an Android NN STRIDED_SLICE operation by delegating to the shared,
// HAL-version-agnostic implementation, instantiated for the 1.2 policy.
bool HalPolicy::ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertStridedSlice()");
    return ::ConvertStridedSlice<hal_1_2::HalPolicy>(operation, model, data);
}
1887
// Converts an Android NN TRANSPOSE operation by delegating to the shared,
// HAL-version-agnostic implementation, instantiated for the 1.2 policy.
bool HalPolicy::ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertTranspose()");
    return ::ConvertTranspose<hal_1_2::HalPolicy>(operation, model, data);
}
1893
Aron Virginas-Tar8b991682019-07-31 12:54:59 +01001894bool HalPolicy::ConvertTransposeConv2d(const Operation& operation, const Model& model, ConversionData& data)
David Monahan613b49c2019-06-27 11:37:47 +01001895{
1896 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
1897
1898 if (!input.IsValid())
1899 {
1900 return Fail("%s: Operation has invalid inputs", __func__);
1901 }
1902
1903 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
1904
1905 if (!output)
1906 {
1907 return Fail("%s: Could not read output 0", __func__);
1908 }
1909
1910 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1911 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1912 if (IsDynamicTensor(outputInfo))
1913 {
1914 return Fail("%s: Dynamic output tensors are not supported", __func__);
1915 }
1916
1917 // ArmNN does not currently support non-fixed weights or bias
1918 // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
1919 const Operand* weightsOperand = GetInputOperand<hal_1_2::HalPolicy>(operation, 1, model);
1920
1921 if (weightsOperand == nullptr)
1922 {
1923 return Fail("%s: Operand is invalid", __func__);
1924 }
1925 armnn::TransposeConvolution2dDescriptor desc;
1926 desc.m_DataLayout = armnn::DataLayout::NHWC;
1927
1928 // Determine whether padding is implicit or explicit
1929 bool implicitPadding = operation.inputs.size() == 9;
1930
1931 if (implicitPadding )
1932 {
1933 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 8, model, data);
1934 }
1935 else
1936 {
1937 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 10, model, data);
1938 }
1939
1940 armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
1941 unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
1942 unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
1943
1944 const armnn::PermutationVector OHWIToOIHW = {0, 2, 3, 1};
1945
1946 // The shape of the weight is [depth_out, filter_height, filter_width, depth_in].
1947 // We have to permute it to OIHW if the data layout is NCHW.
1948 const ConstTensorPin weightsPin = (desc.m_DataLayout == armnn::DataLayout::NCHW) ?
1949 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data, OHWIToOIHW) :
1950 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
1951
1952 // Bias is a 1D tensor
1953 const ConstTensorPin biasPin =
1954 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);
1955
1956 if (!weightsPin.IsValid())
1957 {
1958 return Fail("%s: Operation has invalid weights", __func__);
1959 }
1960
1961 if (!biasPin.IsValid())
1962 {
1963 return Fail("%s: Operation has invalid biases", __func__);
1964 }
1965
1966 armnn::ConstTensor weights = weightsPin.GetConstTensor();
1967 armnn::ConstTensor bias = biasPin.GetConstTensor();
1968 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
1969
1970 ActivationFn activation;
1971
1972 if (implicitPadding)
1973 {
Sadik Armagan3e3003e2019-08-13 12:54:34 +01001974 int32_t strideX{0};
1975 int32_t strideY{0};
1976 int32_t padLeft{0};
1977 int32_t padRight{0};
1978 int32_t padTop{0};
1979 int32_t padBottom{0};
1980
David Monahan613b49c2019-06-27 11:37:47 +01001981 android::nn::PaddingScheme paddingScheme;
1982 if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 4, paddingScheme, model, data) ||
Sadik Armagan3e3003e2019-08-13 12:54:34 +01001983 !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, strideX, model, data) ||
1984 !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, strideY, model, data) ||
David Monahan613b49c2019-06-27 11:37:47 +01001985 !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 7, activation, model, data))
1986 {
1987 return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
1988 }
1989
1990 const uint32_t kernelX = weights.GetShape()[widthIndex];
1991 const uint32_t kernelY = weights.GetShape()[heightIndex];
Narumol Prangnawaratc8bdb392019-08-01 15:51:44 +01001992 const uint32_t outputX = outputInfo.GetShape()[widthIndex];
1993 const uint32_t outputY = outputInfo.GetShape()[heightIndex];
David Monahan613b49c2019-06-27 11:37:47 +01001994
Narumol Prangnawaratc8bdb392019-08-01 15:51:44 +01001995 CalcPaddingTransposeConv(outputX, kernelX, desc.m_StrideX, padLeft, padRight, paddingScheme);
1996 CalcPaddingTransposeConv(outputY, kernelY, desc.m_StrideY, padTop, padBottom, paddingScheme);
1997
1998 // NOTE: The Android NN API allows for negative padding values in TransposeConv2d,
1999 // but Arm NN only supports values >= 0
2000 if (padLeft < 0 || padRight < 0 || padTop < 0 || padBottom < 0)
2001 {
2002 return Fail("%s: Negative padding values are not supported", __func__);
2003 }
2004
Sadik Armagan3e3003e2019-08-13 12:54:34 +01002005 desc.m_StrideX = boost::numeric_cast<uint32_t>(strideX);
2006 desc.m_StrideY = boost::numeric_cast<uint32_t>(strideY);
Narumol Prangnawaratc8bdb392019-08-01 15:51:44 +01002007 desc.m_PadLeft = boost::numeric_cast<uint32_t>(padLeft);
2008 desc.m_PadRight = boost::numeric_cast<uint32_t>(padRight);
2009 desc.m_PadTop = boost::numeric_cast<uint32_t>(padTop);
2010 desc.m_PadBottom = boost::numeric_cast<uint32_t>(padBottom);
David Monahan613b49c2019-06-27 11:37:47 +01002011 }
2012 else if (operation.inputs.size() == 11)
2013 {
2014 // explicit padding
2015 if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
2016 !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
2017 !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
2018 !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
2019 !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
2020 !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
2021 !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 9, activation, model, data))
2022 {
2023 return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
2024 }
2025 }
2026 else
2027 {
2028 return Fail("%s: Unsupported number of operation inputs", __func__);
2029 }
2030
2031 desc.m_BiasEnabled = true;
2032 armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
2033
2034 bool isSupported = false;
2035 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2036 IsTransposeConvolution2dSupported,
2037 data.m_Backends,
2038 isSupported,
2039 inputInfo,
2040 outputInfo,
2041 desc,
2042 weights.GetInfo(),
2043 biases);
2044 if (!isSupported)
2045 {
2046 return false;
2047 }
2048
2049 armnn::IConnectableLayer* startLayer =
2050 data.m_Network->AddTransposeConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
2051 if (!startLayer)
2052 {
2053 return Fail("%s: AddTransposeConvolution2dLayer failed", __func__);
2054 }
2055
2056 armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
2057 if (!endLayer)
2058 {
2059 return Fail("%s: ProcessActivation failed", __func__);
2060 }
2061
2062 input.Connect(startLayer->GetInputSlot(0));
2063
2064 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
2065}
2066
Mike Kellyb5fdf382019-06-11 16:35:25 +01002067} // namespace hal_1_2
Matteo Martincigh17ffff32019-06-27 14:12:55 +01002068} // namespace armnn_driver