blob: 55df9dab8b9dcad20889b6b286f0980f08a638b4 [file] [log] [blame]
Mike Kellyb5fdf382019-06-11 16:35:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "HalPolicy.hpp"
7
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +01008#include "Utils.hpp"
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01009
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010010#include <DataLayoutIndexed.hpp>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +010011#include <Half.hpp>
Narumol Prangnawarat85f96542019-09-12 16:26:29 +010012#include <TensorUtils.hpp>
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010013
Teresa Charlin8f6429d2019-10-01 13:10:15 +010014#include <armnn/TypesUtils.hpp>
15
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010016#include <cmath>
17
Mike Kellyb5fdf382019-06-11 16:35:25 +010018namespace armnn_driver
19{
20namespace hal_1_2
21{
22
Teresa Charlin8f6429d2019-10-01 13:10:15 +010023using namespace armnn;
24
Mike Kellyb5fdf382019-06-11 16:35:25 +010025bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
26{
Mike Kellyb5fdf382019-06-11 16:35:25 +010027 switch (operation.type)
28 {
Kevin May407718f2019-09-09 14:46:41 +010029 case V1_2::OperationType::ABS:
30 return ConvertAbs(operation, model, data);
Mike Kelly46272802019-08-14 17:00:48 +010031 case V1_2::OperationType::ADD:
32 return ConvertAdd(operation, model, data);
Sadik Armagan15d63e22019-07-26 16:59:35 +010033 case V1_2::OperationType::AVERAGE_POOL_2D:
34 return ConvertAveragePool2d(operation, model, data);
Finn Williams23b87b32019-07-30 11:44:05 +010035 case V1_2::OperationType::BATCH_TO_SPACE_ND:
36 return ConvertBatchToSpaceNd(operation, model, data);
Mike Kellyb8805202019-07-31 17:25:43 +010037 case V1_2::OperationType::CONCATENATION:
38 return ConvertConcatenation(operation, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +010039 case V1_2::OperationType::CONV_2D:
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +010040 return ConvertConv2d(operation, model, data);
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +010041 case V1_2::OperationType::DEPTH_TO_SPACE:
42 return ConvertDepthToSpace(operation, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +010043 case V1_2::OperationType::DEPTHWISE_CONV_2D:
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +010044 return ConvertDepthwiseConv2d(operation, model, data);
Mike Kelly46272802019-08-14 17:00:48 +010045 case V1_2::OperationType::DEQUANTIZE:
46 return ConvertDequantize(operation, model, data);
47 case V1_2::OperationType::DIV:
48 return ConvertDiv(operation, model, data);
Narumol Prangnawarat85f96542019-09-12 16:26:29 +010049 case V1_2::OperationType::EXPAND_DIMS:
50 return ConvertExpandDims(operation, model, data);
Mike Kelly46272802019-08-14 17:00:48 +010051 case V1_2::OperationType::FLOOR:
52 return ConvertFloor(operation, model, data);
53 case V1_2::OperationType::FULLY_CONNECTED:
54 return ConvertFullyConnected(operation, model, data);
Teresa Charlin8f6429d2019-10-01 13:10:15 +010055 case V1_2::OperationType::GROUPED_CONV_2D:
56 return ConvertGroupedConv2d(operation, model, data);
Aron Virginas-Tara2a73802019-10-09 15:30:40 +010057 case V1_2::OperationType::INSTANCE_NORMALIZATION:
58 return ConvertInstanceNormalization(operation, model, data);
Mike Kelly46272802019-08-14 17:00:48 +010059 case V1_2::OperationType::L2_NORMALIZATION:
60 return ConvertL2Normalization(operation, model, data);
Sadik Armagan15d63e22019-07-26 16:59:35 +010061 case V1_2::OperationType::L2_POOL_2D:
62 return ConvertL2Pool2d(operation, model, data);
Mike Kelly46272802019-08-14 17:00:48 +010063 case V1_2::OperationType::LOCAL_RESPONSE_NORMALIZATION:
64 return ConvertLocalResponseNormalization(operation, model, data);
65 case V1_2::OperationType::LOGISTIC:
66 return ConvertLogistic(operation, model, data);
Aron Virginas-Tar75e67792019-10-15 13:33:03 +010067 case V1_2::OperationType::LOG_SOFTMAX:
68 return ConvertLogSoftmax(operation, model, data);
Mike Kelly46272802019-08-14 17:00:48 +010069 case V1_2::OperationType::LSTM:
70 return ConvertLstm(operation, model, data);
Sadik Armagan15d63e22019-07-26 16:59:35 +010071 case V1_2::OperationType::MAX_POOL_2D:
72 return ConvertMaxPool2d(operation, model, data);
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +010073 case V1_2::OperationType::MAXIMUM:
74 return ConvertMaximum(operation, model, data);
Mike Kelly46272802019-08-14 17:00:48 +010075 case V1_2::OperationType::MEAN:
76 return ConvertMean(operation, model, data);
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +010077 case V1_2::OperationType::MINIMUM:
78 return ConvertMinimum(operation, model, data);
Mike Kelly46272802019-08-14 17:00:48 +010079 case V1_2::OperationType::MUL:
80 return ConvertMul(operation, model, data);
Mike Kelly3c673942019-07-25 09:26:06 +010081 case V1_2::OperationType::PAD:
Aron Virginas-Tarc921f6b2019-07-25 10:14:33 +010082 return ConvertPad(operation, model, data);
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +010083 case V1_2::OperationType::PAD_V2:
84 return ConvertPadV2(operation, model, data);
Matteo Martincigh17ffff32019-06-27 14:12:55 +010085 case V1_2::OperationType::PRELU:
86 return ConvertPrelu(operation, model, data);
Sadik Armagan5a476a82019-07-30 09:43:18 +010087 case V1_2::OperationType::QUANTIZE:
88 return ConvertQuantize(operation, model, data);
Ellen Norris-Thompson7efb46d2019-07-24 17:39:19 +010089 case V1_2::OperationType::QUANTIZED_16BIT_LSTM:
90 return ConvertQuantizedLstm(operation, model, data);
Sadik Armagan61113162019-07-25 09:09:40 +010091 case V1_2::OperationType::RELU:
92 return ConvertReLu(operation, model, data);
93 case V1_2::OperationType::RELU1:
94 return ConvertReLu1(operation, model, data);
95 case V1_2::OperationType::RELU6:
96 return ConvertReLu6(operation, model, data);
Mike Kelly46272802019-08-14 17:00:48 +010097 case V1_2::OperationType::RESHAPE:
98 return ConvertReshape(operation, model, data);
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +010099 case V1_2::OperationType::RESIZE_BILINEAR:
Teresa Charlin8f6429d2019-10-01 13:10:15 +0100100 return ConvertResize(operation, model, data, ResizeMethod::Bilinear);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100101 case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR:
Teresa Charlin8f6429d2019-10-01 13:10:15 +0100102 return ConvertResize(operation, model, data, ResizeMethod::NearestNeighbor);
Aron Virginas-Tarfa6544e2019-09-10 14:42:22 +0100103 case V1_2::OperationType::RSQRT:
104 return ConvertRsqrt(operation, model, data);
Sadik Armagan701d9a02019-09-04 15:16:18 +0100105 case V1_2::OperationType::SQRT:
106 return ConvertSqrt(operation, model, data);
Mike Kelly46272802019-08-14 17:00:48 +0100107 case V1_2::OperationType::SQUEEZE:
108 return ConvertSqueeze(operation, model, data);
109 case V1_2::OperationType::STRIDED_SLICE:
110 return ConvertStridedSlice(operation, model, data);
111 case V1_2::OperationType::TRANSPOSE:
112 return ConvertTranspose(operation, model, data);
David Monahan613b49c2019-06-27 11:37:47 +0100113 case V1_2::OperationType::TRANSPOSE_CONV_2D:
Aron Virginas-Tar8b991682019-07-31 12:54:59 +0100114 return ConvertTransposeConv2d(operation, model, data);
Francis Murtagh074c25a2019-07-22 16:40:57 +0100115 case V1_2::OperationType::SOFTMAX:
116 return ConvertSoftmax(operation, model, data);
Finn Williamsd74c5052019-07-30 17:06:00 +0100117 case V1_2::OperationType::SPACE_TO_BATCH_ND :
118 return ConvertSpaceToBatchNd(operation, model, data);
Aron Virginas-Tarad1ab532019-07-25 11:24:42 +0100119 case V1_2::OperationType::SPACE_TO_DEPTH:
120 return ConvertSpaceToDepth(operation, model, data);
Mike Kelly0a879362019-07-29 16:56:31 +0100121 case V1_2::OperationType::SUB:
122 return ConvertSub(operation, model, data);
Sadik Armagan61113162019-07-25 09:09:40 +0100123 case V1_2::OperationType::TANH:
124 return ConvertTanH(operation, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +0100125 default:
126 return Fail("%s: Operation type %s not supported in ArmnnDriver",
127 __func__, toString(operation.type).c_str());
128 }
129}
130
Kevin May407718f2019-09-09 14:46:41 +0100131bool HalPolicy::ConvertAbs(const Operation& operation, const Model& model, ConversionData& data)
132{
133 ALOGV("hal_1_2::HalPolicy::ConvertAbs()");
134 return ::ConvertAbs<hal_1_2::HalPolicy>(operation, model, data);
135}
136
Mike Kelly46272802019-08-14 17:00:48 +0100137bool HalPolicy::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
138{
139 ALOGV("hal_1_2::HalPolicy::ConvertAdd()");
140 return ::ConvertAdd<hal_1_2::HalPolicy>(operation, model, data);
141}
142
Sadik Armagan15d63e22019-07-26 16:59:35 +0100143bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
144{
145 ALOGV("hal_1_2::HalPolicy::ConvertAveragePool2d()");
Teresa Charlin8f6429d2019-10-01 13:10:15 +0100146 return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, PoolingAlgorithm::Average, model, data);
Sadik Armagan15d63e22019-07-26 16:59:35 +0100147}
148
Finn Williams23b87b32019-07-30 11:44:05 +0100149bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data)
150{
151 ALOGV("hal_1_2::HalPolicy::ConvertBatchToSpaceNd()");
152 return ::ConvertBatchToSpaceNd<hal_1_2::HalPolicy>(operation, model, data);
153}
154
Mike Kellyb8805202019-07-31 17:25:43 +0100155bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
156{
157 ALOGV("hal_1_2::HalPolicy::ConvertConcatenation()");
158 return ::ConvertConcatenation<hal_1_2::HalPolicy>(operation, model, data);
159}
160
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100161bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
162{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100163 ALOGV("hal_1_2::HalPolicy::ConvertConv2d()");
164
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100165 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
166 if (!input.IsValid())
167 {
168 return Fail("%s: Operation has invalid inputs", __func__);
169 }
170
171 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
172 if (!output)
173 {
174 return Fail("%s: Could not read output 0", __func__);
175 }
176
Teresa Charlin8f6429d2019-10-01 13:10:15 +0100177 const TensorInfo& inputInfo = input.GetTensorInfo();
178 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100179
180 if (IsDynamicTensor(outputInfo))
181 {
182 return Fail("%s: Dynamic output tensors are not supported", __func__);
183 }
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +0100184
Teresa Charlin8f6429d2019-10-01 13:10:15 +0100185 Convolution2dDescriptor desc;
186 desc.m_DataLayout = DataLayout::NHWC;
Mike Kellye1d60bb2019-07-11 11:44:52 +0100187
188 // Determine whether padding is implicit or explicit
189 bool implicitPadding = operation.inputs.size() == 7 ||
190 (operation.inputs.size() >= 8 &&
191 GetInputOperand<hal_1_2::HalPolicy>(operation, 7, model)->type == OperandType::BOOL);
192
193 if (implicitPadding)
194 {
195 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 7, model, data);
196 }
197 else if (operation.inputs.size() >= 10)
198 {
199 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 10, model, data);
200 }
201
Teresa Charlin8f6429d2019-10-01 13:10:15 +0100202 const PermutationVector OHWIToOIHW = {0, 2, 3, 1};
Mike Kellye1d60bb2019-07-11 11:44:52 +0100203
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100204 // ArmNN does not currently support non-fixed weights or bias
Mike Kellye1d60bb2019-07-11 11:44:52 +0100205 // The NNAPI filter is always OHWI [depth_out, filter_height, filter_width, depth_in] but ArmNN expects the
206 // filter's height and width indices to match the input's height and width indices so we permute it to OIHW if
207 // the DataLayout is NCHW
Teresa Charlin8f6429d2019-10-01 13:10:15 +0100208 const ConstTensorPin weightsPin = (desc.m_DataLayout == DataLayout::NCHW) ?
Mike Kellye1d60bb2019-07-11 11:44:52 +0100209 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data, OHWIToOIHW) :
210 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100211 const ConstTensorPin biasPin =
Mike Kellye1d60bb2019-07-11 11:44:52 +0100212 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100213
214 if (!weightsPin.IsValid())
215 {
216 return Fail("%s: Operation has invalid weights", __func__);
217 }
218
219 if (!biasPin.IsValid())
220 {
221 return Fail("%s: Operation has invalid biases", __func__);
222 }
223
Teresa Charlin8f6429d2019-10-01 13:10:15 +0100224 ConstTensor weights = weightsPin.GetConstTensor();
225 ConstTensor bias = biasPin.GetConstTensor();
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100226 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
227
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100228 ActivationFn activation;
229
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100230 if (implicitPadding)
231 {
232 android::nn::PaddingScheme paddingScheme;
233 if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
234 !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
235 !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
236 !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 6, activation, model, data) ||
237 !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 8, desc, model, data))
238 {
239 return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
240 }
241
Mike Kellye1d60bb2019-07-11 11:44:52 +0100242 armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
243 unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
244 unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
245 const uint32_t kernelX = weights.GetShape()[widthIndex];
246 const uint32_t kernelY = weights.GetShape()[heightIndex];
247 const uint32_t inputX = inputInfo.GetShape()[widthIndex];
248 const uint32_t inputY = inputInfo.GetShape()[heightIndex];
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100249
Mike Kelly86b36d42019-07-12 16:39:33 +0100250 CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
251 CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100252
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100253 }
254 else if (operation.inputs.size() >= 10)
255 {
256 // explicit padding
257 if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
258 !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
259 !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
260 !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
261 !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
262 !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
263 !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 9, activation, model, data) ||
264 !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 11, desc, model, data))
265 {
266 return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
267 }
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100268 }
269 else
270 {
271 return Fail("%s: Unsupported number of operation inputs", __func__);
272 }
273
274 desc.m_BiasEnabled = true;
Teresa Charlin8f6429d2019-10-01 13:10:15 +0100275 Optional<TensorInfo> biases(bias.GetInfo());
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100276
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100277 bool isSupported = false;
278 FORWARD_LAYER_SUPPORT_FUNC(__func__,
279 IsConvolution2dSupported,
280 data.m_Backends,
281 isSupported,
282 inputInfo,
283 outputInfo,
284 desc,
285 weights.GetInfo(),
286 biases);
Aron Virginas-Tar2b173122019-07-15 14:29:09 +0100287
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100288 if (!isSupported)
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100289 {
290 return false;
291 }
292
Teresa Charlin8f6429d2019-10-01 13:10:15 +0100293 IConnectableLayer* startLayer =
294 data.m_Network->AddConvolution2dLayer(desc, weights, Optional<ConstTensor>(bias));
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100295
296 if (!startLayer)
297 {
298 return Fail("%s: AddConvolution2dLayer failed", __func__);
299 }
300
Teresa Charlin8f6429d2019-10-01 13:10:15 +0100301 IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100302
303 if (!endLayer)
304 {
305 return Fail("%s: ProcessActivation failed", __func__);
306 }
307
308 input.Connect(startLayer->GetInputSlot(0));
309
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100310 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100311}
312
Aron Virginas-Tar8edb16d2019-10-01 13:34:59 +0100313bool HalPolicy::ConvertDepthToSpace(const Operation& operation, const Model& model, ConversionData& data)
314{
315 ALOGV("hal_1_2::HalPolicy::ConvertDepthToSpace()");
316 return ::ConvertDepthToSpace<hal_1_2::HalPolicy>(operation, model, data);
317}
318
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100319bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
320{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100321 ALOGV("hal_1_2::HalPolicy::ConvertDepthwiseConv2d()");
322
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100323 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
324
325 if (!input.IsValid())
326 {
327 return Fail("%s: Operation has invalid inputs", __func__);
328 }
329
330 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
331
332 if (!output)
333 {
334 return Fail("%s: Could not read output 0", __func__);
335 }
336
Teresa Charlin8f6429d2019-10-01 13:10:15 +0100337 const TensorInfo& inputInfo = input.GetTensorInfo();
338 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100339
340 if (IsDynamicTensor(outputInfo))
341 {
342 return Fail("%s: Dynamic output tensors are not supported", __func__);
343 }
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100344
345 // ArmNN does not currently support non-fixed weights or bias
346 // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
347 const Operand* weightsOperand = GetInputOperand<hal_1_2::HalPolicy>(operation, 1, model);
348
349 if (weightsOperand == nullptr)
350 {
351 return Fail("%s: Operand is invalid", __func__);
352 }
Teresa Charlin8f6429d2019-10-01 13:10:15 +0100353 DepthwiseConvolution2dDescriptor desc;
354 desc.m_DataLayout = DataLayout::NHWC;
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100355
356 // Determine whether padding is implicit or explicit
357 bool implicitPadding = operation.inputs.size() == 8 ||
358 (operation.inputs.size() >= 9 &&
359 GetInputOperand<hal_1_2::HalPolicy>(operation, 8, model)->type == OperandType::BOOL);
360
361 // Look ahead to find the optional DataLayout, if present
362 const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
363 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, dataLayoutFlagIndex, model, data);
364
365 armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
366 unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
367 unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
368 unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
369
370 // Reinterpret weight data as [ H, W, I, M ]
Teresa Charlin8f6429d2019-10-01 13:10:15 +0100371 TensorShape weightsShape({ weightsOperand->dimensions[1],
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100372 weightsOperand->dimensions[2],
373 inputInfo.GetShape()[channelsIndex],
374 weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });
375
376 // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
Teresa Charlin8f6429d2019-10-01 13:10:15 +0100377 const PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100378
379 const ConstTensorPin weightsPin =
380 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
381 1,
382 model,
383 data,
384 HWIMToMIHW,
385 &weightsShape);
386
387 // Bias is a 1D tensor
388 const ConstTensorPin biasPin =
389 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);
390
391 if (!weightsPin.IsValid())
392 {
393 return Fail("%s: Operation has invalid weights", __func__);
394 }
395
396 if (!biasPin.IsValid())
397 {
398 return Fail("%s: Operation has invalid biases", __func__);
399 }
400
Teresa Charlin8f6429d2019-10-01 13:10:15 +0100401 ConstTensor weights = weightsPin.GetConstTensor();
402 ConstTensor bias = biasPin.GetConstTensor();
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100403 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
404
405 ActivationFn activation;
406
407 if (implicitPadding)
408 {
409 android::nn::PaddingScheme paddingScheme;
410 if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
411 !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
412 !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
413 !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 7, activation, model, data) ||
414 !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 9, desc, model, data))
415 {
416 return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
417 }
418
419 const uint32_t kernelX = weights.GetShape()[3];
420 const uint32_t kernelY = weights.GetShape()[2];
421 const uint32_t inputX = inputInfo.GetShape()[widthIndex];
422 const uint32_t inputY = inputInfo.GetShape()[heightIndex];
423
Mike Kelly86b36d42019-07-12 16:39:33 +0100424 CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
425 CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100426 }
427 else if (operation.inputs.size() >= 11)
428 {
429 // explicit padding
430 if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
431 !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
432 !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
433 !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
434 !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
435 !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
436 !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 10, activation, model, data) ||
437 !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 12, desc, model, data))
438 {
439 return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
440 }
441 }
442 else
443 {
444 return Fail("%s: Unsupported number of operation inputs", __func__);
445 }
446
447 desc.m_BiasEnabled = true;
Teresa Charlin8f6429d2019-10-01 13:10:15 +0100448 Optional<TensorInfo> biases(bias.GetInfo());
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100449
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100450 bool isSupported = false;
451 FORWARD_LAYER_SUPPORT_FUNC(__func__,
452 IsDepthwiseConvolutionSupported,
453 data.m_Backends,
454 isSupported,
455 inputInfo,
456 outputInfo,
457 desc,
458 weights.GetInfo(),
459 biases);
Aron Virginas-Tar9fd37392019-07-15 18:04:32 +0100460
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100461 if (!isSupported)
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100462 {
463 return false;
464 }
465
Teresa Charlin8f6429d2019-10-01 13:10:15 +0100466 IConnectableLayer* startLayer =
467 data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, Optional<ConstTensor>(bias));
Aron Virginas-Tar9fd37392019-07-15 18:04:32 +0100468
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100469 if (!startLayer)
470 {
471 return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
472 }
473
Teresa Charlin8f6429d2019-10-01 13:10:15 +0100474 IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100475 if (!endLayer)
476 {
477 return Fail("%s: ProcessActivation failed", __func__);
478 }
479
480 input.Connect(startLayer->GetInputSlot(0));
481
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100482 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100483}
484
Mike Kelly46272802019-08-14 17:00:48 +0100485bool HalPolicy::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
486{
487 ALOGV("hal_1_2::HalPolicy::ConvertDequantize()");
488 return ::ConvertDequantize<hal_1_2::HalPolicy>(operation, model, data);
489}
490
491bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
492{
493 ALOGV("hal_1_2::HalPolicy::ConvertDiv()");
494 return ::ConvertDiv<hal_1_2::HalPolicy>(operation, model, data);
495}
496
Narumol Prangnawarat85f96542019-09-12 16:26:29 +0100497bool HalPolicy::ConvertExpandDims(const Operation& operation, const Model& model, ConversionData& data)
498{
499 ALOGV("hal_1_2::HalPolicy::ConvertExpandDims()");
500
501 LayerInputHandle input = ConvertToLayerInputHandle<HalPolicy>(operation, 0, model, data);
502
503 if (!input.IsValid())
504 {
505 return Fail("%s: Operation has invalid input", __func__);
506 }
507
508 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
509 if (!output)
510 {
511 return Fail("%s: Operation has invalid output", __func__);
512 }
513
Teresa Charlin8f6429d2019-10-01 13:10:15 +0100514 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Narumol Prangnawarat85f96542019-09-12 16:26:29 +0100515 if (IsDynamicTensor(outputInfo))
516 {
517 return Fail("%s: Dynamic output tensors are not supported", __func__);
518 }
519
520 int32_t axis;
521 if (!GetInputScalar<HalPolicy>(operation, 1, OperandType::INT32, axis, model, data))
522 {
523 return Fail("%s: failed to get axis input value", __func__);
524 }
525
Teresa Charlin8f6429d2019-10-01 13:10:15 +0100526 TensorShape targetShape;
Narumol Prangnawarat85f96542019-09-12 16:26:29 +0100527
528 try
529 {
530 targetShape = armnnUtils::ExpandDims(input.GetTensorInfo().GetShape(), axis);
531 }
532 catch (const std::exception &e)
533 {
534 return Fail("%s: %s", __func__, e.what());
535 }
536
537 if (targetShape != outputInfo.GetShape())
538 {
539 return Fail("%s: Shape of the output operand does not match the resolved expanded shape", __func__);
540 }
541
Teresa Charlin8f6429d2019-10-01 13:10:15 +0100542 ReshapeDescriptor reshapeDescriptor;
Narumol Prangnawarat85f96542019-09-12 16:26:29 +0100543 reshapeDescriptor.m_TargetShape = targetShape;
544
545 bool isSupported = false;
546 FORWARD_LAYER_SUPPORT_FUNC(__func__,
547 IsReshapeSupported,
548 data.m_Backends,
549 isSupported,
550 input.GetTensorInfo(),
551 reshapeDescriptor);
552
553 if (!isSupported)
554 {
555 return false;
556 }
557
Teresa Charlin8f6429d2019-10-01 13:10:15 +0100558 IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
Narumol Prangnawarat85f96542019-09-12 16:26:29 +0100559 assert(layer != nullptr);
560 input.Connect(layer->GetInputSlot(0));
561
562 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
563}
564
Mike Kelly46272802019-08-14 17:00:48 +0100565bool HalPolicy::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
566{
567 ALOGV("hal_1_2::HalPolicy::ConvertFloor()");
568 return ::ConvertFloor<hal_1_2::HalPolicy>(operation, model, data);
569}
570
571bool HalPolicy::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
572{
573 ALOGV("hal_1_2::HalPolicy::ConvertFullyConnected()");
574 return ::ConvertFullyConnected<hal_1_2::HalPolicy>(operation, model, data);
575}
576
Teresa Charlin8f6429d2019-10-01 13:10:15 +0100577bool HalPolicy::ConvertGroupedConv2d(const Operation& operation, const Model& model, ConversionData& data)
578{
579 ALOGV("hal_1_2::HalPolicy::ConvertGroupedConv2d()");
580
581 //
582 // Parse data
583 //
584 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
585 if (!input.IsValid())
586 {
587 return Fail("%s: Operation has invalid inputs", __func__);
588 }
589 const TensorInfo& inputInfo = input.GetTensorInfo();
590
591 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
592 if (!output)
593 {
594 return Fail("%s: Could not read output 0", __func__);
595 }
596 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
597 if (IsDynamicTensor(outputInfo))
598 {
599 return Fail("%s: Dynamic output tensors are not supported", __func__);
600 }
601
602 // Look ahead to determine data layout
603 DataLayout dataLayout = DataLayout::NHWC;
604 if (operation.inputs.size() == 12)
605 {
606 dataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 11, model, data);
607 }
608 else
609 {
610 dataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 8, model, data);
611 }
612
613 // NOTE:
614 // NNAPI weights are always OHWI, i.e. [depth_out, filter_height, filter_width, depth_group],
615 // but Arm NN expects the filter's height and width indices to match the input's height and
616 // width indices so when the DataLayout is NCHW, we need to permute the weights to OIHW
617 const PermutationVector ohwiToOihw = { 0u, 2u, 3u, 1u };
618 const ConstTensorPin weightsPin = (dataLayout == DataLayout::NCHW) ?
619 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data, ohwiToOihw) :
620 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
621 const ConstTensorPin biasesPin =
622 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);
623 if (!weightsPin.IsValid() || !biasesPin.IsValid())
624 {
625 return Fail("%s: Operation has invalid inputs", __func__);
626 }
627
628 ConstTensor weights = weightsPin.GetConstTensor();
629 ConstTensor biases = biasesPin.GetConstTensor();
630 SanitizeBiasQuantizationScale(biases.GetInfo(), weights.GetInfo(), inputInfo);
631
632 const TensorShape& inputShape = inputInfo.GetShape();
633 const TensorShape& outputShape = outputInfo.GetShape();
634 const TensorShape& weightsShape = weights.GetShape();
635 const TensorShape& biasesShape = biases.GetShape();
636
637 armnnUtils::DataLayoutIndexed dataLayoutIndexed(dataLayout);
638 const unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
639 const unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
640 const unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
641
642 Convolution2dDescriptor desc;
643 desc.m_DataLayout = dataLayout;
644 desc.m_BiasEnabled = true;
645
646 int numGroups;
647 ActivationFn activation;
648
649 if (operation.inputs.size() == 12)
650 {
651 if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
652 !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
653 !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
654 !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
655 !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
656 !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
657 !GetInputScalar<hal_1_2::HalPolicy>(operation, 9, OperandType::INT32, numGroups, model, data) ||
658 !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 10, activation, model, data))
659 {
660 return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
661 }
662
663 }
664 else if (operation.inputs.size() == 9)
665 {
666 android::nn::PaddingScheme paddingScheme;
667 if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
668 !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
669 !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
670 !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, numGroups, model, data) ||
671 !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 7, activation, model, data))
672 {
673 return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
674 }
675
676 const uint32_t inputX = inputInfo.GetShape()[widthIndex];
677 const uint32_t inputY = inputInfo.GetShape()[heightIndex];
678
679 const uint32_t kernelX = weightsShape[widthIndex];
680 const uint32_t kernelY = weightsShape[heightIndex];
681
682 CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
683 CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
684 }
685 else
686 {
687 return Fail("%s: Unsupported number of operation inputs", __func__);
688 }
689
690 const unsigned int outputChannels = outputShape[channelsIndex];
691
692 const unsigned int channelsPerGroup = weightsShape[channelsIndex];
693 const unsigned int channelMultiplier = outputChannels / numGroups;
694
695 //
696 // Validate all relevant inputs
697 //
698 if (numGroups <= 0)
699 {
700 return Fail("%s: Number of groups must be greater than 0. Got: %d", __func__, numGroups);
701 }
702
703 if (outputChannels % numGroups != 0u)
704 {
705 return Fail("%s: Output channels must be divisible by the number of groups", __func__);
706 }
707
708 //
709 // Set up Splitter layer
710 //
711 unsigned int splitterDimSizes[4] = { inputShape[0], inputShape[1], inputShape[2], inputShape[3] };
712 splitterDimSizes[channelsIndex] /= numGroups; // split in depth
713
714 TensorInfo splitterOutputInfo(4,
715 splitterDimSizes,
716 inputInfo.GetDataType(),
717 inputInfo.GetQuantizationScale(),
718 inputInfo.GetQuantizationOffset());
719
720 std::vector<std::reference_wrapper<TensorInfo>> splitterOutputInfos(numGroups, std::ref(splitterOutputInfo));
721
722 ViewsDescriptor splitterDesc(numGroups);
723 for (unsigned int group = 0u; group < numGroups; ++group)
724 {
725 splitterDesc.SetViewOriginCoord(group, channelsIndex, splitterDimSizes[channelsIndex] * group);
726 for (unsigned int dimIdx = 0u; dimIdx < 4u; dimIdx++)
727 {
728 splitterDesc.SetViewSize(group, dimIdx, splitterDimSizes[dimIdx]);
729 }
730 }
731
732 bool isSupported = false;
733 FORWARD_LAYER_SUPPORT_FUNC(__func__,
734 IsSplitterSupported,
735 data.m_Backends,
736 isSupported,
737 inputInfo,
738 splitterOutputInfos,
739 splitterDesc);
740 if (!isSupported)
741 {
742 return false;
743 }
744
745 IConnectableLayer* splitterLayer = data.m_Network->AddSplitterLayer(splitterDesc);
746 if (!splitterLayer)
747 {
748 return Fail("%s: Failed to add SplitterLayer", __func__);
749 }
750
751 input.Connect(splitterLayer->GetInputSlot(0));
752 for (unsigned int group = 0u; group < splitterLayer->GetNumOutputSlots(); ++group)
753 {
754 splitterLayer->GetOutputSlot(group).SetTensorInfo(splitterOutputInfo);
755 }
756
757 //
758 // Set up Convolution2d layers for each group
759 //
760 TensorShape groupInputShape(inputShape);
761 groupInputShape[channelsIndex] = channelsPerGroup;
762
763 TensorShape groupOutputShape(outputShape);
764 groupOutputShape[channelsIndex] = 1;
765
766 TensorShape groupWeightsShape(weightsShape);
767 groupWeightsShape[0] /= channelMultiplier * numGroups;
768
769 TensorShape groupBiasesShape({ 1 });
770
771 const TensorInfo groupInputInfo (groupInputShape,
772 inputInfo.GetDataType(),
773 inputInfo.GetQuantizationScale(),
774 inputInfo.GetQuantizationOffset());
775 const TensorInfo groupWeightsInfo(groupWeightsShape,
776 weights.GetInfo().GetDataType(),
777 weights.GetInfo().GetQuantizationScale(),
778 weights.GetInfo().GetQuantizationOffset());
779 const TensorInfo groupBiasesInfo (groupBiasesShape,
780 biases.GetInfo().GetDataType(),
781 biases.GetInfo().GetQuantizationScale(),
782 biases.GetInfo().GetQuantizationOffset());
783 const TensorInfo groupOutputInfo (groupOutputShape,
784 outputInfo.GetDataType(),
785 outputInfo.GetQuantizationScale(),
786 outputInfo.GetQuantizationOffset());
787
788 const unsigned int weightsDataTypeSize = GetDataTypeSize(groupWeightsInfo.GetDataType());
789 const unsigned int biasesDataTypeSize = GetDataTypeSize(groupBiasesInfo.GetDataType());
790
791 std::vector<IConnectableLayer*> convLayers(numGroups*channelMultiplier, nullptr);
792 for (unsigned int group = 0u; group < numGroups; ++group)
793 {
794 for (unsigned int m = 0u; m < channelMultiplier; ++m)
795 {
796 auto index = group * channelMultiplier + m;
797
798 const unsigned int weightsDataOffset = groupWeightsShape.GetNumElements() * index * weightsDataTypeSize;
799 const unsigned int biasesDataOffset = groupBiasesShape.GetNumElements() * index * biasesDataTypeSize;
800
801 // Extract weights and biases data for current group convolution
802 ConstTensor groupWeights(groupWeightsInfo,
803 static_cast<const void *>(reinterpret_cast<const char *>(weights.GetMemoryArea()) +
804 weightsDataOffset));
805 ConstTensor groupBiases(groupBiasesInfo,
806 static_cast<const void *>(reinterpret_cast<const char *>(biases.GetMemoryArea()) +
807 biasesDataOffset));
808
809 isSupported = false;
810 FORWARD_LAYER_SUPPORT_FUNC(__func__,
811 IsConvolution2dSupported,
812 data.m_Backends,
813 isSupported,
814 groupInputInfo,
815 groupOutputInfo,
816 desc,
817 groupWeightsInfo,
818 Optional<TensorInfo>(groupBiasesInfo));
819 if (!isSupported)
820 {
821 return false;
822 }
823
824 IConnectableLayer *convLayer =
825 data.m_Network->AddConvolution2dLayer(desc, groupWeights, Optional<ConstTensor>(groupBiases));
826 if (!convLayer)
827 {
828 return Fail("%s: AddConvolution2dLayer failed", __func__);
829 }
830
831 splitterLayer->GetOutputSlot(group).Connect(convLayer->GetInputSlot(0));
832 convLayer->GetOutputSlot(0).SetTensorInfo(groupOutputInfo);
833
834 convLayers[index] = convLayer;
835 }
836 }
837
838 //
839 // Set up Concat layer
840 //
841 ConcatDescriptor concatDescriptor(outputInfo.GetShape()[channelsIndex]);
842 for (unsigned int group = 0u; group < numGroups; ++group)
843 {
844 for (unsigned int m = 0u; m < channelMultiplier; ++m)
845 {
846 auto index = group * channelMultiplier + m;
847 concatDescriptor.SetViewOriginCoord(index, channelsIndex, index);
848 concatDescriptor.SetConcatAxis(channelsIndex);
849 }
850 }
851
852 isSupported = false;
853 FORWARD_LAYER_SUPPORT_FUNC(__func__,
854 IsConcatSupported,
855 data.m_Backends,
856 isSupported,
857 std::vector<const TensorInfo*>(numGroups * channelMultiplier, &groupOutputInfo),
858 outputInfo,
859 concatDescriptor);
860 if (!isSupported)
861 {
862 return false;
863 }
864
865 IConnectableLayer* concatLayer = data.m_Network->AddConcatLayer(concatDescriptor);
866 if (!concatLayer)
867 {
868 return Fail("%s: AddConcatLayer failed", __func__);
869 }
870
871 for (unsigned int group = 0u; group < numGroups; ++group)
872 {
873 for (unsigned int m = 0u; m < channelMultiplier; ++m)
874 {
875 auto index = group * channelMultiplier + m;
876 convLayers[index]->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(index));
877 }
878 }
879 concatLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
880
881 //
882 // Set up Activation layer (if it is set)
883 //
884 IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, concatLayer, data);
885 if (!endLayer)
886 {
887 return Fail("%s: ProcessActivation failed", __func__);
888 }
889
890 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *endLayer, model, data);
891}
892
Aron Virginas-Tara2a73802019-10-09 15:30:40 +0100893bool HalPolicy::ConvertInstanceNormalization(const Operation& operation, const Model& model, ConversionData& data)
894{
895 ALOGV("hal_1_2::HalPolicy::ConvertInstanceNormalization()");
896
897 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
898 if (!input.IsValid())
899 {
900 return Fail("%s: Operation has an invalid input 0", __func__);
901 }
902
903 const Operand* output = GetOutputOperand<HalPolicy>(operation, 0, model);
904 if (!output)
905 {
906 return Fail("%s: Operation has an invalid output", __func__);
907 }
908
909 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
910 if (IsDynamicTensor(outputInfo))
911 {
912 return Fail("%s: Dynamic output tensors are not supported", __func__);
913 }
914
915 // Determine data type of input tensor
916 OperandType inputType;
917 if (!GetOperandType<hal_1_2::HalPolicy>(operation, 0, model, inputType))
918 {
919 return Fail("%s: Operation has invalid inputs", __func__);
920 }
921
922 InstanceNormalizationDescriptor desc;
923
924 // Read gamma, beta & epsilon
925 if (inputType == OperandType::TENSOR_FLOAT16)
926 {
927 Half fp16Gamma;
928 Half fp16Beta;
929 Half fp16Epsilon;
930
931 if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 1, OperandType::FLOAT16, fp16Gamma, model, data) ||
932 !GetInputScalar<hal_1_2::HalPolicy>(operation, 2, OperandType::FLOAT16, fp16Beta, model, data) ||
933 !GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::FLOAT16, fp16Epsilon, model, data))
934 {
935 return Fail("%s: Operation has invalid inputs (FLOAT16)", __func__);
936 }
937
938 desc.m_Gamma = static_cast<float>(fp16Gamma);
939 desc.m_Beta = static_cast<float>(fp16Beta);
940 desc.m_Eps = static_cast<float>(fp16Epsilon);
941 }
942 else if (inputType == OperandType::TENSOR_FLOAT32)
943 {
944 if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 1, OperandType::FLOAT32, desc.m_Gamma, model, data) ||
945 !GetInputScalar<hal_1_2::HalPolicy>(operation, 2, OperandType::FLOAT32, desc.m_Beta, model, data) ||
946 !GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::FLOAT32, desc.m_Eps, model, data))
947 {
948 return Fail("%s: Operation has invalid inputs (FLOAT32)", __func__);
949 }
950 }
951 else
952 {
953 return Fail("%s: Unsupported input tensor type: %d", __func__, inputType);
954 }
955
956 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 4, model, data);
957
958 bool isSupported = false;
959 FORWARD_LAYER_SUPPORT_FUNC(__func__,
960 IsInstanceNormalizationSupported,
961 data.m_Backends,
962 isSupported,
963 input.GetTensorInfo(),
964 outputInfo,
965 desc);
966 if (!isSupported)
967 {
968 return false;
969 }
970
971 IConnectableLayer* layer = data.m_Network->AddInstanceNormalizationLayer(desc);
972 input.Connect(layer->GetInputSlot(0));
973
974 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
975}
976
// Forwards L2_NORMALIZATION to the shared, HAL-version-agnostic
// ::ConvertL2Normalization template, instantiated for the 1.2 policy.
bool HalPolicy::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertL2Normalization()");
    return ::ConvertL2Normalization<hal_1_2::HalPolicy>(operation, model, data);
}
982
// Converts L2_POOL_2D via the shared pooling converter, selecting the
// L2 pooling algorithm.
bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertL2Pool2d()");
    return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, PoolingAlgorithm::L2, model, data);
}
988
// Forwards LOCAL_RESPONSE_NORMALIZATION to the shared, HAL-version-agnostic
// ::ConvertLocalResponseNormalization template, instantiated for the 1.2 policy.
bool HalPolicy::ConvertLocalResponseNormalization(const Operation& operation,
                                                  const Model& model,
                                                  ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertLocalResponseNormalization()");
    return ::ConvertLocalResponseNormalization<hal_1_2::HalPolicy>(operation, model, data);
}
996
// Forwards LOGISTIC (sigmoid activation) to the shared, HAL-version-agnostic
// ::ConvertLogistic template, instantiated for the 1.2 policy.
bool HalPolicy::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertLogistic()");
    return ::ConvertLogistic<hal_1_2::HalPolicy>(operation, model, data);
}
1002
Aron Virginas-Tar75e67792019-10-15 13:33:03 +01001003bool HalPolicy::ConvertLogSoftmax(const Operation& operation, const Model& model, ConversionData& data)
1004{
1005 ALOGV("hal_1_2::HalPolicy::ConvertLogSoftmax()");
1006
1007 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
1008 if (!input.IsValid())
1009 {
1010 return Fail("%s: Failed to read input 0", __func__);
1011 }
1012
1013 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
1014 if (!output)
1015 {
1016 return Fail("%s: Failed to read output", __func__);
1017 }
1018
1019 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1020 if (IsDynamicTensor(outputInfo))
1021 {
1022 return Fail("%s: Dynamic output tensors are not supported", __func__);
1023 }
1024
1025 // Determine data type of input tensor
1026 OperandType inputType;
1027 if (!GetOperandType<hal_1_2::HalPolicy>(operation, 0, model, inputType))
1028 {
1029 return Fail("%s: Operation has invalid inputs", __func__);
1030 }
1031
1032 LogSoftmaxDescriptor descriptor;
1033
1034 // Read beta
1035 if (inputType == OperandType::TENSOR_FLOAT16)
1036 {
1037 Half fp16Beta;
1038 if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 1, OperandType::FLOAT16, fp16Beta, model, data))
1039 {
1040 return Fail("%s: Failed to read input 1 (FLOAT16)", __func__);
1041 }
1042
1043 descriptor.m_Beta = static_cast<float>(fp16Beta);
1044 }
1045 else if (inputType == OperandType::TENSOR_FLOAT32)
1046 {
1047 if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 1, OperandType::FLOAT32, descriptor.m_Beta, model, data))
1048 {
1049 return Fail("%s: Failed to read input 1 (FLOAT32)", __func__);
1050 }
1051 }
1052 else
1053 {
1054 return Fail("%s: Unsupported input tensor type: %d", __func__, inputType);
1055 }
1056
1057 // Read axis
1058 if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, descriptor.m_Axis, model, data))
1059 {
1060 return Fail("%s: Failed to read input 2", __func__);
1061 }
1062
1063 bool isSupported = false;
1064 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1065 IsLogSoftmaxSupported,
1066 data.m_Backends,
1067 isSupported,
1068 input.GetTensorInfo(),
1069 outputInfo,
1070 descriptor);
1071 if (!isSupported)
1072 {
1073 return false;
1074 }
1075
1076 armnn::IConnectableLayer* layer = data.m_Network->AddLogSoftmaxLayer(descriptor);
1077 if (!layer)
1078 {
1079 return Fail("%s: AddLogSoftmaxLayer() returned nullptr", __func__);
1080 }
1081
1082 input.Connect(layer->GetInputSlot(0));
1083
1084 return SetupAndTrackLayerOutputSlot<HalPolicy>(operation, 0, *layer, model, data);
1085}
1086
// Converts MAX_POOL_2D via the shared pooling converter, selecting the
// Max pooling algorithm.
bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertMaxPool2d()");
    return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, PoolingAlgorithm::Max, model, data);
}
1092
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +01001093bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
1094{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001095 ALOGV("hal_1_2::HalPolicy::ConvertMaximum()");
1096
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +01001097 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
1098 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
1099
1100 if (!input0.IsValid() || !input1.IsValid())
1101 {
1102 return Fail("%s: Operation has invalid inputs", __func__);
1103 }
1104
1105 const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
1106 if (!outputOperand)
1107 {
1108 return Fail("%s: Could not read output", __func__);
1109 }
1110
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001111 const TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +01001112 if (IsDynamicTensor(outInfo))
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +01001113 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001114 return Fail("%s: Dynamic output tensors are not supported", __func__);
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +01001115 }
1116
Aron Virginas-Tard7593232019-07-16 13:17:06 +01001117 bool isSupported = false;
1118 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1119 IsMaximumSupported,
1120 data.m_Backends,
1121 isSupported,
1122 input0.GetTensorInfo(),
1123 input1.GetTensorInfo(),
1124 outInfo);
1125
1126 if (!isSupported)
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +01001127 {
1128 return false;
1129 }
1130
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001131 IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +01001132 assert(layer != nullptr);
Sadik Armagan64b19b52019-08-19 09:49:58 +01001133 bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
1134 if (!isReshapeSupported)
1135 {
1136 return false;
1137 }
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +01001138
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001139 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +01001140}
1141
// Forwards MEAN to the shared, HAL-version-agnostic ::ConvertMean template,
// instantiated for the 1.2 policy.
bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertMean()");
    return ::ConvertMean<hal_1_2::HalPolicy>(operation, model, data);
}
1147
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +01001148bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
1149{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001150 ALOGV("hal_1_2::HalPolicy::ConvertMinimum()");
1151
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +01001152 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
1153 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
1154
1155 if (!input0.IsValid() || !input1.IsValid())
1156 {
1157 return Fail("%s: Operation has invalid inputs", __func__);
1158 }
1159
1160 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
1161 if (!output)
1162 {
1163 return Fail("%s: Could not read output 0", __func__);
1164 }
1165
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001166 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +01001167 if (IsDynamicTensor(outputInfo))
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +01001168 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001169 return Fail("%s: Dynamic output tensors are not supported", __func__);
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +01001170 }
1171
1172 bool isSupported = false;
1173 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1174 IsMinimumSupported,
1175 data.m_Backends,
1176 isSupported,
1177 input0.GetTensorInfo(),
1178 input1.GetTensorInfo(),
1179 outputInfo);
1180
1181 if (!isSupported)
1182 {
1183 return false;
1184 }
1185
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001186 IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +01001187 assert(layer != nullptr);
Sadik Armagan64b19b52019-08-19 09:49:58 +01001188 bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
1189 if (!isReshapeSupported)
1190 {
1191 return false;
1192 }
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +01001193
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001194 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +01001195}
1196
// Forwards MUL to the shared, HAL-version-agnostic ::ConvertMul template,
// instantiated for the 1.2 policy.
bool HalPolicy::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertMul()");
    return ::ConvertMul<hal_1_2::HalPolicy>(operation, model, data);
}
1202
// Forwards PAD to the shared, HAL-version-agnostic ::ConvertPad template,
// instantiated for the 1.2 policy.
bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertPad()");
    return ::ConvertPad<hal_1_2::HalPolicy>(operation, model, data);
}
1208
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001209bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
1210{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001211 ALOGV("hal_1_2::HalPolicy::ConvertPadV2()");
1212
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001213 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
1214 if (!input.IsValid())
1215 {
1216 return Fail("%s: Could not read input 0", __func__);
1217 }
1218
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +01001219 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
1220 if (!output)
1221 {
1222 return Fail("%s: Could not read output", __func__);
1223 }
1224
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001225 const TensorInfo& inputInfo = input.GetTensorInfo();
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001226 unsigned int rank = inputInfo.GetNumDimensions();
1227
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001228 PadDescriptor descriptor;
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001229 if (!ConvertPaddings<hal_1_2::HalPolicy>(operation, model, data, rank, descriptor))
1230 {
1231 return Fail("%s: Could not convert paddings", __func__);
1232 }
1233
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001234 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +01001235 if (IsDynamicTensor(outputInfo))
Sadik Armagan310d8ff2019-07-11 10:53:38 +01001236 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001237 return Fail("%s: Dynamic output tensors are not supported", __func__);
Sadik Armagan310d8ff2019-07-11 10:53:38 +01001238 }
1239
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001240 // Determine type of padding value
1241 OperandType operandType0;
1242 OperandType operandType2;
1243
1244 if (!GetOperandType<hal_1_2::HalPolicy>(operation, 0, model, operandType0) ||
1245 !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
1246 {
1247 return Fail("%s: Operation has invalid inputs", __func__);
1248 }
1249
1250 // Read value to use for padding
1251 if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
1252 {
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001253 Half f16PadValue;
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001254 if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 2, operandType2, f16PadValue, model, data))
1255 {
1256 return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
1257 }
1258
1259 descriptor.m_PadValue = f16PadValue;
1260 }
1261 else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
1262 {
1263 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, descriptor.m_PadValue, model, data))
1264 {
1265 return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
1266 }
1267 }
1268 else if (operandType0 == OperandType::TENSOR_QUANT8_ASYMM && operandType2 == OperandType::INT32)
1269 {
Mike Kelly3c673942019-07-25 09:26:06 +01001270 int32_t intPadValue = 0;
1271 if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, intPadValue, model, data))
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001272 {
1273 return Fail("%s: Could not read input 2 (INT32)", __func__);
1274 }
Mike Kelly3c673942019-07-25 09:26:06 +01001275 descriptor.m_PadValue = intPadValue;
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001276 }
1277 else
1278 {
1279 return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
1280 }
1281
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001282 bool isSupported = false;
1283 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1284 IsPadSupported,
1285 data.m_Backends,
1286 isSupported,
1287 inputInfo,
1288 outputInfo,
1289 descriptor);
1290 if (!isSupported)
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001291 {
1292 return false;
1293 }
1294
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001295 IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001296 assert(layer != nullptr);
1297 input.Connect(layer->GetInputSlot(0));
1298 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1299
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001300 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +01001301}
1302
Matteo Martincigh17ffff32019-06-27 14:12:55 +01001303bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
1304{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001305 ALOGV("hal_1_2::HalPolicy::ConvertPrelu()");
1306
Matteo Martincigh17ffff32019-06-27 14:12:55 +01001307 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
1308 LayerInputHandle alpha = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
1309
1310 if (!input.IsValid() || !alpha.IsValid())
1311 {
1312 return Fail("%s: Operation has invalid inputs", __func__);
1313 }
1314
1315 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
1316
1317 if (!output)
1318 {
Matteo Martincigh0bd89a82019-07-02 16:53:10 +01001319 return Fail("%s: Could not read output", __func__);
Matteo Martincigh17ffff32019-06-27 14:12:55 +01001320 }
1321
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001322 const TensorInfo& inputInfo = input.GetTensorInfo();
1323 const TensorInfo& alphaInfo = alpha.GetTensorInfo();
1324 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001325
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +01001326 if (IsDynamicTensor(outputInfo))
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001327 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001328 return Fail("%s: Dynamic output tensors are not supported", __func__);
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01001329 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +01001330
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001331 bool isSupported = false;
1332 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1333 IsPreluSupported,
1334 data.m_Backends,
1335 isSupported,
1336 inputInfo,
1337 alphaInfo,
1338 outputInfo);
1339 if (!isSupported)
Matteo Martincigh17ffff32019-06-27 14:12:55 +01001340 {
1341 return false;
1342 }
1343
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001344 IConnectableLayer* const layer = data.m_Network->AddPreluLayer();
Matteo Martincigh17ffff32019-06-27 14:12:55 +01001345
1346 if (!layer)
1347 {
1348 return Fail("%s: AddPreluLayer failed", __func__);
1349 }
1350
Sadik Armagan64b19b52019-08-19 09:49:58 +01001351 bool isReshapeSupported = BroadcastTensor(input, alpha, layer, data);
1352 if (!isReshapeSupported)
1353 {
1354 return false;
1355 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +01001356
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001357 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Matteo Martincigh17ffff32019-06-27 14:12:55 +01001358}
1359
Sadik Armagan5a476a82019-07-30 09:43:18 +01001360bool HalPolicy::ConvertQuantize(const Operation& operation, const Model& model, ConversionData& data)
1361{
1362 ALOGV("hal_1_2::HalPolicy::ConvertQuantize()");
1363
1364 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
1365 if (!input.IsValid())
1366 {
1367 return Fail("%s: Operation has invalid input", __func__);
1368 }
1369
1370 const Operand* const outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
1371 if (!outputOperand)
1372 {
1373 return Fail("%s: Operation has invalid outputs", __func__);
1374 }
1375
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001376 const TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Sadik Armagan5a476a82019-07-30 09:43:18 +01001377 if (IsDynamicTensor(outputInfo))
1378 {
1379 return Fail("%s: Dynamic output tensors are not supported", __func__);
1380 }
1381
1382 bool isSupported = false;
1383 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1384 IsQuantizeSupported,
1385 data.m_Backends,
1386 isSupported,
1387 input.GetTensorInfo(),
1388 outputInfo);
1389 if (!isSupported)
1390 {
1391 return false;
1392 }
1393
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001394 IConnectableLayer* const layer = data.m_Network->AddQuantizeLayer();
Sadik Armagan5a476a82019-07-30 09:43:18 +01001395 assert(layer != nullptr);
1396 input.Connect(layer->GetInputSlot(0));
1397
1398 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
1399}
1400
Ellen Norris-Thompson7efb46d2019-07-24 17:39:19 +01001401bool HalPolicy::ConvertQuantizedLstm(const Operation& operation, const Model& model, ConversionData& data)
1402{
1403 ALOGV("hal_1_2::HalPolicy::ConvertQuantizedLstm()");
1404
1405 //Inputs:
1406 // 0: The input: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, inputSize]
1407 // specifying the input to the LSTM cell. Tensor is quantized with a fixed quantization range of -1, 127/128.
1408 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
1409 if (!input.IsValid())
1410 {
1411 return Fail("%s: Could not read input 0: input", __func__);
1412 }
1413
1414 //13: The previous cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape
1415 // [numBatches, outputSize] specifying the cell state from the previous time step of the LSTM cell.
1416 // It is quantized using a quantization range of -2^4, 2^4 * 32767/32768.
1417 LayerInputHandle previousCellStateIn = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 13, model, data);
1418 if (!previousCellStateIn.IsValid())
1419 {
1420 return Fail("%s: Could not read input 13: previousCellStateIn", __func__);
1421 }
1422
1423 // 14: The previous output state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
1424 // [numBathes, outputSize] specifying the output of the LSTM cell from previous time-step. Tensor
1425 // is quantized with a fixed quantization range of -1, 127/128.
1426 LayerInputHandle previousOutputIn = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 14, model, data);
1427 if (!previousOutputIn.IsValid())
1428 {
1429 return Fail("%s: Could not read input 14: previousOutputIn", __func__);
1430 }
1431
1432 // Get the input tensors:
1433 // 1: The input-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
1434 // [outputSize, inputSize] specifying input-to-input part of weights for fully-connected layer inside the
1435 // LSTM cell. Quantization zero point and scale must be the same across all the weights.
1436 const ConstTensorPin inputToInputWeightsPin =
Ellen Norris-Thompsona3d7fad2019-08-05 14:20:32 +01001437 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
Ellen Norris-Thompson7efb46d2019-07-24 17:39:19 +01001438
1439 // 2: The input-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
1440 // [outputSize, inputSize] specifying input-to-forget part of weights for fully-connected layer inside the
1441 // LSTM cell. Quantization zero point and scale must be the same across all the weights.
1442 const ConstTensorPin inputToForgetWeightsPin =
Ellen Norris-Thompsona3d7fad2019-08-05 14:20:32 +01001443 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);
Ellen Norris-Thompson7efb46d2019-07-24 17:39:19 +01001444
1445 // 3: The input-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
1446 // [outputSize, inputSize] specifying input-to-cell part of weights for fully-connected layer inside the
1447 // LSTM cell. Quantization zero point and scale must be the same across all the weights.
1448 const ConstTensorPin inputToCellWeightsPin =
Ellen Norris-Thompsona3d7fad2019-08-05 14:20:32 +01001449 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 3, model, data);
Ellen Norris-Thompson7efb46d2019-07-24 17:39:19 +01001450
1451 // 4: The input-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
1452 // [outputSize, inputSize] specifying input-to-output part of weights for fully-connected layer inside the
1453 // LSTM cell. Quantization zero point and scale must be the same across all the weights.
1454 const ConstTensorPin inputToOutputWeightsPin =
Ellen Norris-Thompsona3d7fad2019-08-05 14:20:32 +01001455 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 4, model, data);
Ellen Norris-Thompson7efb46d2019-07-24 17:39:19 +01001456
1457 // 5: The recurrent-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
1458 // [outputSize, outputSize] specifying recurrent-to-input part of weights for fully-connected layer inside
1459 // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
1460 const ConstTensorPin recurrentToInputWeightsPin =
Ellen Norris-Thompsona3d7fad2019-08-05 14:20:32 +01001461 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 5, model, data);
Ellen Norris-Thompson7efb46d2019-07-24 17:39:19 +01001462
1463 // 6: The recurrent-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
1464 // [outputSize, outputSize] specifying recurrent-to-forget part of weights for fully-connected layer inside
1465 // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
1466 const ConstTensorPin recurrentToForgetWeightsPin =
Ellen Norris-Thompsona3d7fad2019-08-05 14:20:32 +01001467 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 6, model, data);
Ellen Norris-Thompson7efb46d2019-07-24 17:39:19 +01001468
1469 // 7: The recurrent-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
1470 // [outputSize, outputSize] specifying recurrent-to-cell part of weights for fully-connected layer inside
1471 // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
1472 const ConstTensorPin recurrentToCellWeightsPin =
Ellen Norris-Thompsona3d7fad2019-08-05 14:20:32 +01001473 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 7, model, data);
Ellen Norris-Thompson7efb46d2019-07-24 17:39:19 +01001474
1475 // 8: The recurrent-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
1476 // [outputSize, outputSize] specifying recurrent-to-output part of weights for fully-connected layer inside
1477 // the LSTM cell. Quantization zero point and scale must be the same across all the weights.
1478 const ConstTensorPin recurrentToOutputWeightsPin =
Ellen Norris-Thompsona3d7fad2019-08-05 14:20:32 +01001479 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 8, model, data);
Ellen Norris-Thompson7efb46d2019-07-24 17:39:19 +01001480
1481 // 9: The input gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the
1482 // bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
1483 // of input and weights scales and zeroPoint equal to 0.
1484 const ConstTensorPin inputGateBiasPin =
Ellen Norris-Thompsona3d7fad2019-08-05 14:20:32 +01001485 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 9, model, data);
Ellen Norris-Thompson7efb46d2019-07-24 17:39:19 +01001486
1487 // 10: The forget gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
1488 // the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
1489 // of input and weights scales and zeroPoint equal to 0.
1490 const ConstTensorPin forgetGateBiasPin =
Ellen Norris-Thompsona3d7fad2019-08-05 14:20:32 +01001491 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 10, model, data);
Ellen Norris-Thompson7efb46d2019-07-24 17:39:19 +01001492
1493 // 11:The cell bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the bias
1494 // for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product of input
1495 // and weights scales and zeroPoint equal to 0.
1496 const ConstTensorPin cellBiasPin =
Ellen Norris-Thompsona3d7fad2019-08-05 14:20:32 +01001497 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 11, model, data);
Ellen Norris-Thompson7efb46d2019-07-24 17:39:19 +01001498
1499 // 12:The output gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
1500 // the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
1501 // of input and weights scales and zeroPoint equal to 0.
1502 const ConstTensorPin outputGateBiasPin =
Ellen Norris-Thompsona3d7fad2019-08-05 14:20:32 +01001503 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 12, model, data);
Ellen Norris-Thompson7efb46d2019-07-24 17:39:19 +01001504
1505 if (!inputToInputWeightsPin.IsValid() ||
1506 !inputToForgetWeightsPin.IsValid() ||
1507 !inputToCellWeightsPin.IsValid() ||
1508 !inputToOutputWeightsPin.IsValid() ||
1509 !recurrentToInputWeightsPin.IsValid() ||
1510 !recurrentToForgetWeightsPin.IsValid() ||
1511 !recurrentToCellWeightsPin.IsValid() ||
1512 !recurrentToOutputWeightsPin.IsValid() ||
1513 !inputGateBiasPin.IsValid() ||
1514 !forgetGateBiasPin.IsValid() ||
1515 !cellBiasPin.IsValid() ||
1516 !outputGateBiasPin.IsValid())
1517 {
1518 return Fail("%s: Operation has invalid tensor inputs", __func__);
1519 }
1520
1521 // Outputs:
1522 // 0: The cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape [numBatches, outputSize]
1523 // which contains a cell state from the current time step. Tensor is quantized using a quantization range
1524 // of -2^4, 2^4 * 32767/32768.
1525 const Operand* cellStateOut = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
1526 if (!cellStateOut)
1527 {
1528 return Fail("%s: Could not read output 0: cellStateOut", __func__);
1529 }
1530
1531 // 1: The output: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBathes, outputSize] which
1532 // contains the output value. Tensor is quantized with a fixed quantization range of -1, 127/128.
1533 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 1, model);
1534 if (!output)
1535 {
1536 return Fail("%s: Could not read output 1: output", __func__);
1537 }
1538
1539 // Inputs
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001540 const TensorInfo& inputInfo = input.GetTensorInfo();
1541 const TensorInfo& previousCellStateInInfo = previousCellStateIn.GetTensorInfo();
1542 const TensorInfo& previousOutputInInfo = previousOutputIn.GetTensorInfo();
Ellen Norris-Thompson7efb46d2019-07-24 17:39:19 +01001543
1544 // Outputs
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001545 const TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
1546 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Ellen Norris-Thompson7efb46d2019-07-24 17:39:19 +01001547
1548 // Dynamic tensors currently not supported
1549 if (IsDynamicTensor(cellStateOutInfo) || IsDynamicTensor(outputInfo))
1550 {
1551 return Fail("%s: Dynamic output tensors are not supported", __func__);
1552 }
1553
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001554 QuantizedLstmInputParams params;
Ellen Norris-Thompson7efb46d2019-07-24 17:39:19 +01001555
1556 params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
1557 params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
1558 params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
1559 params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
1560 params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
1561 params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
1562 params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
1563 params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
1564 params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
1565 params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
1566 params.m_CellBias = cellBiasPin.GetConstTensorPtr();
1567 params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
1568
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001569 QuantizedLstmInputParamsInfo paramsInfo;
Ellen Norris-Thompson7efb46d2019-07-24 17:39:19 +01001570 paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
1571 paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
1572 paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
1573 paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
1574 paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
1575 paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
1576 paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
1577 paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
1578 paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
1579 paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
1580 paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
1581 paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
1582
1583 bool isSupported = false;
1584 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1585 IsQuantizedLstmSupported,
1586 data.m_Backends,
1587 isSupported,
1588 inputInfo,
1589 previousCellStateInInfo,
1590 previousOutputInInfo,
1591 cellStateOutInfo,
1592 outputInfo,
1593 paramsInfo);
1594
1595 if (!isSupported)
1596 {
1597 return false;
1598 }
1599
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001600 IConnectableLayer* const layer = data.m_Network->AddQuantizedLstmLayer(params, "QuantizedLstm");
Ellen Norris-Thompson7efb46d2019-07-24 17:39:19 +01001601 input.Connect(layer->GetInputSlot(0));
Ellen Norris-Thompsona3d7fad2019-08-05 14:20:32 +01001602 previousCellStateIn.Connect(layer->GetInputSlot(1));
1603 previousOutputIn.Connect(layer->GetInputSlot(2));
Ellen Norris-Thompson7efb46d2019-07-24 17:39:19 +01001604
1605 return (SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, 0, model, data) &&
1606 SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 1, *layer, 1, model, data));
1607}
1608
// Converts a RELU activation by delegating to the HAL-version-agnostic ::ConvertReLu template.
bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu()");
    return ::ConvertReLu<hal_1_2::HalPolicy>(operation, model, data);
}
1614
// Converts a RELU1 activation by delegating to the HAL-version-agnostic ::ConvertReLu1 template.
bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu1()");
    return ::ConvertReLu1<hal_1_2::HalPolicy>(operation, model, data);
}
1620
// Converts a RELU6 activation by delegating to the HAL-version-agnostic ::ConvertReLu6 template.
bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu6()");
    return ::ConvertReLu6<hal_1_2::HalPolicy>(operation, model, data);
}
1626
// Converts a RESHAPE operation by delegating to the HAL-version-agnostic ::ConvertReshape template.
bool HalPolicy::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReshape()");
    return ::ConvertReshape<hal_1_2::HalPolicy>(operation, model, data);
}
1632
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +01001633bool HalPolicy::ConvertResize(const Operation& operation,
1634 const Model& model,
1635 ConversionData& data,
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001636 ResizeMethod resizeMethod)
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +01001637{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001638 ALOGV("hal_1_2::HalPolicy::ConvertResize()");
1639
1640 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +01001641 if (!input.IsValid())
1642 {
1643 return Fail("%s: Could not read input 0", __func__);
1644 }
1645
1646 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
1647 if (!output)
1648 {
1649 return Fail("%s: Could not read output 0", __func__);
1650 }
1651
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001652 const TensorInfo& inputInfo = input.GetTensorInfo();
1653 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001654
1655 if (IsDynamicTensor(outputInfo))
1656 {
1657 return Fail("%s: Dynamic output tensors are not supported", __func__);
1658 }
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +01001659
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001660 ResizeDescriptor descriptor;
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +01001661 descriptor.m_Method = resizeMethod;
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +01001662 descriptor.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 3, model, data);
1663
1664 OperandType operandType1;
1665 OperandType operandType2;
1666
1667 if (!GetOperandType<hal_1_2::HalPolicy>(operation, 1, model, operandType1) ||
1668 !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
1669 {
1670 return Fail("%s: Operation has invalid inputs", __func__);
1671 }
1672
1673 if (operandType1 != operandType2)
1674 {
1675 return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
1676 }
1677
1678 if (operandType1 == OperandType::INT32)
1679 {
1680 // Case 1: resizing by shape
1681 int32_t targetWidth = 0;
1682 int32_t targetHeight = 0;
1683
1684 if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 1, targetWidth, model, data) ||
1685 !GetInputInt32<hal_1_2::HalPolicy>(operation, 2, targetHeight, model, data))
1686 {
1687 return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
1688 }
1689
1690 if (targetWidth < 0 || targetHeight < 0)
1691 {
1692 return Fail("%s: Operation has invalid inputs for resizing by shape. "
1693 "Target width/height cannot be < 0", __func__);
1694 }
1695
1696 descriptor.m_TargetWidth = static_cast<uint32_t>(targetWidth);
Teresa Charlin9843c012019-07-19 12:18:35 +01001697 descriptor.m_TargetHeight = static_cast<uint32_t>(targetHeight);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +01001698 }
1699 else if (operandType1 == OperandType::FLOAT32)
1700 {
1701 // Case 2: resizing by scale
1702 float widthScale = 1.0f;
1703 float heightScale = 1.0f;
1704
1705 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, widthScale, model, data) ||
1706 !GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, heightScale, model, data))
1707 {
1708 return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
1709 }
1710
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001711 const TensorShape& inputShape = inputInfo.GetShape();
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +01001712 armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
1713
1714 float width = inputShape[dataLayoutIndexed.GetWidthIndex()];
1715 float height = inputShape[dataLayoutIndexed.GetHeightIndex()];
1716
1717 descriptor.m_TargetWidth = std::floor(width * widthScale);
1718 descriptor.m_TargetHeight = std::floor(height * heightScale);
1719 }
1720 else
1721 {
1722 // NOTE: FLOAT16 scales are not supported
1723 return false;
1724 }
1725
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001726 bool isSupported = false;
1727 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1728 IsResizeSupported,
1729 data.m_Backends,
1730 isSupported,
1731 inputInfo,
1732 outputInfo,
1733 descriptor);
Aron Virginas-Tarbe5d3562019-07-16 11:32:29 +01001734
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001735 if (!isSupported)
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +01001736 {
1737 return false;
1738 }
1739
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001740 IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +01001741
1742 assert(layer != nullptr);
1743
1744 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
1745 input.Connect(layer->GetInputSlot(0));
1746
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001747 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +01001748}
1749
Aron Virginas-Tarfa6544e2019-09-10 14:42:22 +01001750bool HalPolicy::ConvertRsqrt(const Operation& operation, const Model& model, ConversionData& data)
1751{
1752 ALOGV("hal_1_2::HalPolicy::ConvertRsqrt()");
1753
1754 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
1755 if (!input.IsValid())
1756 {
1757 return Fail("%s: Operation has invalid input", __func__);
1758 }
1759
1760 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
1761 if (!output)
1762 {
1763 return Fail("%s: Could not read output 0", __func__);
1764 }
1765
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001766 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tarfa6544e2019-09-10 14:42:22 +01001767 if (IsDynamicTensor(outputInfo))
1768 {
1769 return Fail("%s: Dynamic output tensors are not supported", __func__);
1770 }
1771
1772 bool isSupported = false;
1773 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1774 IsRsqrtSupported,
1775 data.m_Backends,
1776 isSupported,
1777 input.GetTensorInfo(),
1778 outputInfo);
1779
1780 if (!isSupported)
1781 {
1782 return false;
1783 }
1784
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001785 IConnectableLayer* const layer = data.m_Network->AddRsqrtLayer();
Aron Virginas-Tarfa6544e2019-09-10 14:42:22 +01001786 assert(layer != nullptr);
1787 input.Connect(layer->GetInputSlot(0));
1788
1789 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
1790}
1791
// Converts a SPACE_TO_BATCH_ND operation by delegating to the HAL-version-agnostic
// ::ConvertSpaceToBatchNd template.
bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertSpaceToBatchNd()");
    return ::ConvertSpaceToBatchNd<hal_1_2::HalPolicy>(operation, model, data);
}
1797
Keith Davisa6bc52f2019-06-26 09:39:49 +01001798bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
1799{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001800 ALOGV("hal_1_2::HalPolicy::ConvertSpaceToDepth()");
Keith Davisa6bc52f2019-06-26 09:39:49 +01001801
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001802 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
Keith Davisa6bc52f2019-06-26 09:39:49 +01001803 if (!input.IsValid() )
1804 {
1805 return Fail("%s: Operation has invalid inputs", __func__);
1806 }
1807
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001808 const TensorInfo& inputInfo = input.GetTensorInfo();
Keith Davisa6bc52f2019-06-26 09:39:49 +01001809 unsigned int rank = inputInfo.GetNumDimensions();
Keith Davisa6bc52f2019-06-26 09:39:49 +01001810 if (rank != 4)
1811 {
1812 return Fail("%s: Only inputs with rank 4 are supported", __func__);
1813 }
1814
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001815 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
1816 if (!output)
1817 {
1818 return Fail("%s: Could not read output 0", __func__);
1819 }
1820
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001821 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001822 if (IsDynamicTensor(outputInfo))
1823 {
1824 return Fail("%s: Dynamic output tensors are not supported", __func__);
1825 }
1826
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001827 SpaceToDepthDescriptor desc;
Keith Davisa6bc52f2019-06-26 09:39:49 +01001828
1829 GetInputScalar<hal_1_2::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
1830
1831 if (desc.m_BlockSize <= 1)
1832 {
1833 return Fail("%s: Block size must be at least 1 in all dimensions");
1834 }
1835
1836 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 2, model, data);
1837
Ferran Balaguerd30093c2019-07-09 17:04:47 +01001838 bool isSupported = false;
1839 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1840 IsSpaceToDepthSupported,
1841 data.m_Backends,
1842 isSupported,
1843 inputInfo,
1844 outputInfo,
1845 desc);
1846 if (!isSupported)
Keith Davisa6bc52f2019-06-26 09:39:49 +01001847 {
1848 return false;
1849 }
1850
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001851 IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
Keith Davisa6bc52f2019-06-26 09:39:49 +01001852 assert(layer != nullptr);
1853 input.Connect(layer->GetInputSlot(0));
1854
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001855 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Keith Davisa6bc52f2019-06-26 09:39:49 +01001856}
1857
Francis Murtagh074c25a2019-07-22 16:40:57 +01001858bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
1859{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001860 ALOGV("hal_1_2::HalPolicy::ConvertSoftmax()");
1861
Francis Murtagh074c25a2019-07-22 16:40:57 +01001862 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
1863 if (!input.IsValid())
1864 {
1865 return Fail("%s: Operation has invalid inputs", __func__);
1866 }
1867
1868 const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
1869 if (!outputOperand)
1870 {
1871 return Fail("%s: Operation has no outputs", __func__);
1872 }
1873
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001874 const TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +01001875 if (IsDynamicTensor(outputInfo))
Francis Murtagh074c25a2019-07-22 16:40:57 +01001876 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001877 return Fail("%s: Dynamic output tensors are not supported", __func__);
Francis Murtagh074c25a2019-07-22 16:40:57 +01001878 }
1879
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001880 SoftmaxDescriptor desc;
Francis Murtagh074c25a2019-07-22 16:40:57 +01001881 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, desc.m_Beta, model, data))
1882 {
1883 return Fail("%s: Operation has invalid inputs", __func__);
1884 }
1885
1886 if (operation.inputs.size() > 2 && !GetInputScalar<hal_1_2::HalPolicy>(operation,
1887 2,
1888 HalPolicy::OperandType::INT32,
1889 desc.m_Axis,
1890 model,
1891 data))
1892 {
1893 return Fail("%s: Operation has invalid inputs", __func__);
1894 }
1895
Narumol Prangnawarat52dc5272019-08-06 17:34:26 +01001896 if (input.GetTensorInfo().GetNumDimensions() > 2 ||
1897 !(desc.m_Axis == 1 ||
1898 (desc.m_Axis < 0 && static_cast<int>(input.GetTensorInfo().GetNumDimensions()) + desc.m_Axis == 1)))
1899 {
1900 return Fail("%s: Unsupported input greater than 2D or axis != 1", __func__);
1901 }
1902
Francis Murtagh074c25a2019-07-22 16:40:57 +01001903 bool isSupported = false;
1904 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1905 IsSoftmaxSupported,
1906 data.m_Backends,
1907 isSupported,
1908 input.GetTensorInfo(),
1909 outputInfo,
1910 desc);
1911 if (!isSupported)
1912 {
1913 return false;
1914 }
1915
Teresa Charlin8f6429d2019-10-01 13:10:15 +01001916 IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
Francis Murtagh074c25a2019-07-22 16:40:57 +01001917 assert(layer != nullptr);
1918 input.Connect(layer->GetInputSlot(0));
1919
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001920 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Francis Murtagh074c25a2019-07-22 16:40:57 +01001921}
1922
// Converts a SUB operation by delegating to the HAL-version-agnostic ::ConvertSub template.
bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertSub()");
    return ::ConvertSub<hal_1_2::HalPolicy>(operation, model, data);
}
1928
// Converts a TANH activation by delegating to the HAL-version-agnostic ::ConvertTanH template.
bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertTanH()");
    return ::ConvertTanH<hal_1_2::HalPolicy>(operation, model, data);
}
1934
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001935bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
1936{
1937 // Inputs:
1938 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
1939 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
1940 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
1941 if (!input.IsValid())
1942 {
1943 return Fail("%s: Could not read input 0: input", __func__);
1944 }
1945 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
1946 LayerInputHandle outputStateIn = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 18, model, data);
1947 if (!outputStateIn.IsValid())
1948 {
1949 return Fail("%s: Could not read input 18: outputStateIn", __func__);
1950 }
1951 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
1952 LayerInputHandle cellStateIn = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 19, model, data);
1953 if (!cellStateIn.IsValid())
1954 {
1955 return Fail("%s: Could not read input 19: cellStateIn", __func__);
1956 }
1957
1958 // Get the mandatory input tensors:
1959 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1960 // [num_units, input_size].
1961 const ConstTensorPin inputToForgetWeightsPin =
1962 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);
1963 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1964 // [num_units, input_size].
1965 const ConstTensorPin inputToCellWeightsPin =
1966 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 3, model, data);
1967 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1968 // [num_units, input_size].
1969 const ConstTensorPin inputToOutputWeightsPin =
1970 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 4, model, data);
1971 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1972 // [num_units, output_size].
1973 const ConstTensorPin recurrentToForgetWeightsPin =
1974 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 6, model, data);
1975 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1976 // [num_units, output_size].
1977 const ConstTensorPin recurrentToCellWeightsPin =
1978 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 7, model, data);
1979 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1980 // [num_units, output_size].
1981 const ConstTensorPin recurrentToOutputWeightsPin =
1982 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 8, model, data);
1983 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1984 const ConstTensorPin forgetGateBiasPin =
1985 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 13, model, data);
1986 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1987 const ConstTensorPin cellBiasPin =
1988 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 14, model, data);
1989 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1990 const ConstTensorPin outputGateBiasPin =
1991 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 15, model, data);
1992
1993 if (!inputToForgetWeightsPin.IsValid() ||
1994 !inputToCellWeightsPin.IsValid() ||
1995 !inputToOutputWeightsPin.IsValid() ||
1996 !recurrentToForgetWeightsPin.IsValid() ||
1997 !recurrentToCellWeightsPin.IsValid() ||
1998 !recurrentToOutputWeightsPin.IsValid() ||
1999 !forgetGateBiasPin.IsValid() ||
2000 !cellBiasPin.IsValid() ||
2001 !outputGateBiasPin.IsValid())
2002 {
2003 return Fail("%s: Operation has invalid tensor inputs", __func__);
2004 }
2005
2006 // Get the optional input tensors:
2007 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2008 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
2009 const ConstTensorPin inputToInputWeightsPin =
2010 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
2011 1,
2012 model,
2013 data,
2014 g_DontPermute,
2015 nullptr,
2016 true);
2017
2018 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2019 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
2020 // “num_units”), or the second dimension of the “projection_weights”, if defined.
2021 const ConstTensorPin recurrentToInputWeightsPin =
2022 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
2023 5,
2024 model,
2025 data,
2026 g_DontPermute,
2027 nullptr,
2028 true);
2029
2030 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2031 const ConstTensorPin cellToInputWeightsPin =
2032 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
2033 9,
2034 model,
2035 data,
2036 g_DontPermute,
2037 nullptr,
2038 true);
2039
2040 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2041 const ConstTensorPin cellToForgetWeightsPin =
2042 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
2043 10,
2044 model,
2045 data,
2046 g_DontPermute,
2047 nullptr,
2048 true);
2049
2050 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2051 const ConstTensorPin cellToOutputWeightsPin =
2052 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
2053 11,
2054 model,
2055 data,
2056 g_DontPermute,
2057 nullptr,
2058 true);
2059
2060 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2061 const ConstTensorPin inputGateBiasPin =
2062 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
2063 12,
2064 model,
2065 data,
2066 g_DontPermute,
2067 nullptr,
2068 true);
2069
2070 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2071 // [output_size, num_units].
2072 const ConstTensorPin projectionWeightsPin =
2073 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
2074 16,
2075 model,
2076 data,
2077 g_DontPermute,
2078 nullptr,
2079 true);
2080
2081 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
2082 const ConstTensorPin projectionBiasPin =
2083 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
2084 17,
2085 model,
2086 data,
2087 g_DontPermute,
2088 nullptr,
2089 true);
2090
2091 if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
2092 (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
2093 (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
2094 (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
2095 (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
2096 (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
2097 (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
2098 (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
2099 {
2100 return Fail("%s: Operation has invalid tensor inputs", __func__);
2101 }
2102
2103 // Get the mandatory input scalars (actually 1-D tensors of size 1):
2104 // 20: The activation function: A value indicating the activation function:
2105 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
2106 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
2107 // If set to 0.0 then clipping is disabled.
2108 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
2109 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
2110 ActivationFn activation;
2111 float cellClip;
2112 float projClip;
2113 if (!GetInputActivationFunctionFromTensor<hal_1_2::HalPolicy>(operation, 20, activation, model, data) ||
2114 !GetInputScalar<hal_1_2::HalPolicy>(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
2115 !GetInputScalar<hal_1_2::HalPolicy>(operation, 22, OperandType::FLOAT32, projClip, model, data))
2116 {
2117 return Fail("%s: Operation has invalid scalar inputs", __func__);
2118 }
2119
2120 // Get the normalization tensors
2121 // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
2122 // Used to rescale normalized inputs to activation at input gate.
2123 const ConstTensorPin inputLayerNormWeightsPin =
2124 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
2125 23,
2126 model,
2127 data,
2128 g_DontPermute,
2129 nullptr,
2130 true);
2131
2132 // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
2133 // Used to rescale normalized inputs to activation at forget gate.
2134 const ConstTensorPin forgetLayerNormWeightsPin =
2135 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
2136 24,
2137 model,
2138 data,
2139 g_DontPermute,
2140 nullptr,
2141 true);
2142
2143 // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
2144 // Used to rescale normalized inputs to activation at cell gate.
2145 const ConstTensorPin cellLayerNormWeightsPin =
2146 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
2147 25,
2148 model,
2149 data,
2150 g_DontPermute,
2151 nullptr,
2152 true);
2153
2154 // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
2155 // Used to rescale normalized inputs to activation at output gate.
2156 const ConstTensorPin outputLayerNormWeightsPin =
2157 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
2158 26,
2159 model,
2160 data,
2161 g_DontPermute,
2162 nullptr,
2163 true);
2164
2165 // Outputs:
2166 // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
2167 // with CIFG, or [batch_size, num_units * 3] without CIFG.
2168 const Operand* scratchBuffer = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
2169 if (!scratchBuffer)
2170 {
2171 return Fail("%s: Could not read output 0: scratchBuffer", __func__);
2172 }
2173 // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
2174 const Operand* outputStateOut = GetOutputOperand<hal_1_2::HalPolicy>(operation, 1, model);
2175 if (!outputStateOut)
2176 {
2177 return Fail("%s: Could not read output 1: outputStateOut", __func__);
2178 }
2179 // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
2180 const Operand* cellStateOut = GetOutputOperand<hal_1_2::HalPolicy>(operation, 2, model);
2181 if (!cellStateOut)
2182 {
2183 return Fail("%s: Could not read output 2: cellStateOut", __func__);
2184 }
2185 // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
2186 // effectively the same as the current “output state (out)” value.
2187 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 3, model);
2188 if (!output)
2189 {
2190 return Fail("%s: Could not read output 3: output", __func__);
2191 }
2192
2193 // set the params structure for the AddLstmLayer call
Teresa Charlin8f6429d2019-10-01 13:10:15 +01002194 LstmInputParams params;
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01002195 params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
2196 params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
2197 params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
2198 params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
2199 params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
2200 params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
2201 params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
2202 params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
2203 params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
2204 params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
2205 params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
2206 params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
2207 params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
2208 params.m_CellBias = cellBiasPin.GetConstTensorPtr();
2209 params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
2210 params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
2211 params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
2212 params.m_InputLayerNormWeights = inputLayerNormWeightsPin.GetConstTensorPtr();
2213 params.m_ForgetLayerNormWeights = forgetLayerNormWeightsPin.GetConstTensorPtr();
2214 params.m_CellLayerNormWeights = cellLayerNormWeightsPin.GetConstTensorPtr();
2215 params.m_OutputLayerNormWeights = outputLayerNormWeightsPin.GetConstTensorPtr();
2216
2217 // set the layer descriptor
Teresa Charlin8f6429d2019-10-01 13:10:15 +01002218 LstmDescriptor desc;
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01002219 desc.m_ActivationFunc = activation;
2220 desc.m_ClippingThresCell = cellClip;
2221 desc.m_ClippingThresProj = projClip;
2222 desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
2223 params.m_RecurrentToInputWeights == nullptr ||
2224 params.m_InputGateBias == nullptr);
2225 desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
2226 params.m_CellToOutputWeights != nullptr);
2227 desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
2228 desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr ||
2229 params.m_ForgetLayerNormWeights != nullptr ||
2230 params.m_CellLayerNormWeights != nullptr ||
2231 params.m_OutputLayerNormWeights != nullptr);
2232
2233 // validate the optional input groups
2234 if (desc.m_CifgEnabled &&
2235 (params.m_InputToInputWeights != nullptr ||
2236 params.m_RecurrentToInputWeights != nullptr ||
2237 params.m_InputGateBias != nullptr))
2238 {
2239 return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
2240 " and input gate bias must be provided", __func__);
2241 }
2242
2243 if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
2244 {
2245 return Fail("%s: projection bias should not be provided without projection weights", __func__);
2246 }
2247
2248 if (desc.m_PeepholeEnabled &&
2249 (params.m_CellToForgetWeights == nullptr ||
2250 params.m_CellToOutputWeights == nullptr ||
2251 (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
2252 {
2253 return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
2254 " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
2255 }
2256
2257 if (desc.m_LayerNormEnabled &&
2258 (params.m_ForgetLayerNormWeights == nullptr ||
2259 params.m_CellLayerNormWeights == nullptr ||
2260 params.m_OutputLayerNormWeights == nullptr ||
2261 (!desc.m_CifgEnabled && params.m_InputLayerNormWeights == nullptr)))
2262 {
2263 return Fail("%s: All, or none, of forget-norm weights, cell-norm weights and output-norm weights must be"
2264 " provided and, if CIFG is not enabled, input-norm weights must also be provided", __func__);
2265 }
2266
2267 // Check if the layer is supported
2268 // Inputs
Teresa Charlin8f6429d2019-10-01 13:10:15 +01002269 const TensorInfo& inputInfo = input.GetTensorInfo();
2270 const TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
2271 const TensorInfo& cellStateInInfo = cellStateIn.GetTensorInfo();
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01002272
2273 // Outputs
Teresa Charlin8f6429d2019-10-01 13:10:15 +01002274 const TensorInfo& scratchBufferInfo = GetTensorInfoForOperand(*scratchBuffer);
2275 const TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
2276 const TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
2277 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01002278
Ferran Balaguera4a629a2019-07-30 10:16:13 +01002279 if (IsDynamicTensor(scratchBufferInfo) ||
2280 IsDynamicTensor(outputStateOutInfo) ||
2281 IsDynamicTensor(cellStateOutInfo) ||
2282 IsDynamicTensor(outputInfo))
2283 {
2284 return Fail("%s: Dynamic output tensors are not supported", __func__);
2285 }
2286
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01002287 // Basic parameters
Teresa Charlin8f6429d2019-10-01 13:10:15 +01002288 LstmInputParamsInfo paramsInfo;
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01002289 paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
2290 paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
2291 paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
2292 paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
2293 paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
2294 paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
2295 paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
2296 paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
2297 paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
2298
2299 // Optional parameters
2300 if(!desc.m_CifgEnabled)
2301 {
2302 paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
2303 paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
2304 if (params.m_CellToInputWeights != nullptr)
2305 {
2306 paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
2307 }
2308 paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
2309 }
2310
2311 if(desc.m_ProjectionEnabled)
2312 {
2313 paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
2314 if (params.m_ProjectionBias != nullptr)
2315 {
2316 paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
2317 }
2318 }
2319
2320 if(desc.m_PeepholeEnabled)
2321 {
2322 paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
2323 paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
2324 }
2325
2326 if (desc.m_LayerNormEnabled)
2327 {
2328 if(!desc.m_CifgEnabled)
2329 {
2330 paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
2331 }
2332 paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
2333 paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
2334 paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
2335 }
2336
2337 bool isSupported = false;
2338 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2339 IsLstmSupported,
2340 data.m_Backends,
2341 isSupported,
2342 inputInfo,
2343 outputStateInInfo,
2344 cellStateInInfo,
2345 scratchBufferInfo,
2346 outputStateOutInfo,
2347 cellStateOutInfo,
2348 outputInfo,
2349 desc,
2350 paramsInfo);
2351 if (!isSupported)
2352 {
2353 return false;
2354 }
2355
2356 // Add the layer
Teresa Charlin8f6429d2019-10-01 13:10:15 +01002357 IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01002358
2359 input.Connect(layer->GetInputSlot(0));
2360 outputStateIn.Connect(layer->GetInputSlot(1));
2361 cellStateIn.Connect(layer->GetInputSlot(2));
2362
2363 return (SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, 0, model, data) &&
2364 SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 1, *layer, 1, model, data) &&
2365 SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 2, *layer, 2, model, data) &&
2366 SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 3, *layer, 3, model, data));
2367}
2368
Sadik Armagan701d9a02019-09-04 15:16:18 +01002369bool HalPolicy::ConvertSqrt(const Operation& operation, const Model& model, ConversionData& data)
2370{
2371 ALOGV("hal_1_2::HalPolicy::ConvertSqrt()");
Teresa Charlin8f6429d2019-10-01 13:10:15 +01002372 ActivationDescriptor desc;
2373 desc.m_Function = ActivationFunction::Sqrt;
Sadik Armagan701d9a02019-09-04 15:16:18 +01002374
2375 return ::ConvertToActivation<hal_1_2::HalPolicy>(operation, __func__, desc, model, data);
2376}
2377
Mike Kelly46272802019-08-14 17:00:48 +01002378bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
2379{
Sadik Armagan701d9a02019-09-04 15:16:18 +01002380 ALOGV("hal_1_2::HalPolicy::ConvertSqueeze()");
Mike Kelly46272802019-08-14 17:00:48 +01002381 return ::ConvertSqueeze<hal_1_2::HalPolicy>(operation, model, data);
2382}
2383
2384bool HalPolicy::ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
2385{
Sadik Armagan701d9a02019-09-04 15:16:18 +01002386 ALOGV("hal_1_2::HalPolicy::ConvertStridedSlice()");
Mike Kelly46272802019-08-14 17:00:48 +01002387 return ::ConvertStridedSlice<hal_1_2::HalPolicy>(operation, model, data);
2388}
2389
2390bool HalPolicy::ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
2391{
Sadik Armagan701d9a02019-09-04 15:16:18 +01002392 ALOGV("hal_1_2::HalPolicy::ConvertTranspose()");
Mike Kelly46272802019-08-14 17:00:48 +01002393 return ::ConvertTranspose<hal_1_2::HalPolicy>(operation, model, data);
2394}
2395
Aron Virginas-Tar8b991682019-07-31 12:54:59 +01002396bool HalPolicy::ConvertTransposeConv2d(const Operation& operation, const Model& model, ConversionData& data)
David Monahan613b49c2019-06-27 11:37:47 +01002397{
2398 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
2399
2400 if (!input.IsValid())
2401 {
2402 return Fail("%s: Operation has invalid inputs", __func__);
2403 }
2404
2405 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
2406
2407 if (!output)
2408 {
2409 return Fail("%s: Could not read output 0", __func__);
2410 }
2411
Teresa Charlin8f6429d2019-10-01 13:10:15 +01002412 const TensorInfo& inputInfo = input.GetTensorInfo();
2413 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
David Monahan613b49c2019-06-27 11:37:47 +01002414 if (IsDynamicTensor(outputInfo))
2415 {
2416 return Fail("%s: Dynamic output tensors are not supported", __func__);
2417 }
2418
2419 // ArmNN does not currently support non-fixed weights or bias
2420 // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
2421 const Operand* weightsOperand = GetInputOperand<hal_1_2::HalPolicy>(operation, 1, model);
2422
2423 if (weightsOperand == nullptr)
2424 {
2425 return Fail("%s: Operand is invalid", __func__);
2426 }
Teresa Charlin8f6429d2019-10-01 13:10:15 +01002427 TransposeConvolution2dDescriptor desc;
2428 desc.m_DataLayout = DataLayout::NHWC;
David Monahan613b49c2019-06-27 11:37:47 +01002429
2430 // Determine whether padding is implicit or explicit
2431 bool implicitPadding = operation.inputs.size() == 9;
2432
2433 if (implicitPadding )
2434 {
2435 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 8, model, data);
2436 }
2437 else
2438 {
2439 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 10, model, data);
2440 }
2441
2442 armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
2443 unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
2444 unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
2445
Teresa Charlin8f6429d2019-10-01 13:10:15 +01002446 const PermutationVector OHWIToOIHW = {0, 2, 3, 1};
David Monahan613b49c2019-06-27 11:37:47 +01002447
2448 // The shape of the weight is [depth_out, filter_height, filter_width, depth_in].
2449 // We have to permute it to OIHW if the data layout is NCHW.
Teresa Charlin8f6429d2019-10-01 13:10:15 +01002450 const ConstTensorPin weightsPin = (desc.m_DataLayout == DataLayout::NCHW) ?
David Monahan613b49c2019-06-27 11:37:47 +01002451 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data, OHWIToOIHW) :
2452 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
2453
2454 // Bias is a 1D tensor
2455 const ConstTensorPin biasPin =
2456 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);
2457
2458 if (!weightsPin.IsValid())
2459 {
2460 return Fail("%s: Operation has invalid weights", __func__);
2461 }
2462
2463 if (!biasPin.IsValid())
2464 {
2465 return Fail("%s: Operation has invalid biases", __func__);
2466 }
2467
Teresa Charlin8f6429d2019-10-01 13:10:15 +01002468 ConstTensor weights = weightsPin.GetConstTensor();
2469 ConstTensor bias = biasPin.GetConstTensor();
David Monahan613b49c2019-06-27 11:37:47 +01002470 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
2471
2472 ActivationFn activation;
2473
2474 if (implicitPadding)
2475 {
Sadik Armagan3e3003e2019-08-13 12:54:34 +01002476 int32_t strideX{0};
2477 int32_t strideY{0};
2478 int32_t padLeft{0};
2479 int32_t padRight{0};
2480 int32_t padTop{0};
2481 int32_t padBottom{0};
2482
David Monahan613b49c2019-06-27 11:37:47 +01002483 android::nn::PaddingScheme paddingScheme;
2484 if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 4, paddingScheme, model, data) ||
Sadik Armagan3e3003e2019-08-13 12:54:34 +01002485 !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, strideX, model, data) ||
2486 !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, strideY, model, data) ||
David Monahan613b49c2019-06-27 11:37:47 +01002487 !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 7, activation, model, data))
2488 {
2489 return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
2490 }
2491
2492 const uint32_t kernelX = weights.GetShape()[widthIndex];
2493 const uint32_t kernelY = weights.GetShape()[heightIndex];
Narumol Prangnawaratc8bdb392019-08-01 15:51:44 +01002494 const uint32_t outputX = outputInfo.GetShape()[widthIndex];
2495 const uint32_t outputY = outputInfo.GetShape()[heightIndex];
David Monahan613b49c2019-06-27 11:37:47 +01002496
Narumol Prangnawaratc8bdb392019-08-01 15:51:44 +01002497 CalcPaddingTransposeConv(outputX, kernelX, desc.m_StrideX, padLeft, padRight, paddingScheme);
2498 CalcPaddingTransposeConv(outputY, kernelY, desc.m_StrideY, padTop, padBottom, paddingScheme);
2499
2500 // NOTE: The Android NN API allows for negative padding values in TransposeConv2d,
2501 // but Arm NN only supports values >= 0
2502 if (padLeft < 0 || padRight < 0 || padTop < 0 || padBottom < 0)
2503 {
2504 return Fail("%s: Negative padding values are not supported", __func__);
2505 }
2506
Sadik Armagan3e3003e2019-08-13 12:54:34 +01002507 desc.m_StrideX = boost::numeric_cast<uint32_t>(strideX);
2508 desc.m_StrideY = boost::numeric_cast<uint32_t>(strideY);
Narumol Prangnawaratc8bdb392019-08-01 15:51:44 +01002509 desc.m_PadLeft = boost::numeric_cast<uint32_t>(padLeft);
2510 desc.m_PadRight = boost::numeric_cast<uint32_t>(padRight);
2511 desc.m_PadTop = boost::numeric_cast<uint32_t>(padTop);
2512 desc.m_PadBottom = boost::numeric_cast<uint32_t>(padBottom);
David Monahan613b49c2019-06-27 11:37:47 +01002513 }
2514 else if (operation.inputs.size() == 11)
2515 {
2516 // explicit padding
2517 if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
2518 !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
2519 !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
2520 !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
2521 !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
2522 !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
2523 !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 9, activation, model, data))
2524 {
2525 return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
2526 }
2527 }
2528 else
2529 {
2530 return Fail("%s: Unsupported number of operation inputs", __func__);
2531 }
2532
2533 desc.m_BiasEnabled = true;
Teresa Charlin8f6429d2019-10-01 13:10:15 +01002534 Optional<TensorInfo> biases(bias.GetInfo());
David Monahan613b49c2019-06-27 11:37:47 +01002535
2536 bool isSupported = false;
2537 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2538 IsTransposeConvolution2dSupported,
2539 data.m_Backends,
2540 isSupported,
2541 inputInfo,
2542 outputInfo,
2543 desc,
2544 weights.GetInfo(),
2545 biases);
2546 if (!isSupported)
2547 {
2548 return false;
2549 }
2550
Teresa Charlin8f6429d2019-10-01 13:10:15 +01002551 IConnectableLayer* startLayer =
2552 data.m_Network->AddTransposeConvolution2dLayer(desc, weights, Optional<ConstTensor>(bias));
David Monahan613b49c2019-06-27 11:37:47 +01002553 if (!startLayer)
2554 {
2555 return Fail("%s: AddTransposeConvolution2dLayer failed", __func__);
2556 }
2557
Teresa Charlin8f6429d2019-10-01 13:10:15 +01002558 IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
David Monahan613b49c2019-06-27 11:37:47 +01002559 if (!endLayer)
2560 {
2561 return Fail("%s: ProcessActivation failed", __func__);
2562 }
2563
2564 input.Connect(startLayer->GetInputSlot(0));
2565
2566 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
2567}
2568
Mike Kellyb5fdf382019-06-11 16:35:25 +01002569} // namespace hal_1_2
Matteo Martincigh17ffff32019-06-27 14:12:55 +01002570} // namespace armnn_driver