blob: 575ae2b2171ee3b1ccf87af1c6aa481c2c0b9f44 [file] [log] [blame]
Mike Kellyb5fdf382019-06-11 16:35:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "HalPolicy.hpp"
7
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +01008#include "Utils.hpp"
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01009
Mike Kellyb5fdf382019-06-11 16:35:25 +010010#include "../1.0/HalPolicy.hpp"
11#include "../1.1/HalPolicy.hpp"
12
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010013#include <DataLayoutIndexed.hpp>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +010014#include <Half.hpp>
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010015
16#include <cmath>
17
Mike Kellyb5fdf382019-06-11 16:35:25 +010018namespace armnn_driver
19{
20namespace hal_1_2
21{
22
// Returns true if the given HAL 1.2 operation type is one that already existed
// in HAL 1.0, i.e. it can be delegated to the 1.0 policy for conversion.
// Relies on the NN HAL guarantee that the 1.2 enum values for these operations
// are numerically identical to their 1.0 counterparts, so a static_cast is safe.
bool HandledByV1_0(V1_2::OperationType operationType)
{
    switch (static_cast<V1_0::OperationType>(operationType))
    {
        // Whitelist of operations the 1.0 policy handles.
        case V1_0::OperationType::ADD:
        case V1_0::OperationType::DEPTH_TO_SPACE:
        case V1_0::OperationType::DEQUANTIZE:
        case V1_0::OperationType::EMBEDDING_LOOKUP:
        case V1_0::OperationType::FLOOR:
        case V1_0::OperationType::FULLY_CONNECTED:
        case V1_0::OperationType::HASHTABLE_LOOKUP:
        case V1_0::OperationType::L2_NORMALIZATION:
        case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
        case V1_0::OperationType::LOGISTIC:
        case V1_0::OperationType::LSH_PROJECTION:
        case V1_0::OperationType::MUL:
        case V1_0::OperationType::RESHAPE:
        case V1_0::OperationType::RNN:
        case V1_0::OperationType::SVDF:
        case V1_0::OperationType::OEM_OPERATION:
            return true;
        default:
            return false;
    }
}
48
// Returns true if the given HAL 1.2 operation type can be delegated to the
// 1.1 policy: either it was already handled by 1.0, or it is one of the
// operations introduced in HAL 1.1 listed below. As with HandledByV1_0, the
// static_cast relies on the 1.2/1.1 enum values being numerically aligned.
bool HandledByV1_1(V1_2::OperationType operationType)
{
    if (HandledByV1_0(operationType))
    {
        return true;
    }
    switch (static_cast<V1_1::OperationType>(operationType))
    {
        // Operations added in HAL 1.1 that the 1.1 policy handles.
        case V1_1::OperationType::DIV:
        case V1_1::OperationType::MEAN:
        case V1_1::OperationType::SQUEEZE:
        case V1_1::OperationType::STRIDED_SLICE:
        case V1_1::OperationType::TRANSPOSE:
            return true;
        default:
            return false;
    }
}
67
// Convenience overload: checks the operation's type field.
bool HandledByV1_0(const V1_2::Operation& operation)
{
    return HandledByV1_0(operation.type);
}
72
// Convenience overload: checks the operation's type field.
bool HandledByV1_1(const V1_2::Operation& operation)
{
    return HandledByV1_1(operation.type);
}
77
// Narrows a 1.2 operation type to the 1.0 enum. Only valid for types for
// which HandledByV1_0 returns true (enum values are numerically aligned).
V1_0::OperationType CastToV1_0(V1_2::OperationType type)
{
    return static_cast<V1_0::OperationType>(type);
}
82
// Narrows a 1.2 operation type to the 1.1 enum. Only valid for types for
// which HandledByV1_1 returns true (enum values are numerically aligned).
V1_1::OperationType CastToV1_1(V1_2::OperationType type)
{
    return static_cast<V1_1::OperationType>(type);
}
87
88V1_0::Operation ConvertToV1_0(const V1_2::Operation& operation)
89{
90 V1_0::Operation op;
91 op.type = CastToV1_0(operation.type);
92 op.inputs = operation.inputs;
93 op.outputs = operation.outputs;
94 return op;
95}
96
97V1_1::Operation ConvertToV1_1(const V1_2::Operation& operation)
98{
99 V1_1::Operation op;
100 op.type = CastToV1_1(operation.type);
101 op.inputs = operation.inputs;
102 op.outputs = operation.outputs;
103 return op;
104}
105
// Entry point for converting a single HAL 1.2 operation into ArmNN layers.
// Delegation strategy: if the operation type predates HAL 1.2 AND the whole
// model is expressible in the older schema (compliantWithV1_x), hand the
// operation down to the older policy; otherwise convert it here.
// Returns false (via Fail) for unsupported operation types.
bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
{
    // Operations that already existed in HAL 1.0.
    if (HandledByV1_0(operation) && compliantWithV1_0(model))
    {
        hal_1_0::HalPolicy::Operation v10Operation = ConvertToV1_0(operation);
        hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);

        return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
    }

    // Operations introduced in HAL 1.1.
    if (HandledByV1_1(operation) && compliantWithV1_1(model))
    {
        hal_1_1::HalPolicy::Operation v11Operation = ConvertToV1_1(operation);
        hal_1_1::HalPolicy::Model v11Model = convertToV1_1(model);

        return hal_1_1::HalPolicy::ConvertOperation(v11Operation, v11Model, data);
    }

    // Operations converted directly at the HAL 1.2 level.
    switch (operation.type)
    {
        case V1_2::OperationType::AVERAGE_POOL_2D:
            return ConvertAveragePool2d(operation, model, data);
        case V1_2::OperationType::BATCH_TO_SPACE_ND:
            return ConvertBatchToSpaceNd(operation, model, data);
        case V1_2::OperationType::CONCATENATION:
            return ConvertConcatenation(operation, model, data);
        case V1_2::OperationType::CONV_2D:
            return ConvertConv2d(operation, model, data);
        case V1_2::OperationType::DEPTHWISE_CONV_2D:
            return ConvertDepthwiseConv2d(operation, model, data);
        case V1_2::OperationType::L2_POOL_2D:
            return ConvertL2Pool2d(operation, model, data);
        case V1_2::OperationType::MAX_POOL_2D:
            return ConvertMaxPool2d(operation, model, data);
        case V1_2::OperationType::MAXIMUM:
            return ConvertMaximum(operation, model, data);
        case V1_2::OperationType::MINIMUM:
            return ConvertMinimum(operation, model, data);
        case V1_2::OperationType::PAD:
            return ConvertPad(operation, model, data);
        case V1_2::OperationType::PAD_V2:
            return ConvertPadV2(operation, model, data);
        case V1_2::OperationType::PRELU:
            return ConvertPrelu(operation, model, data);
        case V1_2::OperationType::QUANTIZE:
            return ConvertQuantize(operation, model, data);
        case V1_2::OperationType::RELU:
            return ConvertReLu(operation, model, data);
        case V1_2::OperationType::RELU1:
            return ConvertReLu1(operation, model, data);
        case V1_2::OperationType::RELU6:
            return ConvertReLu6(operation, model, data);
        // Both resize flavours share one converter, parameterised on the method.
        case V1_2::OperationType::RESIZE_BILINEAR:
            return ConvertResize(operation, model, data, armnn::ResizeMethod::Bilinear);
        case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR:
            return ConvertResize(operation, model, data, armnn::ResizeMethod::NearestNeighbor);
        case V1_2::OperationType::TRANSPOSE_CONV_2D:
            return ConvertTransposeConv2d(operation, model, data);
        case V1_2::OperationType::SOFTMAX:
            return ConvertSoftmax(operation, model, data);
        case V1_2::OperationType::SPACE_TO_BATCH_ND :
            return ConvertSpaceToBatchNd(operation, model, data);
        case V1_2::OperationType::SPACE_TO_DEPTH:
            return ConvertSpaceToDepth(operation, model, data);
        case V1_2::OperationType::SUB:
            return ConvertSub(operation, model, data);
        case V1_2::OperationType::TANH:
            return ConvertTanH(operation, model, data);
        case V1_2::OperationType::LSTM:
            return ConvertLstm(operation, model, data);
        default:
            return Fail("%s: Operation type %s not supported in ArmnnDriver",
                        __func__, toString(operation.type).c_str());
    }
}
181
// Thin wrapper: average pooling via the shared templated pooling converter.
bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertAveragePool2d()");
    return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
}
187
// Thin wrapper: delegates to the shared (global-namespace) converter template.
bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertBatchToSpaceNd()");
    return ::ConvertBatchToSpaceNd<hal_1_2::HalPolicy>(operation, model, data);
}
193
// Thin wrapper: delegates to the shared (global-namespace) converter template.
bool HalPolicy::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertConcatenation()");
    return ::ConvertConcatenation<hal_1_2::HalPolicy>(operation, model, data);
}
199
// Converts an NNAPI CONV_2D (HAL 1.2) operation into an ArmNN Convolution2d
// layer followed by an optional fused-activation layer.
//
// Operand layout (per the NNAPI spec):
//   0: input tensor, 1: weights (OHWI), 2: bias,
//   then either the implicit-padding form (padding scheme + strides + fused
//   activation, optionally followed by layout flag and dilations) or the
//   explicit-padding form (4 pad values + strides + fused activation, ...).
bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertConv2d()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Output shape must be known up front; dynamic shapes are rejected.
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit.
    // Implicit form has 7 inputs, or >= 8 where input 7 is the BOOL layout flag
    // (in the explicit form input 7 is an INT32 stride instead).
    bool implicitPadding = operation.inputs.size() == 7 ||
        (operation.inputs.size() >= 8 &&
        GetInputOperand<hal_1_2::HalPolicy>(operation, 7, model)->type == OperandType::BOOL);

    // The optional data-layout flag sits at a different index in each form.
    if (implicitPadding)
    {
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 7, model, data);
    }
    else if (operation.inputs.size() >= 10)
    {
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 10, model, data);
    }

    const armnn::PermutationVector OHWIToOIHW = {0, 2, 3, 1};

    // ArmNN does not currently support non-fixed weights or bias
    // The NNAPI filter is always OHWI [depth_out, filter_height, filter_width, depth_in] but ArmNN expects the
    // filter's height and width indices to match the input's height and width indices so we permute it to OIHW if
    // the DataLayout is NCHW
    const ConstTensorPin weightsPin = (desc.m_DataLayout == armnn::DataLayout::NCHW) ?
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data, OHWIToOIHW) :
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input_scale * weights_scale.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        // Implicit padding: read scheme, strides, fused activation and
        // optional dilations (starting at input 8), then compute the pad
        // amounts from input/kernel geometry.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 6, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 8, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        // Width/height indices are layout-dependent; by this point the weights
        // tensor has been permuted to match the chosen data layout.
        armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
        unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
        unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
        const uint32_t kernelX = weights.GetShape()[widthIndex];
        const uint32_t kernelY = weights.GetShape()[heightIndex];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);

    }
    else if (operation.inputs.size() >= 10)
    {
        // explicit padding
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 9, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 11, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Ask the configured backends whether this convolution is supported.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer produces the final output.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}
351
// Converts an NNAPI DEPTHWISE_CONV_2D (HAL 1.2) operation into an ArmNN
// DepthwiseConvolution2d layer followed by an optional fused-activation layer.
//
// NNAPI supplies the filter as [1, H, W, I * M]; ArmNN expects [M, I, H, W],
// so the weights are reinterpreted and permuted below before use.
bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertDepthwiseConv2d()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Output shape must be known up front; dynamic shapes are rejected.
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const Operand* weightsOperand = GetInputOperand<hal_1_2::HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit.
    // Implicit form has 8 inputs, or >= 9 where input 8 is the BOOL layout flag
    // (in the explicit form input 8 is an INT32 stride instead).
    bool implicitPadding = operation.inputs.size() == 8 ||
        (operation.inputs.size() >= 9 &&
        GetInputOperand<hal_1_2::HalPolicy>(operation, 8, model)->type == OperandType::BOOL);

    // Look ahead to find the optional DataLayout, if present
    const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
    desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, dataLayoutFlagIndex, model, data);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();

    // Reinterpret weight data as [ H, W, I, M ]
    // (I comes from the input's channel count; M = (I*M)/I is the depth multiplier.)
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[channelsIndex],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  HWIMToMIHW,
                                                                  &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input_scale * weights_scale.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        // Implicit padding: read scheme, strides, fused activation and optional
        // dilations (starting at input 9), then derive the pad amounts.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 7, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        // Weights are now in [M, I, H, W] order, so H is index 2 and W is index 3.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 11)
    {
        // explicit padding
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 10, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Ask the configured backends whether this depthwise convolution is supported.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer produces the final output.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}
517
// Thin wrapper: L2 pooling via the shared templated pooling converter.
bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertL2Pool2d()");
    return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
}
523
// Thin wrapper: max pooling via the shared templated pooling converter.
bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertMaxPool2d()");
    return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
}
529
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100530bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
531{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100532 ALOGV("hal_1_2::HalPolicy::ConvertMaximum()");
533
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100534 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
535 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
536
537 if (!input0.IsValid() || !input1.IsValid())
538 {
539 return Fail("%s: Operation has invalid inputs", __func__);
540 }
541
542 const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
543 if (!outputOperand)
544 {
545 return Fail("%s: Could not read output", __func__);
546 }
547
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100548 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100549 if (IsDynamicTensor(outInfo))
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100550 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100551 return Fail("%s: Dynamic output tensors are not supported", __func__);
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100552 }
553
Aron Virginas-Tard7593232019-07-16 13:17:06 +0100554 bool isSupported = false;
555 FORWARD_LAYER_SUPPORT_FUNC(__func__,
556 IsMaximumSupported,
557 data.m_Backends,
558 isSupported,
559 input0.GetTensorInfo(),
560 input1.GetTensorInfo(),
561 outInfo);
562
563 if (!isSupported)
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100564 {
565 return false;
566 }
567
568 armnn::IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
569 assert(layer != nullptr);
570 BroadcastTensor(input0, input1, layer, *data.m_Network);
571
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100572 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100573}
574
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100575bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
576{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100577 ALOGV("hal_1_2::HalPolicy::ConvertMinimum()");
578
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100579 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
580 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
581
582 if (!input0.IsValid() || !input1.IsValid())
583 {
584 return Fail("%s: Operation has invalid inputs", __func__);
585 }
586
587 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
588 if (!output)
589 {
590 return Fail("%s: Could not read output 0", __func__);
591 }
592
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100593 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100594 if (IsDynamicTensor(outputInfo))
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100595 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100596 return Fail("%s: Dynamic output tensors are not supported", __func__);
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100597 }
598
599 bool isSupported = false;
600 FORWARD_LAYER_SUPPORT_FUNC(__func__,
601 IsMinimumSupported,
602 data.m_Backends,
603 isSupported,
604 input0.GetTensorInfo(),
605 input1.GetTensorInfo(),
606 outputInfo);
607
608 if (!isSupported)
609 {
610 return false;
611 }
612
613 armnn::IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
614 assert(layer != nullptr);
615 BroadcastTensor(input0, input1, layer, *data.m_Network);
616
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100617 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100618}
619
// Thin wrapper: delegates PAD to the shared (global-namespace) converter template.
bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertPad()");
    return ::ConvertPad<hal_1_2::HalPolicy>(operation, model, data);
}
625
// Converts an NNAPI PAD_V2 operation (pad with an explicit scalar pad value)
// into an ArmNN Pad layer.
//
// Operands: 0 = input tensor, 1 = paddings, 2 = pad value. The type of the
// pad value operand must match the input tensor type (FLOAT16 for
// TENSOR_FLOAT16, FLOAT32 for TENSOR_FLOAT32, INT32 for TENSOR_QUANT8_ASYMM).
bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertPadV2()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    // Parse the paddings operand (input 1) into the descriptor's pad list.
    armnn::PadDescriptor descriptor;
    if (!ConvertPaddings<hal_1_2::HalPolicy>(operation, model, data, rank, descriptor))
    {
        return Fail("%s: Could not convert paddings", __func__);
    }

    // Output shape must be known up front; dynamic shapes are rejected.
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Determine type of padding value
    OperandType operandType0;
    OperandType operandType2;

    if (!GetOperandType<hal_1_2::HalPolicy>(operation, 0, model, operandType0) ||
        !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // Read value to use for padding
    if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
    {
        armnn::Half f16PadValue;
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 2, operandType2, f16PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
        }

        descriptor.m_PadValue = f16PadValue;
    }
    else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
    {
        if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, descriptor.m_PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
        }
    }
    else if (operandType0 == OperandType::TENSOR_QUANT8_ASYMM && operandType2 == OperandType::INT32)
    {
        // Quantized input: pad value arrives as an INT32 and is stored on the
        // (float) descriptor field via implicit conversion.
        int32_t intPadValue = 0;
        if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, intPadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (INT32)", __func__);
        }
        descriptor.m_PadValue = intPadValue;
    }
    else
    {
        // Input tensor type and pad-value type did not form a valid pair.
        return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
    }

    // Query backend support before adding anything to the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPadSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}
719
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100720bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
721{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100722 ALOGV("hal_1_2::HalPolicy::ConvertPrelu()");
723
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100724 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
725 LayerInputHandle alpha = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
726
727 if (!input.IsValid() || !alpha.IsValid())
728 {
729 return Fail("%s: Operation has invalid inputs", __func__);
730 }
731
732 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
733
734 if (!output)
735 {
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100736 return Fail("%s: Could not read output", __func__);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100737 }
738
739 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
740 const armnn::TensorInfo& alphaInfo = alpha.GetTensorInfo();
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100741 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100742
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100743 if (IsDynamicTensor(outputInfo))
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100744 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100745 return Fail("%s: Dynamic output tensors are not supported", __func__);
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100746 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100747
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100748 bool isSupported = false;
749 FORWARD_LAYER_SUPPORT_FUNC(__func__,
750 IsPreluSupported,
751 data.m_Backends,
752 isSupported,
753 inputInfo,
754 alphaInfo,
755 outputInfo);
756 if (!isSupported)
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100757 {
758 return false;
759 }
760
761 armnn::IConnectableLayer* const layer = data.m_Network->AddPreluLayer();
762
763 if (!layer)
764 {
765 return Fail("%s: AddPreluLayer failed", __func__);
766 }
767
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100768 BroadcastTensor(input, alpha, layer, *data.m_Network);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100769
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100770 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100771}
772
Sadik Armagan5a476a82019-07-30 09:43:18 +0100773bool HalPolicy::ConvertQuantize(const Operation& operation, const Model& model, ConversionData& data)
774{
775 ALOGV("hal_1_2::HalPolicy::ConvertQuantize()");
776
777 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
778 if (!input.IsValid())
779 {
780 return Fail("%s: Operation has invalid input", __func__);
781 }
782
783 const Operand* const outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
784 if (!outputOperand)
785 {
786 return Fail("%s: Operation has invalid outputs", __func__);
787 }
788
789 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
790 if (IsDynamicTensor(outputInfo))
791 {
792 return Fail("%s: Dynamic output tensors are not supported", __func__);
793 }
794
795 bool isSupported = false;
796 FORWARD_LAYER_SUPPORT_FUNC(__func__,
797 IsQuantizeSupported,
798 data.m_Backends,
799 isSupported,
800 input.GetTensorInfo(),
801 outputInfo);
802 if (!isSupported)
803 {
804 return false;
805 }
806
807 armnn::IConnectableLayer* const layer = data.m_Network->AddQuantizeLayer();
808 assert(layer != nullptr);
809 input.Connect(layer->GetInputSlot(0));
810
811 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
812}
813
Sadik Armagan61113162019-07-25 09:09:40 +0100814bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
815{
816 ALOGV("hal_1_2::HalPolicy::ConvertReLu()");
817 return ::ConvertReLu<hal_1_2::HalPolicy>(operation, model, data);
818}
819
820bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
821{
822 ALOGV("hal_1_2::HalPolicy::ConvertReLu1()");
823 return ::ConvertReLu1<hal_1_2::HalPolicy>(operation, model, data);
824}
825
826bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
827{
828 ALOGV("hal_1_2::HalPolicy::ConvertReLu6()");
829 return ::ConvertReLu6<hal_1_2::HalPolicy>(operation, model, data);
830}
831
// Converts an NNAPI RESIZE_BILINEAR / RESIZE_NEAREST_NEIGHBOR operation (the method is
// selected by the caller via resizeMethod) into an armnn Resize layer.
// Input 0 is the tensor to resize; inputs 1 and 2 are either INT32 (explicit target
// width/height) or FLOAT32 (width/height scale factors); optional input 3 selects the
// data layout. Returns false (via Fail) on any invalid or unsupported configuration.
bool HalPolicy::ConvertResize(const Operation& operation,
                              const Model& model,
                              ConversionData& data,
                              armnn::ResizeMethod resizeMethod)
{
    ALOGV("hal_1_2::HalPolicy::ConvertResize()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // The output shape must be known now; dynamic shapes cannot be converted.
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::ResizeDescriptor descriptor;
    descriptor.m_Method     = resizeMethod;
    descriptor.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 3, model, data);

    // Inputs 1 and 2 together select the resize mode: both INT32 (by shape) or
    // both FLOAT32 (by scale). Mixed types are rejected below.
    OperandType operandType1;
    OperandType operandType2;

    if (!GetOperandType<hal_1_2::HalPolicy>(operation, 1, model, operandType1) ||
        !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (operandType1 != operandType2)
    {
        return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
    }

    if (operandType1 == OperandType::INT32)
    {
        // Case 1: resizing by shape — inputs 1/2 are the explicit output width/height.
        int32_t targetWidth  = 0;
        int32_t targetHeight = 0;

        if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 1, targetWidth, model, data) ||
            !GetInputInt32<hal_1_2::HalPolicy>(operation, 2, targetHeight, model, data))
        {
            return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
        }

        // Negative dimensions are meaningless; reject before the unsigned cast below.
        if (targetWidth < 0 || targetHeight < 0)
        {
            return Fail("%s: Operation has invalid inputs for resizing by shape. "
                        "Target width/height cannot be < 0", __func__);
        }

        descriptor.m_TargetWidth = static_cast<uint32_t>(targetWidth);
        descriptor.m_TargetHeight = static_cast<uint32_t>(targetHeight);
    }
    else if (operandType1 == OperandType::FLOAT32)
    {
        // Case 2: resizing by scale — the target size is derived from the input
        // spatial dimensions multiplied by the given scale factors.
        float widthScale  = 1.0f;
        float heightScale = 1.0f;

        if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, widthScale, model, data) ||
            !GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, heightScale, model, data))
        {
            return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
        }

        const armnn::TensorShape& inputShape = inputInfo.GetShape();
        // DataLayoutIndexed maps the configured layout (NHWC/NCHW) to dimension indices.
        armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);

        float width  = inputShape[dataLayoutIndexed.GetWidthIndex()];
        float height = inputShape[dataLayoutIndexed.GetHeightIndex()];

        // Truncate toward zero, matching floor semantics for the (positive) sizes.
        descriptor.m_TargetWidth  = std::floor(width  * widthScale);
        descriptor.m_TargetHeight = std::floor(height * heightScale);
    }
    else
    {
        // NOTE: FLOAT16 scales are not supported
        return false;
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsResizeSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);

    assert(layer != nullptr);

    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}
948
Finn Williamsd74c5052019-07-30 17:06:00 +0100949bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
950{
951 ALOGV("hal_1_2::HalPolicy::ConvertSpaceToBatchNd()");
952 return ::ConvertSpaceToBatchNd<hal_1_2::HalPolicy>(operation, model, data);
953}
954
Keith Davisa6bc52f2019-06-26 09:39:49 +0100955bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
956{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100957 ALOGV("hal_1_2::HalPolicy::ConvertSpaceToDepth()");
Keith Davisa6bc52f2019-06-26 09:39:49 +0100958
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100959 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
Keith Davisa6bc52f2019-06-26 09:39:49 +0100960 if (!input.IsValid() )
961 {
962 return Fail("%s: Operation has invalid inputs", __func__);
963 }
964
965 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
966 unsigned int rank = inputInfo.GetNumDimensions();
Keith Davisa6bc52f2019-06-26 09:39:49 +0100967 if (rank != 4)
968 {
969 return Fail("%s: Only inputs with rank 4 are supported", __func__);
970 }
971
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100972 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
973 if (!output)
974 {
975 return Fail("%s: Could not read output 0", __func__);
976 }
977
978 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
979 if (IsDynamicTensor(outputInfo))
980 {
981 return Fail("%s: Dynamic output tensors are not supported", __func__);
982 }
983
Keith Davisa6bc52f2019-06-26 09:39:49 +0100984 armnn::SpaceToDepthDescriptor desc;
985
986 GetInputScalar<hal_1_2::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
987
988 if (desc.m_BlockSize <= 1)
989 {
990 return Fail("%s: Block size must be at least 1 in all dimensions");
991 }
992
993 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 2, model, data);
994
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100995 bool isSupported = false;
996 FORWARD_LAYER_SUPPORT_FUNC(__func__,
997 IsSpaceToDepthSupported,
998 data.m_Backends,
999 isSupported,
1000 inputInfo,
1001 outputInfo,
1002 desc);
1003 if (!isSupported)
Keith Davisa6bc52f2019-06-26 09:39:49 +01001004 {
1005 return false;
1006 }
1007
1008 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
1009 assert(layer != nullptr);
1010 input.Connect(layer->GetInputSlot(0));
1011
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001012 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Keith Davisa6bc52f2019-06-26 09:39:49 +01001013}
1014
Francis Murtagh074c25a2019-07-22 16:40:57 +01001015bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
1016{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +01001017 ALOGV("hal_1_2::HalPolicy::ConvertSoftmax()");
1018
Francis Murtagh074c25a2019-07-22 16:40:57 +01001019 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
1020 if (!input.IsValid())
1021 {
1022 return Fail("%s: Operation has invalid inputs", __func__);
1023 }
1024
1025 const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
1026 if (!outputOperand)
1027 {
1028 return Fail("%s: Operation has no outputs", __func__);
1029 }
1030
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001031 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +01001032 if (IsDynamicTensor(outputInfo))
Francis Murtagh074c25a2019-07-22 16:40:57 +01001033 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001034 return Fail("%s: Dynamic output tensors are not supported", __func__);
Francis Murtagh074c25a2019-07-22 16:40:57 +01001035 }
1036
1037 armnn::SoftmaxDescriptor desc;
1038 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, desc.m_Beta, model, data))
1039 {
1040 return Fail("%s: Operation has invalid inputs", __func__);
1041 }
1042
1043 if (operation.inputs.size() > 2 && !GetInputScalar<hal_1_2::HalPolicy>(operation,
1044 2,
1045 HalPolicy::OperandType::INT32,
1046 desc.m_Axis,
1047 model,
1048 data))
1049 {
1050 return Fail("%s: Operation has invalid inputs", __func__);
1051 }
1052
1053 bool isSupported = false;
1054 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1055 IsSoftmaxSupported,
1056 data.m_Backends,
1057 isSupported,
1058 input.GetTensorInfo(),
1059 outputInfo,
1060 desc);
1061 if (!isSupported)
1062 {
1063 return false;
1064 }
1065
1066 armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
1067 assert(layer != nullptr);
1068 input.Connect(layer->GetInputSlot(0));
1069
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +01001070 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Francis Murtagh074c25a2019-07-22 16:40:57 +01001071}
1072
Mike Kelly0a879362019-07-29 16:56:31 +01001073bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
1074{
1075 ALOGV("hal_1_2::HalPolicy::ConvertSub()");
1076 return ::ConvertSub<hal_1_2::HalPolicy>(operation, model, data);
1077}
1078
Sadik Armagan61113162019-07-25 09:09:40 +01001079bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
1080{
1081 ALOGV("hal_1_2::HalPolicy::ConvertTanH()");
1082 return ::ConvertTanH<hal_1_2::HalPolicy>(operation, model, data);
1083}
1084
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001085bool HalPolicy::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
1086{
1087 // Inputs:
1088 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
1089 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
1090 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
1091 if (!input.IsValid())
1092 {
1093 return Fail("%s: Could not read input 0: input", __func__);
1094 }
1095 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
1096 LayerInputHandle outputStateIn = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 18, model, data);
1097 if (!outputStateIn.IsValid())
1098 {
1099 return Fail("%s: Could not read input 18: outputStateIn", __func__);
1100 }
1101 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
1102 LayerInputHandle cellStateIn = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 19, model, data);
1103 if (!cellStateIn.IsValid())
1104 {
1105 return Fail("%s: Could not read input 19: cellStateIn", __func__);
1106 }
1107
1108 // Get the mandatory input tensors:
1109 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1110 // [num_units, input_size].
1111 const ConstTensorPin inputToForgetWeightsPin =
1112 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);
1113 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1114 // [num_units, input_size].
1115 const ConstTensorPin inputToCellWeightsPin =
1116 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 3, model, data);
1117 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1118 // [num_units, input_size].
1119 const ConstTensorPin inputToOutputWeightsPin =
1120 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 4, model, data);
1121 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1122 // [num_units, output_size].
1123 const ConstTensorPin recurrentToForgetWeightsPin =
1124 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 6, model, data);
1125 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1126 // [num_units, output_size].
1127 const ConstTensorPin recurrentToCellWeightsPin =
1128 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 7, model, data);
1129 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1130 // [num_units, output_size].
1131 const ConstTensorPin recurrentToOutputWeightsPin =
1132 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 8, model, data);
1133 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1134 const ConstTensorPin forgetGateBiasPin =
1135 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 13, model, data);
1136 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1137 const ConstTensorPin cellBiasPin =
1138 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 14, model, data);
1139 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1140 const ConstTensorPin outputGateBiasPin =
1141 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 15, model, data);
1142
1143 if (!inputToForgetWeightsPin.IsValid() ||
1144 !inputToCellWeightsPin.IsValid() ||
1145 !inputToOutputWeightsPin.IsValid() ||
1146 !recurrentToForgetWeightsPin.IsValid() ||
1147 !recurrentToCellWeightsPin.IsValid() ||
1148 !recurrentToOutputWeightsPin.IsValid() ||
1149 !forgetGateBiasPin.IsValid() ||
1150 !cellBiasPin.IsValid() ||
1151 !outputGateBiasPin.IsValid())
1152 {
1153 return Fail("%s: Operation has invalid tensor inputs", __func__);
1154 }
1155
1156 // Get the optional input tensors:
1157 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1158 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
1159 const ConstTensorPin inputToInputWeightsPin =
1160 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1161 1,
1162 model,
1163 data,
1164 g_DontPermute,
1165 nullptr,
1166 true);
1167
1168 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1169 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
1170 // “num_units”), or the second dimension of the “projection_weights”, if defined.
1171 const ConstTensorPin recurrentToInputWeightsPin =
1172 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1173 5,
1174 model,
1175 data,
1176 g_DontPermute,
1177 nullptr,
1178 true);
1179
1180 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1181 const ConstTensorPin cellToInputWeightsPin =
1182 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1183 9,
1184 model,
1185 data,
1186 g_DontPermute,
1187 nullptr,
1188 true);
1189
1190 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1191 const ConstTensorPin cellToForgetWeightsPin =
1192 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1193 10,
1194 model,
1195 data,
1196 g_DontPermute,
1197 nullptr,
1198 true);
1199
1200 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1201 const ConstTensorPin cellToOutputWeightsPin =
1202 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1203 11,
1204 model,
1205 data,
1206 g_DontPermute,
1207 nullptr,
1208 true);
1209
1210 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
1211 const ConstTensorPin inputGateBiasPin =
1212 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1213 12,
1214 model,
1215 data,
1216 g_DontPermute,
1217 nullptr,
1218 true);
1219
1220 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
1221 // [output_size, num_units].
1222 const ConstTensorPin projectionWeightsPin =
1223 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1224 16,
1225 model,
1226 data,
1227 g_DontPermute,
1228 nullptr,
1229 true);
1230
1231 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
1232 const ConstTensorPin projectionBiasPin =
1233 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1234 17,
1235 model,
1236 data,
1237 g_DontPermute,
1238 nullptr,
1239 true);
1240
1241 if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
1242 (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
1243 (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
1244 (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
1245 (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
1246 (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
1247 (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
1248 (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
1249 {
1250 return Fail("%s: Operation has invalid tensor inputs", __func__);
1251 }
1252
1253 // Get the mandatory input scalars (actually 1-D tensors of size 1):
1254 // 20: The activation function: A value indicating the activation function:
1255 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
1256 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
1257 // If set to 0.0 then clipping is disabled.
1258 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
1259 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
1260 ActivationFn activation;
1261 float cellClip;
1262 float projClip;
1263 if (!GetInputActivationFunctionFromTensor<hal_1_2::HalPolicy>(operation, 20, activation, model, data) ||
1264 !GetInputScalar<hal_1_2::HalPolicy>(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
1265 !GetInputScalar<hal_1_2::HalPolicy>(operation, 22, OperandType::FLOAT32, projClip, model, data))
1266 {
1267 return Fail("%s: Operation has invalid scalar inputs", __func__);
1268 }
1269
1270 // Get the normalization tensors
1271 // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
1272 // Used to rescale normalized inputs to activation at input gate.
1273 const ConstTensorPin inputLayerNormWeightsPin =
1274 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1275 23,
1276 model,
1277 data,
1278 g_DontPermute,
1279 nullptr,
1280 true);
1281
1282 // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
1283 // Used to rescale normalized inputs to activation at forget gate.
1284 const ConstTensorPin forgetLayerNormWeightsPin =
1285 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1286 24,
1287 model,
1288 data,
1289 g_DontPermute,
1290 nullptr,
1291 true);
1292
1293 // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
1294 // Used to rescale normalized inputs to activation at cell gate.
1295 const ConstTensorPin cellLayerNormWeightsPin =
1296 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1297 25,
1298 model,
1299 data,
1300 g_DontPermute,
1301 nullptr,
1302 true);
1303
1304 // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
1305 // Used to rescale normalized inputs to activation at output gate.
1306 const ConstTensorPin outputLayerNormWeightsPin =
1307 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
1308 26,
1309 model,
1310 data,
1311 g_DontPermute,
1312 nullptr,
1313 true);
1314
1315 // Outputs:
1316 // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
1317 // with CIFG, or [batch_size, num_units * 3] without CIFG.
1318 const Operand* scratchBuffer = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
1319 if (!scratchBuffer)
1320 {
1321 return Fail("%s: Could not read output 0: scratchBuffer", __func__);
1322 }
1323 // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
1324 const Operand* outputStateOut = GetOutputOperand<hal_1_2::HalPolicy>(operation, 1, model);
1325 if (!outputStateOut)
1326 {
1327 return Fail("%s: Could not read output 1: outputStateOut", __func__);
1328 }
1329 // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
1330 const Operand* cellStateOut = GetOutputOperand<hal_1_2::HalPolicy>(operation, 2, model);
1331 if (!cellStateOut)
1332 {
1333 return Fail("%s: Could not read output 2: cellStateOut", __func__);
1334 }
1335 // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
1336 // effectively the same as the current “output state (out)” value.
1337 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 3, model);
1338 if (!output)
1339 {
1340 return Fail("%s: Could not read output 3: output", __func__);
1341 }
1342
1343 // set the params structure for the AddLstmLayer call
1344 armnn::LstmInputParams params;
1345 params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
1346 params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
1347 params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
1348 params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
1349 params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
1350 params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
1351 params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
1352 params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
1353 params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
1354 params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
1355 params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
1356 params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
1357 params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
1358 params.m_CellBias = cellBiasPin.GetConstTensorPtr();
1359 params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
1360 params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
1361 params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
1362 params.m_InputLayerNormWeights = inputLayerNormWeightsPin.GetConstTensorPtr();
1363 params.m_ForgetLayerNormWeights = forgetLayerNormWeightsPin.GetConstTensorPtr();
1364 params.m_CellLayerNormWeights = cellLayerNormWeightsPin.GetConstTensorPtr();
1365 params.m_OutputLayerNormWeights = outputLayerNormWeightsPin.GetConstTensorPtr();
1366
1367 // set the layer descriptor
1368 armnn::LstmDescriptor desc;
1369 desc.m_ActivationFunc = activation;
1370 desc.m_ClippingThresCell = cellClip;
1371 desc.m_ClippingThresProj = projClip;
1372 desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
1373 params.m_RecurrentToInputWeights == nullptr ||
1374 params.m_InputGateBias == nullptr);
1375 desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
1376 params.m_CellToOutputWeights != nullptr);
1377 desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
1378 desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr ||
1379 params.m_ForgetLayerNormWeights != nullptr ||
1380 params.m_CellLayerNormWeights != nullptr ||
1381 params.m_OutputLayerNormWeights != nullptr);
1382
1383 // validate the optional input groups
1384 if (desc.m_CifgEnabled &&
1385 (params.m_InputToInputWeights != nullptr ||
1386 params.m_RecurrentToInputWeights != nullptr ||
1387 params.m_InputGateBias != nullptr))
1388 {
1389 return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
1390 " and input gate bias must be provided", __func__);
1391 }
1392
1393 if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
1394 {
1395 return Fail("%s: projection bias should not be provided without projection weights", __func__);
1396 }
1397
1398 if (desc.m_PeepholeEnabled &&
1399 (params.m_CellToForgetWeights == nullptr ||
1400 params.m_CellToOutputWeights == nullptr ||
1401 (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
1402 {
1403 return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
1404 " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
1405 }
1406
1407 if (desc.m_LayerNormEnabled &&
1408 (params.m_ForgetLayerNormWeights == nullptr ||
1409 params.m_CellLayerNormWeights == nullptr ||
1410 params.m_OutputLayerNormWeights == nullptr ||
1411 (!desc.m_CifgEnabled && params.m_InputLayerNormWeights == nullptr)))
1412 {
1413 return Fail("%s: All, or none, of forget-norm weights, cell-norm weights and output-norm weights must be"
1414 " provided and, if CIFG is not enabled, input-norm weights must also be provided", __func__);
1415 }
1416
1417 // Check if the layer is supported
1418 // Inputs
1419 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1420 const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
1421 const armnn::TensorInfo& cellStateInInfo = cellStateIn.GetTensorInfo();
1422
1423 // Outputs
1424 const armnn::TensorInfo& scratchBufferInfo = GetTensorInfoForOperand(*scratchBuffer);
1425 const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
1426 const armnn::TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
1427 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1428
Ferran Balaguera4a629a2019-07-30 10:16:13 +01001429 if (IsDynamicTensor(scratchBufferInfo) ||
1430 IsDynamicTensor(outputStateOutInfo) ||
1431 IsDynamicTensor(cellStateOutInfo) ||
1432 IsDynamicTensor(outputInfo))
1433 {
1434 return Fail("%s: Dynamic output tensors are not supported", __func__);
1435 }
1436
Ferran Balaguerb2397fd2019-07-25 12:12:39 +01001437 // Basic parameters
1438 armnn::LstmInputParamsInfo paramsInfo;
1439 paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
1440 paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
1441 paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
1442 paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
1443 paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
1444 paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
1445 paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
1446 paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
1447 paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
1448
1449 // Optional parameters
1450 if(!desc.m_CifgEnabled)
1451 {
1452 paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
1453 paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
1454 if (params.m_CellToInputWeights != nullptr)
1455 {
1456 paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
1457 }
1458 paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
1459 }
1460
1461 if(desc.m_ProjectionEnabled)
1462 {
1463 paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
1464 if (params.m_ProjectionBias != nullptr)
1465 {
1466 paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
1467 }
1468 }
1469
1470 if(desc.m_PeepholeEnabled)
1471 {
1472 paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
1473 paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
1474 }
1475
1476 if (desc.m_LayerNormEnabled)
1477 {
1478 if(!desc.m_CifgEnabled)
1479 {
1480 paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
1481 }
1482 paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
1483 paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
1484 paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
1485 }
1486
1487 bool isSupported = false;
1488 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1489 IsLstmSupported,
1490 data.m_Backends,
1491 isSupported,
1492 inputInfo,
1493 outputStateInInfo,
1494 cellStateInInfo,
1495 scratchBufferInfo,
1496 outputStateOutInfo,
1497 cellStateOutInfo,
1498 outputInfo,
1499 desc,
1500 paramsInfo);
1501 if (!isSupported)
1502 {
1503 return false;
1504 }
1505
1506 // Add the layer
1507 armnn::IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");
1508
1509 input.Connect(layer->GetInputSlot(0));
1510 outputStateIn.Connect(layer->GetInputSlot(1));
1511 cellStateIn.Connect(layer->GetInputSlot(2));
1512
1513 return (SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, 0, model, data) &&
1514 SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 1, *layer, 1, model, data) &&
1515 SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 2, *layer, 2, model, data) &&
1516 SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 3, *layer, 3, model, data));
1517}
1518
Aron Virginas-Tar8b991682019-07-31 12:54:59 +01001519bool HalPolicy::ConvertTransposeConv2d(const Operation& operation, const Model& model, ConversionData& data)
David Monahan613b49c2019-06-27 11:37:47 +01001520{
1521 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
1522
1523 if (!input.IsValid())
1524 {
1525 return Fail("%s: Operation has invalid inputs", __func__);
1526 }
1527
1528 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
1529
1530 if (!output)
1531 {
1532 return Fail("%s: Could not read output 0", __func__);
1533 }
1534
1535 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1536 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1537 if (IsDynamicTensor(outputInfo))
1538 {
1539 return Fail("%s: Dynamic output tensors are not supported", __func__);
1540 }
1541
1542 // ArmNN does not currently support non-fixed weights or bias
1543 // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
1544 const Operand* weightsOperand = GetInputOperand<hal_1_2::HalPolicy>(operation, 1, model);
1545
1546 if (weightsOperand == nullptr)
1547 {
1548 return Fail("%s: Operand is invalid", __func__);
1549 }
1550 armnn::TransposeConvolution2dDescriptor desc;
1551 desc.m_DataLayout = armnn::DataLayout::NHWC;
1552
1553 // Determine whether padding is implicit or explicit
1554 bool implicitPadding = operation.inputs.size() == 9;
1555
1556 if (implicitPadding )
1557 {
1558 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 8, model, data);
1559 }
1560 else
1561 {
1562 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 10, model, data);
1563 }
1564
1565 armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
1566 unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
1567 unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
1568
1569 const armnn::PermutationVector OHWIToOIHW = {0, 2, 3, 1};
1570
1571 // The shape of the weight is [depth_out, filter_height, filter_width, depth_in].
1572 // We have to permute it to OIHW if the data layout is NCHW.
1573 const ConstTensorPin weightsPin = (desc.m_DataLayout == armnn::DataLayout::NCHW) ?
1574 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data, OHWIToOIHW) :
1575 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
1576
1577 // Bias is a 1D tensor
1578 const ConstTensorPin biasPin =
1579 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);
1580
1581 if (!weightsPin.IsValid())
1582 {
1583 return Fail("%s: Operation has invalid weights", __func__);
1584 }
1585
1586 if (!biasPin.IsValid())
1587 {
1588 return Fail("%s: Operation has invalid biases", __func__);
1589 }
1590
1591 armnn::ConstTensor weights = weightsPin.GetConstTensor();
1592 armnn::ConstTensor bias = biasPin.GetConstTensor();
1593 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
1594
1595 ActivationFn activation;
1596
1597 if (implicitPadding)
1598 {
1599 android::nn::PaddingScheme paddingScheme;
1600 if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 4, paddingScheme, model, data) ||
1601 !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideX, model, data) ||
1602 !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_StrideY, model, data) ||
1603 !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 7, activation, model, data))
1604 {
1605 return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
1606 }
1607
1608 const uint32_t kernelX = weights.GetShape()[widthIndex];
1609 const uint32_t kernelY = weights.GetShape()[heightIndex];
1610 const uint32_t inputX = inputInfo.GetShape()[widthIndex];
1611 const uint32_t inputY = inputInfo.GetShape()[heightIndex];
1612
1613 CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
1614 CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
1615 }
1616 else if (operation.inputs.size() == 11)
1617 {
1618 // explicit padding
1619 if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
1620 !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
1621 !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
1622 !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
1623 !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
1624 !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
1625 !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 9, activation, model, data))
1626 {
1627 return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
1628 }
1629 }
1630 else
1631 {
1632 return Fail("%s: Unsupported number of operation inputs", __func__);
1633 }
1634
1635 desc.m_BiasEnabled = true;
1636 armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
1637
1638 bool isSupported = false;
1639 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1640 IsTransposeConvolution2dSupported,
1641 data.m_Backends,
1642 isSupported,
1643 inputInfo,
1644 outputInfo,
1645 desc,
1646 weights.GetInfo(),
1647 biases);
1648 if (!isSupported)
1649 {
1650 return false;
1651 }
1652
1653 armnn::IConnectableLayer* startLayer =
1654 data.m_Network->AddTransposeConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
1655 if (!startLayer)
1656 {
1657 return Fail("%s: AddTransposeConvolution2dLayer failed", __func__);
1658 }
1659
1660 armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
1661 if (!endLayer)
1662 {
1663 return Fail("%s: ProcessActivation failed", __func__);
1664 }
1665
1666 input.Connect(startLayer->GetInputSlot(0));
1667
1668 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
1669}
1670
Mike Kellyb5fdf382019-06-11 16:35:25 +01001671} // namespace hal_1_2
Matteo Martincigh17ffff32019-06-27 14:12:55 +01001672} // namespace armnn_driver