//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "HalPolicy.hpp"

#include "Utils.hpp"

#include "../1.0/HalPolicy.hpp"
#include "../1.1/HalPolicy.hpp"

#include <DataLayoutIndexed.hpp>
#include <Half.hpp>

#include <cmath>

namespace armnn_driver
{
namespace hal_1_2
{

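// Operations defined by an earlier HAL version keep the same enum values in
// V1_2, so a static_cast of the operation type is enough to recognise them.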
bool HandledByV1_0(V1_2::OperationType operationType)
{
    switch (static_cast<V1_0::OperationType>(operationType))
    {
        case V1_0::OperationType::ADD:
        case V1_0::OperationType::CONCATENATION:
        case V1_0::OperationType::DEPTH_TO_SPACE:
        case V1_0::OperationType::DEQUANTIZE:
        case V1_0::OperationType::EMBEDDING_LOOKUP:
        case V1_0::OperationType::FLOOR:
        case V1_0::OperationType::FULLY_CONNECTED:
        case V1_0::OperationType::HASHTABLE_LOOKUP:
        case V1_0::OperationType::L2_NORMALIZATION:
        case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
        case V1_0::OperationType::LOGISTIC:
        case V1_0::OperationType::LSH_PROJECTION:
        case V1_0::OperationType::LSTM:
        case V1_0::OperationType::MUL:
        case V1_0::OperationType::RESHAPE:
        case V1_0::OperationType::RNN:
        case V1_0::OperationType::SVDF:
        case V1_0::OperationType::OEM_OPERATION:
            return true;
        default:
            return false;
    }
}

bool HandledByV1_1(V1_2::OperationType operationType)
{
    if (HandledByV1_0(operationType))
    {
        return true;
    }
    switch (static_cast<V1_1::OperationType>(operationType))
    {
        case V1_1::OperationType::BATCH_TO_SPACE_ND:
        case V1_1::OperationType::DIV:
        case V1_1::OperationType::MEAN:
        case V1_1::OperationType::SPACE_TO_BATCH_ND:
        case V1_1::OperationType::SQUEEZE:
        case V1_1::OperationType::STRIDED_SLICE:
        case V1_1::OperationType::SUB:
        case V1_1::OperationType::TRANSPOSE:
            return true;
        default:
            return false;
    }
}

bool HandledByV1_0(const V1_2::Operation& operation)
{
    return HandledByV1_0(operation.type);
}

bool HandledByV1_1(const V1_2::Operation& operation)
{
    return HandledByV1_1(operation.type);
}

V1_0::OperationType CastToV1_0(V1_2::OperationType type)
{
    return static_cast<V1_0::OperationType>(type);
}

V1_1::OperationType CastToV1_1(V1_2::OperationType type)
{
    return static_cast<V1_1::OperationType>(type);
}

V1_0::Operation ConvertToV1_0(const V1_2::Operation& operation)
{
    V1_0::Operation op;
    op.type = CastToV1_0(operation.type);
    op.inputs = operation.inputs;
    op.outputs = operation.outputs;
    return op;
}

V1_1::Operation ConvertToV1_1(const V1_2::Operation& operation)
{
    V1_1::Operation op;
    op.type = CastToV1_1(operation.type);
    op.inputs = operation.inputs;
    op.outputs = operation.outputs;
    return op;
}

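// Delegate to the 1.0 or 1.1 policy when both the operation and the model can
// be expressed in that HAL version; only V1_2-specific cases are handled below.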
bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
{
    if (HandledByV1_0(operation) && compliantWithV1_0(model))
    {
        hal_1_0::HalPolicy::Operation v10Operation = ConvertToV1_0(operation);
        hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);

        return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
    }

    if (HandledByV1_1(operation) && compliantWithV1_1(model))
    {
        hal_1_1::HalPolicy::Operation v11Operation = ConvertToV1_1(operation);
        hal_1_1::HalPolicy::Model v11Model = convertToV1_1(model);

        return hal_1_1::HalPolicy::ConvertOperation(v11Operation, v11Model, data);
    }

    switch (operation.type)
    {
        case V1_2::OperationType::AVERAGE_POOL_2D:
            return ConvertAveragePool2d(operation, model, data);
        case V1_2::OperationType::CONV_2D:
            return ConvertConv2d(operation, model, data);
        case V1_2::OperationType::DEPTHWISE_CONV_2D:
            return ConvertDepthwiseConv2d(operation, model, data);
        case V1_2::OperationType::L2_POOL_2D:
            return ConvertL2Pool2d(operation, model, data);
        case V1_2::OperationType::MAX_POOL_2D:
            return ConvertMaxPool2d(operation, model, data);
        case V1_2::OperationType::MAXIMUM:
            return ConvertMaximum(operation, model, data);
        case V1_2::OperationType::MINIMUM:
            return ConvertMinimum(operation, model, data);
        case V1_2::OperationType::PAD:
            return ConvertPad(operation, model, data);
        case V1_2::OperationType::PAD_V2:
            return ConvertPadV2(operation, model, data);
        case V1_2::OperationType::PRELU:
            return ConvertPrelu(operation, model, data);
        case V1_2::OperationType::RELU:
            return ConvertReLu(operation, model, data);
        case V1_2::OperationType::RELU1:
            return ConvertReLu1(operation, model, data);
        case V1_2::OperationType::RELU6:
            return ConvertReLu6(operation, model, data);
        case V1_2::OperationType::RESIZE_BILINEAR:
            return ConvertResize(operation, model, data, armnn::ResizeMethod::Bilinear);
        case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR:
            return ConvertResize(operation, model, data, armnn::ResizeMethod::NearestNeighbor);
        case V1_2::OperationType::SOFTMAX:
            return ConvertSoftmax(operation, model, data);
        case V1_2::OperationType::SPACE_TO_DEPTH:
            return ConvertSpaceToDepth(operation, model, data);
        case V1_2::OperationType::TANH:
            return ConvertTanH(operation, model, data);
        default:
            return Fail("%s: Operation type %s not supported in ArmnnDriver",
                        __func__, toString(operation.type).c_str());
    }
}

bool HalPolicy::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertAveragePool2d()");
    return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);
}

bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertConv2d()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit
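    // (NNAPI CONV_2D: the implicit-padding form has 7 required inputs and the
    // explicit-padding form has 10; a BOOL at index 7 can only be the optional
    // data layout flag of the implicit form, never the INT32 stride of the
    // explicit form.)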
    bool implicitPadding = operation.inputs.size() == 7 ||
        (operation.inputs.size() >= 8 &&
        GetInputOperand<hal_1_2::HalPolicy>(operation, 7, model)->type == OperandType::BOOL);

    if (implicitPadding)
    {
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 7, model, data);
    }
    else if (operation.inputs.size() >= 10)
    {
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 10, model, data);
    }

    const armnn::PermutationVector OHWIToOIHW = {0, 2, 3, 1};

    // ArmNN does not currently support non-fixed weights or bias
    // The NNAPI filter is always OHWI [depth_out, filter_height, filter_width, depth_in] but ArmNN expects the
    // filter's height and width indices to match the input's height and width indices so we permute it to OIHW if
    // the DataLayout is NCHW
    const ConstTensorPin weightsPin = (desc.m_DataLayout == armnn::DataLayout::NCHW) ?
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data, OHWIToOIHW) :
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 6, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 8, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
        unsigned int widthIndex  = dataLayoutIndexed.GetWidthIndex();
        unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
        const uint32_t kernelX = weights.GetShape()[widthIndex];
        const uint32_t kernelY = weights.GetShape()[heightIndex];
        const uint32_t inputX  = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY  = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 10)
    {
        // explicit padding
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 9, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 11, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}

bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertDepthwiseConv2d()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const Operand* weightsOperand = GetInputOperand<hal_1_2::HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit
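    // (NNAPI DEPTHWISE_CONV_2D: the implicit-padding form has 8 required inputs
    // and the explicit-padding form has 11; a BOOL at index 8 can only be the
    // optional data layout flag of the implicit form.)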
    bool implicitPadding = operation.inputs.size() == 8 ||
        (operation.inputs.size() >= 9 &&
        GetInputOperand<hal_1_2::HalPolicy>(operation, 8, model)->type == OperandType::BOOL);

    // Look ahead to find the optional DataLayout, if present
    const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
    desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, dataLayoutFlagIndex, model, data);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    unsigned int widthIndex    = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex   = dataLayoutIndexed.GetHeightIndex();

    // Reinterpret weight data as [ H, W, I, M ]
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[channelsIndex],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  HWIMToMIHW,
                                                                  &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 7, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX  = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY  = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 11)
    {
        // explicit padding
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 10, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}

bool HalPolicy::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertL2Pool2d()");
    return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::L2, model, data);
}

bool HalPolicy::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertMaxPool2d()");
    return ConvertPooling2d<hal_1_2::HalPolicy>(operation, __func__, armnn::PoolingAlgorithm::Max, model, data);
}

bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertMaximum()");

    LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMaximumSupported,
                               data.m_Backends,
                               isSupported,
                               input0.GetTensorInfo(),
                               input1.GetTensorInfo(),
                               outInfo);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
    assert(layer != nullptr);
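    // BroadcastTensor handles any rank mismatch between the two inputs before
    // connecting them to the layer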
    BroadcastTensor(input0, input1, layer, *data.m_Network);

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertMinimum()");

    LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMinimumSupported,
                               data.m_Backends,
                               isSupported,
                               input0.GetTensorInfo(),
                               input1.GetTensorInfo(),
                               outputInfo);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
    assert(layer != nullptr);
    BroadcastTensor(input0, input1, layer, *data.m_Network);

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertPad()");
    return ::ConvertPad<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertPadV2()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    armnn::PadDescriptor descriptor;
    if (!ConvertPaddings<hal_1_2::HalPolicy>(operation, model, data, rank, descriptor))
    {
        return Fail("%s: Could not convert paddings", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Determine type of padding value
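    // (PAD_V2 expects the scalar pad value at input 2 to match the input tensor:
    // FLOAT16 for float16 tensors, FLOAT32 for float32 and INT32 for quantized.)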
    OperandType operandType0;
    OperandType operandType2;

    if (!GetOperandType<hal_1_2::HalPolicy>(operation, 0, model, operandType0) ||
        !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // Read value to use for padding
    if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
    {
        armnn::Half f16PadValue;
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 2, operandType2, f16PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
        }

        descriptor.m_PadValue = f16PadValue;
    }
    else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
    {
        if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, descriptor.m_PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
        }
    }
    else if (operandType0 == OperandType::TENSOR_QUANT8_ASYMM && operandType2 == OperandType::INT32)
    {
        int32_t intPadValue = 0;
        if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, intPadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (INT32)", __func__);
        }
        descriptor.m_PadValue = intPadValue;
    }
    else
    {
        return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPadSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertPrelu()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    LayerInputHandle alpha = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);

    if (!input.IsValid() || !alpha.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& alphaInfo  = alpha.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPreluSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               alphaInfo,
                               outputInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPreluLayer();

    if (!layer)
    {
        return Fail("%s: AddPreluLayer failed", __func__);
    }

    BroadcastTensor(input, alpha, layer, *data.m_Network);

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu()");
    return ::ConvertReLu<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu1()");
    return ::ConvertReLu1<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu6()");
    return ::ConvertReLu6<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertResize(const Operation& operation,
                              const Model& model,
                              ConversionData& data,
                              armnn::ResizeMethod resizeMethod)
{
    ALOGV("hal_1_2::HalPolicy::ConvertResize()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::ResizeDescriptor descriptor;
    descriptor.m_Method     = resizeMethod;
    descriptor.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 3, model, data);

    OperandType operandType1;
    OperandType operandType2;

    if (!GetOperandType<hal_1_2::HalPolicy>(operation, 1, model, operandType1) ||
        !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (operandType1 != operandType2)
    {
        return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
    }

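    // The target size is given either as INT32 width/height (resize by shape)
    // or as FLOAT32 scale factors applied to the input size (resize by scale)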
    if (operandType1 == OperandType::INT32)
    {
        // Case 1: resizing by shape
        int32_t targetWidth  = 0;
        int32_t targetHeight = 0;

        if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 1, targetWidth, model, data) ||
            !GetInputInt32<hal_1_2::HalPolicy>(operation, 2, targetHeight, model, data))
        {
            return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
        }

        if (targetWidth < 0 || targetHeight < 0)
        {
            return Fail("%s: Operation has invalid inputs for resizing by shape. "
                        "Target width/height cannot be < 0", __func__);
        }

        descriptor.m_TargetWidth  = static_cast<uint32_t>(targetWidth);
        descriptor.m_TargetHeight = static_cast<uint32_t>(targetHeight);
    }
    else if (operandType1 == OperandType::FLOAT32)
    {
        // Case 2: resizing by scale
        float widthScale  = 1.0f;
        float heightScale = 1.0f;

        if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, widthScale, model, data) ||
            !GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, heightScale, model, data))
        {
            return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
        }

        const armnn::TensorShape& inputShape = inputInfo.GetShape();
        armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);

        float width  = inputShape[dataLayoutIndexed.GetWidthIndex()];
        float height = inputShape[dataLayoutIndexed.GetHeightIndex()];

        descriptor.m_TargetWidth  = std::floor(width  * widthScale);
        descriptor.m_TargetHeight = std::floor(height * heightScale);
    }
    else
    {
        // NOTE: FLOAT16 scales are not supported
        return false;
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsResizeSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);

    assert(layer != nullptr);

    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertSpaceToDepth()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::SpaceToDepthDescriptor desc;

    GetInputScalar<hal_1_2::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);

    if (desc.m_BlockSize <= 1)
    {
        return Fail("%s: Block size must be at least 1 in all dimensions", __func__);
    }

    desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 2, model, data);

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsSpaceToDepthSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertSoftmax()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::SoftmaxDescriptor desc;
    if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, desc.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

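    // HAL 1.2 adds an optional INT32 axis (input 2); per the NNAPI spec it
    // defaults to -1, i.e. the last dimension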
    if (operation.inputs.size() > 2 && !GetInputScalar<hal_1_2::HalPolicy>(operation,
                                                                           2,
                                                                           HalPolicy::OperandType::INT32,
                                                                           desc.m_Axis,
                                                                           model,
                                                                           data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsSoftmaxSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertTanH()");
    return ::ConvertTanH<hal_1_2::HalPolicy>(operation, model, data);
}

} // namespace hal_1_2
} // namespace armnn_driver