//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "HalPolicy.hpp"

#include "OutputShapeUtils.hpp"
#include "Utils.hpp"

#include "../1.0/HalPolicy.hpp"
#include "../1.1/HalPolicy.hpp"

#include <DataLayoutIndexed.hpp>
#include <Half.hpp>

#include <cmath>

namespace armnn_driver
{
namespace hal_1_2
{

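// A V1_2 model may contain operations that are fully expressible in the V1_0 or
// V1_1 HAL. The helpers below detect such operations so they can be forwarded to
// the older HalPolicy implementations. The static_casts rely on the NNAPI HAL
// keeping the numeric values of common OperationType enumerators stable across
// versions.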
bool HandledByV1_0(V1_2::OperationType operationType)
{
    switch (static_cast<V1_0::OperationType>(operationType))
    {
        case V1_0::OperationType::ADD:
        case V1_0::OperationType::AVERAGE_POOL_2D:
        case V1_0::OperationType::CONCATENATION:
        case V1_0::OperationType::DEPTH_TO_SPACE:
        case V1_0::OperationType::DEQUANTIZE:
        case V1_0::OperationType::EMBEDDING_LOOKUP:
        case V1_0::OperationType::FLOOR:
        case V1_0::OperationType::FULLY_CONNECTED:
        case V1_0::OperationType::HASHTABLE_LOOKUP:
        case V1_0::OperationType::L2_NORMALIZATION:
        case V1_0::OperationType::L2_POOL_2D:
        case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
        case V1_0::OperationType::LOGISTIC:
        case V1_0::OperationType::LSH_PROJECTION:
        case V1_0::OperationType::LSTM:
        case V1_0::OperationType::MAX_POOL_2D:
        case V1_0::OperationType::MUL:
        case V1_0::OperationType::RESHAPE:
        case V1_0::OperationType::RNN:
        case V1_0::OperationType::SVDF:
        case V1_0::OperationType::OEM_OPERATION:
            return true;
        default:
            return false;
    }
}

bool HandledByV1_1(V1_2::OperationType operationType)
{
    if (HandledByV1_0(operationType))
    {
        return true;
    }
    switch (static_cast<V1_1::OperationType>(operationType))
    {
        case V1_1::OperationType::BATCH_TO_SPACE_ND:
        case V1_1::OperationType::DIV:
        case V1_1::OperationType::MEAN:
        case V1_1::OperationType::SPACE_TO_BATCH_ND:
        case V1_1::OperationType::SQUEEZE:
        case V1_1::OperationType::STRIDED_SLICE:
        case V1_1::OperationType::SUB:
        case V1_1::OperationType::TRANSPOSE:
            return true;
        default:
            return false;
    }
}

bool HandledByV1_0(const V1_2::Operation& operation)
{
    return HandledByV1_0(operation.type);
}

bool HandledByV1_1(const V1_2::Operation& operation)
{
    return HandledByV1_1(operation.type);
}

V1_0::OperationType CastToV1_0(V1_2::OperationType type)
{
    return static_cast<V1_0::OperationType>(type);
}

V1_1::OperationType CastToV1_1(V1_2::OperationType type)
{
    return static_cast<V1_1::OperationType>(type);
}

V1_0::Operation ConvertToV1_0(const V1_2::Operation& operation)
{
    V1_0::Operation op;
    op.type = CastToV1_0(operation.type);
    op.inputs = operation.inputs;
    op.outputs = operation.outputs;
    return op;
}

V1_1::Operation ConvertToV1_1(const V1_2::Operation& operation)
{
    V1_1::Operation op;
    op.type = CastToV1_1(operation.type);
    op.inputs = operation.inputs;
    op.outputs = operation.outputs;
    return op;
}

bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
{
    if (HandledByV1_0(operation) && compliantWithV1_0(model))
    {
        hal_1_0::HalPolicy::Operation v10Operation = ConvertToV1_0(operation);
        hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);

        return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
    }

    if (HandledByV1_1(operation) && compliantWithV1_1(model))
    {
        hal_1_1::HalPolicy::Operation v11Operation = ConvertToV1_1(operation);
        hal_1_1::HalPolicy::Model v11Model = convertToV1_1(model);

        return hal_1_1::HalPolicy::ConvertOperation(v11Operation, v11Model, data);
    }

    switch (operation.type)
    {
        case V1_2::OperationType::CONV_2D:
            return ConvertConv2d(operation, model, data);
        case V1_2::OperationType::DEPTHWISE_CONV_2D:
            return ConvertDepthwiseConv2d(operation, model, data);
        case V1_2::OperationType::MAXIMUM:
            return ConvertMaximum(operation, model, data);
        case V1_2::OperationType::MINIMUM:
            return ConvertMinimum(operation, model, data);
        case V1_2::OperationType::PAD:
            return ConvertPad(operation, model, data);
        case V1_2::OperationType::PAD_V2:
            return ConvertPadV2(operation, model, data);
        case V1_2::OperationType::PRELU:
            return ConvertPrelu(operation, model, data);
        case V1_2::OperationType::RELU:
            return ConvertReLu(operation, model, data);
        case V1_2::OperationType::RELU1:
            return ConvertReLu1(operation, model, data);
        case V1_2::OperationType::RELU6:
            return ConvertReLu6(operation, model, data);
        case V1_2::OperationType::RESIZE_BILINEAR:
            return ConvertResize(operation, model, data, armnn::ResizeMethod::Bilinear);
        case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR:
            return ConvertResize(operation, model, data, armnn::ResizeMethod::NearestNeighbor);
        case V1_2::OperationType::SOFTMAX:
            return ConvertSoftmax(operation, model, data);
        case V1_2::OperationType::SPACE_TO_DEPTH:
            return ConvertSpaceToDepth(operation, model, data);
        case V1_2::OperationType::TANH:
            return ConvertTanH(operation, model, data);
        default:
            return Fail("%s: Operation type %s not supported in ArmnnDriver",
                        __func__, toString(operation.type).c_str());
    }
}

bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertConv2d()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

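    // NNAPI CONV_2D comes in two signatures: the implicit-padding form has 7
    // mandatory inputs (input, weights, bias, padding scheme, strides, fused
    // activation), while the explicit-padding form has 10 (individual pad
    // amounts instead of a scheme). Either form may be followed by an optional
    // data-layout BOOL and optional dilation factors, so the input count alone
    // is not decisive; input 7 being a BOOL identifies the implicit form.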
    // Determine whether padding is implicit or explicit
    bool implicitPadding = operation.inputs.size() == 7 ||
                           (operation.inputs.size() >= 8 &&
                           GetInputOperand<hal_1_2::HalPolicy>(operation, 7, model)->type == OperandType::BOOL);

    if (implicitPadding)
    {
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 7, model, data);
    }
    else if (operation.inputs.size() >= 10)
    {
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 10, model, data);
    }

    const armnn::PermutationVector OHWIToOIHW = {0, 2, 3, 1};

    // ArmNN does not currently support non-fixed weights or bias
    // The NNAPI filter is always OHWI [depth_out, filter_height, filter_width, depth_in] but ArmNN expects the
    // filter's height and width indices to match the input's height and width indices so we permute it to OIHW if
    // the DataLayout is NCHW
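    // For example, an OHWI filter of shape [32, 3, 3, 16] (32 output channels,
    // 3x3 kernel, 16 input channels) becomes OIHW [32, 16, 3, 3]: the mapping
    // {0, 2, 3, 1} sends source dimension i to destination dimension mapping[i].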
    const ConstTensorPin weightsPin = (desc.m_DataLayout == armnn::DataLayout::NCHW) ?
            ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data, OHWIToOIHW) :
            ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin =
            ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 6, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 8, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
        unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
        unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
        const uint32_t kernelX = weights.GetShape()[widthIndex];
        const uint32_t kernelY = weights.GetShape()[heightIndex];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

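        // With an implicit scheme the pad amounts are derived rather than given:
        // SAME pads just enough (dilation-aware) to keep the output size at
        // ceil(inputSize / stride), while VALID applies no padding at all.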
        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 10)
    {
        // explicit padding
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 9, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 11, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

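    // NNAPI 1.2 allows output operands with unspecified dimensions. When the
    // output shape is dynamic we infer it here from the input and filter shapes
    // plus the descriptor, since ArmNN needs fully specified tensor infos when
    // the network is built.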
    if (IsDynamicTensor(outputInfo))
    {
        try
        {
            ALOGD("Output shape not set, will infer from inputs");
            outputInfo.SetShape(InferConvolution2dOutputShape(inputInfo.GetShape(),
                                                              weights.GetInfo().GetShape(),
                                                              desc));
        }
        catch (armnn::Exception& e)
        {
            return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
        }
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
            data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *endLayer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertDepthwiseConv2d()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const Operand* weightsOperand = GetInputOperand<hal_1_2::HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit
    bool implicitPadding = operation.inputs.size() == 8 ||
                           (operation.inputs.size() >= 9 &&
                           GetInputOperand<hal_1_2::HalPolicy>(operation, 8, model)->type == OperandType::BOOL);

    // Look ahead to find the optional DataLayout, if present
    const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
    desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, dataLayoutFlagIndex, model, data);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();

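    // NNAPI stores depthwise filters as [1, H, W, I * M], where M is the channel
    // multiplier. For example, with I = 16 input channels a weights operand of
    // shape [1, 3, 3, 32] implies M = 2; it is reinterpreted below as
    // [H, W, I, M] = [3, 3, 16, 2] and then swizzled to the [M, I, H, W] layout
    // that ArmNN expects for depthwise weights.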
    // Reinterpret weight data as [ H, W, I, M ]
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[channelsIndex],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  HWIMToMIHW,
                                                                  &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 7, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 11)
    {
        // explicit padding
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 10, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        try
        {
            ALOGD("Output shape not set, will infer from inputs");
            outputInfo.SetShape(InferDepthwiseConvolution2dOutputShape(inputInfo.GetShape(),
                                                                       weights.GetInfo().GetShape(),
                                                                       desc));
        }
        catch (armnn::Exception& e)
        {
            return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
        }
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
            data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *endLayer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertMaximum()");

    LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Could not read output", __func__);
    }

    armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outInfo))
    {
        ALOGD("Output shape not set, will infer from inputs");
        outInfo.SetShape(InferMaximumOutputShape(input0.GetTensorInfo().GetShape(), input1.GetTensorInfo().GetShape()));
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMaximumSupported,
                               data.m_Backends,
                               isSupported,
                               input0.GetTensorInfo(),
                               input1.GetTensorInfo(),
                               outInfo);

    if (!isSupported)
    {
        return false;
    }

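    // BroadcastTensor handles NNAPI-style implicit broadcasting: when the two
    // inputs differ in rank it reshapes the lower-rank one (prepending
    // 1-dimensions) before connecting both inputs to the layer.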
    armnn::IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
    assert(layer != nullptr);
    BroadcastTensor(input0, input1, layer, *data.m_Network);

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outInfo));
}

bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertMinimum()");

    LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        ALOGD("Output shape not set, will infer from inputs");
        outputInfo.SetShape(InferMinimumOutputShape(input0.GetTensorInfo().GetShape(),
                                                    input1.GetTensorInfo().GetShape()));
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMinimumSupported,
                               data.m_Backends,
                               isSupported,
                               input0.GetTensorInfo(),
                               input1.GetTensorInfo(),
                               outputInfo);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
    assert(layer != nullptr);
    BroadcastTensor(input0, input1, layer, *data.m_Network);

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertPad()");
    return ::ConvertPad<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertPadV2()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    armnn::PadDescriptor descriptor;
    if (!ConvertPaddings<hal_1_2::HalPolicy>(operation, model, data, rank, descriptor))
    {
        return Fail("%s: Could not convert paddings", __func__);
    }

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        ALOGD("Output shape not set, will infer from inputs");
        outputInfo.SetShape(InferPadOutputShape(inputInfo.GetShape(), descriptor.m_PadList));
    }

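    // PAD_V2 carries the padding value in input 2 as a scalar whose type must
    // match the tensor being padded: FLOAT16 for TENSOR_FLOAT16 inputs, FLOAT32
    // for TENSOR_FLOAT32, and INT32 for TENSOR_QUANT8_ASYMM (where the value is
    // expressed in the quantized domain).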
    // Determine type of padding value
    OperandType operandType0;
    OperandType operandType2;

    if (!GetOperandType<hal_1_2::HalPolicy>(operation, 0, model, operandType0) ||
        !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // Read value to use for padding
    if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
    {
        armnn::Half f16PadValue;
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 2, operandType2, f16PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
        }

        descriptor.m_PadValue = f16PadValue;
    }
    else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
    {
        if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, descriptor.m_PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
        }
    }
    else if (operandType0 == OperandType::TENSOR_QUANT8_ASYMM && operandType2 == OperandType::INT32)
    {
        int32_t intPadValue = 0;
        if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, intPadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (INT32)", __func__);
        }
        descriptor.m_PadValue = intPadValue;
    }
    else
    {
        return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPadSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertPrelu()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    LayerInputHandle alpha = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);

    if (!input.IsValid() || !alpha.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& alphaInfo = alpha.GetTensorInfo();

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        ALOGD("Output shape not set, will infer from inputs");
        outputInfo.SetShape(InferPreluOutputShape(inputInfo.GetShape(), alphaInfo.GetShape()));
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPreluSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               alphaInfo,
                               outputInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPreluLayer();

    if (!layer)
    {
        return Fail("%s: AddPreluLayer failed", __func__);
    }

    BroadcastTensor(input, alpha, layer, *data.m_Network);

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu()");
    return ::ConvertReLu<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu1()");
    return ::ConvertReLu1<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu6()");
    return ::ConvertReLu6<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertResize(const Operation& operation,
                              const Model& model,
                              ConversionData& data,
                              armnn::ResizeMethod resizeMethod)
{
    ALOGV("hal_1_2::HalPolicy::ConvertResize()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);

    armnn::ResizeDescriptor descriptor;
    descriptor.m_Method = resizeMethod;
    descriptor.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 3, model, data);

    OperandType operandType1;
    OperandType operandType2;

    if (!GetOperandType<hal_1_2::HalPolicy>(operation, 1, model, operandType1) ||
        !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (operandType1 != operandType2)
    {
        return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
    }

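    // HAL 1.2 RESIZE_* takes the new size either as two INT32 scalars giving the
    // target width and height directly, or as two FLOAT32 scale factors that are
    // applied to the input width and height (rounded down).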
    if (operandType1 == OperandType::INT32)
    {
        // Case 1: resizing by shape
        int32_t targetWidth = 0;
        int32_t targetHeight = 0;

        if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 1, targetWidth, model, data) ||
            !GetInputInt32<hal_1_2::HalPolicy>(operation, 2, targetHeight, model, data))
        {
            return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
        }

        if (targetWidth < 0 || targetHeight < 0)
        {
            return Fail("%s: Operation has invalid inputs for resizing by shape. "
                        "Target width/height cannot be < 0", __func__);
        }

        descriptor.m_TargetWidth = static_cast<uint32_t>(targetWidth);
        descriptor.m_TargetHeight = static_cast<uint32_t>(targetHeight);
    }
    else if (operandType1 == OperandType::FLOAT32)
    {
        // Case 2: resizing by scale
        float widthScale = 1.0f;
        float heightScale = 1.0f;

        if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, widthScale, model, data) ||
            !GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, heightScale, model, data))
        {
            return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
        }

        const armnn::TensorShape& inputShape = inputInfo.GetShape();
        armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);

        float width = inputShape[dataLayoutIndexed.GetWidthIndex()];
        float height = inputShape[dataLayoutIndexed.GetHeightIndex()];

        descriptor.m_TargetWidth = std::floor(width * widthScale);
        descriptor.m_TargetHeight = std::floor(height * heightScale);
    }
    else
    {
        // NOTE: FLOAT16 scales are not supported
        return false;
    }

    if (IsDynamicTensor(outputInfo))
    {
        try
        {
            ALOGD("Output shape not set, will infer from inputs");
            outputInfo.SetShape(InferResizeOutputShape(inputInfo.GetShape(), descriptor));
        }
        catch (armnn::Exception& e)
        {
            return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
        }
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsResizeSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);

    assert(layer != nullptr);

    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertSpaceToDepth()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }

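    // SPACE_TO_DEPTH moves spatial blocks into the channel dimension: for
    // example, an NHWC input of shape [1, 4, 4, 1] with block size 2 becomes
    // [1, 2, 2, 4].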
    armnn::SpaceToDepthDescriptor desc;

    if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data))
    {
        return Fail("%s: Could not read input 1 (block size)", __func__);
    }

    if (desc.m_BlockSize <= 1)
    {
        return Fail("%s: Block size must be at least 1 in all dimensions", __func__);
    }

    desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 2, model, data);

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        try
        {
            ALOGD("Output shape not set, will infer from inputs");
            outputInfo.SetShape(InferSpaceToDepthOutputShape(inputInfo.GetShape(), desc));
        }
        catch (armnn::Exception& e)
        {
            return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
        }
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsSpaceToDepthSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertSoftmax()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo))
    {
        ALOGD("Output shape not set, will infer from input");
        outputInfo.SetShape(input.GetTensorInfo().GetShape());
    }

    armnn::SoftmaxDescriptor desc;
    if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, desc.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

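    // HAL 1.2 adds an optional third input: an INT32 axis selecting the
    // dimension along which softmax is computed (it defaults to the last
    // dimension when omitted).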
    if (operation.inputs.size() > 2 && !GetInputScalar<hal_1_2::HalPolicy>(operation,
                                                                           2,
                                                                           HalPolicy::OperandType::INT32,
                                                                           desc.m_Axis,
                                                                           model,
                                                                           data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsSoftmaxSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertTanH()");
    return ::ConvertTanH<hal_1_2::HalPolicy>(operation, model, data);
}

} // namespace hal_1_2
} // namespace armnn_driver