//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "HalPolicy.hpp"

#include "OutputShapeUtils.hpp"
#include "Utils.hpp"

#include "../1.0/HalPolicy.hpp"
#include "../1.1/HalPolicy.hpp"

#include <DataLayoutIndexed.hpp>
#include <Half.hpp>

#include <cmath>

namespace armnn_driver
{
namespace hal_1_2
{

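// Returns true if the given V1_2 operation maps onto an operation type that already
// existed in HAL 1.0, in which case it can be forwarded to the 1.0 policy.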
bool HandledByV1_0(V1_2::OperationType operationType)
{
    switch (static_cast<V1_0::OperationType>(operationType))
    {
        case V1_0::OperationType::ADD:
        case V1_0::OperationType::AVERAGE_POOL_2D:
        case V1_0::OperationType::CONCATENATION:
        case V1_0::OperationType::DEPTH_TO_SPACE:
        case V1_0::OperationType::DEQUANTIZE:
        case V1_0::OperationType::EMBEDDING_LOOKUP:
        case V1_0::OperationType::FLOOR:
        case V1_0::OperationType::FULLY_CONNECTED:
        case V1_0::OperationType::HASHTABLE_LOOKUP:
        case V1_0::OperationType::L2_NORMALIZATION:
        case V1_0::OperationType::L2_POOL_2D:
        case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
        case V1_0::OperationType::LOGISTIC:
        case V1_0::OperationType::LSH_PROJECTION:
        case V1_0::OperationType::LSTM:
        case V1_0::OperationType::MAX_POOL_2D:
        case V1_0::OperationType::MUL:
        case V1_0::OperationType::RESHAPE:
        case V1_0::OperationType::RNN:
        case V1_0::OperationType::SPACE_TO_DEPTH:
        case V1_0::OperationType::SVDF:
        case V1_0::OperationType::OEM_OPERATION:
            return true;
        default:
            return false;
    }
}

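// Returns true if the given V1_2 operation maps onto an operation type that already
// existed in HAL 1.1 (or 1.0), in which case it can be forwarded to the 1.1 policy.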
bool HandledByV1_1(V1_2::OperationType operationType)
{
    if (HandledByV1_0(operationType))
    {
        return true;
    }
    switch (static_cast<V1_1::OperationType>(operationType))
    {
        case V1_1::OperationType::BATCH_TO_SPACE_ND:
        case V1_1::OperationType::DIV:
        case V1_1::OperationType::MEAN:
        case V1_1::OperationType::SPACE_TO_BATCH_ND:
        case V1_1::OperationType::SQUEEZE:
        case V1_1::OperationType::STRIDED_SLICE:
        case V1_1::OperationType::SUB:
        case V1_1::OperationType::TRANSPOSE:
            return true;
        default:
            return false;
    }
}

bool HandledByV1_0(const V1_2::Operation& operation)
{
    return HandledByV1_0(operation.type);
}

bool HandledByV1_1(const V1_2::Operation& operation)
{
    return HandledByV1_1(operation.type);
}

V1_0::OperationType CastToV1_0(V1_2::OperationType type)
{
    return static_cast<V1_0::OperationType>(type);
}

V1_1::OperationType CastToV1_1(V1_2::OperationType type)
{
    return static_cast<V1_1::OperationType>(type);
}

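// Builds a HAL 1.0/1.1 operation from a V1_2 operation so it can be passed to an
// earlier policy; only the type needs converting, the input/output indices carry over.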
V1_0::Operation ConvertToV1_0(const V1_2::Operation& operation)
{
    V1_0::Operation op;
    op.type    = CastToV1_0(operation.type);
    op.inputs  = operation.inputs;
    op.outputs = operation.outputs;
    return op;
}

V1_1::Operation ConvertToV1_1(const V1_2::Operation& operation)
{
    V1_1::Operation op;
    op.type    = CastToV1_1(operation.type);
    op.inputs  = operation.inputs;
    op.outputs = operation.outputs;
    return op;
}

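// Entry point for HAL 1.2 models: operations that are expressible in an earlier HAL
// version (and whose model is compliant with it) are delegated to the corresponding
// policy; everything else is converted by the V1_2-specific handlers below.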
bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
{
    if (HandledByV1_0(operation) && compliantWithV1_0(model))
    {
        hal_1_0::HalPolicy::Operation v10Operation = ConvertToV1_0(operation);
        hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);

        return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
    }

    if (HandledByV1_1(operation) && compliantWithV1_1(model))
    {
        hal_1_1::HalPolicy::Operation v11Operation = ConvertToV1_1(operation);
        hal_1_1::HalPolicy::Model v11Model = convertToV1_1(model);

        return hal_1_1::HalPolicy::ConvertOperation(v11Operation, v11Model, data);
    }

    switch (operation.type)
    {
        case V1_2::OperationType::CONV_2D:
            return ConvertConv2d(operation, model, data);
        case V1_2::OperationType::DEPTHWISE_CONV_2D:
            return ConvertDepthwiseConv2d(operation, model, data);
        case V1_2::OperationType::MAXIMUM:
            return ConvertMaximum(operation, model, data);
        case V1_2::OperationType::MINIMUM:
            return ConvertMinimum(operation, model, data);
        case V1_2::OperationType::PAD:
            return ConvertPad(operation, model, data);
        case V1_2::OperationType::PAD_V2:
            return ConvertPadV2(operation, model, data);
        case V1_2::OperationType::PRELU:
            return ConvertPrelu(operation, model, data);
        case V1_2::OperationType::RELU:
            return ConvertReLu(operation, model, data);
        case V1_2::OperationType::RELU1:
            return ConvertReLu1(operation, model, data);
        case V1_2::OperationType::RELU6:
            return ConvertReLu6(operation, model, data);
        case V1_2::OperationType::RESIZE_BILINEAR:
            return ConvertResize(operation, model, data, armnn::ResizeMethod::Bilinear);
        case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR:
            return ConvertResize(operation, model, data, armnn::ResizeMethod::NearestNeighbor);
        case V1_2::OperationType::SOFTMAX:
            return ConvertSoftmax(operation, model, data);
        case V1_2::OperationType::SPACE_TO_DEPTH:
            return ConvertSpaceToDepth(operation, model, data);
        case V1_2::OperationType::TANH:
            return ConvertTanH(operation, model, data);
        default:
            return Fail("%s: Operation type %s not supported in ArmnnDriver",
                        __func__, toString(operation.type).c_str());
    }
}

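// Converts an NNAPI CONV_2D operation into an armnn Convolution2dLayer. Handles both
// the implicit-padding and explicit-padding operand layouts, including the optional
// data layout flag and dilation parameters that HAL 1.2 adds.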
bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertConv2d()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit
    bool implicitPadding = operation.inputs.size() == 7 ||
                           (operation.inputs.size() >= 8 &&
                            GetInputOperand<hal_1_2::HalPolicy>(operation, 7, model)->type == OperandType::BOOL);

    if (implicitPadding)
    {
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 7, model, data);
    }
    else if (operation.inputs.size() >= 10)
    {
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 10, model, data);
    }

    const armnn::PermutationVector OHWIToOIHW = {0, 2, 3, 1};

    // ArmNN does not currently support non-fixed weights or bias
    // The NNAPI filter is always OHWI [depth_out, filter_height, filter_width, depth_in] but ArmNN expects the
    // filter's height and width indices to match the input's height and width indices so we permute it to OIHW if
    // the DataLayout is NCHW
    const ConstTensorPin weightsPin = (desc.m_DataLayout == armnn::DataLayout::NCHW) ?
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data, OHWIToOIHW) :
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 6, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 8, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
        unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
        unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
        const uint32_t kernelX = weights.GetShape()[widthIndex];
        const uint32_t kernelY = weights.GetShape()[heightIndex];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 10)
    {
        // explicit padding
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 9, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 11, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    if (IsDynamicTensor(outputInfo))
    {
        try
        {
            ALOGD("Output shape not set, will infer from inputs");
            outputInfo.SetShape(InferConvolution2dOutputShape(inputInfo.GetShape(),
                                                              weights.GetInfo().GetShape(),
                                                              desc));
        }
        catch (armnn::Exception& e)
        {
            return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
        }
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *endLayer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

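// Converts an NNAPI DEPTHWISE_CONV_2D operation into an armnn DepthwiseConvolution2dLayer.
// The NNAPI filter shape [1, H, W, I * M] is reinterpreted as [H, W, I, M] and then
// swizzled to the [M, I, H, W] layout that ArmNN expects for depthwise weights.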
bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertDepthwiseConv2d()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const Operand* weightsOperand = GetInputOperand<hal_1_2::HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit
    bool implicitPadding = operation.inputs.size() == 8 ||
                           (operation.inputs.size() >= 9 &&
                            GetInputOperand<hal_1_2::HalPolicy>(operation, 8, model)->type == OperandType::BOOL);

    // Look ahead to find the optional DataLayout, if present
    const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
    desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, dataLayoutFlagIndex, model, data);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();

    // Reinterpret weight data as [ H, W, I, M ]
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[channelsIndex],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  HWIMToMIHW,
                                                                  &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 7, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 11)
    {
        // explicit padding
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 10, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        try
        {
            ALOGD("Output shape not set, will infer from inputs");
            outputInfo.SetShape(InferDepthwiseConvolution2dOutputShape(inputInfo.GetShape(),
                                                                       weights.GetInfo().GetShape(),
                                                                       desc));
        }
        catch (armnn::Exception& e)
        {
            return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
        }
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *endLayer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

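// Converts an NNAPI MAXIMUM operation into an armnn MaximumLayer, broadcasting the
// two inputs against each other where their shapes differ.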
bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertMaximum()");

    LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Could not read output", __func__);
    }

    armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outInfo))
    {
        ALOGD("Output shape not set, will infer from inputs");
        outInfo.SetShape(InferMaximumOutputShape(input0.GetTensorInfo().GetShape(), input1.GetTensorInfo().GetShape()));
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMaximumSupported,
                               data.m_Backends,
                               isSupported,
                               input0.GetTensorInfo(),
                               input1.GetTensorInfo(),
                               outInfo);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
    assert(layer != nullptr);
    BroadcastTensor(input0, input1, layer, *data.m_Network);

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outInfo));
}

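// Converts an NNAPI MINIMUM operation into an armnn MinimumLayer; mirrors ConvertMaximum.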
bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertMinimum()");

    LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        ALOGD("Output shape not set, will infer from inputs");
        outputInfo.SetShape(InferMinimumOutputShape(input0.GetTensorInfo().GetShape(),
                                                    input1.GetTensorInfo().GetShape()));
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMinimumSupported,
                               data.m_Backends,
                               isSupported,
                               input0.GetTensorInfo(),
                               input1.GetTensorInfo(),
                               outputInfo);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
    assert(layer != nullptr);
    BroadcastTensor(input0, input1, layer, *data.m_Network);

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertPad()");
    return ::ConvertPad<hal_1_2::HalPolicy>(operation, model, data);
}

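// Converts an NNAPI PAD_V2 operation into an armnn PadLayer. Unlike PAD, PAD_V2 carries
// an explicit pad value whose scalar type must match the input tensor type (FLOAT16,
// FLOAT32, or INT32 for quantized inputs).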
bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertPadV2()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    armnn::PadDescriptor descriptor;
    if (!ConvertPaddings<hal_1_2::HalPolicy>(operation, model, data, rank, descriptor))
    {
        return Fail("%s: Could not convert paddings", __func__);
    }

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        ALOGD("Output shape not set, will infer from inputs");
        outputInfo.SetShape(InferPadOutputShape(inputInfo.GetShape(), descriptor.m_PadList));
    }

    // Determine type of padding value
    OperandType operandType0;
    OperandType operandType2;

    if (!GetOperandType<hal_1_2::HalPolicy>(operation, 0, model, operandType0) ||
        !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // Read value to use for padding
    if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
    {
        armnn::Half f16PadValue;
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 2, operandType2, f16PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
        }

        descriptor.m_PadValue = f16PadValue;
    }
    else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
    {
        if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, descriptor.m_PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
        }
    }
    else if (operandType0 == OperandType::TENSOR_QUANT8_ASYMM && operandType2 == OperandType::INT32)
    {
        int32_t intPadValue = 0;
        if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, intPadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (INT32)", __func__);
        }
        descriptor.m_PadValue = intPadValue;
    }
    else
    {
        return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPadSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

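// Converts an NNAPI PRELU operation into an armnn PreluLayer, broadcasting the alpha
// tensor against the input where necessary.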
bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertPrelu()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    LayerInputHandle alpha = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);

    if (!input.IsValid() || !alpha.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& alphaInfo = alpha.GetTensorInfo();

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        ALOGD("Output shape not set, will infer from inputs");
        outputInfo.SetShape(InferPreluOutputShape(inputInfo.GetShape(), alphaInfo.GetShape()));
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPreluSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               alphaInfo,
                               outputInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPreluLayer();

    if (!layer)
    {
        return Fail("%s: AddPreluLayer failed", __func__);
    }

    BroadcastTensor(input, alpha, layer, *data.m_Network);

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu()");
    return ::ConvertReLu<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu1()");
    return ::ConvertReLu1<hal_1_2::HalPolicy>(operation, model, data);
}

bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu6()");
    return ::ConvertReLu6<hal_1_2::HalPolicy>(operation, model, data);
}

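// Converts NNAPI RESIZE_BILINEAR and RESIZE_NEAREST_NEIGHBOR operations into an armnn
// ResizeLayer. The target size is given either as absolute INT32 width/height values or
// as FLOAT32 scale factors applied to the input dimensions; FLOAT16 scales are rejected.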
bool HalPolicy::ConvertResize(const Operation& operation,
                              const Model& model,
                              ConversionData& data,
                              armnn::ResizeMethod resizeMethod)
{
    ALOGV("hal_1_2::HalPolicy::ConvertResize()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);

    armnn::ResizeDescriptor descriptor;
    descriptor.m_Method = resizeMethod;
    descriptor.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 3, model, data);

    OperandType operandType1;
    OperandType operandType2;

    if (!GetOperandType<hal_1_2::HalPolicy>(operation, 1, model, operandType1) ||
        !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (operandType1 != operandType2)
    {
        return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
    }

    if (operandType1 == OperandType::INT32)
    {
        // Case 1: resizing by shape
        int32_t targetWidth = 0;
        int32_t targetHeight = 0;

        if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 1, targetWidth, model, data) ||
            !GetInputInt32<hal_1_2::HalPolicy>(operation, 2, targetHeight, model, data))
        {
            return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
        }

        if (targetWidth < 0 || targetHeight < 0)
        {
            return Fail("%s: Operation has invalid inputs for resizing by shape. "
                        "Target width/height cannot be < 0", __func__);
        }

        descriptor.m_TargetWidth  = static_cast<uint32_t>(targetWidth);
        descriptor.m_TargetHeight = static_cast<uint32_t>(targetHeight);
    }
    else if (operandType1 == OperandType::FLOAT32)
    {
        // Case 2: resizing by scale
        float widthScale = 1.0f;
        float heightScale = 1.0f;

        if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, widthScale, model, data) ||
            !GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, heightScale, model, data))
        {
            return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
        }

        const armnn::TensorShape& inputShape = inputInfo.GetShape();
        armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);

        float width = inputShape[dataLayoutIndexed.GetWidthIndex()];
        float height = inputShape[dataLayoutIndexed.GetHeightIndex()];

        descriptor.m_TargetWidth  = std::floor(width * widthScale);
        descriptor.m_TargetHeight = std::floor(height * heightScale);
    }
    else
    {
        // NOTE: FLOAT16 scales are not supported
        return false;
    }

    if (IsDynamicTensor(outputInfo))
    {
        try
        {
            ALOGD("Output shape not set, will infer from inputs");
            outputInfo.SetShape(InferResizeOutputShape(inputInfo.GetShape(), descriptor));
        }
        catch (armnn::Exception& e)
        {
            return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
        }
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsResizeSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);

    assert(layer != nullptr);

    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

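// Converts an NNAPI SPACE_TO_DEPTH operation into an armnn SpaceToDepthLayer. Only
// rank-4 inputs are supported, and the block size must be greater than 1.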
bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertSpaceToDepth()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }

    armnn::SpaceToDepthDescriptor desc;

    if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (desc.m_BlockSize <= 1)
    {
        return Fail("%s: Block size must be greater than 1", __func__);
    }

    desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 2, model, data);

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsSpaceToDepthSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}

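// Converts an NNAPI SOFTMAX operation into an armnn SoftmaxLayer, reading the beta
// scalar and, when present, the optional axis parameter added in HAL 1.2.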
bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertSoftmax()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo))
    {
        ALOGD("Output shape not set, will infer from input");
        outputInfo.SetShape(input.GetTensorInfo().GetShape());
    }

    armnn::SoftmaxDescriptor desc;
    if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, desc.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (operation.inputs.size() > 2 && !GetInputScalar<hal_1_2::HalPolicy>(operation,
                                                                           2,
                                                                           HalPolicy::OperandType::INT32,
                                                                           desc.m_Axis,
                                                                           model,
                                                                           data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsSoftmaxSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertTanH()");
    return ::ConvertTanH<hal_1_2::HalPolicy>(operation, model, data);
}

} // namespace hal_1_2
} // namespace armnn_driver