//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "HalPolicy.hpp"

#include "OutputShapeUtils.hpp"
#include "Utils.hpp"

#include "../1.0/HalPolicy.hpp"
#include "../1.1/HalPolicy.hpp"

#include <DataLayoutIndexed.hpp>
#include <Half.hpp>

#include <cmath>

namespace armnn_driver
{
namespace hal_1_2
{

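// The HandledByV1_0/HandledByV1_1 helpers below rely on operations that predate HAL 1.2 keeping
// the same operation type values in the V1_0, V1_1 and V1_2 enums, so a simple cast is enough to
// check whether an operation can be handled by an earlier HalPolicy.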
bool HandledByV1_0(V1_2::OperationType operationType)
{
    switch (static_cast<V1_0::OperationType>(operationType))
    {
        case V1_0::OperationType::ADD:
        case V1_0::OperationType::AVERAGE_POOL_2D:
        case V1_0::OperationType::CONCATENATION:
        case V1_0::OperationType::DEPTH_TO_SPACE:
        case V1_0::OperationType::DEQUANTIZE:
        case V1_0::OperationType::EMBEDDING_LOOKUP:
        case V1_0::OperationType::FLOOR:
        case V1_0::OperationType::FULLY_CONNECTED:
        case V1_0::OperationType::HASHTABLE_LOOKUP:
        case V1_0::OperationType::L2_NORMALIZATION:
        case V1_0::OperationType::L2_POOL_2D:
        case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
        case V1_0::OperationType::LOGISTIC:
        case V1_0::OperationType::LSH_PROJECTION:
        case V1_0::OperationType::LSTM:
        case V1_0::OperationType::MAX_POOL_2D:
        case V1_0::OperationType::MUL:
        case V1_0::OperationType::RELU:
        case V1_0::OperationType::RELU1:
        case V1_0::OperationType::RELU6:
        case V1_0::OperationType::RESHAPE:
        case V1_0::OperationType::RNN:
        case V1_0::OperationType::SPACE_TO_DEPTH:
        case V1_0::OperationType::SVDF:
        case V1_0::OperationType::TANH:
        case V1_0::OperationType::OEM_OPERATION:
            return true;
        default:
            return false;
    }
}

bool HandledByV1_1(V1_2::OperationType operationType)
{
    if (HandledByV1_0(operationType))
    {
        return true;
    }
    switch (static_cast<V1_1::OperationType>(operationType))
    {
        case V1_1::OperationType::BATCH_TO_SPACE_ND:
        case V1_1::OperationType::DIV:
        case V1_1::OperationType::MEAN:
        case V1_1::OperationType::PAD:
        case V1_1::OperationType::SPACE_TO_BATCH_ND:
        case V1_1::OperationType::SQUEEZE:
        case V1_1::OperationType::STRIDED_SLICE:
        case V1_1::OperationType::SUB:
        case V1_1::OperationType::TRANSPOSE:
            return true;
        default:
            return false;
    }
}

bool HandledByV1_0(const V1_2::Operation& operation)
{
    return HandledByV1_0(operation.type);
}

bool HandledByV1_1(const V1_2::Operation& operation)
{
    return HandledByV1_1(operation.type);
}

V1_0::OperationType CastToV1_0(V1_2::OperationType type)
{
    return static_cast<V1_0::OperationType>(type);
}

V1_1::OperationType CastToV1_1(V1_2::OperationType type)
{
    return static_cast<V1_1::OperationType>(type);
}

V1_0::Operation ConvertToV1_0(const V1_2::Operation& operation)
{
    V1_0::Operation op;
    op.type = CastToV1_0(operation.type);
    op.inputs = operation.inputs;
    op.outputs = operation.outputs;
    return op;
}

V1_1::Operation ConvertToV1_1(const V1_2::Operation& operation)
{
    V1_1::Operation op;
    op.type = CastToV1_1(operation.type);
    op.inputs = operation.inputs;
    op.outputs = operation.outputs;
    return op;
}

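// ConvertOperation() first tries to hand the operation back to the 1.0 or 1.1 policy when the
// model is compliant with that HAL level; only operations and signatures introduced in HAL 1.2
// are converted by the switch below.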
bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
{
    if (HandledByV1_0(operation) && compliantWithV1_0(model))
    {
        hal_1_0::HalPolicy::Operation v10Operation = ConvertToV1_0(operation);
        hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);

        return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
    }

    if (HandledByV1_1(operation) && compliantWithV1_1(model))
    {
        hal_1_1::HalPolicy::Operation v11Operation = ConvertToV1_1(operation);
        hal_1_1::HalPolicy::Model v11Model = convertToV1_1(model);

        return hal_1_1::HalPolicy::ConvertOperation(v11Operation, v11Model, data);
    }

    switch (operation.type)
    {
        case V1_2::OperationType::CONV_2D:
            return ConvertConv2d(operation, model, data);
        case V1_2::OperationType::DEPTHWISE_CONV_2D:
            return ConvertDepthwiseConv2d(operation, model, data);
        case V1_2::OperationType::MAXIMUM:
            return ConvertMaximum(operation, model, data);
        case V1_2::OperationType::MINIMUM:
            return ConvertMinimum(operation, model, data);
        case V1_2::OperationType::PAD_V2:
            return ConvertPadV2(operation, model, data);
        case V1_2::OperationType::PRELU:
            return ConvertPrelu(operation, model, data);
        case V1_2::OperationType::RESIZE_BILINEAR:
            return ConvertResize(operation, model, data, armnn::ResizeMethod::Bilinear);
        case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR:
            return ConvertResize(operation, model, data, armnn::ResizeMethod::NearestNeighbor);
        case V1_2::OperationType::SOFTMAX:
            return ConvertSoftmax(operation, model, data);
        default:
            return Fail("%s: Operation type %s not supported in ArmnnDriver",
                        __func__, toString(operation.type).c_str());
    }
}

bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertConv2d()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

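    // NNAPI CONV_2D has two input signatures: with implicit padding the padding scheme, strides
    // and activation are at inputs 3..6 (optionally followed by a BOOL data layout flag and
    // dilation factors), while with explicit padding the four pad values, strides and activation
    // occupy inputs 3..9 before the optional layout flag and dilation factors.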
    // Determine whether padding is implicit or explicit
    bool implicitPadding = operation.inputs.size() == 7 ||
        (operation.inputs.size() >= 8 &&
        GetInputOperand<hal_1_2::HalPolicy>(operation, 7, model)->type == OperandType::BOOL);

    if (implicitPadding)
    {
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 7, model, data);
    }
    else if (operation.inputs.size() >= 10)
    {
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 10, model, data);
    }

    const armnn::PermutationVector OHWIToOIHW = {0, 2, 3, 1};

    // ArmNN does not currently support non-fixed weights or bias
    // The NNAPI filter is always OHWI [depth_out, filter_height, filter_width, depth_in] but ArmNN expects the
    // filter's height and width indices to match the input's height and width indices so we permute it to OIHW if
    // the DataLayout is NCHW
    const ConstTensorPin weightsPin = (desc.m_DataLayout == armnn::DataLayout::NCHW) ?
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data, OHWIToOIHW) :
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 6, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 8, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
        unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
        unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
        const uint32_t kernelX = weights.GetShape()[widthIndex];
        const uint32_t kernelY = weights.GetShape()[heightIndex];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 10)
    {
        // explicit padding
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 9, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 11, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

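    // HAL 1.2 allows outputs with unspecified dimensions; if the output shape was not provided in
    // the model it is inferred here from the input shape, the weights shape and the descriptor.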
    if (IsDynamicTensor(outputInfo))
    {
        try
        {
            ALOGD("Output shape not set, will infer from inputs");
            outputInfo.SetShape(InferConvolution2dOutputShape(inputInfo.GetShape(),
                                                              weights.GetInfo().GetShape(),
                                                              desc));
        }
        catch (armnn::Exception& e)
        {
            return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
        }
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *endLayer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertDepthwiseConv2d()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const Operand* weightsOperand = GetInputOperand<hal_1_2::HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit
    bool implicitPadding = operation.inputs.size() == 8 ||
        (operation.inputs.size() >= 9 &&
        GetInputOperand<hal_1_2::HalPolicy>(operation, 8, model)->type == OperandType::BOOL);

    // Look ahead to find the optional DataLayout, if present
    const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
    desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, dataLayoutFlagIndex, model, data);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();

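    // The NNAPI weights are [ 1, H, W, I * M ]; dividing the last dimension by the input channel
    // count I recovers the depth multiplier M so the data can be rearranged into ArmNN's
    // [ M, I, H, W ] layout below.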
    // Reinterpret weight data as [ H, W, I, M ]
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[channelsIndex],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  HWIMToMIHW,
                                                                  &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 7, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 11)
    {
        // explicit padding
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 10, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        try
        {
            ALOGD("Output shape not set, will infer from inputs");
            outputInfo.SetShape(InferDepthwiseConvolution2dOutputShape(inputInfo.GetShape(),
                                                                       weights.GetInfo().GetShape(),
                                                                       desc));
        }
        catch (armnn::Exception& e)
        {
            return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
        }
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *endLayer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

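// MAXIMUM (and MINIMUM below) are element-wise binary operations; BroadcastTensor() connects both
// inputs to the layer and, where the ranks differ, is expected to insert the reshape needed for
// broadcasting.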
bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertMaximum()");

    LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Could not read output", __func__);
    }

    armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outInfo))
    {
        ALOGD("Output shape not set, will infer from inputs");
        outInfo.SetShape(InferMaximumOutputShape(input0.GetTensorInfo().GetShape(), input1.GetTensorInfo().GetShape()));
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMaximumSupported,
                               data.m_Backends,
                               isSupported,
                               input0.GetTensorInfo(),
                               input1.GetTensorInfo(),
                               outInfo);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
    assert(layer != nullptr);
    BroadcastTensor(input0, input1, layer, *data.m_Network);

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outInfo));
}

bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertMinimum()");

    LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        ALOGD("Output shape not set, will infer from inputs");
        outputInfo.SetShape(InferMinimumOutputShape(input0.GetTensorInfo().GetShape(),
                                                    input1.GetTensorInfo().GetShape()));
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMinimumSupported,
                               data.m_Backends,
                               isSupported,
                               input0.GetTensorInfo(),
                               input1.GetTensorInfo(),
                               outputInfo);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
    assert(layer != nullptr);
    BroadcastTensor(input0, input1, layer, *data.m_Network);

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertPadV2()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    armnn::PadDescriptor descriptor;
    if (!ConvertPaddings<hal_1_2::HalPolicy>(operation, model, data, rank, descriptor))
    {
        return Fail("%s: Could not convert paddings", __func__);
    }

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        ALOGD("Output shape not set, will infer from inputs");
        outputInfo.SetShape(InferPadOutputShape(inputInfo.GetShape(), descriptor.m_PadList));
    }

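    // The PAD_V2 pad value must match the input tensor type: a FLOAT16 scalar for FLOAT16 tensors,
    // a FLOAT32 scalar for FLOAT32 tensors, and an INT32 value for quantized tensors, which is
    // dequantized with the input's scale and offset before being stored in the descriptor.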
    // Determine type of padding value
    OperandType operandType0;
    OperandType operandType2;

    if (!GetOperandType<hal_1_2::HalPolicy>(operation, 0, model, operandType0) ||
        !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // Read value to use for padding
    if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
    {
        armnn::Half f16PadValue;
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 2, operandType2, f16PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
        }

        descriptor.m_PadValue = f16PadValue;
    }
    else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
    {
        if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, descriptor.m_PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
        }
    }
    else if (operandType0 == OperandType::TENSOR_QUANT8_ASYMM && operandType2 == OperandType::INT32)
    {
        int32_t quantizedPadValue = 0;
        if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, quantizedPadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (INT32)", __func__);
        }

        descriptor.m_PadValue = armnn::Dequantize(quantizedPadValue,
                                                  inputInfo.GetQuantizationScale(),
                                                  inputInfo.GetQuantizationOffset());
    }
    else
    {
        return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPadSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

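// PRELU keeps positive values and scales negative values by a per-element alpha tensor that is
// broadcast against the input; BroadcastTensor() below combines the two shapes.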
bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertPrelu()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    LayerInputHandle alpha = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);

    if (!input.IsValid() || !alpha.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& alphaInfo = alpha.GetTensorInfo();

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        ALOGD("Output shape not set, will infer from inputs");
        outputInfo.SetShape(InferPreluOutputShape(inputInfo.GetShape(), alphaInfo.GetShape()));
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPreluSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               alphaInfo,
                               outputInfo);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPreluLayer();

    if (!layer)
    {
        return Fail("%s: AddPreluLayer failed", __func__);
    }

    BroadcastTensor(input, alpha, layer, *data.m_Network);

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

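// RESIZE_BILINEAR and RESIZE_NEAREST_NEIGHBOR share this converter and differ only in the
// armnn::ResizeMethod passed in; the target size is given either as INT32 width/height values or
// as FLOAT32 scale factors applied to the input dimensions.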
bool HalPolicy::ConvertResize(const Operation& operation,
                              const Model& model,
                              ConversionData& data,
                              armnn::ResizeMethod resizeMethod)
{
    ALOGV("hal_1_2::HalPolicy::ConvertResize()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);

    armnn::ResizeDescriptor descriptor;
    descriptor.m_Method = resizeMethod;
    descriptor.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 3, model, data);

    OperandType operandType1;
    OperandType operandType2;

    if (!GetOperandType<hal_1_2::HalPolicy>(operation, 1, model, operandType1) ||
        !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (operandType1 != operandType2)
    {
        return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
    }

    if (operandType1 == OperandType::INT32)
    {
        // Case 1: resizing by shape
        int32_t targetWidth = 0;
        int32_t targetHeight = 0;

        if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 1, targetWidth, model, data) ||
            !GetInputInt32<hal_1_2::HalPolicy>(operation, 2, targetHeight, model, data))
        {
            return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
        }

        if (targetWidth < 0 || targetHeight < 0)
        {
            return Fail("%s: Operation has invalid inputs for resizing by shape. "
                        "Target width/height cannot be < 0", __func__);
        }

        descriptor.m_TargetWidth = static_cast<uint32_t>(targetWidth);
        descriptor.m_TargetHeight = static_cast<uint32_t>(targetHeight);
    }
    else if (operandType1 == OperandType::FLOAT32)
    {
        // Case 2: resizing by scale
        float widthScale = 1.0f;
        float heightScale = 1.0f;

        if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, widthScale, model, data) ||
            !GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, heightScale, model, data))
        {
            return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
        }

        const armnn::TensorShape& inputShape = inputInfo.GetShape();
        armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);

        float width = inputShape[dataLayoutIndexed.GetWidthIndex()];
        float height = inputShape[dataLayoutIndexed.GetHeightIndex()];

        descriptor.m_TargetWidth = std::floor(width * widthScale);
        descriptor.m_TargetHeight = std::floor(height * heightScale);
    }
    else
    {
        // NOTE: FLOAT16 scales are not supported
        return false;
    }

    if (IsDynamicTensor(outputInfo))
    {
        try
        {
            ALOGD("Output shape not set, will infer from inputs");
            outputInfo.SetShape(InferResizeOutputShape(inputInfo.GetShape(), descriptor));
        }
        catch (armnn::Exception& e)
        {
            return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
        }
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsResizeSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);

    assert(layer != nullptr);

    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertSpaceToDepth()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }

    armnn::SpaceToDepthDescriptor desc;

    GetInputScalar<hal_1_2::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);

    if (desc.m_BlockSize <= 1)
    {
        return Fail("%s: Block size must be at least 1 in all dimensions", __func__);
    }

    desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 2, model, data);

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsSpaceToDepthSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}

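// SOFTMAX in HAL 1.2 takes the beta scale as input 1 and an optional INT32 axis as input 2,
// which is read into the descriptor when present.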
bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertSoftmax()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    if (IsDynamicTensor(outputInfo))
    {
        ALOGD("Output shape not set, will infer from input");
        outputInfo.SetShape(input.GetTensorInfo().GetShape());
    }

    armnn::SoftmaxDescriptor desc;
    if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, desc.m_Beta, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (operation.inputs.size() > 2 && !GetInputScalar<hal_1_2::HalPolicy>(operation,
                                                                           2,
                                                                           HalPolicy::OperandType::INT32,
                                                                           desc.m_Axis,
                                                                           model,
                                                                           data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsSoftmaxSupported,
                               data.m_Backends,
                               isSupported,
                               input.GetTensorInfo(),
                               outputInfo,
                               desc);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

} // namespace hal_1_2
} // namespace armnn_driver