blob: f93629ef0cbfe12fdf472f5f04a79a22d5ad1381 [file] [log] [blame]
Mike Kellyb5fdf382019-06-11 16:35:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "HalPolicy.hpp"
7
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01008#include "OutputShapeUtils.hpp"
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +01009#include "Utils.hpp"
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +010010
Mike Kellyb5fdf382019-06-11 16:35:25 +010011#include "../1.0/HalPolicy.hpp"
12#include "../1.1/HalPolicy.hpp"
13
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010014#include <DataLayoutIndexed.hpp>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +010015#include <Half.hpp>
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010016
17#include <cmath>
18
Mike Kellyb5fdf382019-06-11 16:35:25 +010019namespace armnn_driver
20{
21namespace hal_1_2
22{
23
24bool HandledByV1_0(V1_2::OperationType operationType)
25{
26 switch (static_cast<V1_0::OperationType>(operationType))
27 {
28 case V1_0::OperationType::ADD:
29 case V1_0::OperationType::AVERAGE_POOL_2D:
30 case V1_0::OperationType::CONCATENATION:
31 case V1_0::OperationType::DEPTH_TO_SPACE:
32 case V1_0::OperationType::DEQUANTIZE:
33 case V1_0::OperationType::EMBEDDING_LOOKUP:
34 case V1_0::OperationType::FLOOR:
35 case V1_0::OperationType::FULLY_CONNECTED:
36 case V1_0::OperationType::HASHTABLE_LOOKUP:
37 case V1_0::OperationType::L2_NORMALIZATION:
38 case V1_0::OperationType::L2_POOL_2D:
39 case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
40 case V1_0::OperationType::LOGISTIC:
41 case V1_0::OperationType::LSH_PROJECTION:
42 case V1_0::OperationType::LSTM:
43 case V1_0::OperationType::MAX_POOL_2D:
44 case V1_0::OperationType::MUL:
45 case V1_0::OperationType::RELU:
46 case V1_0::OperationType::RELU1:
47 case V1_0::OperationType::RELU6:
48 case V1_0::OperationType::RESHAPE:
Mike Kellyb5fdf382019-06-11 16:35:25 +010049 case V1_0::OperationType::RNN:
Mike Kellyb5fdf382019-06-11 16:35:25 +010050 case V1_0::OperationType::SPACE_TO_DEPTH:
51 case V1_0::OperationType::SVDF:
52 case V1_0::OperationType::TANH:
53 case V1_0::OperationType::OEM_OPERATION:
54 return true;
55 default:
56 return false;
57 }
58}
59
60bool HandledByV1_1(V1_2::OperationType operationType)
61{
62 if (HandledByV1_0(operationType))
63 {
64 return true;
65 }
66 switch (static_cast<V1_1::OperationType>(operationType))
67 {
68 case V1_1::OperationType::BATCH_TO_SPACE_ND:
69 case V1_1::OperationType::DIV:
70 case V1_1::OperationType::MEAN:
71 case V1_1::OperationType::PAD:
72 case V1_1::OperationType::SPACE_TO_BATCH_ND:
73 case V1_1::OperationType::SQUEEZE:
74 case V1_1::OperationType::STRIDED_SLICE:
75 case V1_1::OperationType::SUB:
76 case V1_1::OperationType::TRANSPOSE:
77 return true;
78 default:
79 return false;
80 }
81}
82
// Convenience overload: dispatches on the operation's type field.
bool HandledByV1_0(const V1_2::Operation& operation)
{
    return HandledByV1_0(operation.type);
}
87
// Convenience overload: dispatches on the operation's type field.
bool HandledByV1_1(const V1_2::Operation& operation)
{
    return HandledByV1_1(operation.type);
}
92
93V1_0::OperationType CastToV1_0(V1_2::OperationType type)
94{
95 return static_cast<V1_0::OperationType>(type);
96}
97
98V1_1::OperationType CastToV1_1(V1_2::OperationType type)
99{
100 return static_cast<V1_1::OperationType>(type);
101}
102
103V1_0::Operation ConvertToV1_0(const V1_2::Operation& operation)
104{
105 V1_0::Operation op;
106 op.type = CastToV1_0(operation.type);
107 op.inputs = operation.inputs;
108 op.outputs = operation.outputs;
109 return op;
110}
111
112V1_1::Operation ConvertToV1_1(const V1_2::Operation& operation)
113{
114 V1_1::Operation op;
115 op.type = CastToV1_1(operation.type);
116 op.inputs = operation.inputs;
117 op.outputs = operation.outputs;
118 return op;
119}
120
121bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
122{
123 if (HandledByV1_0(operation) && compliantWithV1_0(model))
124 {
125 hal_1_0::HalPolicy::Operation v10Operation = ConvertToV1_0(operation);
126 hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);
127
128 return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
129 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100130
131 if (HandledByV1_1(operation) && compliantWithV1_1(model))
Mike Kellyb5fdf382019-06-11 16:35:25 +0100132 {
133 hal_1_1::HalPolicy::Operation v11Operation = ConvertToV1_1(operation);
134 hal_1_1::HalPolicy::Model v11Model = convertToV1_1(model);
135
136 return hal_1_1::HalPolicy::ConvertOperation(v11Operation, v11Model, data);
137 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100138
Mike Kellyb5fdf382019-06-11 16:35:25 +0100139 switch (operation.type)
140 {
141 case V1_2::OperationType::CONV_2D:
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100142 return ConvertConv2d(operation, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +0100143 case V1_2::OperationType::DEPTHWISE_CONV_2D:
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100144 return ConvertDepthwiseConv2d(operation, model, data);
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100145 case V1_2::OperationType::MAXIMUM:
146 return ConvertMaximum(operation, model, data);
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100147 case V1_2::OperationType::MINIMUM:
148 return ConvertMinimum(operation, model, data);
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100149 case V1_2::OperationType::PAD_V2:
150 return ConvertPadV2(operation, model, data);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100151 case V1_2::OperationType::PRELU:
152 return ConvertPrelu(operation, model, data);
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100153 case V1_2::OperationType::RESIZE_BILINEAR:
154 return ConvertResize(operation, model, data, armnn::ResizeMethod::Bilinear);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100155 case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR:
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100156 return ConvertResize(operation, model, data, armnn::ResizeMethod::NearestNeighbor);
Francis Murtagh074c25a2019-07-22 16:40:57 +0100157 case V1_2::OperationType::SOFTMAX:
158 return ConvertSoftmax(operation, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +0100159 default:
160 return Fail("%s: Operation type %s not supported in ArmnnDriver",
161 __func__, toString(operation.type).c_str());
162 }
163}
164
// Converts an NNAPI CONV_2D operation (HAL 1.2 flavour) into an ArmNN Convolution2d layer.
// Supports both the implicit-padding signature (padding scheme + strides) and the
// explicit-padding signature (per-edge padding + strides), plus the optional
// data-layout flag and dilation parameters added in HAL 1.2.
// Returns true on success; false (usually via Fail) on any validation/support failure.
bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    // outputInfo is non-const: its shape may be inferred below for dynamic tensors
    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit:
    // either exactly 7 inputs, or 8+ inputs where input 7 is the BOOL data-layout flag
    // (with explicit padding, input 7 is the StrideX INT32 scalar instead)
    bool implicitPadding = operation.inputs.size() == 7 ||
        (operation.inputs.size() >= 8 &&
        GetInputOperand<hal_1_2::HalPolicy>(operation, 7, model)->type == OperandType::BOOL);

    // The optional data-layout flag sits at a different index depending on the signature
    if (implicitPadding)
    {
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 7, model, data);
    }
    else if (operation.inputs.size() >= 10)
    {
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 10, model, data);
    }

    const armnn::PermutationVector OHWIToOIHW = {0, 2, 3, 1};

    // ArmNN does not currently support non-fixed weights or bias
    // The NNAPI filter is always OHWI [depth_out, filter_height, filter_width, depth_in] but ArmNN expects the
    // filter's height and width indices to match the input's height and width indices so we permute it to OIHW if
    // the DataLayout is NCHW
    const ConstTensorPin weightsPin = (desc.m_DataLayout == armnn::DataLayout::NCHW) ?
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data, OHWIToOIHW) :
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Reconcile the bias quantization scale with the input/weight scales if they disagree
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        // Implicit signature: input 3 = padding scheme, 4/5 = strides, 6 = fused activation,
        // 8+ = optional dilation parameters
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 6, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 8, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        // Derive per-edge padding from the scheme, kernel size, input size, stride and dilation
        armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
        unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
        unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
        const uint32_t kernelX = weights.GetShape()[widthIndex];
        const uint32_t kernelY = weights.GetShape()[heightIndex];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);

    }
    else if (operation.inputs.size() >= 10)
    {
        // explicit padding: inputs 3-6 = per-edge padding, 7/8 = strides,
        // 9 = fused activation, 11+ = optional dilation parameters
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 9, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 11, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Dynamic output tensor: infer its shape from the input/weights/descriptor
    if (IsDynamicTensor(outputInfo))
    {
        try
        {
            ALOGD("Output shape not set, will infer from inputs");
            outputInfo.SetShape(InferConvolution2dOutputShape(inputInfo.GetShape(),
                                                              weights.GetInfo().GetShape(),
                                                              desc));
        }
        catch (armnn::Exception& e)
        {
            return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
        }
    }

    // Ask the configured backends whether they support this convolution workload
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *endLayer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}
329
// Converts an NNAPI DEPTHWISE_CONV_2D operation (HAL 1.2 flavour) into an ArmNN
// DepthwiseConvolution2d layer. Supports both implicit- and explicit-padding
// signatures, the optional data-layout flag and the optional dilation parameters.
// Returns true on success; false (usually via Fail) on any validation/support failure.
bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const Operand* weightsOperand = GetInputOperand<hal_1_2::HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit:
    // either exactly 8 inputs, or 9+ inputs where input 8 is the BOOL data-layout flag
    bool implicitPadding = operation.inputs.size() == 8 ||
        (operation.inputs.size() >= 9 &&
        GetInputOperand<hal_1_2::HalPolicy>(operation, 8, model)->type == OperandType::BOOL);

    // Look ahead to find the optional DataLayout, if present
    const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
    desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, dataLayoutFlagIndex, model, data);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();

    // Reinterpret weight data as [ H, W, I, M ]
    // (M, the depth multiplier, is dims[3] / input channel count)
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[channelsIndex],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  HWIMToMIHW,
                                                                  &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Reconcile the bias quantization scale with the input/weight scales if they disagree
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        // Implicit signature: input 3 = padding scheme, 4/5 = strides, 7 = fused activation,
        // 9+ = optional dilation parameters
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 7, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        // Weights were swizzled to [ M, I, H, W ] above, so W is index 3 and H is index 2
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 11)
    {
        // explicit padding: inputs 3-6 = per-edge padding, 7/8 = strides,
        // 10 = fused activation, 12+ = optional dilation parameters
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 10, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Dynamic output tensor: infer its shape from the input/weights/descriptor
    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        try
        {
            ALOGD("Output shape not set, will infer from inputs");
            outputInfo.SetShape(InferDepthwiseConvolution2dOutputShape(inputInfo.GetShape(),
                                                                       weights.GetInfo().GetShape(),
                                                                       desc));
        }
        catch (armnn::Exception& e)
        {
            return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
        }
    }

    // Ask the configured backends whether they support this workload
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *endLayer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}
508
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100509bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
510{
511 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
512 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
513
514 if (!input0.IsValid() || !input1.IsValid())
515 {
516 return Fail("%s: Operation has invalid inputs", __func__);
517 }
518
519 const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
520 if (!outputOperand)
521 {
522 return Fail("%s: Could not read output", __func__);
523 }
524
Aron Virginas-Tard7593232019-07-16 13:17:06 +0100525 armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100526 if (IsDynamicTensor(outInfo))
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100527 {
528 ALOGD("Output shape not set, will infer from inputs");
529 outInfo.SetShape(InferMaximumOutputShape(input0.GetTensorInfo().GetShape(), input1.GetTensorInfo().GetShape()));
530 }
531
Aron Virginas-Tard7593232019-07-16 13:17:06 +0100532 bool isSupported = false;
533 FORWARD_LAYER_SUPPORT_FUNC(__func__,
534 IsMaximumSupported,
535 data.m_Backends,
536 isSupported,
537 input0.GetTensorInfo(),
538 input1.GetTensorInfo(),
539 outInfo);
540
541 if (!isSupported)
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100542 {
543 return false;
544 }
545
546 armnn::IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
547 assert(layer != nullptr);
548 BroadcastTensor(input0, input1, layer, *data.m_Network);
549
550 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
551 0,
552 *layer,
553 model,
554 data,
555 armnn::Optional<armnn::TensorInfo>(outInfo));
556}
557
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100558bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
559{
560 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
561 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
562
563 if (!input0.IsValid() || !input1.IsValid())
564 {
565 return Fail("%s: Operation has invalid inputs", __func__);
566 }
567
568 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
569 if (!output)
570 {
571 return Fail("%s: Could not read output 0", __func__);
572 }
573
574 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100575 if (IsDynamicTensor(outputInfo))
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100576 {
577 ALOGD("Output shape not set, will infer from inputs");
578 outputInfo.SetShape(InferMinimumOutputShape(input0.GetTensorInfo().GetShape(),
579 input1.GetTensorInfo().GetShape()));
580 }
581
582 bool isSupported = false;
583 FORWARD_LAYER_SUPPORT_FUNC(__func__,
584 IsMinimumSupported,
585 data.m_Backends,
586 isSupported,
587 input0.GetTensorInfo(),
588 input1.GetTensorInfo(),
589 outputInfo);
590
591 if (!isSupported)
592 {
593 return false;
594 }
595
596 armnn::IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
597 assert(layer != nullptr);
598 BroadcastTensor(input0, input1, layer, *data.m_Network);
599
600 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
601 0,
602 *layer,
603 model,
604 data,
605 armnn::Optional<armnn::TensorInfo>(outputInfo));
606}
607
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100608bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
609{
610 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
611 if (!input.IsValid())
612 {
613 return Fail("%s: Could not read input 0", __func__);
614 }
615
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +0100616 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
617 if (!output)
618 {
619 return Fail("%s: Could not read output", __func__);
620 }
621
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100622 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
623 unsigned int rank = inputInfo.GetNumDimensions();
624
625 armnn::PadDescriptor descriptor;
626 if (!ConvertPaddings<hal_1_2::HalPolicy>(operation, model, data, rank, descriptor))
627 {
628 return Fail("%s: Could not convert paddings", __func__);
629 }
630
Sadik Armagan310d8ff2019-07-11 10:53:38 +0100631 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100632 if (IsDynamicTensor(outputInfo))
Sadik Armagan310d8ff2019-07-11 10:53:38 +0100633 {
634 ALOGD("Output shape not set, will infer from inputs");
635 outputInfo.SetShape(InferPadOutputShape(inputInfo.GetShape(), descriptor.m_PadList));
636 }
637
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100638 // Determine type of padding value
639 OperandType operandType0;
640 OperandType operandType2;
641
642 if (!GetOperandType<hal_1_2::HalPolicy>(operation, 0, model, operandType0) ||
643 !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
644 {
645 return Fail("%s: Operation has invalid inputs", __func__);
646 }
647
648 // Read value to use for padding
649 if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
650 {
651 armnn::Half f16PadValue;
652 if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 2, operandType2, f16PadValue, model, data))
653 {
654 return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
655 }
656
657 descriptor.m_PadValue = f16PadValue;
658 }
659 else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
660 {
661 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, descriptor.m_PadValue, model, data))
662 {
663 return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
664 }
665 }
666 else if (operandType0 == OperandType::TENSOR_QUANT8_ASYMM && operandType2 == OperandType::INT32)
667 {
668 int32_t quantizedPadValue = 0;
669 if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, quantizedPadValue, model, data))
670 {
671 return Fail("%s: Could not read input 2 (INT32)", __func__);
672 }
673
674 descriptor.m_PadValue = armnn::Dequantize(quantizedPadValue,
675 inputInfo.GetQuantizationScale(),
676 inputInfo.GetQuantizationOffset());
677 }
678 else
679 {
680 return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
681 }
682
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100683 bool isSupported = false;
684 FORWARD_LAYER_SUPPORT_FUNC(__func__,
685 IsPadSupported,
686 data.m_Backends,
687 isSupported,
688 inputInfo,
689 outputInfo,
690 descriptor);
691 if (!isSupported)
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100692 {
693 return false;
694 }
695
696 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
697 assert(layer != nullptr);
698 input.Connect(layer->GetInputSlot(0));
699 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
700
Sadik Armagan310d8ff2019-07-11 10:53:38 +0100701 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
702 0,
703 *layer,
704 model,
705 data,
706 armnn::Optional<armnn::TensorInfo>(outputInfo));
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100707}
708
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100709bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
710{
711 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
712 LayerInputHandle alpha = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
713
714 if (!input.IsValid() || !alpha.IsValid())
715 {
716 return Fail("%s: Operation has invalid inputs", __func__);
717 }
718
719 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
720
721 if (!output)
722 {
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100723 return Fail("%s: Could not read output", __func__);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100724 }
725
726 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
727 const armnn::TensorInfo& alphaInfo = alpha.GetTensorInfo();
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100728
729 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100730 if (IsDynamicTensor(outputInfo))
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100731 {
732 ALOGD("Output shape not set, will infer from inputs");
733 outputInfo.SetShape(InferPreluOutputShape(inputInfo.GetShape(), alphaInfo.GetShape()));
734 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100735
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100736 bool isSupported = false;
737 FORWARD_LAYER_SUPPORT_FUNC(__func__,
738 IsPreluSupported,
739 data.m_Backends,
740 isSupported,
741 inputInfo,
742 alphaInfo,
743 outputInfo);
744 if (!isSupported)
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100745 {
746 return false;
747 }
748
749 armnn::IConnectableLayer* const layer = data.m_Network->AddPreluLayer();
750
751 if (!layer)
752 {
753 return Fail("%s: AddPreluLayer failed", __func__);
754 }
755
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100756 BroadcastTensor(input, alpha, layer, *data.m_Network);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100757
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100758 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
759 0,
760 *layer,
761 model,
762 data,
763 armnn::Optional<armnn::TensorInfo>(outputInfo));
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100764}
765
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100766bool HalPolicy::ConvertResize(const Operation& operation,
767 const Model& model,
768 ConversionData& data,
769 armnn::ResizeMethod resizeMethod)
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100770{
771 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
772 if (!input.IsValid())
773 {
774 return Fail("%s: Could not read input 0", __func__);
775 }
776
777 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
778 if (!output)
779 {
780 return Fail("%s: Could not read output 0", __func__);
781 }
782
783 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
Aron Virginas-Tarbe5d3562019-07-16 11:32:29 +0100784 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100785
786 armnn::ResizeDescriptor descriptor;
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100787 descriptor.m_Method = resizeMethod;
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100788 descriptor.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 3, model, data);
789
790 OperandType operandType1;
791 OperandType operandType2;
792
793 if (!GetOperandType<hal_1_2::HalPolicy>(operation, 1, model, operandType1) ||
794 !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
795 {
796 return Fail("%s: Operation has invalid inputs", __func__);
797 }
798
799 if (operandType1 != operandType2)
800 {
801 return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
802 }
803
804 if (operandType1 == OperandType::INT32)
805 {
806 // Case 1: resizing by shape
807 int32_t targetWidth = 0;
808 int32_t targetHeight = 0;
809
810 if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 1, targetWidth, model, data) ||
811 !GetInputInt32<hal_1_2::HalPolicy>(operation, 2, targetHeight, model, data))
812 {
813 return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
814 }
815
816 if (targetWidth < 0 || targetHeight < 0)
817 {
818 return Fail("%s: Operation has invalid inputs for resizing by shape. "
819 "Target width/height cannot be < 0", __func__);
820 }
821
822 descriptor.m_TargetWidth = static_cast<uint32_t>(targetWidth);
Teresa Charlin9843c012019-07-19 12:18:35 +0100823 descriptor.m_TargetHeight = static_cast<uint32_t>(targetHeight);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100824 }
825 else if (operandType1 == OperandType::FLOAT32)
826 {
827 // Case 2: resizing by scale
828 float widthScale = 1.0f;
829 float heightScale = 1.0f;
830
831 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, widthScale, model, data) ||
832 !GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, heightScale, model, data))
833 {
834 return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
835 }
836
837 const armnn::TensorShape& inputShape = inputInfo.GetShape();
838 armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
839
840 float width = inputShape[dataLayoutIndexed.GetWidthIndex()];
841 float height = inputShape[dataLayoutIndexed.GetHeightIndex()];
842
843 descriptor.m_TargetWidth = std::floor(width * widthScale);
844 descriptor.m_TargetHeight = std::floor(height * heightScale);
845 }
846 else
847 {
848 // NOTE: FLOAT16 scales are not supported
849 return false;
850 }
851
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100852 if (IsDynamicTensor(outputInfo))
Aron Virginas-Tarbe5d3562019-07-16 11:32:29 +0100853 {
854 try
855 {
856 ALOGD("Output shape not set, will infer from inputs");
857 outputInfo.SetShape(InferResizeOutputShape(inputInfo.GetShape(), descriptor));
858 }
859 catch (armnn::Exception& e)
860 {
861 return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
862 }
863 }
864
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100865 bool isSupported = false;
866 FORWARD_LAYER_SUPPORT_FUNC(__func__,
867 IsResizeSupported,
868 data.m_Backends,
869 isSupported,
870 inputInfo,
871 outputInfo,
872 descriptor);
Aron Virginas-Tarbe5d3562019-07-16 11:32:29 +0100873
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100874 if (!isSupported)
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100875 {
876 return false;
877 }
878
879 armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);
880
881 assert(layer != nullptr);
882
883 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
884 input.Connect(layer->GetInputSlot(0));
885
Aron Virginas-Tarbe5d3562019-07-16 11:32:29 +0100886 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
887 0,
888 *layer,
889 model,
890 data,
891 armnn::Optional<armnn::TensorInfo>(outputInfo));
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100892}
893
Keith Davisa6bc52f2019-06-26 09:39:49 +0100894bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
895{
896 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
897
898 if (!input.IsValid() )
899 {
900 return Fail("%s: Operation has invalid inputs", __func__);
901 }
902
903 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
904 unsigned int rank = inputInfo.GetNumDimensions();
905
906 if (rank != 4)
907 {
908 return Fail("%s: Only inputs with rank 4 are supported", __func__);
909 }
910
911 armnn::SpaceToDepthDescriptor desc;
912
913 GetInputScalar<hal_1_2::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
914
915 if (desc.m_BlockSize <= 1)
916 {
917 return Fail("%s: Block size must be at least 1 in all dimensions");
918 }
919
920 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 2, model, data);
921
922 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
923 if (!output)
924 {
925 return Fail("%s: Could not read output 0", __func__);
926 }
927
928 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100929
930 bool isSupported = false;
931 FORWARD_LAYER_SUPPORT_FUNC(__func__,
932 IsSpaceToDepthSupported,
933 data.m_Backends,
934 isSupported,
935 inputInfo,
936 outputInfo,
937 desc);
938 if (!isSupported)
Keith Davisa6bc52f2019-06-26 09:39:49 +0100939 {
940 return false;
941 }
942
943 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
944 assert(layer != nullptr);
945 input.Connect(layer->GetInputSlot(0));
946
947 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
948}
949
Francis Murtagh074c25a2019-07-22 16:40:57 +0100950bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
951{
952 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
953 if (!input.IsValid())
954 {
955 return Fail("%s: Operation has invalid inputs", __func__);
956 }
957
958 const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
959 if (!outputOperand)
960 {
961 return Fail("%s: Operation has no outputs", __func__);
962 }
963
964 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100965 if (IsDynamicTensor(outputInfo))
Francis Murtagh074c25a2019-07-22 16:40:57 +0100966 {
967 ALOGD("Output shape not set, will infer from input");
968 outputInfo.SetShape(input.GetTensorInfo().GetShape());
969 }
970
971 armnn::SoftmaxDescriptor desc;
972 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, desc.m_Beta, model, data))
973 {
974 return Fail("%s: Operation has invalid inputs", __func__);
975 }
976
977 if (operation.inputs.size() > 2 && !GetInputScalar<hal_1_2::HalPolicy>(operation,
978 2,
979 HalPolicy::OperandType::INT32,
980 desc.m_Axis,
981 model,
982 data))
983 {
984 return Fail("%s: Operation has invalid inputs", __func__);
985 }
986
987 bool isSupported = false;
988 FORWARD_LAYER_SUPPORT_FUNC(__func__,
989 IsSoftmaxSupported,
990 data.m_Backends,
991 isSupported,
992 input.GetTensorInfo(),
993 outputInfo,
994 desc);
995 if (!isSupported)
996 {
997 return false;
998 }
999
1000 armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
1001 assert(layer != nullptr);
1002 input.Connect(layer->GetInputSlot(0));
1003
1004 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
1005 0,
1006 *layer,
1007 model,
1008 data,
1009 armnn::Optional<armnn::TensorInfo>(outputInfo));
1010}
1011
Mike Kellyb5fdf382019-06-11 16:35:25 +01001012} // namespace hal_1_2
Matteo Martincigh17ffff32019-06-27 14:12:55 +01001013} // namespace armnn_driver