blob: 7515eb3e3ebbf757b6f500bb8fb8db6539e83d3c [file] [log] [blame]
Mike Kellyb5fdf382019-06-11 16:35:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "HalPolicy.hpp"
7
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +01008#include "Utils.hpp"
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01009
Mike Kellyb5fdf382019-06-11 16:35:25 +010010#include "../1.0/HalPolicy.hpp"
11#include "../1.1/HalPolicy.hpp"
12
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010013#include <DataLayoutIndexed.hpp>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +010014#include <Half.hpp>
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010015
16#include <cmath>
17
Mike Kellyb5fdf382019-06-11 16:35:25 +010018namespace armnn_driver
19{
20namespace hal_1_2
21{
22
23bool HandledByV1_0(V1_2::OperationType operationType)
24{
25 switch (static_cast<V1_0::OperationType>(operationType))
26 {
27 case V1_0::OperationType::ADD:
28 case V1_0::OperationType::AVERAGE_POOL_2D:
29 case V1_0::OperationType::CONCATENATION:
30 case V1_0::OperationType::DEPTH_TO_SPACE:
31 case V1_0::OperationType::DEQUANTIZE:
32 case V1_0::OperationType::EMBEDDING_LOOKUP:
33 case V1_0::OperationType::FLOOR:
34 case V1_0::OperationType::FULLY_CONNECTED:
35 case V1_0::OperationType::HASHTABLE_LOOKUP:
36 case V1_0::OperationType::L2_NORMALIZATION:
37 case V1_0::OperationType::L2_POOL_2D:
38 case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
39 case V1_0::OperationType::LOGISTIC:
40 case V1_0::OperationType::LSH_PROJECTION:
41 case V1_0::OperationType::LSTM:
42 case V1_0::OperationType::MAX_POOL_2D:
43 case V1_0::OperationType::MUL:
Mike Kellyb5fdf382019-06-11 16:35:25 +010044 case V1_0::OperationType::RESHAPE:
Mike Kellyb5fdf382019-06-11 16:35:25 +010045 case V1_0::OperationType::RNN:
Mike Kellyb5fdf382019-06-11 16:35:25 +010046 case V1_0::OperationType::SVDF:
Mike Kellyb5fdf382019-06-11 16:35:25 +010047 case V1_0::OperationType::OEM_OPERATION:
48 return true;
49 default:
50 return false;
51 }
52}
53
54bool HandledByV1_1(V1_2::OperationType operationType)
55{
56 if (HandledByV1_0(operationType))
57 {
58 return true;
59 }
60 switch (static_cast<V1_1::OperationType>(operationType))
61 {
62 case V1_1::OperationType::BATCH_TO_SPACE_ND:
63 case V1_1::OperationType::DIV:
64 case V1_1::OperationType::MEAN:
Mike Kellyb5fdf382019-06-11 16:35:25 +010065 case V1_1::OperationType::SPACE_TO_BATCH_ND:
66 case V1_1::OperationType::SQUEEZE:
67 case V1_1::OperationType::STRIDED_SLICE:
68 case V1_1::OperationType::SUB:
69 case V1_1::OperationType::TRANSPOSE:
70 return true;
71 default:
72 return false;
73 }
74}
75
// Convenience overload: delegation decision for a whole V1_2 operation is
// based solely on its operation type.
bool HandledByV1_0(const V1_2::Operation& operation)
{
    return HandledByV1_0(operation.type);
}
80
// Convenience overload: delegation decision for a whole V1_2 operation is
// based solely on its operation type.
bool HandledByV1_1(const V1_2::Operation& operation)
{
    return HandledByV1_1(operation.type);
}
85
// Reinterprets a V1_2 operation type as its V1_0 counterpart. The underlying
// numeric value is reused unchanged; callers are expected to have checked
// HandledByV1_0() first so the value is meaningful in the 1.0 enum.
V1_0::OperationType CastToV1_0(V1_2::OperationType type)
{
    return static_cast<V1_0::OperationType>(type);
}
90
// Reinterprets a V1_2 operation type as its V1_1 counterpart. The underlying
// numeric value is reused unchanged; callers are expected to have checked
// HandledByV1_1() first so the value is meaningful in the 1.1 enum.
V1_1::OperationType CastToV1_1(V1_2::OperationType type)
{
    return static_cast<V1_1::OperationType>(type);
}
95
96V1_0::Operation ConvertToV1_0(const V1_2::Operation& operation)
97{
98 V1_0::Operation op;
99 op.type = CastToV1_0(operation.type);
100 op.inputs = operation.inputs;
101 op.outputs = operation.outputs;
102 return op;
103}
104
105V1_1::Operation ConvertToV1_1(const V1_2::Operation& operation)
106{
107 V1_1::Operation op;
108 op.type = CastToV1_1(operation.type);
109 op.inputs = operation.inputs;
110 op.outputs = operation.outputs;
111 return op;
112}
113
// Entry point for converting a single V1_2 operation into ArmNN layers.
// Operations (and models) that are fully expressible in an older HAL version
// are delegated to the corresponding older policy; everything else is handled
// by the 1.2-specific Convert* methods below.
bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
{
    // Delegate to the 1.0 policy when both the operation type and the whole
    // model are compliant with HAL 1.0.
    if (HandledByV1_0(operation) && compliantWithV1_0(model))
    {
        hal_1_0::HalPolicy::Operation v10Operation = ConvertToV1_0(operation);
        hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);

        return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
    }

    // Otherwise try the 1.1 policy under the same conditions.
    if (HandledByV1_1(operation) && compliantWithV1_1(model))
    {
        hal_1_1::HalPolicy::Operation v11Operation = ConvertToV1_1(operation);
        hal_1_1::HalPolicy::Model v11Model = convertToV1_1(model);

        return hal_1_1::HalPolicy::ConvertOperation(v11Operation, v11Model, data);
    }

    // Operations that are new in, or extended by, HAL 1.2 are converted here.
    switch (operation.type)
    {
        case V1_2::OperationType::CONV_2D:
            return ConvertConv2d(operation, model, data);
        case V1_2::OperationType::DEPTHWISE_CONV_2D:
            return ConvertDepthwiseConv2d(operation, model, data);
        case V1_2::OperationType::MAXIMUM:
            return ConvertMaximum(operation, model, data);
        case V1_2::OperationType::MINIMUM:
            return ConvertMinimum(operation, model, data);
        case V1_2::OperationType::PAD:
            return ConvertPad(operation, model, data);
        case V1_2::OperationType::PAD_V2:
            return ConvertPadV2(operation, model, data);
        case V1_2::OperationType::PRELU:
            return ConvertPrelu(operation, model, data);
        case V1_2::OperationType::RELU:
            return ConvertReLu(operation, model, data);
        case V1_2::OperationType::RELU1:
            return ConvertReLu1(operation, model, data);
        case V1_2::OperationType::RELU6:
            return ConvertReLu6(operation, model, data);
        case V1_2::OperationType::RESIZE_BILINEAR:
            return ConvertResize(operation, model, data, armnn::ResizeMethod::Bilinear);
        case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR:
            return ConvertResize(operation, model, data, armnn::ResizeMethod::NearestNeighbor);
        case V1_2::OperationType::SOFTMAX:
            return ConvertSoftmax(operation, model, data);
        case V1_2::OperationType::SPACE_TO_DEPTH:
            return ConvertSpaceToDepth(operation, model, data);
        case V1_2::OperationType::TANH:
            return ConvertTanH(operation, model, data);
        default:
            return Fail("%s: Operation type %s not supported in ArmnnDriver",
                        __func__, toString(operation.type).c_str());
    }
}
169
// Converts an NNAPI CONV_2D (HAL 1.2 flavour) into an ArmNN Convolution2d
// layer, handling both the implicit-padding and explicit-padding operand
// layouts, the optional NCHW data-layout flag, and fused activation.
bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertConv2d()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Output shape must be known up front; dynamic shapes are rejected.
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit.
    // Implicit: 7 inputs, or >= 8 where input 7 is the BOOL data-layout flag
    // (explicit padding has an INT32 at that position instead).
    bool implicitPadding = operation.inputs.size() == 7 ||
                           (operation.inputs.size() >= 8 &&
                            GetInputOperand<hal_1_2::HalPolicy>(operation, 7, model)->type == OperandType::BOOL);

    // The optional data-layout flag sits at a different index in each scheme.
    if (implicitPadding)
    {
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 7, model, data);
    }
    else if (operation.inputs.size() >= 10)
    {
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 10, model, data);
    }

    const armnn::PermutationVector OHWIToOIHW = {0, 2, 3, 1};

    // ArmNN does not currently support non-fixed weights or bias
    // The NNAPI filter is always OHWI [depth_out, filter_height, filter_width, depth_in] but ArmNN expects the
    // filter's height and width indices to match the input's height and width indices so we permute it to OIHW if
    // the DataLayout is NCHW
    const ConstTensorPin weightsPin = (desc.m_DataLayout == armnn::DataLayout::NCHW) ?
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data, OHWIToOIHW) :
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input_scale * weights_scale,
    // as required for quantized convolutions.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        // Implicit scheme: padding scheme enum + strides + fused activation,
        // with optional dilation parameters starting at input 8.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 6, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 8, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        // Compute the actual pad values from kernel/input sizes and strides.
        // The width/height indices depend on the chosen data layout.
        armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
        unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
        unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
        const uint32_t kernelX = weights.GetShape()[widthIndex];
        const uint32_t kernelY = weights.GetShape()[heightIndex];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);

    }
    else if (operation.inputs.size() >= 10)
    {
        // explicit padding
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 9, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 11, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Ask the configured backends whether they support this convolution
    // before adding anything to the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer is the layer whose
    // output slot represents this operation's result.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}
321
// Converts an NNAPI DEPTHWISE_CONV_2D (HAL 1.2 flavour) into an ArmNN
// DepthwiseConvolution2d layer, handling implicit/explicit padding, the
// optional data-layout flag, weight reshaping/swizzling, and fused activation.
bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertDepthwiseConv2d()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Output shape must be known up front; dynamic shapes are rejected.
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const Operand* weightsOperand = GetInputOperand<hal_1_2::HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit.
    // Implicit: 8 inputs, or >= 9 where input 8 is the BOOL data-layout flag
    // (explicit padding has an INT32 at that position instead).
    bool implicitPadding = operation.inputs.size() == 8 ||
                           (operation.inputs.size() >= 9 &&
                            GetInputOperand<hal_1_2::HalPolicy>(operation, 8, model)->type == OperandType::BOOL);

    // Look ahead to find the optional DataLayout, if present
    const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
    desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, dataLayoutFlagIndex, model, data);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();

    // Reinterpret weight data as [ H, W, I, M ]
    // (M, the depth multiplier, is recovered as (I * M) / I).
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[channelsIndex],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  HWIMToMIHW,
                                                                  &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with input_scale * weights_scale,
    // as required for quantized convolutions.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        // Implicit scheme: padding scheme enum + strides + fused activation,
        // with optional dilation parameters starting at input 9.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 7, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        // After the swizzle the weights are [ M, I, H, W ], so W is at index 3
        // and H at index 2 regardless of the input data layout.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 11)
    {
        // explicit padding
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 10, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Ask the configured backends whether they support this depthwise
    // convolution before adding anything to the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer is the layer whose
    // output slot represents this operation's result.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}
487
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100488bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
489{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100490 ALOGV("hal_1_2::HalPolicy::ConvertMaximum()");
491
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100492 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
493 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
494
495 if (!input0.IsValid() || !input1.IsValid())
496 {
497 return Fail("%s: Operation has invalid inputs", __func__);
498 }
499
500 const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
501 if (!outputOperand)
502 {
503 return Fail("%s: Could not read output", __func__);
504 }
505
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100506 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100507 if (IsDynamicTensor(outInfo))
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100508 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100509 return Fail("%s: Dynamic output tensors are not supported", __func__);
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100510 }
511
Aron Virginas-Tard7593232019-07-16 13:17:06 +0100512 bool isSupported = false;
513 FORWARD_LAYER_SUPPORT_FUNC(__func__,
514 IsMaximumSupported,
515 data.m_Backends,
516 isSupported,
517 input0.GetTensorInfo(),
518 input1.GetTensorInfo(),
519 outInfo);
520
521 if (!isSupported)
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100522 {
523 return false;
524 }
525
526 armnn::IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
527 assert(layer != nullptr);
528 BroadcastTensor(input0, input1, layer, *data.m_Network);
529
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100530 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100531}
532
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100533bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
534{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100535 ALOGV("hal_1_2::HalPolicy::ConvertMinimum()");
536
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100537 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
538 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
539
540 if (!input0.IsValid() || !input1.IsValid())
541 {
542 return Fail("%s: Operation has invalid inputs", __func__);
543 }
544
545 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
546 if (!output)
547 {
548 return Fail("%s: Could not read output 0", __func__);
549 }
550
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100551 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100552 if (IsDynamicTensor(outputInfo))
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100553 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100554 return Fail("%s: Dynamic output tensors are not supported", __func__);
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100555 }
556
557 bool isSupported = false;
558 FORWARD_LAYER_SUPPORT_FUNC(__func__,
559 IsMinimumSupported,
560 data.m_Backends,
561 isSupported,
562 input0.GetTensorInfo(),
563 input1.GetTensorInfo(),
564 outputInfo);
565
566 if (!isSupported)
567 {
568 return false;
569 }
570
571 armnn::IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
572 assert(layer != nullptr);
573 BroadcastTensor(input0, input1, layer, *data.m_Network);
574
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100575 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100576}
577
// PAD: delegates to the HAL-version-agnostic ::ConvertPad template,
// instantiated for this policy.
bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertPad()");
    return ::ConvertPad<hal_1_2::HalPolicy>(operation, model, data);
}
583
// Converts an NNAPI PAD_V2 operation into an ArmNN Pad layer.
// Unlike PAD, PAD_V2 carries an explicit pad value (operand 2) whose scalar
// type must match the input tensor's element type (FLOAT16/FLOAT32/INT32 for
// quantized inputs).
bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertPadV2()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    // Operand 1 holds the per-dimension paddings; rank is needed to validate it.
    armnn::PadDescriptor descriptor;
    if (!ConvertPaddings<hal_1_2::HalPolicy>(operation, model, data, rank, descriptor))
    {
        return Fail("%s: Could not convert paddings", __func__);
    }

    // The output shape must be known at conversion time.
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Determine type of padding value
    OperandType operandType0;
    OperandType operandType2;

    if (!GetOperandType<hal_1_2::HalPolicy>(operation, 0, model, operandType0) ||
        !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // Read value to use for padding; the scalar type must be consistent with
    // the input tensor's element type.
    if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
    {
        armnn::Half f16PadValue;
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 2, operandType2, f16PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
        }

        descriptor.m_PadValue = f16PadValue;
    }
    else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
    {
        if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, descriptor.m_PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
        }
    }
    else if (operandType0 == OperandType::TENSOR_QUANT8_ASYMM && operandType2 == OperandType::INT32)
    {
        // NOTE(review): the raw quantized value is stored directly into
        // m_PadValue without dequantization — confirm this matches the
        // backend's expectation for quantized pads.
        int32_t intPadValue = 0;
        if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, intPadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (INT32)", __func__);
        }
        descriptor.m_PadValue = intPadValue;
    }
    else
    {
        return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
    }

    // Check backend support before touching the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPadSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}
677
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100678bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
679{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100680 ALOGV("hal_1_2::HalPolicy::ConvertPrelu()");
681
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100682 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
683 LayerInputHandle alpha = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
684
685 if (!input.IsValid() || !alpha.IsValid())
686 {
687 return Fail("%s: Operation has invalid inputs", __func__);
688 }
689
690 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
691
692 if (!output)
693 {
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100694 return Fail("%s: Could not read output", __func__);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100695 }
696
697 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
698 const armnn::TensorInfo& alphaInfo = alpha.GetTensorInfo();
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100699 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100700
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100701 if (IsDynamicTensor(outputInfo))
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100702 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100703 return Fail("%s: Dynamic output tensors are not supported", __func__);
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100704 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100705
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100706 bool isSupported = false;
707 FORWARD_LAYER_SUPPORT_FUNC(__func__,
708 IsPreluSupported,
709 data.m_Backends,
710 isSupported,
711 inputInfo,
712 alphaInfo,
713 outputInfo);
714 if (!isSupported)
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100715 {
716 return false;
717 }
718
719 armnn::IConnectableLayer* const layer = data.m_Network->AddPreluLayer();
720
721 if (!layer)
722 {
723 return Fail("%s: AddPreluLayer failed", __func__);
724 }
725
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100726 BroadcastTensor(input, alpha, layer, *data.m_Network);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100727
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100728 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100729}
730
// Converts an Android NN RELU operation by delegating to the shared template
// implementation, parameterised on the HAL 1.2 policy.
bool HalPolicy::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu()");
    return ::ConvertReLu<hal_1_2::HalPolicy>(operation, model, data);
}
736
// Converts an Android NN RELU1 operation (clamp to [-1, 1]) by delegating to the
// shared template implementation, parameterised on the HAL 1.2 policy.
bool HalPolicy::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu1()");
    return ::ConvertReLu1<hal_1_2::HalPolicy>(operation, model, data);
}
742
// Converts an Android NN RELU6 operation (clamp to [0, 6]) by delegating to the
// shared template implementation, parameterised on the HAL 1.2 policy.
bool HalPolicy::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertReLu6()");
    return ::ConvertReLu6<hal_1_2::HalPolicy>(operation, model, data);
}
748
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100749bool HalPolicy::ConvertResize(const Operation& operation,
750 const Model& model,
751 ConversionData& data,
752 armnn::ResizeMethod resizeMethod)
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100753{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100754 ALOGV("hal_1_2::HalPolicy::ConvertResize()");
755
756 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100757 if (!input.IsValid())
758 {
759 return Fail("%s: Could not read input 0", __func__);
760 }
761
762 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
763 if (!output)
764 {
765 return Fail("%s: Could not read output 0", __func__);
766 }
767
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100768 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
769 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
770
771 if (IsDynamicTensor(outputInfo))
772 {
773 return Fail("%s: Dynamic output tensors are not supported", __func__);
774 }
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100775
776 armnn::ResizeDescriptor descriptor;
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100777 descriptor.m_Method = resizeMethod;
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100778 descriptor.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 3, model, data);
779
780 OperandType operandType1;
781 OperandType operandType2;
782
783 if (!GetOperandType<hal_1_2::HalPolicy>(operation, 1, model, operandType1) ||
784 !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
785 {
786 return Fail("%s: Operation has invalid inputs", __func__);
787 }
788
789 if (operandType1 != operandType2)
790 {
791 return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
792 }
793
794 if (operandType1 == OperandType::INT32)
795 {
796 // Case 1: resizing by shape
797 int32_t targetWidth = 0;
798 int32_t targetHeight = 0;
799
800 if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 1, targetWidth, model, data) ||
801 !GetInputInt32<hal_1_2::HalPolicy>(operation, 2, targetHeight, model, data))
802 {
803 return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
804 }
805
806 if (targetWidth < 0 || targetHeight < 0)
807 {
808 return Fail("%s: Operation has invalid inputs for resizing by shape. "
809 "Target width/height cannot be < 0", __func__);
810 }
811
812 descriptor.m_TargetWidth = static_cast<uint32_t>(targetWidth);
Teresa Charlin9843c012019-07-19 12:18:35 +0100813 descriptor.m_TargetHeight = static_cast<uint32_t>(targetHeight);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100814 }
815 else if (operandType1 == OperandType::FLOAT32)
816 {
817 // Case 2: resizing by scale
818 float widthScale = 1.0f;
819 float heightScale = 1.0f;
820
821 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, widthScale, model, data) ||
822 !GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, heightScale, model, data))
823 {
824 return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
825 }
826
827 const armnn::TensorShape& inputShape = inputInfo.GetShape();
828 armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
829
830 float width = inputShape[dataLayoutIndexed.GetWidthIndex()];
831 float height = inputShape[dataLayoutIndexed.GetHeightIndex()];
832
833 descriptor.m_TargetWidth = std::floor(width * widthScale);
834 descriptor.m_TargetHeight = std::floor(height * heightScale);
835 }
836 else
837 {
838 // NOTE: FLOAT16 scales are not supported
839 return false;
840 }
841
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100842 bool isSupported = false;
843 FORWARD_LAYER_SUPPORT_FUNC(__func__,
844 IsResizeSupported,
845 data.m_Backends,
846 isSupported,
847 inputInfo,
848 outputInfo,
849 descriptor);
Aron Virginas-Tarbe5d3562019-07-16 11:32:29 +0100850
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100851 if (!isSupported)
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100852 {
853 return false;
854 }
855
856 armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);
857
858 assert(layer != nullptr);
859
860 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
861 input.Connect(layer->GetInputSlot(0));
862
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100863 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100864}
865
Keith Davisa6bc52f2019-06-26 09:39:49 +0100866bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
867{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100868 ALOGV("hal_1_2::HalPolicy::ConvertSpaceToDepth()");
Keith Davisa6bc52f2019-06-26 09:39:49 +0100869
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100870 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
Keith Davisa6bc52f2019-06-26 09:39:49 +0100871 if (!input.IsValid() )
872 {
873 return Fail("%s: Operation has invalid inputs", __func__);
874 }
875
876 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
877 unsigned int rank = inputInfo.GetNumDimensions();
Keith Davisa6bc52f2019-06-26 09:39:49 +0100878 if (rank != 4)
879 {
880 return Fail("%s: Only inputs with rank 4 are supported", __func__);
881 }
882
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100883 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
884 if (!output)
885 {
886 return Fail("%s: Could not read output 0", __func__);
887 }
888
889 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
890 if (IsDynamicTensor(outputInfo))
891 {
892 return Fail("%s: Dynamic output tensors are not supported", __func__);
893 }
894
Keith Davisa6bc52f2019-06-26 09:39:49 +0100895 armnn::SpaceToDepthDescriptor desc;
896
897 GetInputScalar<hal_1_2::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
898
899 if (desc.m_BlockSize <= 1)
900 {
901 return Fail("%s: Block size must be at least 1 in all dimensions");
902 }
903
904 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 2, model, data);
905
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100906 bool isSupported = false;
907 FORWARD_LAYER_SUPPORT_FUNC(__func__,
908 IsSpaceToDepthSupported,
909 data.m_Backends,
910 isSupported,
911 inputInfo,
912 outputInfo,
913 desc);
914 if (!isSupported)
Keith Davisa6bc52f2019-06-26 09:39:49 +0100915 {
916 return false;
917 }
918
919 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
920 assert(layer != nullptr);
921 input.Connect(layer->GetInputSlot(0));
922
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100923 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Keith Davisa6bc52f2019-06-26 09:39:49 +0100924}
925
Francis Murtagh074c25a2019-07-22 16:40:57 +0100926bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
927{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100928 ALOGV("hal_1_2::HalPolicy::ConvertSoftmax()");
929
Francis Murtagh074c25a2019-07-22 16:40:57 +0100930 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
931 if (!input.IsValid())
932 {
933 return Fail("%s: Operation has invalid inputs", __func__);
934 }
935
936 const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
937 if (!outputOperand)
938 {
939 return Fail("%s: Operation has no outputs", __func__);
940 }
941
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100942 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100943 if (IsDynamicTensor(outputInfo))
Francis Murtagh074c25a2019-07-22 16:40:57 +0100944 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100945 return Fail("%s: Dynamic output tensors are not supported", __func__);
Francis Murtagh074c25a2019-07-22 16:40:57 +0100946 }
947
948 armnn::SoftmaxDescriptor desc;
949 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, desc.m_Beta, model, data))
950 {
951 return Fail("%s: Operation has invalid inputs", __func__);
952 }
953
954 if (operation.inputs.size() > 2 && !GetInputScalar<hal_1_2::HalPolicy>(operation,
955 2,
956 HalPolicy::OperandType::INT32,
957 desc.m_Axis,
958 model,
959 data))
960 {
961 return Fail("%s: Operation has invalid inputs", __func__);
962 }
963
964 bool isSupported = false;
965 FORWARD_LAYER_SUPPORT_FUNC(__func__,
966 IsSoftmaxSupported,
967 data.m_Backends,
968 isSupported,
969 input.GetTensorInfo(),
970 outputInfo,
971 desc);
972 if (!isSupported)
973 {
974 return false;
975 }
976
977 armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
978 assert(layer != nullptr);
979 input.Connect(layer->GetInputSlot(0));
980
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100981 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
Francis Murtagh074c25a2019-07-22 16:40:57 +0100982}
983
// Converts an Android NN TANH operation by delegating to the shared template
// implementation, parameterised on the HAL 1.2 policy.
bool HalPolicy::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_2::HalPolicy::ConvertTanH()");
    return ::ConvertTanH<hal_1_2::HalPolicy>(operation, model, data);
}
989
Mike Kellyb5fdf382019-06-11 16:35:25 +0100990} // namespace hal_1_2
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100991} // namespace armnn_driver