blob: 836977da97bd53c84574d6b172e52ddcf6625a9f [file] [log] [blame]
Mike Kellyb5fdf382019-06-11 16:35:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "HalPolicy.hpp"
7
8#include "../1.0/HalPolicy.hpp"
9#include "../1.1/HalPolicy.hpp"
10
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010011#include <DataLayoutIndexed.hpp>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +010012#include <Half.hpp>
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010013
14#include <cmath>
15
Mike Kellyb5fdf382019-06-11 16:35:25 +010016namespace armnn_driver
17{
18namespace hal_1_2
19{
20
21bool HandledByV1_0(V1_2::OperationType operationType)
22{
23 switch (static_cast<V1_0::OperationType>(operationType))
24 {
25 case V1_0::OperationType::ADD:
26 case V1_0::OperationType::AVERAGE_POOL_2D:
27 case V1_0::OperationType::CONCATENATION:
28 case V1_0::OperationType::DEPTH_TO_SPACE:
29 case V1_0::OperationType::DEQUANTIZE:
30 case V1_0::OperationType::EMBEDDING_LOOKUP:
31 case V1_0::OperationType::FLOOR:
32 case V1_0::OperationType::FULLY_CONNECTED:
33 case V1_0::OperationType::HASHTABLE_LOOKUP:
34 case V1_0::OperationType::L2_NORMALIZATION:
35 case V1_0::OperationType::L2_POOL_2D:
36 case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
37 case V1_0::OperationType::LOGISTIC:
38 case V1_0::OperationType::LSH_PROJECTION:
39 case V1_0::OperationType::LSTM:
40 case V1_0::OperationType::MAX_POOL_2D:
41 case V1_0::OperationType::MUL:
42 case V1_0::OperationType::RELU:
43 case V1_0::OperationType::RELU1:
44 case V1_0::OperationType::RELU6:
45 case V1_0::OperationType::RESHAPE:
Mike Kellyb5fdf382019-06-11 16:35:25 +010046 case V1_0::OperationType::RNN:
47 case V1_0::OperationType::SOFTMAX:
48 case V1_0::OperationType::SPACE_TO_DEPTH:
49 case V1_0::OperationType::SVDF:
50 case V1_0::OperationType::TANH:
51 case V1_0::OperationType::OEM_OPERATION:
52 return true;
53 default:
54 return false;
55 }
56}
57
58bool HandledByV1_1(V1_2::OperationType operationType)
59{
60 if (HandledByV1_0(operationType))
61 {
62 return true;
63 }
64 switch (static_cast<V1_1::OperationType>(operationType))
65 {
66 case V1_1::OperationType::BATCH_TO_SPACE_ND:
67 case V1_1::OperationType::DIV:
68 case V1_1::OperationType::MEAN:
69 case V1_1::OperationType::PAD:
70 case V1_1::OperationType::SPACE_TO_BATCH_ND:
71 case V1_1::OperationType::SQUEEZE:
72 case V1_1::OperationType::STRIDED_SLICE:
73 case V1_1::OperationType::SUB:
74 case V1_1::OperationType::TRANSPOSE:
75 return true;
76 default:
77 return false;
78 }
79}
80
81bool HandledByV1_0(const V1_2::Operation& operation)
82{
83 return HandledByV1_0(operation.type);
84}
85
86bool HandledByV1_1(const V1_2::Operation& operation)
87{
88 return HandledByV1_1(operation.type);
89}
90
91V1_0::OperationType CastToV1_0(V1_2::OperationType type)
92{
93 return static_cast<V1_0::OperationType>(type);
94}
95
96V1_1::OperationType CastToV1_1(V1_2::OperationType type)
97{
98 return static_cast<V1_1::OperationType>(type);
99}
100
101V1_0::Operation ConvertToV1_0(const V1_2::Operation& operation)
102{
103 V1_0::Operation op;
104 op.type = CastToV1_0(operation.type);
105 op.inputs = operation.inputs;
106 op.outputs = operation.outputs;
107 return op;
108}
109
110V1_1::Operation ConvertToV1_1(const V1_2::Operation& operation)
111{
112 V1_1::Operation op;
113 op.type = CastToV1_1(operation.type);
114 op.inputs = operation.inputs;
115 op.outputs = operation.outputs;
116 return op;
117}
118
119bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
120{
121 if (HandledByV1_0(operation) && compliantWithV1_0(model))
122 {
123 hal_1_0::HalPolicy::Operation v10Operation = ConvertToV1_0(operation);
124 hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);
125
126 return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
127 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100128
129 if (HandledByV1_1(operation) && compliantWithV1_1(model))
Mike Kellyb5fdf382019-06-11 16:35:25 +0100130 {
131 hal_1_1::HalPolicy::Operation v11Operation = ConvertToV1_1(operation);
132 hal_1_1::HalPolicy::Model v11Model = convertToV1_1(model);
133
134 return hal_1_1::HalPolicy::ConvertOperation(v11Operation, v11Model, data);
135 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100136
Mike Kellyb5fdf382019-06-11 16:35:25 +0100137 switch (operation.type)
138 {
139 case V1_2::OperationType::CONV_2D:
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100140 return ConvertConv2d(operation, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +0100141 case V1_2::OperationType::DEPTHWISE_CONV_2D:
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100142 return ConvertDepthwiseConv2d(operation, model, data);
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100143 case V1_2::OperationType::PAD_V2:
144 return ConvertPadV2(operation, model, data);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100145 case V1_2::OperationType::PRELU:
146 return ConvertPrelu(operation, model, data);
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100147 case V1_2::OperationType::RESIZE_BILINEAR:
148 return ConvertResize(operation, model, data, armnn::ResizeMethod::Bilinear);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100149 case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR:
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100150 return ConvertResize(operation, model, data, armnn::ResizeMethod::NearestNeighbor);
Mike Kellyb5fdf382019-06-11 16:35:25 +0100151 default:
152 return Fail("%s: Operation type %s not supported in ArmnnDriver",
153 __func__, toString(operation.type).c_str());
154 }
155}
156
// Converts a HAL 1.2 CONV_2D operation into an ArmNN Convolution2d layer,
// supporting both the implicit-padding (7/8-input) and explicit-padding
// (10/11-input) NNAPI signatures, plus the optional dilation parameters.
// Returns false (via Fail) on invalid inputs or when no backend supports
// the resulting layer configuration.
bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias, so both
    // must be constant operands.
    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin    =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias    = biasPin.GetConstTensor();
    // Force bias quantization scale to match input scale * weight scale,
    // as required for quantized convolution.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    // Determine whether padding is implicit or explicit: the implicit form
    // has 7 mandatory inputs, or 8+ where input 7 is the BOOL layout flag
    // (in the explicit form input 7 is an INT32 stride).
    bool implicitPadding = operation.inputs.size() == 7 ||
        (operation.inputs.size() >= 8 &&
        GetInputOperand<hal_1_2::HalPolicy>(operation, 7, model)->type == OperandType::BOOL);

    if (implicitPadding)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 6, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 8, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        // Weights are OHWI here: index 2 is kernel width, index 1 is kernel
        // height. Input indices 2/1 assume NHWC.
        // NOTE(review): padding is computed with NHWC indices *before* the
        // optional data layout is read below — confirm behavior for NCHW.
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX  = inputInfo.GetShape()[2];
        const uint32_t inputY  = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);

        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 7, model, data);
    }
    else if (operation.inputs.size() >= 10)
    {
        // Explicit padding: pad left/right/top/bottom, strides, activation,
        // then optional layout flag (10) and dilation params (from 11).
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 9, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 11, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 10, model, data);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Reject the conversion early if no registered backend can run it.
    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsConvolution2dSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc,
                                       weights.GetInfo(),
                                       biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}
280
// Converts a HAL 1.2 DEPTHWISE_CONV_2D operation into an ArmNN
// DepthwiseConvolution2d layer. Handles both the implicit-padding
// (8/9-input) and explicit-padding (11/12-input) NNAPI signatures, and
// reshapes/permutes the AndroidNN weight layout [1, H, W, I*M] into the
// [M, I, H, W] layout ArmNN expects.
bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const Operand* weightsOperand = GetInputOperand<hal_1_2::HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit: the implicit form
    // has 8 mandatory inputs, or 9+ where input 8 is the BOOL layout flag
    // (in the explicit form input 8 is an INT32 stride).
    bool implicitPadding = operation.inputs.size() == 8 ||
        (operation.inputs.size() >= 9 &&
        GetInputOperand<hal_1_2::HalPolicy>(operation, 8, model)->type == OperandType::BOOL);

    // Look ahead to find the optional DataLayout, if present, because the
    // weight reshape below needs to know the channels index of the input.
    const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
    desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, dataLayoutFlagIndex, model, data);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();

    // Reinterpret weight data as [ H, W, I, M ], deriving the depth
    // multiplier M as (I * M) / I from the input's channel count.
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[channelsIndex],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  HWIMToMIHW,
                                                                  &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Force bias quantization scale to match input scale * weight scale,
    // as required for quantized convolution.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 7, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        // After the swizzle the weights are [ M, I, H, W ]:
        // index 3 is kernel width, index 2 is kernel height.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX  = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY  = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 11)
    {
        // Explicit padding: pad left/right/top/bottom, strides, activation,
        // then optional dilation params (from index 12).
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 10, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Reject the conversion early if no registered backend can run it.
    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsDepthwiseConvolutionSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc,
                                       weights.GetInfo(),
                                       biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}
434
// Converts a HAL 1.2 PAD_V2 operation into an ArmNN Pad layer.
// Unlike PAD, PAD_V2 carries an explicit pad value (input 2) whose scalar
// type must match the input tensor's type: FLOAT16 for TENSOR_FLOAT16,
// FLOAT32 for TENSOR_FLOAT32, and a quantized INT32 for TENSOR_QUANT8_ASYMM
// (dequantized here since ArmNN stores the pad value as float).
bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    // Input 1 holds the paddings tensor: one (before, after) pair per dimension.
    armnn::PadDescriptor descriptor;
    if (!ConvertPaddings<hal_1_2::HalPolicy>(operation, model, data, rank, descriptor))
    {
        return Fail("%s: Could not convert paddings", __func__);
    }

    // Determine type of padding value
    OperandType operandType0;
    OperandType operandType2;

    if (!GetOperandType<hal_1_2::HalPolicy>(operation, 0, model, operandType0) ||
        !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // Read value to use for padding
    if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
    {
        armnn::Half f16PadValue;
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 2, operandType2, f16PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
        }

        // Implicitly widened from half to float when stored in the descriptor.
        descriptor.m_PadValue = f16PadValue;
    }
    else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
    {
        if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, descriptor.m_PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
        }
    }
    else if (operandType0 == OperandType::TENSOR_QUANT8_ASYMM && operandType2 == OperandType::INT32)
    {
        int32_t quantizedPadValue = 0;
        if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, quantizedPadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (INT32)", __func__);
        }

        // The pad value is given in the input's quantized space; convert it
        // back to a real value using the input's quantization parameters.
        descriptor.m_PadValue = armnn::Dequantize(quantizedPadValue,
                                                  inputInfo.GetQuantizationScale(),
                                                  inputInfo.GetQuantizationOffset());
    }
    else
    {
        return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Reject the conversion early if no registered backend can run it.
    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsPadSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       descriptor))
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}
522
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100523bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
524{
525 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
526 LayerInputHandle alpha = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
527
528 if (!input.IsValid() || !alpha.IsValid())
529 {
530 return Fail("%s: Operation has invalid inputs", __func__);
531 }
532
533 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
534
535 if (!output)
536 {
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100537 return Fail("%s: Could not read output", __func__);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100538 }
539
540 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
541 const armnn::TensorInfo& alphaInfo = alpha.GetTensorInfo();
542 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
543
544 if (!IsLayerSupportedForAnyBackend(__func__,
545 armnn::IsPreluSupported,
546 data.m_Backends,
547 inputInfo,
548 alphaInfo,
549 outputInfo))
550 {
551 return false;
552 }
553
554 armnn::IConnectableLayer* const layer = data.m_Network->AddPreluLayer();
555
556 if (!layer)
557 {
558 return Fail("%s: AddPreluLayer failed", __func__);
559 }
560
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100561 BroadcastTensor(input, alpha, layer, *data.m_Network);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100562
563 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
564}
565
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100566bool HalPolicy::ConvertResize(const Operation& operation,
567 const Model& model,
568 ConversionData& data,
569 armnn::ResizeMethod resizeMethod)
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100570{
571 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
572 if (!input.IsValid())
573 {
574 return Fail("%s: Could not read input 0", __func__);
575 }
576
577 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
578 if (!output)
579 {
580 return Fail("%s: Could not read output 0", __func__);
581 }
582
583 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
584 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
585
586 armnn::ResizeDescriptor descriptor;
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100587 descriptor.m_Method = resizeMethod;
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100588 descriptor.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 3, model, data);
589
590 OperandType operandType1;
591 OperandType operandType2;
592
593 if (!GetOperandType<hal_1_2::HalPolicy>(operation, 1, model, operandType1) ||
594 !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
595 {
596 return Fail("%s: Operation has invalid inputs", __func__);
597 }
598
599 if (operandType1 != operandType2)
600 {
601 return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
602 }
603
604 if (operandType1 == OperandType::INT32)
605 {
606 // Case 1: resizing by shape
607 int32_t targetWidth = 0;
608 int32_t targetHeight = 0;
609
610 if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 1, targetWidth, model, data) ||
611 !GetInputInt32<hal_1_2::HalPolicy>(operation, 2, targetHeight, model, data))
612 {
613 return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
614 }
615
616 if (targetWidth < 0 || targetHeight < 0)
617 {
618 return Fail("%s: Operation has invalid inputs for resizing by shape. "
619 "Target width/height cannot be < 0", __func__);
620 }
621
622 descriptor.m_TargetWidth = static_cast<uint32_t>(targetWidth);
623 descriptor.m_TargetWidth = static_cast<uint32_t>(targetHeight);
624 }
625 else if (operandType1 == OperandType::FLOAT32)
626 {
627 // Case 2: resizing by scale
628 float widthScale = 1.0f;
629 float heightScale = 1.0f;
630
631 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, widthScale, model, data) ||
632 !GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, heightScale, model, data))
633 {
634 return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
635 }
636
637 const armnn::TensorShape& inputShape = inputInfo.GetShape();
638 armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
639
640 float width = inputShape[dataLayoutIndexed.GetWidthIndex()];
641 float height = inputShape[dataLayoutIndexed.GetHeightIndex()];
642
643 descriptor.m_TargetWidth = std::floor(width * widthScale);
644 descriptor.m_TargetHeight = std::floor(height * heightScale);
645 }
646 else
647 {
648 // NOTE: FLOAT16 scales are not supported
649 return false;
650 }
651
652 if (!IsLayerSupportedForAnyBackend(__func__,
653 armnn::IsResizeSupported,
654 data.m_Backends,
655 inputInfo,
656 outputInfo,
657 descriptor))
658 {
659 return false;
660 }
661
662 armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);
663
664 assert(layer != nullptr);
665
666 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
667 input.Connect(layer->GetInputSlot(0));
668
669 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
670}
671
Mike Kellyb5fdf382019-06-11 16:35:25 +0100672} // namespace hal_1_2
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100673} // namespace armnn_driver