blob: b194a57a4bbeac02d7b28426251b31bf676fee2d [file] [log] [blame]
Mike Kellyb5fdf382019-06-11 16:35:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "HalPolicy.hpp"
7
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01008#include "OutputShapeUtils.hpp"
9
Mike Kellyb5fdf382019-06-11 16:35:25 +010010#include "../1.0/HalPolicy.hpp"
11#include "../1.1/HalPolicy.hpp"
12
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010013#include <DataLayoutIndexed.hpp>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +010014#include <Half.hpp>
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010015
16#include <cmath>
17
Mike Kellyb5fdf382019-06-11 16:35:25 +010018namespace armnn_driver
19{
20namespace hal_1_2
21{
22
23bool HandledByV1_0(V1_2::OperationType operationType)
24{
25 switch (static_cast<V1_0::OperationType>(operationType))
26 {
27 case V1_0::OperationType::ADD:
28 case V1_0::OperationType::AVERAGE_POOL_2D:
29 case V1_0::OperationType::CONCATENATION:
30 case V1_0::OperationType::DEPTH_TO_SPACE:
31 case V1_0::OperationType::DEQUANTIZE:
32 case V1_0::OperationType::EMBEDDING_LOOKUP:
33 case V1_0::OperationType::FLOOR:
34 case V1_0::OperationType::FULLY_CONNECTED:
35 case V1_0::OperationType::HASHTABLE_LOOKUP:
36 case V1_0::OperationType::L2_NORMALIZATION:
37 case V1_0::OperationType::L2_POOL_2D:
38 case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
39 case V1_0::OperationType::LOGISTIC:
40 case V1_0::OperationType::LSH_PROJECTION:
41 case V1_0::OperationType::LSTM:
42 case V1_0::OperationType::MAX_POOL_2D:
43 case V1_0::OperationType::MUL:
44 case V1_0::OperationType::RELU:
45 case V1_0::OperationType::RELU1:
46 case V1_0::OperationType::RELU6:
47 case V1_0::OperationType::RESHAPE:
Mike Kellyb5fdf382019-06-11 16:35:25 +010048 case V1_0::OperationType::RNN:
49 case V1_0::OperationType::SOFTMAX:
50 case V1_0::OperationType::SPACE_TO_DEPTH:
51 case V1_0::OperationType::SVDF:
52 case V1_0::OperationType::TANH:
53 case V1_0::OperationType::OEM_OPERATION:
54 return true;
55 default:
56 return false;
57 }
58}
59
60bool HandledByV1_1(V1_2::OperationType operationType)
61{
62 if (HandledByV1_0(operationType))
63 {
64 return true;
65 }
66 switch (static_cast<V1_1::OperationType>(operationType))
67 {
68 case V1_1::OperationType::BATCH_TO_SPACE_ND:
69 case V1_1::OperationType::DIV:
70 case V1_1::OperationType::MEAN:
71 case V1_1::OperationType::PAD:
72 case V1_1::OperationType::SPACE_TO_BATCH_ND:
73 case V1_1::OperationType::SQUEEZE:
74 case V1_1::OperationType::STRIDED_SLICE:
75 case V1_1::OperationType::SUB:
76 case V1_1::OperationType::TRANSPOSE:
77 return true;
78 default:
79 return false;
80 }
81}
82
83bool HandledByV1_0(const V1_2::Operation& operation)
84{
85 return HandledByV1_0(operation.type);
86}
87
88bool HandledByV1_1(const V1_2::Operation& operation)
89{
90 return HandledByV1_1(operation.type);
91}
92
93V1_0::OperationType CastToV1_0(V1_2::OperationType type)
94{
95 return static_cast<V1_0::OperationType>(type);
96}
97
98V1_1::OperationType CastToV1_1(V1_2::OperationType type)
99{
100 return static_cast<V1_1::OperationType>(type);
101}
102
103V1_0::Operation ConvertToV1_0(const V1_2::Operation& operation)
104{
105 V1_0::Operation op;
106 op.type = CastToV1_0(operation.type);
107 op.inputs = operation.inputs;
108 op.outputs = operation.outputs;
109 return op;
110}
111
112V1_1::Operation ConvertToV1_1(const V1_2::Operation& operation)
113{
114 V1_1::Operation op;
115 op.type = CastToV1_1(operation.type);
116 op.inputs = operation.inputs;
117 op.outputs = operation.outputs;
118 return op;
119}
120
121bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
122{
123 if (HandledByV1_0(operation) && compliantWithV1_0(model))
124 {
125 hal_1_0::HalPolicy::Operation v10Operation = ConvertToV1_0(operation);
126 hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);
127
128 return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
129 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100130
131 if (HandledByV1_1(operation) && compliantWithV1_1(model))
Mike Kellyb5fdf382019-06-11 16:35:25 +0100132 {
133 hal_1_1::HalPolicy::Operation v11Operation = ConvertToV1_1(operation);
134 hal_1_1::HalPolicy::Model v11Model = convertToV1_1(model);
135
136 return hal_1_1::HalPolicy::ConvertOperation(v11Operation, v11Model, data);
137 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100138
Mike Kellyb5fdf382019-06-11 16:35:25 +0100139 switch (operation.type)
140 {
141 case V1_2::OperationType::CONV_2D:
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100142 return ConvertConv2d(operation, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +0100143 case V1_2::OperationType::DEPTHWISE_CONV_2D:
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100144 return ConvertDepthwiseConv2d(operation, model, data);
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100145 case V1_2::OperationType::PAD_V2:
146 return ConvertPadV2(operation, model, data);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100147 case V1_2::OperationType::PRELU:
148 return ConvertPrelu(operation, model, data);
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100149 case V1_2::OperationType::RESIZE_BILINEAR:
150 return ConvertResize(operation, model, data, armnn::ResizeMethod::Bilinear);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100151 case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR:
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100152 return ConvertResize(operation, model, data, armnn::ResizeMethod::NearestNeighbor);
Mike Kellyb5fdf382019-06-11 16:35:25 +0100153 default:
154 return Fail("%s: Operation type %s not supported in ArmnnDriver",
155 __func__, toString(operation.type).c_str());
156 }
157}
158
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100159bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
160{
161 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
162 if (!input.IsValid())
163 {
164 return Fail("%s: Operation has invalid inputs", __func__);
165 }
166
167 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
168 if (!output)
169 {
170 return Fail("%s: Could not read output 0", __func__);
171 }
172
173 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
174 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
175
176 // ArmNN does not currently support non-fixed weights or bias
177 const ConstTensorPin weightsPin =
178 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
179 const ConstTensorPin biasPin =
180 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);
181
182 if (!weightsPin.IsValid())
183 {
184 return Fail("%s: Operation has invalid weights", __func__);
185 }
186
187 if (!biasPin.IsValid())
188 {
189 return Fail("%s: Operation has invalid biases", __func__);
190 }
191
192 armnn::ConstTensor weights = weightsPin.GetConstTensor();
193 armnn::ConstTensor bias = biasPin.GetConstTensor();
194 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
195
196 armnn::Convolution2dDescriptor desc;
197 desc.m_DataLayout = armnn::DataLayout::NHWC;
198 ActivationFn activation;
199
200 // Determine whether padding is implicit or explicit
201 bool implicitPadding = operation.inputs.size() == 7 ||
202 (operation.inputs.size() >= 8 &&
203 GetInputOperand<hal_1_2::HalPolicy>(operation, 7, model)->type == OperandType::BOOL);
204
205 if (implicitPadding)
206 {
207 android::nn::PaddingScheme paddingScheme;
208 if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
209 !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
210 !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
211 !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 6, activation, model, data) ||
212 !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 8, desc, model, data))
213 {
214 return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
215 }
216
217 const uint32_t kernelX = weights.GetShape()[2];
218 const uint32_t kernelY = weights.GetShape()[1];
219 const uint32_t inputX = inputInfo.GetShape()[2];
220 const uint32_t inputY = inputInfo.GetShape()[1];
221
222 CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
223 CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
224
225 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 7, model, data);
226 }
227 else if (operation.inputs.size() >= 10)
228 {
229 // explicit padding
230 if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
231 !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
232 !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
233 !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
234 !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
235 !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
236 !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 9, activation, model, data) ||
237 !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 11, desc, model, data))
238 {
239 return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
240 }
241 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 10, model, data);
242 }
243 else
244 {
245 return Fail("%s: Unsupported number of operation inputs", __func__);
246 }
247
248 desc.m_BiasEnabled = true;
249 armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
250
251 if (!IsLayerSupportedForAnyBackend(__func__,
252 armnn::IsConvolution2dSupported,
253 data.m_Backends,
254 inputInfo,
255 outputInfo,
256 desc,
257 weights.GetInfo(),
258 biases))
259 {
260 return false;
261 }
262
263 armnn::IConnectableLayer* startLayer =
264 data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
265
266 if (!startLayer)
267 {
268 return Fail("%s: AddConvolution2dLayer failed", __func__);
269 }
270
271 armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
272
273 if (!endLayer)
274 {
275 return Fail("%s: ProcessActivation failed", __func__);
276 }
277
278 input.Connect(startLayer->GetInputSlot(0));
279
280 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
281}
282
// Converts a HAL 1.2 DEPTHWISE_CONV_2D operation into an ArmNN depthwise
// convolution layer. Handles both the implicit-padding (padding scheme) and
// explicit-padding signatures, the optional data-layout flag and optional
// dilation parameters. The weight tensor is reshaped and permuted from the
// AndroidNN layout into the layout ArmNN expects.
bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const Operand* weightsOperand = GetInputOperand<hal_1_2::HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit:
    // the implicit form has 8 fixed inputs, the explicit form has 11;
    // a BOOL at index 8 can only be the optional layout flag of the implicit form.
    bool implicitPadding = operation.inputs.size() == 8 ||
        (operation.inputs.size() >= 9 &&
        GetInputOperand<hal_1_2::HalPolicy>(operation, 8, model)->type == OperandType::BOOL);

    // Look ahead to find the optional DataLayout, if present, so the weight
    // reshape below can use the correct channels dimension of the input.
    const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
    desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, dataLayoutFlagIndex, model, data);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();

    // Reinterpret weight data as [ H, W, I, M ]
    // (M, the depth multiplier, is the AndroidNN channel count divided by the
    // input channel count I)
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[channelsIndex],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  HWIMToMIHW,
                                                                  &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Bias scale must equal inputScale * weightsScale for quantized convolutions.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 7, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        // After the MIHW swizzle above, kernel height/width are in weight
        // dimensions 2 and 3.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 11)
    {
        // explicit padding
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 10, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsDepthwiseConvolutionSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc,
                                       weights.GetInfo(),
                                       biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Fuse the requested activation (if any) onto the convolution output.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}
436
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100437bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
438{
439 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
440 if (!input.IsValid())
441 {
442 return Fail("%s: Could not read input 0", __func__);
443 }
444
445 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
446 unsigned int rank = inputInfo.GetNumDimensions();
447
448 armnn::PadDescriptor descriptor;
449 if (!ConvertPaddings<hal_1_2::HalPolicy>(operation, model, data, rank, descriptor))
450 {
451 return Fail("%s: Could not convert paddings", __func__);
452 }
453
454 // Determine type of padding value
455 OperandType operandType0;
456 OperandType operandType2;
457
458 if (!GetOperandType<hal_1_2::HalPolicy>(operation, 0, model, operandType0) ||
459 !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
460 {
461 return Fail("%s: Operation has invalid inputs", __func__);
462 }
463
464 // Read value to use for padding
465 if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
466 {
467 armnn::Half f16PadValue;
468 if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 2, operandType2, f16PadValue, model, data))
469 {
470 return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
471 }
472
473 descriptor.m_PadValue = f16PadValue;
474 }
475 else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
476 {
477 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, descriptor.m_PadValue, model, data))
478 {
479 return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
480 }
481 }
482 else if (operandType0 == OperandType::TENSOR_QUANT8_ASYMM && operandType2 == OperandType::INT32)
483 {
484 int32_t quantizedPadValue = 0;
485 if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, quantizedPadValue, model, data))
486 {
487 return Fail("%s: Could not read input 2 (INT32)", __func__);
488 }
489
490 descriptor.m_PadValue = armnn::Dequantize(quantizedPadValue,
491 inputInfo.GetQuantizationScale(),
492 inputInfo.GetQuantizationOffset());
493 }
494 else
495 {
496 return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
497 }
498
499 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
500 if (!output)
501 {
502 return Fail("%s: Could not read output", __func__);
503 }
504
505 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
506
507 if (!IsLayerSupportedForAnyBackend(__func__,
508 armnn::IsPadSupported,
509 data.m_Backends,
510 inputInfo,
511 outputInfo,
512 descriptor))
513 {
514 return false;
515 }
516
517 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
518 assert(layer != nullptr);
519 input.Connect(layer->GetInputSlot(0));
520 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
521
522 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
523}
524
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100525bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
526{
527 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
528 LayerInputHandle alpha = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
529
530 if (!input.IsValid() || !alpha.IsValid())
531 {
532 return Fail("%s: Operation has invalid inputs", __func__);
533 }
534
535 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
536
537 if (!output)
538 {
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100539 return Fail("%s: Could not read output", __func__);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100540 }
541
542 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
543 const armnn::TensorInfo& alphaInfo = alpha.GetTensorInfo();
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100544
545 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
546 if (outputInfo.GetNumElements() == 0u)
547 {
548 ALOGD("Output shape not set, will infer from inputs");
549 outputInfo.SetShape(InferPreluOutputShape(inputInfo.GetShape(), alphaInfo.GetShape()));
550 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100551
552 if (!IsLayerSupportedForAnyBackend(__func__,
553 armnn::IsPreluSupported,
554 data.m_Backends,
555 inputInfo,
556 alphaInfo,
557 outputInfo))
558 {
559 return false;
560 }
561
562 armnn::IConnectableLayer* const layer = data.m_Network->AddPreluLayer();
563
564 if (!layer)
565 {
566 return Fail("%s: AddPreluLayer failed", __func__);
567 }
568
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100569 BroadcastTensor(input, alpha, layer, *data.m_Network);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100570
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100571 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
572 0,
573 *layer,
574 model,
575 data,
576 armnn::Optional<armnn::TensorInfo>(outputInfo));
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100577}
578
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100579bool HalPolicy::ConvertResize(const Operation& operation,
580 const Model& model,
581 ConversionData& data,
582 armnn::ResizeMethod resizeMethod)
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100583{
584 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
585 if (!input.IsValid())
586 {
587 return Fail("%s: Could not read input 0", __func__);
588 }
589
590 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
591 if (!output)
592 {
593 return Fail("%s: Could not read output 0", __func__);
594 }
595
596 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
597 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
598
599 armnn::ResizeDescriptor descriptor;
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100600 descriptor.m_Method = resizeMethod;
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100601 descriptor.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 3, model, data);
602
603 OperandType operandType1;
604 OperandType operandType2;
605
606 if (!GetOperandType<hal_1_2::HalPolicy>(operation, 1, model, operandType1) ||
607 !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
608 {
609 return Fail("%s: Operation has invalid inputs", __func__);
610 }
611
612 if (operandType1 != operandType2)
613 {
614 return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
615 }
616
617 if (operandType1 == OperandType::INT32)
618 {
619 // Case 1: resizing by shape
620 int32_t targetWidth = 0;
621 int32_t targetHeight = 0;
622
623 if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 1, targetWidth, model, data) ||
624 !GetInputInt32<hal_1_2::HalPolicy>(operation, 2, targetHeight, model, data))
625 {
626 return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
627 }
628
629 if (targetWidth < 0 || targetHeight < 0)
630 {
631 return Fail("%s: Operation has invalid inputs for resizing by shape. "
632 "Target width/height cannot be < 0", __func__);
633 }
634
635 descriptor.m_TargetWidth = static_cast<uint32_t>(targetWidth);
636 descriptor.m_TargetWidth = static_cast<uint32_t>(targetHeight);
637 }
638 else if (operandType1 == OperandType::FLOAT32)
639 {
640 // Case 2: resizing by scale
641 float widthScale = 1.0f;
642 float heightScale = 1.0f;
643
644 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, widthScale, model, data) ||
645 !GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, heightScale, model, data))
646 {
647 return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
648 }
649
650 const armnn::TensorShape& inputShape = inputInfo.GetShape();
651 armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
652
653 float width = inputShape[dataLayoutIndexed.GetWidthIndex()];
654 float height = inputShape[dataLayoutIndexed.GetHeightIndex()];
655
656 descriptor.m_TargetWidth = std::floor(width * widthScale);
657 descriptor.m_TargetHeight = std::floor(height * heightScale);
658 }
659 else
660 {
661 // NOTE: FLOAT16 scales are not supported
662 return false;
663 }
664
665 if (!IsLayerSupportedForAnyBackend(__func__,
666 armnn::IsResizeSupported,
667 data.m_Backends,
668 inputInfo,
669 outputInfo,
670 descriptor))
671 {
672 return false;
673 }
674
675 armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);
676
677 assert(layer != nullptr);
678
679 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
680 input.Connect(layer->GetInputSlot(0));
681
682 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
683}
684
Keith Davisa6bc52f2019-06-26 09:39:49 +0100685bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
686{
687 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
688
689 if (!input.IsValid() )
690 {
691 return Fail("%s: Operation has invalid inputs", __func__);
692 }
693
694 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
695 unsigned int rank = inputInfo.GetNumDimensions();
696
697 if (rank != 4)
698 {
699 return Fail("%s: Only inputs with rank 4 are supported", __func__);
700 }
701
702 armnn::SpaceToDepthDescriptor desc;
703
704 GetInputScalar<hal_1_2::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
705
706 if (desc.m_BlockSize <= 1)
707 {
708 return Fail("%s: Block size must be at least 1 in all dimensions");
709 }
710
711 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 2, model, data);
712
713 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
714 if (!output)
715 {
716 return Fail("%s: Could not read output 0", __func__);
717 }
718
719 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
720 if (!IsLayerSupportedForAnyBackend(__func__,
721 armnn::IsSpaceToDepthSupported,
722 data.m_Backends,
723 inputInfo,
724 outputInfo,
725 desc))
726 {
727 return false;
728 }
729
730 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
731 assert(layer != nullptr);
732 input.Connect(layer->GetInputSlot(0));
733
734 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
735}
736
Mike Kellyb5fdf382019-06-11 16:35:25 +0100737} // namespace hal_1_2
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100738} // namespace armnn_driver