//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "HalPolicy.hpp"

#include "OutputShapeUtils.hpp"

#include "../1.0/HalPolicy.hpp"
#include "../1.1/HalPolicy.hpp"

#include <DataLayoutIndexed.hpp>
#include <Half.hpp>

#include <cmath>

namespace armnn_driver
{
namespace hal_1_2
{

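// HAL 1.2 re-uses the operation enum values of HAL 1.0 and 1.1 for operations that already
// existed in those versions. The helpers below detect such operations by value so they can be
// converted to the older operation structs and delegated to the corresponding HalPolicy unchanged.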
bool HandledByV1_0(V1_2::OperationType operationType)
{
    switch (static_cast<V1_0::OperationType>(operationType))
    {
        case V1_0::OperationType::ADD:
        case V1_0::OperationType::AVERAGE_POOL_2D:
        case V1_0::OperationType::CONCATENATION:
        case V1_0::OperationType::DEPTH_TO_SPACE:
        case V1_0::OperationType::DEQUANTIZE:
        case V1_0::OperationType::EMBEDDING_LOOKUP:
        case V1_0::OperationType::FLOOR:
        case V1_0::OperationType::FULLY_CONNECTED:
        case V1_0::OperationType::HASHTABLE_LOOKUP:
        case V1_0::OperationType::L2_NORMALIZATION:
        case V1_0::OperationType::L2_POOL_2D:
        case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
        case V1_0::OperationType::LOGISTIC:
        case V1_0::OperationType::LSH_PROJECTION:
        case V1_0::OperationType::LSTM:
        case V1_0::OperationType::MAX_POOL_2D:
        case V1_0::OperationType::MUL:
        case V1_0::OperationType::RELU:
        case V1_0::OperationType::RELU1:
        case V1_0::OperationType::RELU6:
        case V1_0::OperationType::RESHAPE:
        case V1_0::OperationType::RNN:
        case V1_0::OperationType::SOFTMAX:
        case V1_0::OperationType::SPACE_TO_DEPTH:
        case V1_0::OperationType::SVDF:
        case V1_0::OperationType::TANH:
        case V1_0::OperationType::OEM_OPERATION:
            return true;
        default:
            return false;
    }
}

bool HandledByV1_1(V1_2::OperationType operationType)
{
    if (HandledByV1_0(operationType))
    {
        return true;
    }
    switch (static_cast<V1_1::OperationType>(operationType))
    {
        case V1_1::OperationType::BATCH_TO_SPACE_ND:
        case V1_1::OperationType::DIV:
        case V1_1::OperationType::MEAN:
        case V1_1::OperationType::PAD:
        case V1_1::OperationType::SPACE_TO_BATCH_ND:
        case V1_1::OperationType::SQUEEZE:
        case V1_1::OperationType::STRIDED_SLICE:
        case V1_1::OperationType::SUB:
        case V1_1::OperationType::TRANSPOSE:
            return true;
        default:
            return false;
    }
}

bool HandledByV1_0(const V1_2::Operation& operation)
{
    return HandledByV1_0(operation.type);
}

bool HandledByV1_1(const V1_2::Operation& operation)
{
    return HandledByV1_1(operation.type);
}

V1_0::OperationType CastToV1_0(V1_2::OperationType type)
{
    return static_cast<V1_0::OperationType>(type);
}

V1_1::OperationType CastToV1_1(V1_2::OperationType type)
{
    return static_cast<V1_1::OperationType>(type);
}

V1_0::Operation ConvertToV1_0(const V1_2::Operation& operation)
{
    V1_0::Operation op;
    op.type = CastToV1_0(operation.type);
    op.inputs = operation.inputs;
    op.outputs = operation.outputs;
    return op;
}

V1_1::Operation ConvertToV1_1(const V1_2::Operation& operation)
{
    V1_1::Operation op;
    op.type = CastToV1_1(operation.type);
    op.inputs = operation.inputs;
    op.outputs = operation.outputs;
    return op;
}

bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
{
    if (HandledByV1_0(operation) && compliantWithV1_0(model))
    {
        hal_1_0::HalPolicy::Operation v10Operation = ConvertToV1_0(operation);
        hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);

        return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
    }

    if (HandledByV1_1(operation) && compliantWithV1_1(model))
    {
        hal_1_1::HalPolicy::Operation v11Operation = ConvertToV1_1(operation);
        hal_1_1::HalPolicy::Model v11Model = convertToV1_1(model);

        return hal_1_1::HalPolicy::ConvertOperation(v11Operation, v11Model, data);
    }

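    // Everything else is an operation type that was introduced or extended in HAL 1.2,
    // so it is converted by this policy directly.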
    switch (operation.type)
    {
        case V1_2::OperationType::CONV_2D:
            return ConvertConv2d(operation, model, data);
        case V1_2::OperationType::DEPTHWISE_CONV_2D:
            return ConvertDepthwiseConv2d(operation, model, data);
        case V1_2::OperationType::PAD_V2:
            return ConvertPadV2(operation, model, data);
        case V1_2::OperationType::PRELU:
            return ConvertPrelu(operation, model, data);
        case V1_2::OperationType::RESIZE_BILINEAR:
            return ConvertResize(operation, model, data, armnn::ResizeMethod::Bilinear);
        case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR:
            return ConvertResize(operation, model, data, armnn::ResizeMethod::NearestNeighbor);
        default:
            return Fail("%s: Operation type %s not supported in ArmnnDriver",
                        __func__, toString(operation.type).c_str());
    }
}

bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (IsDynamicOutput(outputInfo))
    {
        return Fail("%s: Dynamic output not supported", __func__);
    }

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit
    bool implicitPadding = operation.inputs.size() == 7 ||
                           (operation.inputs.size() >= 8 &&
                            GetInputOperand<hal_1_2::HalPolicy>(operation, 7, model)->type == OperandType::BOOL);

    if (implicitPadding)
    {
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 7, model, data);
    }
    else if (operation.inputs.size() >= 10)
    {
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 10, model, data);
    }

    const armnn::PermutationVector OHWIToOIHW = {0, 2, 3, 1};

    // ArmNN does not currently support non-fixed weights or bias
    // The NNAPI filter is always OHWI [depth_out, filter_height, filter_width, depth_in] but ArmNN expects the
    // filter's height and width indices to match the input's height and width indices so we permute it to OIHW if
    // the DataLayout is NCHW
    const ConstTensorPin weightsPin = (desc.m_DataLayout == armnn::DataLayout::NCHW) ?
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data, OHWIToOIHW) :
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 6, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 8, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
        unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
        unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
        const uint32_t kernelX = weights.GetShape()[widthIndex];
        const uint32_t kernelY = weights.GetShape()[heightIndex];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

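        // CalcPadding derives explicit pad values from the android::nn padding scheme:
        // VALID uses zero padding, while SAME pads just enough (split between the leading and
        // trailing edges) for the output size to be ceil(inputSize / stride), taking the
        // dilated kernel size into account.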
        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);

    }
    else if (operation.inputs.size() >= 10)
    {
        // explicit padding
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 9, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 11, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsConvolution2dSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc,
                                       weights.GetInfo(),
                                       biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}

bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const Operand* weightsOperand = GetInputOperand<hal_1_2::HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit
    bool implicitPadding = operation.inputs.size() == 8 ||
                           (operation.inputs.size() >= 9 &&
                            GetInputOperand<hal_1_2::HalPolicy>(operation, 8, model)->type == OperandType::BOOL);

    // Look ahead to find the optional DataLayout, if present
    const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
    desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, dataLayoutFlagIndex, model, data);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();

    // Reinterpret weight data as [ H, W, I, M ]
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[channelsIndex],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });
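    // The last NNAPI weights dimension is I * M, so the depth multiplier M is recovered by
    // dividing it by the number of input channels I taken from the input tensor.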

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  HWIMToMIHW,
                                                                  &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 7, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 11)
    {
        // explicit padding
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 10, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsDepthwiseConvolutionSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc,
                                       weights.GetInfo(),
                                       biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}

bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    armnn::PadDescriptor descriptor;
    if (!ConvertPaddings<hal_1_2::HalPolicy>(operation, model, data, rank, descriptor))
    {
        return Fail("%s: Could not convert paddings", __func__);
    }

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicOutput(outputInfo))
    {
        ALOGD("Output shape not set, will infer from inputs");
        outputInfo.SetShape(InferPadOutputShape(inputInfo.GetShape(), descriptor.m_PadList));
    }

    // Determine type of padding value
    OperandType operandType0;
    OperandType operandType2;

    if (!GetOperandType<hal_1_2::HalPolicy>(operation, 0, model, operandType0) ||
        !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

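    // The NNAPI PAD_V2 pad value must match the input tensor type: FLOAT16 for TENSOR_FLOAT16
    // inputs, FLOAT32 for TENSOR_FLOAT32 inputs, and an INT32 value in the quantized domain
    // for TENSOR_QUANT8_ASYMM inputs (hence the dequantization below).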
    // Read value to use for padding
    if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
    {
        armnn::Half f16PadValue;
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 2, operandType2, f16PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
        }

        descriptor.m_PadValue = f16PadValue;
    }
    else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
    {
        if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, descriptor.m_PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
        }
    }
    else if (operandType0 == OperandType::TENSOR_QUANT8_ASYMM && operandType2 == OperandType::INT32)
    {
        int32_t quantizedPadValue = 0;
        if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, quantizedPadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (INT32)", __func__);
        }

        descriptor.m_PadValue = armnn::Dequantize(quantizedPadValue,
                                                  inputInfo.GetQuantizationScale(),
                                                  inputInfo.GetQuantizationOffset());
    }
    else
    {
        return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
    }

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsPadSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       descriptor))
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    LayerInputHandle alpha = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);

    if (!input.IsValid() || !alpha.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& alphaInfo = alpha.GetTensorInfo();

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicOutput(outputInfo))
    {
        ALOGD("Output shape not set, will infer from inputs");
        outputInfo.SetShape(InferPreluOutputShape(inputInfo.GetShape(), alphaInfo.GetShape()));
    }

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsPreluSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       alphaInfo,
                                       outputInfo))
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPreluLayer();

    if (!layer)
    {
        return Fail("%s: AddPreluLayer failed", __func__);
    }

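    // PRELU computes max(0, x) + alpha * min(0, x), with alpha broadcast against the input.
    // BroadcastTensor connects both inputs to the layer, reshaping the lower-rank tensor
    // if necessary so the two shapes broadcast correctly.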
    BroadcastTensor(input, alpha, layer, *data.m_Network);

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}

bool HalPolicy::ConvertResize(const Operation& operation,
                              const Model& model,
                              ConversionData& data,
                              armnn::ResizeMethod resizeMethod)
{
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::ResizeDescriptor descriptor;
    descriptor.m_Method = resizeMethod;
    descriptor.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 3, model, data);

    OperandType operandType1;
    OperandType operandType2;

    if (!GetOperandType<hal_1_2::HalPolicy>(operation, 1, model, operandType1) ||
        !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (operandType1 != operandType2)
    {
        return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
    }

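    // The target size can be given either as explicit INT32 width/height values or as
    // FLOAT32 scale factors applied to the input width and height.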
    if (operandType1 == OperandType::INT32)
    {
        // Case 1: resizing by shape
        int32_t targetWidth = 0;
        int32_t targetHeight = 0;

        if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 1, targetWidth, model, data) ||
            !GetInputInt32<hal_1_2::HalPolicy>(operation, 2, targetHeight, model, data))
        {
            return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
        }

        if (targetWidth < 0 || targetHeight < 0)
        {
            return Fail("%s: Operation has invalid inputs for resizing by shape. "
                        "Target width/height cannot be < 0", __func__);
        }

        descriptor.m_TargetWidth = static_cast<uint32_t>(targetWidth);
        descriptor.m_TargetHeight = static_cast<uint32_t>(targetHeight);
    }
    else if (operandType1 == OperandType::FLOAT32)
    {
        // Case 2: resizing by scale
        float widthScale = 1.0f;
        float heightScale = 1.0f;

        if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, widthScale, model, data) ||
            !GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, heightScale, model, data))
        {
            return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
        }

        const armnn::TensorShape& inputShape = inputInfo.GetShape();
        armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);

        float width = inputShape[dataLayoutIndexed.GetWidthIndex()];
        float height = inputShape[dataLayoutIndexed.GetHeightIndex()];

        descriptor.m_TargetWidth = std::floor(width * widthScale);
        descriptor.m_TargetHeight = std::floor(height * heightScale);
    }
    else
    {
        // NOTE: FLOAT16 scales are not supported
        return false;
    }

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsResizeSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       descriptor))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);

    assert(layer != nullptr);

    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }

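    // SPACE_TO_DEPTH rearranges non-overlapping blockSize x blockSize spatial blocks into the
    // channel dimension, so the input height and width must both be divisible by the block size.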
    armnn::SpaceToDepthDescriptor desc;

    if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (desc.m_BlockSize <= 1)
    {
        return Fail("%s: Block size must be greater than 1 in all dimensions", __func__);
    }

    desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 2, model, data);

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsSpaceToDepthSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc))
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
}

} // namespace hal_1_2
} // namespace armnn_driver