blob: a82db80bc03f035b4da3f8d9b9040b1f12038a75 [file] [log] [blame]
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
5
6#include "HalPolicy.hpp"
7
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01008#include "OutputShapeUtils.hpp"
9
Mike Kellyb5fdf382019-06-11 16:35:25 +010010#include "../1.0/HalPolicy.hpp"
11#include "../1.1/HalPolicy.hpp"
12
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010013#include <DataLayoutIndexed.hpp>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +010014#include <Half.hpp>
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010015
16#include <cmath>
17
Mike Kellyb5fdf382019-06-11 16:35:25 +010018namespace armnn_driver
19{
20namespace hal_1_2
21{
22
23bool HandledByV1_0(V1_2::OperationType operationType)
24{
25 switch (static_cast<V1_0::OperationType>(operationType))
26 {
27 case V1_0::OperationType::ADD:
28 case V1_0::OperationType::AVERAGE_POOL_2D:
29 case V1_0::OperationType::CONCATENATION:
30 case V1_0::OperationType::DEPTH_TO_SPACE:
31 case V1_0::OperationType::DEQUANTIZE:
32 case V1_0::OperationType::EMBEDDING_LOOKUP:
33 case V1_0::OperationType::FLOOR:
34 case V1_0::OperationType::FULLY_CONNECTED:
35 case V1_0::OperationType::HASHTABLE_LOOKUP:
36 case V1_0::OperationType::L2_NORMALIZATION:
37 case V1_0::OperationType::L2_POOL_2D:
38 case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
39 case V1_0::OperationType::LOGISTIC:
40 case V1_0::OperationType::LSH_PROJECTION:
41 case V1_0::OperationType::LSTM:
42 case V1_0::OperationType::MAX_POOL_2D:
43 case V1_0::OperationType::MUL:
44 case V1_0::OperationType::RELU:
45 case V1_0::OperationType::RELU1:
46 case V1_0::OperationType::RELU6:
47 case V1_0::OperationType::RESHAPE:
Mike Kellyb5fdf382019-06-11 16:35:25 +010048 case V1_0::OperationType::RNN:
49 case V1_0::OperationType::SOFTMAX:
50 case V1_0::OperationType::SPACE_TO_DEPTH:
51 case V1_0::OperationType::SVDF:
52 case V1_0::OperationType::TANH:
53 case V1_0::OperationType::OEM_OPERATION:
54 return true;
55 default:
56 return false;
57 }
58}
59
60bool HandledByV1_1(V1_2::OperationType operationType)
61{
62 if (HandledByV1_0(operationType))
63 {
64 return true;
65 }
66 switch (static_cast<V1_1::OperationType>(operationType))
67 {
68 case V1_1::OperationType::BATCH_TO_SPACE_ND:
69 case V1_1::OperationType::DIV:
70 case V1_1::OperationType::MEAN:
71 case V1_1::OperationType::PAD:
72 case V1_1::OperationType::SPACE_TO_BATCH_ND:
73 case V1_1::OperationType::SQUEEZE:
74 case V1_1::OperationType::STRIDED_SLICE:
75 case V1_1::OperationType::SUB:
76 case V1_1::OperationType::TRANSPOSE:
77 return true;
78 default:
79 return false;
80 }
81}
82
83bool HandledByV1_0(const V1_2::Operation& operation)
84{
85 return HandledByV1_0(operation.type);
86}
87
88bool HandledByV1_1(const V1_2::Operation& operation)
89{
90 return HandledByV1_1(operation.type);
91}
92
93V1_0::OperationType CastToV1_0(V1_2::OperationType type)
94{
95 return static_cast<V1_0::OperationType>(type);
96}
97
98V1_1::OperationType CastToV1_1(V1_2::OperationType type)
99{
100 return static_cast<V1_1::OperationType>(type);
101}
102
103V1_0::Operation ConvertToV1_0(const V1_2::Operation& operation)
104{
105 V1_0::Operation op;
106 op.type = CastToV1_0(operation.type);
107 op.inputs = operation.inputs;
108 op.outputs = operation.outputs;
109 return op;
110}
111
112V1_1::Operation ConvertToV1_1(const V1_2::Operation& operation)
113{
114 V1_1::Operation op;
115 op.type = CastToV1_1(operation.type);
116 op.inputs = operation.inputs;
117 op.outputs = operation.outputs;
118 return op;
119}
120
121bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
122{
123 if (HandledByV1_0(operation) && compliantWithV1_0(model))
124 {
125 hal_1_0::HalPolicy::Operation v10Operation = ConvertToV1_0(operation);
126 hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);
127
128 return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
129 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100130
131 if (HandledByV1_1(operation) && compliantWithV1_1(model))
Mike Kellyb5fdf382019-06-11 16:35:25 +0100132 {
133 hal_1_1::HalPolicy::Operation v11Operation = ConvertToV1_1(operation);
134 hal_1_1::HalPolicy::Model v11Model = convertToV1_1(model);
135
136 return hal_1_1::HalPolicy::ConvertOperation(v11Operation, v11Model, data);
137 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100138
Mike Kellyb5fdf382019-06-11 16:35:25 +0100139 switch (operation.type)
140 {
141 case V1_2::OperationType::CONV_2D:
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100142 return ConvertConv2d(operation, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +0100143 case V1_2::OperationType::DEPTHWISE_CONV_2D:
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100144 return ConvertDepthwiseConv2d(operation, model, data);
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100145 case V1_2::OperationType::MAXIMUM:
146 return ConvertMaximum(operation, model, data);
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100147 case V1_2::OperationType::PAD_V2:
148 return ConvertPadV2(operation, model, data);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100149 case V1_2::OperationType::PRELU:
150 return ConvertPrelu(operation, model, data);
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100151 case V1_2::OperationType::RESIZE_BILINEAR:
152 return ConvertResize(operation, model, data, armnn::ResizeMethod::Bilinear);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100153 case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR:
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100154 return ConvertResize(operation, model, data, armnn::ResizeMethod::NearestNeighbor);
Mike Kellyb5fdf382019-06-11 16:35:25 +0100155 default:
156 return Fail("%s: Operation type %s not supported in ArmnnDriver",
157 __func__, toString(operation.type).c_str());
158 }
159}
160
// Converts an NNAPI CONV_2D operation (HAL 1.2 form, which adds an optional
// data-layout flag and optional dilation parameters) into an ArmNN
// Convolution2d layer and wires it into the network under construction.
// Returns false (via Fail) on any unsupported or malformed input.
bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    // Input 0: the tensor being convolved.
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Outputs whose shape is not fully specified at model-build time are rejected.
    if (IsDynamicOutput(outputInfo))
    {
        return Fail("%s: Dynamic output not supported", __func__);
    }

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit.
    // Implicit form: 7 inputs, or >=8 where input 7 is the BOOL layout flag
    // (in the explicit form input 7 is the INT32 stride instead).
    bool implicitPadding = operation.inputs.size() == 7 ||
        (operation.inputs.size() >= 8 &&
        GetInputOperand<hal_1_2::HalPolicy>(operation, 7, model)->type == OperandType::BOOL);

    // The optional data-layout flag sits at a different index in each form.
    if (implicitPadding)
    {
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 7, model, data);
    }
    else if (operation.inputs.size() >= 10)
    {
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 10, model, data);
    }

    const armnn::PermutationVector OHWIToOIHW = {0, 2, 3, 1};

    // ArmNN does not currently support non-fixed weights or bias
    // The NNAPI filter is always OHWI [depth_out, filter_height, filter_width, depth_in] but ArmNN expects the
    // filter's height and width indices to match the input's height and width indices so we permute it to OIHW if
    // the DataLayout is NCHW
    const ConstTensorPin weightsPin = (desc.m_DataLayout == armnn::DataLayout::NCHW) ?
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data, OHWIToOIHW) :
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
    // Input 2: the 1D bias tensor.
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Quantized bias scale must equal inputScale * weightsScale; fix it up if needed.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        // Implicit form: input 3 = padding scheme, 4/5 = strides,
        // 6 = fused activation, 8/9 = optional dilation factors.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 6, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 8, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        // The weights were permuted to match the chosen data layout above,
        // so the same width/height indices apply to both input and weights.
        armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
        unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
        unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
        const uint32_t kernelX = weights.GetShape()[widthIndex];
        const uint32_t kernelY = weights.GetShape()[heightIndex];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        // Derive explicit pad values from the SAME/VALID padding scheme.
        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);

    }
    else if (operation.inputs.size() >= 10)
    {
        // explicit padding: inputs 3-6 = pad left/right/top/bottom,
        // 7/8 = strides, 9 = fused activation, 11/12 = optional dilation.
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 9, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 11, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Ask the configured backends whether they can execute this layer.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}
309
// Converts an NNAPI DEPTHWISE_CONV_2D operation (HAL 1.2 form) into an ArmNN
// DepthwiseConvolution2d layer. The NNAPI weight layout [1, H, W, I * M] is
// reshaped/permuted into the [M, I, H, W] layout ArmNN expects.
bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    // Input 0: the tensor being convolved.
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    // NOTE(review): unlike ConvertConv2d there is no IsDynamicOutput rejection
    // here — confirm whether dynamic output shapes are intentionally allowed.
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const Operand* weightsOperand = GetInputOperand<hal_1_2::HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit.
    // Implicit form: 8 inputs, or >=9 where input 8 is the BOOL layout flag
    // (in the explicit form input 8 is the INT32 stride instead).
    bool implicitPadding = operation.inputs.size() == 8 ||
        (operation.inputs.size() >= 9 &&
        GetInputOperand<hal_1_2::HalPolicy>(operation, 8, model)->type == OperandType::BOOL);

    // Look ahead to find the optional DataLayout, if present
    const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
    desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, dataLayoutFlagIndex, model, data);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();

    // Reinterpret weight data as [ H, W, I, M ]
    // (M, the depth multiplier, is recovered as (I * M) / I.)
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[channelsIndex],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  HWIMToMIHW,
                                                                  &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Quantized bias scale must equal inputScale * weightsScale; fix it up if needed.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        // Implicit form: input 3 = padding scheme, 4/5 = strides,
        // 7 = fused activation, 9/10 = optional dilation factors.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 7, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        // Weights are now [M, I, H, W], so W is at index 3 and H at index 2.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        // Derive explicit pad values from the SAME/VALID padding scheme.
        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 11)
    {
        // explicit padding: inputs 3-6 = pad left/right/top/bottom,
        // 7/8 = strides, 10 = fused activation, 12/13 = optional dilation.
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 10, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Ask the configured backends whether they can execute this layer.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}
466
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100467bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
468{
469 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
470 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
471
472 if (!input0.IsValid() || !input1.IsValid())
473 {
474 return Fail("%s: Operation has invalid inputs", __func__);
475 }
476
477 const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
478 if (!outputOperand)
479 {
480 return Fail("%s: Could not read output", __func__);
481 }
482
Aron Virginas-Tard7593232019-07-16 13:17:06 +0100483 armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100484 if (IsDynamicOutput(outInfo))
485 {
486 ALOGD("Output shape not set, will infer from inputs");
487 outInfo.SetShape(InferMaximumOutputShape(input0.GetTensorInfo().GetShape(), input1.GetTensorInfo().GetShape()));
488 }
489
Aron Virginas-Tard7593232019-07-16 13:17:06 +0100490 bool isSupported = false;
491 FORWARD_LAYER_SUPPORT_FUNC(__func__,
492 IsMaximumSupported,
493 data.m_Backends,
494 isSupported,
495 input0.GetTensorInfo(),
496 input1.GetTensorInfo(),
497 outInfo);
498
499 if (!isSupported)
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100500 {
501 return false;
502 }
503
504 armnn::IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
505 assert(layer != nullptr);
506 BroadcastTensor(input0, input1, layer, *data.m_Network);
507
508 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
509 0,
510 *layer,
511 model,
512 data,
513 armnn::Optional<armnn::TensorInfo>(outInfo));
514}
515
// Converts an NNAPI PAD_V2 operation into an ArmNN Pad layer. Unlike PAD,
// PAD_V2 carries an explicit pad value (input 2) whose scalar type must
// match the input tensor's element type; quantized pad values are
// dequantized before being stored in the descriptor.
bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    // Input 1: the [rank, 2] paddings tensor, decoded into the descriptor.
    armnn::PadDescriptor descriptor;
    if (!ConvertPaddings<hal_1_2::HalPolicy>(operation, model, data, rank, descriptor))
    {
        return Fail("%s: Could not convert paddings", __func__);
    }

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicOutput(outputInfo))
    {
        // The model did not specify the output shape; derive it from the pad list.
        ALOGD("Output shape not set, will infer from inputs");
        outputInfo.SetShape(InferPadOutputShape(inputInfo.GetShape(), descriptor.m_PadList));
    }

    // Determine type of padding value
    OperandType operandType0;
    OperandType operandType2;

    if (!GetOperandType<hal_1_2::HalPolicy>(operation, 0, model, operandType0) ||
        !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // Read value to use for padding.
    // The (input type, pad-value type) pair must be one of the three
    // combinations below; anything else is a malformed model.
    if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
    {
        armnn::Half f16PadValue;
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 2, operandType2, f16PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
        }

        descriptor.m_PadValue = f16PadValue;
    }
    else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
    {
        if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, descriptor.m_PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
        }
    }
    else if (operandType0 == OperandType::TENSOR_QUANT8_ASYMM && operandType2 == OperandType::INT32)
    {
        int32_t quantizedPadValue = 0;
        if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, quantizedPadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (INT32)", __func__);
        }

        // Convert the quantized pad value into real units using the input's
        // quantization parameters, since the descriptor stores a float.
        descriptor.m_PadValue = armnn::Dequantize(quantizedPadValue,
                                                  inputInfo.GetQuantizationScale(),
                                                  inputInfo.GetQuantizationOffset());
    }
    else
    {
        return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
    }

    // Ask the configured backends whether they can execute this layer.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPadSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}
616
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100617bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
618{
619 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
620 LayerInputHandle alpha = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
621
622 if (!input.IsValid() || !alpha.IsValid())
623 {
624 return Fail("%s: Operation has invalid inputs", __func__);
625 }
626
627 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
628
629 if (!output)
630 {
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100631 return Fail("%s: Could not read output", __func__);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100632 }
633
634 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
635 const armnn::TensorInfo& alphaInfo = alpha.GetTensorInfo();
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100636
637 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +0100638 if (IsDynamicOutput(outputInfo))
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100639 {
640 ALOGD("Output shape not set, will infer from inputs");
641 outputInfo.SetShape(InferPreluOutputShape(inputInfo.GetShape(), alphaInfo.GetShape()));
642 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100643
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100644 bool isSupported = false;
645 FORWARD_LAYER_SUPPORT_FUNC(__func__,
646 IsPreluSupported,
647 data.m_Backends,
648 isSupported,
649 inputInfo,
650 alphaInfo,
651 outputInfo);
652 if (!isSupported)
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100653 {
654 return false;
655 }
656
657 armnn::IConnectableLayer* const layer = data.m_Network->AddPreluLayer();
658
659 if (!layer)
660 {
661 return Fail("%s: AddPreluLayer failed", __func__);
662 }
663
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100664 BroadcastTensor(input, alpha, layer, *data.m_Network);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100665
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100666 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
667 0,
668 *layer,
669 model,
670 data,
671 armnn::Optional<armnn::TensorInfo>(outputInfo));
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100672}
673
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100674bool HalPolicy::ConvertResize(const Operation& operation,
675 const Model& model,
676 ConversionData& data,
677 armnn::ResizeMethod resizeMethod)
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100678{
679 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
680 if (!input.IsValid())
681 {
682 return Fail("%s: Could not read input 0", __func__);
683 }
684
685 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
686 if (!output)
687 {
688 return Fail("%s: Could not read output 0", __func__);
689 }
690
691 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
692 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
693
694 armnn::ResizeDescriptor descriptor;
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100695 descriptor.m_Method = resizeMethod;
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100696 descriptor.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 3, model, data);
697
698 OperandType operandType1;
699 OperandType operandType2;
700
701 if (!GetOperandType<hal_1_2::HalPolicy>(operation, 1, model, operandType1) ||
702 !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
703 {
704 return Fail("%s: Operation has invalid inputs", __func__);
705 }
706
707 if (operandType1 != operandType2)
708 {
709 return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
710 }
711
712 if (operandType1 == OperandType::INT32)
713 {
714 // Case 1: resizing by shape
715 int32_t targetWidth = 0;
716 int32_t targetHeight = 0;
717
718 if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 1, targetWidth, model, data) ||
719 !GetInputInt32<hal_1_2::HalPolicy>(operation, 2, targetHeight, model, data))
720 {
721 return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
722 }
723
724 if (targetWidth < 0 || targetHeight < 0)
725 {
726 return Fail("%s: Operation has invalid inputs for resizing by shape. "
727 "Target width/height cannot be < 0", __func__);
728 }
729
730 descriptor.m_TargetWidth = static_cast<uint32_t>(targetWidth);
731 descriptor.m_TargetWidth = static_cast<uint32_t>(targetHeight);
732 }
733 else if (operandType1 == OperandType::FLOAT32)
734 {
735 // Case 2: resizing by scale
736 float widthScale = 1.0f;
737 float heightScale = 1.0f;
738
739 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, widthScale, model, data) ||
740 !GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, heightScale, model, data))
741 {
742 return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
743 }
744
745 const armnn::TensorShape& inputShape = inputInfo.GetShape();
746 armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
747
748 float width = inputShape[dataLayoutIndexed.GetWidthIndex()];
749 float height = inputShape[dataLayoutIndexed.GetHeightIndex()];
750
751 descriptor.m_TargetWidth = std::floor(width * widthScale);
752 descriptor.m_TargetHeight = std::floor(height * heightScale);
753 }
754 else
755 {
756 // NOTE: FLOAT16 scales are not supported
757 return false;
758 }
759
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100760 bool isSupported = false;
761 FORWARD_LAYER_SUPPORT_FUNC(__func__,
762 IsResizeSupported,
763 data.m_Backends,
764 isSupported,
765 inputInfo,
766 outputInfo,
767 descriptor);
768 if (!isSupported)
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100769 {
770 return false;
771 }
772
773 armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);
774
775 assert(layer != nullptr);
776
777 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
778 input.Connect(layer->GetInputSlot(0));
779
780 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
781}
782
Keith Davisa6bc52f2019-06-26 09:39:49 +0100783bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
784{
785 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
786
787 if (!input.IsValid() )
788 {
789 return Fail("%s: Operation has invalid inputs", __func__);
790 }
791
792 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
793 unsigned int rank = inputInfo.GetNumDimensions();
794
795 if (rank != 4)
796 {
797 return Fail("%s: Only inputs with rank 4 are supported", __func__);
798 }
799
800 armnn::SpaceToDepthDescriptor desc;
801
802 GetInputScalar<hal_1_2::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
803
804 if (desc.m_BlockSize <= 1)
805 {
806 return Fail("%s: Block size must be at least 1 in all dimensions");
807 }
808
809 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 2, model, data);
810
811 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
812 if (!output)
813 {
814 return Fail("%s: Could not read output 0", __func__);
815 }
816
817 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100818
819 bool isSupported = false;
820 FORWARD_LAYER_SUPPORT_FUNC(__func__,
821 IsSpaceToDepthSupported,
822 data.m_Backends,
823 isSupported,
824 inputInfo,
825 outputInfo,
826 desc);
827 if (!isSupported)
Keith Davisa6bc52f2019-06-26 09:39:49 +0100828 {
829 return false;
830 }
831
832 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
833 assert(layer != nullptr);
834 input.Connect(layer->GetInputSlot(0));
835
836 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
837}
838
Mike Kellyb5fdf382019-06-11 16:35:25 +0100839} // namespace hal_1_2
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100840} // namespace armnn_driver