blob: 3c00388cbfc076268899225c789271858d5e713e [file] [log] [blame]
Mike Kellyb5fdf382019-06-11 16:35:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "HalPolicy.hpp"
7
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01008#include "OutputShapeUtils.hpp"
9
Mike Kellyb5fdf382019-06-11 16:35:25 +010010#include "../1.0/HalPolicy.hpp"
11#include "../1.1/HalPolicy.hpp"
12
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010013#include <DataLayoutIndexed.hpp>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +010014#include <Half.hpp>
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010015
16#include <cmath>
17
Mike Kellyb5fdf382019-06-11 16:35:25 +010018namespace armnn_driver
19{
20namespace hal_1_2
21{
22
23bool HandledByV1_0(V1_2::OperationType operationType)
24{
25 switch (static_cast<V1_0::OperationType>(operationType))
26 {
27 case V1_0::OperationType::ADD:
28 case V1_0::OperationType::AVERAGE_POOL_2D:
29 case V1_0::OperationType::CONCATENATION:
30 case V1_0::OperationType::DEPTH_TO_SPACE:
31 case V1_0::OperationType::DEQUANTIZE:
32 case V1_0::OperationType::EMBEDDING_LOOKUP:
33 case V1_0::OperationType::FLOOR:
34 case V1_0::OperationType::FULLY_CONNECTED:
35 case V1_0::OperationType::HASHTABLE_LOOKUP:
36 case V1_0::OperationType::L2_NORMALIZATION:
37 case V1_0::OperationType::L2_POOL_2D:
38 case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
39 case V1_0::OperationType::LOGISTIC:
40 case V1_0::OperationType::LSH_PROJECTION:
41 case V1_0::OperationType::LSTM:
42 case V1_0::OperationType::MAX_POOL_2D:
43 case V1_0::OperationType::MUL:
44 case V1_0::OperationType::RELU:
45 case V1_0::OperationType::RELU1:
46 case V1_0::OperationType::RELU6:
47 case V1_0::OperationType::RESHAPE:
Mike Kellyb5fdf382019-06-11 16:35:25 +010048 case V1_0::OperationType::RNN:
Mike Kellyb5fdf382019-06-11 16:35:25 +010049 case V1_0::OperationType::SPACE_TO_DEPTH:
50 case V1_0::OperationType::SVDF:
51 case V1_0::OperationType::TANH:
52 case V1_0::OperationType::OEM_OPERATION:
53 return true;
54 default:
55 return false;
56 }
57}
58
59bool HandledByV1_1(V1_2::OperationType operationType)
60{
61 if (HandledByV1_0(operationType))
62 {
63 return true;
64 }
65 switch (static_cast<V1_1::OperationType>(operationType))
66 {
67 case V1_1::OperationType::BATCH_TO_SPACE_ND:
68 case V1_1::OperationType::DIV:
69 case V1_1::OperationType::MEAN:
70 case V1_1::OperationType::PAD:
71 case V1_1::OperationType::SPACE_TO_BATCH_ND:
72 case V1_1::OperationType::SQUEEZE:
73 case V1_1::OperationType::STRIDED_SLICE:
74 case V1_1::OperationType::SUB:
75 case V1_1::OperationType::TRANSPOSE:
76 return true;
77 default:
78 return false;
79 }
80}
81
82bool HandledByV1_0(const V1_2::Operation& operation)
83{
84 return HandledByV1_0(operation.type);
85}
86
87bool HandledByV1_1(const V1_2::Operation& operation)
88{
89 return HandledByV1_1(operation.type);
90}
91
92V1_0::OperationType CastToV1_0(V1_2::OperationType type)
93{
94 return static_cast<V1_0::OperationType>(type);
95}
96
97V1_1::OperationType CastToV1_1(V1_2::OperationType type)
98{
99 return static_cast<V1_1::OperationType>(type);
100}
101
102V1_0::Operation ConvertToV1_0(const V1_2::Operation& operation)
103{
104 V1_0::Operation op;
105 op.type = CastToV1_0(operation.type);
106 op.inputs = operation.inputs;
107 op.outputs = operation.outputs;
108 return op;
109}
110
111V1_1::Operation ConvertToV1_1(const V1_2::Operation& operation)
112{
113 V1_1::Operation op;
114 op.type = CastToV1_1(operation.type);
115 op.inputs = operation.inputs;
116 op.outputs = operation.outputs;
117 return op;
118}
119
120bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
121{
122 if (HandledByV1_0(operation) && compliantWithV1_0(model))
123 {
124 hal_1_0::HalPolicy::Operation v10Operation = ConvertToV1_0(operation);
125 hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);
126
127 return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
128 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100129
130 if (HandledByV1_1(operation) && compliantWithV1_1(model))
Mike Kellyb5fdf382019-06-11 16:35:25 +0100131 {
132 hal_1_1::HalPolicy::Operation v11Operation = ConvertToV1_1(operation);
133 hal_1_1::HalPolicy::Model v11Model = convertToV1_1(model);
134
135 return hal_1_1::HalPolicy::ConvertOperation(v11Operation, v11Model, data);
136 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100137
Mike Kellyb5fdf382019-06-11 16:35:25 +0100138 switch (operation.type)
139 {
140 case V1_2::OperationType::CONV_2D:
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100141 return ConvertConv2d(operation, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +0100142 case V1_2::OperationType::DEPTHWISE_CONV_2D:
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100143 return ConvertDepthwiseConv2d(operation, model, data);
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100144 case V1_2::OperationType::MAXIMUM:
145 return ConvertMaximum(operation, model, data);
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100146 case V1_2::OperationType::MINIMUM:
147 return ConvertMinimum(operation, model, data);
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100148 case V1_2::OperationType::PAD_V2:
149 return ConvertPadV2(operation, model, data);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100150 case V1_2::OperationType::PRELU:
151 return ConvertPrelu(operation, model, data);
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100152 case V1_2::OperationType::RESIZE_BILINEAR:
153 return ConvertResize(operation, model, data, armnn::ResizeMethod::Bilinear);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100154 case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR:
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100155 return ConvertResize(operation, model, data, armnn::ResizeMethod::NearestNeighbor);
Francis Murtagh074c25a2019-07-22 16:40:57 +0100156 case V1_2::OperationType::SOFTMAX:
157 return ConvertSoftmax(operation, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +0100158 default:
159 return Fail("%s: Operation type %s not supported in ArmnnDriver",
160 __func__, toString(operation.type).c_str());
161 }
162}
163
// Converts an NNAPI CONV_2D operation (HAL 1.2) into an ArmNN Convolution2d layer.
// Handles both the implicit (padding-scheme) and explicit (per-edge padding) input
// signatures, the optional dilation parameters, and the optional data-layout operand.
// Returns false (via Fail) if the operation cannot be converted.
bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    // Taken by value (not reference): the shape may be overwritten below when the
    // output is dynamic and has to be inferred from the inputs.
    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit:
    // implicit form has 7 inputs, or >= 8 where input 7 is the BOOL data-layout flag.
    bool implicitPadding = operation.inputs.size() == 7 ||
        (operation.inputs.size() >= 8 &&
        GetInputOperand<hal_1_2::HalPolicy>(operation, 7, model)->type == OperandType::BOOL);

    // The optional data-layout operand sits at a different index in each form.
    if (implicitPadding)
    {
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 7, model, data);
    }
    else if (operation.inputs.size() >= 10)
    {
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 10, model, data);
    }

    const armnn::PermutationVector OHWIToOIHW = {0, 2, 3, 1};

    // ArmNN does not currently support non-fixed weights or bias.
    // The NNAPI filter is always OHWI [depth_out, filter_height, filter_width, depth_in] but ArmNN expects the
    // filter's height and width indices to match the input's height and width indices so we permute it to OIHW if
    // the DataLayout is NCHW.
    const ConstTensorPin weightsPin = (desc.m_DataLayout == armnn::DataLayout::NCHW) ?
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data, OHWIToOIHW) :
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Ensure the bias quantization scale is consistent with input * weights scales.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        // Implicit form: inputs 3-6 are padding scheme, strides and activation;
        // optional dilation parameters start at input 8.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 6, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 8, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        // Derive explicit padding from kernel/input sizes and the padding scheme.
        // The weights tensor at this point matches the chosen data layout, so the
        // layout's width/height indices address both weights and input correctly.
        armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
        unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
        unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
        const uint32_t kernelX = weights.GetShape()[widthIndex];
        const uint32_t kernelY = weights.GetShape()[heightIndex];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);

    }
    else if (operation.inputs.size() >= 10)
    {
        // explicit padding: inputs 3-9 are the four pads, strides and activation;
        // optional dilation parameters start at input 11.
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 9, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 11, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Dynamic output: compute the shape from the input/weights shapes and descriptor.
    if (IsDynamicOutput(outputInfo))
    {
        try
        {
            ALOGD("Output shape not set, will infer from inputs");
            outputInfo.SetShape(InferConvolution2dOutputShape(inputInfo.GetShape(),
                                                              weights.GetInfo().GetShape(),
                                                              desc));
        }
        catch (armnn::Exception& e)
        {
            return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
        }
    }

    // Check backend support before adding any layers to the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsConvolution2dSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer produces the final output.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *endLayer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}
328
// Converts an NNAPI DEPTHWISE_CONV_2D operation (HAL 1.2) into an ArmNN
// DepthwiseConvolution2d layer. Handles implicit and explicit padding forms,
// optional dilation, and the optional data-layout operand. The NNAPI weights
// are reshaped and permuted into ArmNN's expected [M, I, H, W] ordering.
bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // ArmNN does not currently support non-fixed weights or bias.
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ].
    const Operand* weightsOperand = GetInputOperand<hal_1_2::HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit:
    // implicit form has 8 inputs, or >= 9 where input 8 is the BOOL data-layout flag.
    bool implicitPadding = operation.inputs.size() == 8 ||
        (operation.inputs.size() >= 9 &&
        GetInputOperand<hal_1_2::HalPolicy>(operation, 8, model)->type == OperandType::BOOL);

    // Look ahead to find the optional DataLayout, if present
    const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
    desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, dataLayoutFlagIndex, model, data);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();

    // Reinterpret weight data as [ H, W, I, M ], splitting the fused I*M axis
    // using the input's channel count I.
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[channelsIndex],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  HWIMToMIHW,
                                                                  &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Ensure the bias quantization scale is consistent with input * weights scales.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        // Implicit form: inputs 3-7 are padding scheme, strides and activation;
        // optional dilation parameters start at input 9.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 7, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        // Weights were swizzled to [ M, I, H, W ] above, so W is index 3 and H is index 2.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 11)
    {
        // explicit padding: inputs 3-10 are the four pads, strides and activation;
        // optional dilation parameters start at input 12.
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 10, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Dynamic output: compute the shape from the input/weights shapes and descriptor.
    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicOutput(outputInfo))
    {
        try
        {
            ALOGD("Output shape not set, will infer from inputs");
            outputInfo.SetShape(InferDepthwiseConvolution2dOutputShape(inputInfo.GetShape(),
                                                                       weights.GetInfo().GetShape(),
                                                                       desc));
        }
        catch (armnn::Exception& e)
        {
            return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
        }
    }

    // Check backend support before adding any layers to the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer produces the final output.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *endLayer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}
507
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100508bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
509{
510 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
511 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
512
513 if (!input0.IsValid() || !input1.IsValid())
514 {
515 return Fail("%s: Operation has invalid inputs", __func__);
516 }
517
518 const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
519 if (!outputOperand)
520 {
521 return Fail("%s: Could not read output", __func__);
522 }
523
Aron Virginas-Tard7593232019-07-16 13:17:06 +0100524 armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100525 if (IsDynamicOutput(outInfo))
526 {
527 ALOGD("Output shape not set, will infer from inputs");
528 outInfo.SetShape(InferMaximumOutputShape(input0.GetTensorInfo().GetShape(), input1.GetTensorInfo().GetShape()));
529 }
530
Aron Virginas-Tard7593232019-07-16 13:17:06 +0100531 bool isSupported = false;
532 FORWARD_LAYER_SUPPORT_FUNC(__func__,
533 IsMaximumSupported,
534 data.m_Backends,
535 isSupported,
536 input0.GetTensorInfo(),
537 input1.GetTensorInfo(),
538 outInfo);
539
540 if (!isSupported)
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100541 {
542 return false;
543 }
544
545 armnn::IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
546 assert(layer != nullptr);
547 BroadcastTensor(input0, input1, layer, *data.m_Network);
548
549 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
550 0,
551 *layer,
552 model,
553 data,
554 armnn::Optional<armnn::TensorInfo>(outInfo));
555}
556
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100557bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
558{
559 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
560 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
561
562 if (!input0.IsValid() || !input1.IsValid())
563 {
564 return Fail("%s: Operation has invalid inputs", __func__);
565 }
566
567 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
568 if (!output)
569 {
570 return Fail("%s: Could not read output 0", __func__);
571 }
572
573 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
574 if (IsDynamicOutput(outputInfo))
575 {
576 ALOGD("Output shape not set, will infer from inputs");
577 outputInfo.SetShape(InferMinimumOutputShape(input0.GetTensorInfo().GetShape(),
578 input1.GetTensorInfo().GetShape()));
579 }
580
581 bool isSupported = false;
582 FORWARD_LAYER_SUPPORT_FUNC(__func__,
583 IsMinimumSupported,
584 data.m_Backends,
585 isSupported,
586 input0.GetTensorInfo(),
587 input1.GetTensorInfo(),
588 outputInfo);
589
590 if (!isSupported)
591 {
592 return false;
593 }
594
595 armnn::IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
596 assert(layer != nullptr);
597 BroadcastTensor(input0, input1, layer, *data.m_Network);
598
599 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
600 0,
601 *layer,
602 model,
603 data,
604 armnn::Optional<armnn::TensorInfo>(outputInfo));
605}
606
// Converts an NNAPI PAD_V2 operation (HAL 1.2) into an ArmNN Pad layer.
// The pad value (input 2) is read as FLOAT16, FLOAT32 or INT32 depending on
// the input tensor's type; quantized pad values are dequantized using the
// input's quantization parameters.
bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();

    // Input 1 holds the per-dimension (before, after) padding amounts.
    armnn::PadDescriptor descriptor;
    if (!ConvertPaddings<hal_1_2::HalPolicy>(operation, model, data, rank, descriptor))
    {
        return Fail("%s: Could not convert paddings", __func__);
    }

    // Dynamic output: compute the shape from the input shape plus the paddings.
    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicOutput(outputInfo))
    {
        ALOGD("Output shape not set, will infer from inputs");
        outputInfo.SetShape(InferPadOutputShape(inputInfo.GetShape(), descriptor.m_PadList));
    }

    // Determine type of padding value
    OperandType operandType0;
    OperandType operandType2;

    if (!GetOperandType<hal_1_2::HalPolicy>(operation, 0, model, operandType0) ||
        !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // Read value to use for padding; the scalar type must match the tensor type.
    if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
    {
        armnn::Half f16PadValue;
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 2, operandType2, f16PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
        }

        descriptor.m_PadValue = f16PadValue;
    }
    else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
    {
        if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, descriptor.m_PadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
        }
    }
    else if (operandType0 == OperandType::TENSOR_QUANT8_ASYMM && operandType2 == OperandType::INT32)
    {
        // Quantized input: the pad value arrives in the quantized domain and must
        // be dequantized with the input's scale/offset before use.
        int32_t quantizedPadValue = 0;
        if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, quantizedPadValue, model, data))
        {
            return Fail("%s: Could not read input 2 (INT32)", __func__);
        }

        descriptor.m_PadValue = armnn::Dequantize(quantizedPadValue,
                                                  inputInfo.GetQuantizationScale(),
                                                  inputInfo.GetQuantizationOffset());
    }
    else
    {
        return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
    }

    // Check backend support before adding the layer to the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsPadSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *layer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}
707
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100708bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
709{
710 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
711 LayerInputHandle alpha = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
712
713 if (!input.IsValid() || !alpha.IsValid())
714 {
715 return Fail("%s: Operation has invalid inputs", __func__);
716 }
717
718 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
719
720 if (!output)
721 {
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100722 return Fail("%s: Could not read output", __func__);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100723 }
724
725 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
726 const armnn::TensorInfo& alphaInfo = alpha.GetTensorInfo();
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100727
728 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +0100729 if (IsDynamicOutput(outputInfo))
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100730 {
731 ALOGD("Output shape not set, will infer from inputs");
732 outputInfo.SetShape(InferPreluOutputShape(inputInfo.GetShape(), alphaInfo.GetShape()));
733 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100734
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100735 bool isSupported = false;
736 FORWARD_LAYER_SUPPORT_FUNC(__func__,
737 IsPreluSupported,
738 data.m_Backends,
739 isSupported,
740 inputInfo,
741 alphaInfo,
742 outputInfo);
743 if (!isSupported)
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100744 {
745 return false;
746 }
747
748 armnn::IConnectableLayer* const layer = data.m_Network->AddPreluLayer();
749
750 if (!layer)
751 {
752 return Fail("%s: AddPreluLayer failed", __func__);
753 }
754
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100755 BroadcastTensor(input, alpha, layer, *data.m_Network);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100756
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100757 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
758 0,
759 *layer,
760 model,
761 data,
762 armnn::Optional<armnn::TensorInfo>(outputInfo));
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100763}
764
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100765bool HalPolicy::ConvertResize(const Operation& operation,
766 const Model& model,
767 ConversionData& data,
768 armnn::ResizeMethod resizeMethod)
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100769{
770 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
771 if (!input.IsValid())
772 {
773 return Fail("%s: Could not read input 0", __func__);
774 }
775
776 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
777 if (!output)
778 {
779 return Fail("%s: Could not read output 0", __func__);
780 }
781
782 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
Aron Virginas-Tarbe5d3562019-07-16 11:32:29 +0100783 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100784
785 armnn::ResizeDescriptor descriptor;
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100786 descriptor.m_Method = resizeMethod;
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100787 descriptor.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 3, model, data);
788
789 OperandType operandType1;
790 OperandType operandType2;
791
792 if (!GetOperandType<hal_1_2::HalPolicy>(operation, 1, model, operandType1) ||
793 !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
794 {
795 return Fail("%s: Operation has invalid inputs", __func__);
796 }
797
798 if (operandType1 != operandType2)
799 {
800 return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
801 }
802
803 if (operandType1 == OperandType::INT32)
804 {
805 // Case 1: resizing by shape
806 int32_t targetWidth = 0;
807 int32_t targetHeight = 0;
808
809 if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 1, targetWidth, model, data) ||
810 !GetInputInt32<hal_1_2::HalPolicy>(operation, 2, targetHeight, model, data))
811 {
812 return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
813 }
814
815 if (targetWidth < 0 || targetHeight < 0)
816 {
817 return Fail("%s: Operation has invalid inputs for resizing by shape. "
818 "Target width/height cannot be < 0", __func__);
819 }
820
821 descriptor.m_TargetWidth = static_cast<uint32_t>(targetWidth);
Teresa Charlin9843c012019-07-19 12:18:35 +0100822 descriptor.m_TargetHeight = static_cast<uint32_t>(targetHeight);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100823 }
824 else if (operandType1 == OperandType::FLOAT32)
825 {
826 // Case 2: resizing by scale
827 float widthScale = 1.0f;
828 float heightScale = 1.0f;
829
830 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, widthScale, model, data) ||
831 !GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, heightScale, model, data))
832 {
833 return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
834 }
835
836 const armnn::TensorShape& inputShape = inputInfo.GetShape();
837 armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
838
839 float width = inputShape[dataLayoutIndexed.GetWidthIndex()];
840 float height = inputShape[dataLayoutIndexed.GetHeightIndex()];
841
842 descriptor.m_TargetWidth = std::floor(width * widthScale);
843 descriptor.m_TargetHeight = std::floor(height * heightScale);
844 }
845 else
846 {
847 // NOTE: FLOAT16 scales are not supported
848 return false;
849 }
850
Aron Virginas-Tarbe5d3562019-07-16 11:32:29 +0100851 if (IsDynamicOutput(outputInfo))
852 {
853 try
854 {
855 ALOGD("Output shape not set, will infer from inputs");
856 outputInfo.SetShape(InferResizeOutputShape(inputInfo.GetShape(), descriptor));
857 }
858 catch (armnn::Exception& e)
859 {
860 return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
861 }
862 }
863
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100864 bool isSupported = false;
865 FORWARD_LAYER_SUPPORT_FUNC(__func__,
866 IsResizeSupported,
867 data.m_Backends,
868 isSupported,
869 inputInfo,
870 outputInfo,
871 descriptor);
Aron Virginas-Tarbe5d3562019-07-16 11:32:29 +0100872
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100873 if (!isSupported)
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100874 {
875 return false;
876 }
877
878 armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);
879
880 assert(layer != nullptr);
881
882 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
883 input.Connect(layer->GetInputSlot(0));
884
Aron Virginas-Tarbe5d3562019-07-16 11:32:29 +0100885 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
886 0,
887 *layer,
888 model,
889 data,
890 armnn::Optional<armnn::TensorInfo>(outputInfo));
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100891}
892
Keith Davisa6bc52f2019-06-26 09:39:49 +0100893bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
894{
895 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
896
897 if (!input.IsValid() )
898 {
899 return Fail("%s: Operation has invalid inputs", __func__);
900 }
901
902 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
903 unsigned int rank = inputInfo.GetNumDimensions();
904
905 if (rank != 4)
906 {
907 return Fail("%s: Only inputs with rank 4 are supported", __func__);
908 }
909
910 armnn::SpaceToDepthDescriptor desc;
911
912 GetInputScalar<hal_1_2::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
913
914 if (desc.m_BlockSize <= 1)
915 {
916 return Fail("%s: Block size must be at least 1 in all dimensions");
917 }
918
919 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 2, model, data);
920
921 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
922 if (!output)
923 {
924 return Fail("%s: Could not read output 0", __func__);
925 }
926
927 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100928
929 bool isSupported = false;
930 FORWARD_LAYER_SUPPORT_FUNC(__func__,
931 IsSpaceToDepthSupported,
932 data.m_Backends,
933 isSupported,
934 inputInfo,
935 outputInfo,
936 desc);
937 if (!isSupported)
Keith Davisa6bc52f2019-06-26 09:39:49 +0100938 {
939 return false;
940 }
941
942 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
943 assert(layer != nullptr);
944 input.Connect(layer->GetInputSlot(0));
945
946 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
947}
948
Francis Murtagh074c25a2019-07-22 16:40:57 +0100949bool HalPolicy::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
950{
951 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
952 if (!input.IsValid())
953 {
954 return Fail("%s: Operation has invalid inputs", __func__);
955 }
956
957 const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
958 if (!outputOperand)
959 {
960 return Fail("%s: Operation has no outputs", __func__);
961 }
962
963 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
964 if (IsDynamicOutput(outputInfo))
965 {
966 ALOGD("Output shape not set, will infer from input");
967 outputInfo.SetShape(input.GetTensorInfo().GetShape());
968 }
969
970 armnn::SoftmaxDescriptor desc;
971 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, desc.m_Beta, model, data))
972 {
973 return Fail("%s: Operation has invalid inputs", __func__);
974 }
975
976 if (operation.inputs.size() > 2 && !GetInputScalar<hal_1_2::HalPolicy>(operation,
977 2,
978 HalPolicy::OperandType::INT32,
979 desc.m_Axis,
980 model,
981 data))
982 {
983 return Fail("%s: Operation has invalid inputs", __func__);
984 }
985
986 bool isSupported = false;
987 FORWARD_LAYER_SUPPORT_FUNC(__func__,
988 IsSoftmaxSupported,
989 data.m_Backends,
990 isSupported,
991 input.GetTensorInfo(),
992 outputInfo,
993 desc);
994 if (!isSupported)
995 {
996 return false;
997 }
998
999 armnn::IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
1000 assert(layer != nullptr);
1001 input.Connect(layer->GetInputSlot(0));
1002
1003 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
1004 0,
1005 *layer,
1006 model,
1007 data,
1008 armnn::Optional<armnn::TensorInfo>(outputInfo));
1009}
1010
Mike Kellyb5fdf382019-06-11 16:35:25 +01001011} // namespace hal_1_2
Matteo Martincigh17ffff32019-06-27 14:12:55 +01001012} // namespace armnn_driver