blob: 5fe54d80c44526d7e845203f0b062afb0579ef02 [file] [log] [blame]
Mike Kellyb5fdf382019-06-11 16:35:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "HalPolicy.hpp"
7
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +01008#include "OutputShapeUtils.hpp"
9
Mike Kellyb5fdf382019-06-11 16:35:25 +010010#include "../1.0/HalPolicy.hpp"
11#include "../1.1/HalPolicy.hpp"
12
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010013#include <DataLayoutIndexed.hpp>
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +010014#include <Half.hpp>
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010015
16#include <cmath>
17
Mike Kellyb5fdf382019-06-11 16:35:25 +010018namespace armnn_driver
19{
20namespace hal_1_2
21{
22
23bool HandledByV1_0(V1_2::OperationType operationType)
24{
25 switch (static_cast<V1_0::OperationType>(operationType))
26 {
27 case V1_0::OperationType::ADD:
28 case V1_0::OperationType::AVERAGE_POOL_2D:
29 case V1_0::OperationType::CONCATENATION:
30 case V1_0::OperationType::DEPTH_TO_SPACE:
31 case V1_0::OperationType::DEQUANTIZE:
32 case V1_0::OperationType::EMBEDDING_LOOKUP:
33 case V1_0::OperationType::FLOOR:
34 case V1_0::OperationType::FULLY_CONNECTED:
35 case V1_0::OperationType::HASHTABLE_LOOKUP:
36 case V1_0::OperationType::L2_NORMALIZATION:
37 case V1_0::OperationType::L2_POOL_2D:
38 case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
39 case V1_0::OperationType::LOGISTIC:
40 case V1_0::OperationType::LSH_PROJECTION:
41 case V1_0::OperationType::LSTM:
42 case V1_0::OperationType::MAX_POOL_2D:
43 case V1_0::OperationType::MUL:
44 case V1_0::OperationType::RELU:
45 case V1_0::OperationType::RELU1:
46 case V1_0::OperationType::RELU6:
47 case V1_0::OperationType::RESHAPE:
Mike Kellyb5fdf382019-06-11 16:35:25 +010048 case V1_0::OperationType::RNN:
49 case V1_0::OperationType::SOFTMAX:
50 case V1_0::OperationType::SPACE_TO_DEPTH:
51 case V1_0::OperationType::SVDF:
52 case V1_0::OperationType::TANH:
53 case V1_0::OperationType::OEM_OPERATION:
54 return true;
55 default:
56 return false;
57 }
58}
59
60bool HandledByV1_1(V1_2::OperationType operationType)
61{
62 if (HandledByV1_0(operationType))
63 {
64 return true;
65 }
66 switch (static_cast<V1_1::OperationType>(operationType))
67 {
68 case V1_1::OperationType::BATCH_TO_SPACE_ND:
69 case V1_1::OperationType::DIV:
70 case V1_1::OperationType::MEAN:
71 case V1_1::OperationType::PAD:
72 case V1_1::OperationType::SPACE_TO_BATCH_ND:
73 case V1_1::OperationType::SQUEEZE:
74 case V1_1::OperationType::STRIDED_SLICE:
75 case V1_1::OperationType::SUB:
76 case V1_1::OperationType::TRANSPOSE:
77 return true;
78 default:
79 return false;
80 }
81}
82
// Convenience overload: decide delegation to the 1.0 policy from the
// operation's type alone.
bool HandledByV1_0(const V1_2::Operation& operation)
{
    return HandledByV1_0(operation.type);
}
87
// Convenience overload: decide delegation to the 1.1 policy from the
// operation's type alone.
bool HandledByV1_1(const V1_2::Operation& operation)
{
    return HandledByV1_1(operation.type);
}
92
// Reinterpret a 1.2 operation type as its 1.0 counterpart.
// Only meaningful when HandledByV1_0() has confirmed the value is in range.
V1_0::OperationType CastToV1_0(V1_2::OperationType type)
{
    return static_cast<V1_0::OperationType>(type);
}
97
// Reinterpret a 1.2 operation type as its 1.1 counterpart.
// Only meaningful when HandledByV1_1() has confirmed the value is in range.
V1_1::OperationType CastToV1_1(V1_2::OperationType type)
{
    return static_cast<V1_1::OperationType>(type);
}
102
103V1_0::Operation ConvertToV1_0(const V1_2::Operation& operation)
104{
105 V1_0::Operation op;
106 op.type = CastToV1_0(operation.type);
107 op.inputs = operation.inputs;
108 op.outputs = operation.outputs;
109 return op;
110}
111
112V1_1::Operation ConvertToV1_1(const V1_2::Operation& operation)
113{
114 V1_1::Operation op;
115 op.type = CastToV1_1(operation.type);
116 op.inputs = operation.inputs;
117 op.outputs = operation.outputs;
118 return op;
119}
120
121bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
122{
123 if (HandledByV1_0(operation) && compliantWithV1_0(model))
124 {
125 hal_1_0::HalPolicy::Operation v10Operation = ConvertToV1_0(operation);
126 hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);
127
128 return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
129 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100130
131 if (HandledByV1_1(operation) && compliantWithV1_1(model))
Mike Kellyb5fdf382019-06-11 16:35:25 +0100132 {
133 hal_1_1::HalPolicy::Operation v11Operation = ConvertToV1_1(operation);
134 hal_1_1::HalPolicy::Model v11Model = convertToV1_1(model);
135
136 return hal_1_1::HalPolicy::ConvertOperation(v11Operation, v11Model, data);
137 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100138
Mike Kellyb5fdf382019-06-11 16:35:25 +0100139 switch (operation.type)
140 {
141 case V1_2::OperationType::CONV_2D:
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100142 return ConvertConv2d(operation, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +0100143 case V1_2::OperationType::DEPTHWISE_CONV_2D:
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100144 return ConvertDepthwiseConv2d(operation, model, data);
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100145 case V1_2::OperationType::MAXIMUM:
146 return ConvertMaximum(operation, model, data);
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100147 case V1_2::OperationType::MINIMUM:
148 return ConvertMinimum(operation, model, data);
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100149 case V1_2::OperationType::PAD_V2:
150 return ConvertPadV2(operation, model, data);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100151 case V1_2::OperationType::PRELU:
152 return ConvertPrelu(operation, model, data);
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100153 case V1_2::OperationType::RESIZE_BILINEAR:
154 return ConvertResize(operation, model, data, armnn::ResizeMethod::Bilinear);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100155 case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR:
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100156 return ConvertResize(operation, model, data, armnn::ResizeMethod::NearestNeighbor);
Mike Kellyb5fdf382019-06-11 16:35:25 +0100157 default:
158 return Fail("%s: Operation type %s not supported in ArmnnDriver",
159 __func__, toString(operation.type).c_str());
160 }
161}
162
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100163bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
164{
165 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
166 if (!input.IsValid())
167 {
168 return Fail("%s: Operation has invalid inputs", __func__);
169 }
170
171 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
172 if (!output)
173 {
174 return Fail("%s: Could not read output 0", __func__);
175 }
176
Aron Virginas-Tar2b173122019-07-15 14:29:09 +0100177 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
178 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +0100179
Mike Kellye1d60bb2019-07-11 11:44:52 +0100180 armnn::Convolution2dDescriptor desc;
181 desc.m_DataLayout = armnn::DataLayout::NHWC;
182
183 // Determine whether padding is implicit or explicit
184 bool implicitPadding = operation.inputs.size() == 7 ||
185 (operation.inputs.size() >= 8 &&
186 GetInputOperand<hal_1_2::HalPolicy>(operation, 7, model)->type == OperandType::BOOL);
187
188 if (implicitPadding)
189 {
190 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 7, model, data);
191 }
192 else if (operation.inputs.size() >= 10)
193 {
194 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 10, model, data);
195 }
196
197 const armnn::PermutationVector OHWIToOIHW = {0, 2, 3, 1};
198
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100199 // ArmNN does not currently support non-fixed weights or bias
Mike Kellye1d60bb2019-07-11 11:44:52 +0100200 // The NNAPI filter is always OHWI [depth_out, filter_height, filter_width, depth_in] but ArmNN expects the
201 // filter's height and width indices to match the input's height and width indices so we permute it to OIHW if
202 // the DataLayout is NCHW
203 const ConstTensorPin weightsPin = (desc.m_DataLayout == armnn::DataLayout::NCHW) ?
204 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data, OHWIToOIHW) :
205 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100206 const ConstTensorPin biasPin =
Mike Kellye1d60bb2019-07-11 11:44:52 +0100207 ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100208
209 if (!weightsPin.IsValid())
210 {
211 return Fail("%s: Operation has invalid weights", __func__);
212 }
213
214 if (!biasPin.IsValid())
215 {
216 return Fail("%s: Operation has invalid biases", __func__);
217 }
218
219 armnn::ConstTensor weights = weightsPin.GetConstTensor();
220 armnn::ConstTensor bias = biasPin.GetConstTensor();
221 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
222
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100223 ActivationFn activation;
224
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100225 if (implicitPadding)
226 {
227 android::nn::PaddingScheme paddingScheme;
228 if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
229 !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
230 !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
231 !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 6, activation, model, data) ||
232 !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 8, desc, model, data))
233 {
234 return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
235 }
236
Mike Kellye1d60bb2019-07-11 11:44:52 +0100237 armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
238 unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
239 unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
240 const uint32_t kernelX = weights.GetShape()[widthIndex];
241 const uint32_t kernelY = weights.GetShape()[heightIndex];
242 const uint32_t inputX = inputInfo.GetShape()[widthIndex];
243 const uint32_t inputY = inputInfo.GetShape()[heightIndex];
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100244
Mike Kelly86b36d42019-07-12 16:39:33 +0100245 CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
246 CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100247
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100248 }
249 else if (operation.inputs.size() >= 10)
250 {
251 // explicit padding
252 if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
253 !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
254 !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
255 !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
256 !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
257 !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
258 !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 9, activation, model, data) ||
259 !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 11, desc, model, data))
260 {
261 return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
262 }
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100263 }
264 else
265 {
266 return Fail("%s: Unsupported number of operation inputs", __func__);
267 }
268
269 desc.m_BiasEnabled = true;
270 armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());
271
Aron Virginas-Tar2b173122019-07-15 14:29:09 +0100272 if (IsDynamicOutput(outputInfo))
273 {
274 try
275 {
276 ALOGD("Output shape not set, will infer from inputs");
277 outputInfo.SetShape(InferConvolution2dOutputShape(inputInfo.GetShape(),
278 weights.GetInfo().GetShape(),
279 desc));
280 }
281 catch (armnn::Exception& e)
282 {
283 return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
284 }
285 }
286
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100287 bool isSupported = false;
288 FORWARD_LAYER_SUPPORT_FUNC(__func__,
289 IsConvolution2dSupported,
290 data.m_Backends,
291 isSupported,
292 inputInfo,
293 outputInfo,
294 desc,
295 weights.GetInfo(),
296 biases);
Aron Virginas-Tar2b173122019-07-15 14:29:09 +0100297
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100298 if (!isSupported)
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100299 {
300 return false;
301 }
302
303 armnn::IConnectableLayer* startLayer =
304 data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
305
306 if (!startLayer)
307 {
308 return Fail("%s: AddConvolution2dLayer failed", __func__);
309 }
310
311 armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
312
313 if (!endLayer)
314 {
315 return Fail("%s: ProcessActivation failed", __func__);
316 }
317
318 input.Connect(startLayer->GetInputSlot(0));
319
Aron Virginas-Tar2b173122019-07-15 14:29:09 +0100320 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
321 0,
322 *endLayer,
323 model,
324 data,
325 armnn::Optional<armnn::TensorInfo>(outputInfo));
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100326}
327
// Converts an NNAPI (HAL 1.2) DEPTHWISE_CONV_2D operation into an ArmNN
// DepthwiseConvolution2d layer and adds it to the network in 'data'.
// Returns false (via Fail) on malformed inputs or when no backend supports
// the resulting layer configuration.
bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const Operand* weightsOperand = GetInputOperand<hal_1_2::HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit.
    // Implicit form has 8 mandatory inputs; a BOOL at index 8 means the
    // optional data-layout flag of the implicit variant, not explicit padding.
    bool implicitPadding = operation.inputs.size() == 8 ||
        (operation.inputs.size() >= 9 &&
        GetInputOperand<hal_1_2::HalPolicy>(operation, 8, model)->type == OperandType::BOOL);

    // Look ahead to find the optional DataLayout, if present
    const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
    desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, dataLayoutFlagIndex, model, data);

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();

    // Reinterpret weight data as [ H, W, I, M ]
    // (M is the depth multiplier: NNAPI packs it as depth_out = I * M.)
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[channelsIndex],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  HWIMToMIHW,
                                                                  &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Quantized models require bias scale == input scale * weights scale;
    // fix it up here rather than failing validation later.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        // Implicit variant: padding scheme + strides + fused activation,
        // with optional dilation parameters starting at input 9.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 7, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        // Weights were swizzled to [ M, I, H, W ] above, so W is index 3 and
        // H is index 2 regardless of the input's data layout.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 11)
    {
        // explicit padding
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 10, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Dynamic output: compute the shape from input/weights/descriptor.
    // InferDepthwiseConvolution2dOutputShape throws on inconsistent parameters.
    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicOutput(outputInfo))
    {
        try
        {
            ALOGD("Output shape not set, will infer from inputs");
            outputInfo.SetShape(InferDepthwiseConvolution2dOutputShape(inputInfo.GetShape(),
                                                                       weights.GetInfo().GetShape(),
                                                                       desc));
        }
        catch (armnn::Exception& e)
        {
            return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
        }
    }

    // Ask the configured backends whether this exact configuration is runnable.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsDepthwiseConvolutionSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               desc,
                               weights.GetInfo(),
                               biases);

    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer == startLayer when the
    // activation is NONE.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
                                                            0,
                                                            *endLayer,
                                                            model,
                                                            data,
                                                            armnn::Optional<armnn::TensorInfo>(outputInfo));
}
506
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100507bool HalPolicy::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
508{
509 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
510 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
511
512 if (!input0.IsValid() || !input1.IsValid())
513 {
514 return Fail("%s: Operation has invalid inputs", __func__);
515 }
516
517 const Operand* outputOperand = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
518 if (!outputOperand)
519 {
520 return Fail("%s: Could not read output", __func__);
521 }
522
Aron Virginas-Tard7593232019-07-16 13:17:06 +0100523 armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100524 if (IsDynamicOutput(outInfo))
525 {
526 ALOGD("Output shape not set, will infer from inputs");
527 outInfo.SetShape(InferMaximumOutputShape(input0.GetTensorInfo().GetShape(), input1.GetTensorInfo().GetShape()));
528 }
529
Aron Virginas-Tard7593232019-07-16 13:17:06 +0100530 bool isSupported = false;
531 FORWARD_LAYER_SUPPORT_FUNC(__func__,
532 IsMaximumSupported,
533 data.m_Backends,
534 isSupported,
535 input0.GetTensorInfo(),
536 input1.GetTensorInfo(),
537 outInfo);
538
539 if (!isSupported)
Narumol Prangnawarat95b1ef62019-07-15 12:02:20 +0100540 {
541 return false;
542 }
543
544 armnn::IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
545 assert(layer != nullptr);
546 BroadcastTensor(input0, input1, layer, *data.m_Network);
547
548 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
549 0,
550 *layer,
551 model,
552 data,
553 armnn::Optional<armnn::TensorInfo>(outInfo));
554}
555
Ellen Norris-Thompson1cb29aa2019-07-11 17:27:37 +0100556bool HalPolicy::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
557{
558 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
559 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
560
561 if (!input0.IsValid() || !input1.IsValid())
562 {
563 return Fail("%s: Operation has invalid inputs", __func__);
564 }
565
566 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
567 if (!output)
568 {
569 return Fail("%s: Could not read output 0", __func__);
570 }
571
572 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
573 if (IsDynamicOutput(outputInfo))
574 {
575 ALOGD("Output shape not set, will infer from inputs");
576 outputInfo.SetShape(InferMinimumOutputShape(input0.GetTensorInfo().GetShape(),
577 input1.GetTensorInfo().GetShape()));
578 }
579
580 bool isSupported = false;
581 FORWARD_LAYER_SUPPORT_FUNC(__func__,
582 IsMinimumSupported,
583 data.m_Backends,
584 isSupported,
585 input0.GetTensorInfo(),
586 input1.GetTensorInfo(),
587 outputInfo);
588
589 if (!isSupported)
590 {
591 return false;
592 }
593
594 armnn::IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
595 assert(layer != nullptr);
596 BroadcastTensor(input0, input1, layer, *data.m_Network);
597
598 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
599 0,
600 *layer,
601 model,
602 data,
603 armnn::Optional<armnn::TensorInfo>(outputInfo));
604}
605
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100606bool HalPolicy::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
607{
608 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
609 if (!input.IsValid())
610 {
611 return Fail("%s: Could not read input 0", __func__);
612 }
613
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +0100614 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
615 if (!output)
616 {
617 return Fail("%s: Could not read output", __func__);
618 }
619
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100620 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
621 unsigned int rank = inputInfo.GetNumDimensions();
622
623 armnn::PadDescriptor descriptor;
624 if (!ConvertPaddings<hal_1_2::HalPolicy>(operation, model, data, rank, descriptor))
625 {
626 return Fail("%s: Could not convert paddings", __func__);
627 }
628
Sadik Armagan310d8ff2019-07-11 10:53:38 +0100629 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
630 if (IsDynamicOutput(outputInfo))
631 {
632 ALOGD("Output shape not set, will infer from inputs");
633 outputInfo.SetShape(InferPadOutputShape(inputInfo.GetShape(), descriptor.m_PadList));
634 }
635
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100636 // Determine type of padding value
637 OperandType operandType0;
638 OperandType operandType2;
639
640 if (!GetOperandType<hal_1_2::HalPolicy>(operation, 0, model, operandType0) ||
641 !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
642 {
643 return Fail("%s: Operation has invalid inputs", __func__);
644 }
645
646 // Read value to use for padding
647 if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
648 {
649 armnn::Half f16PadValue;
650 if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 2, operandType2, f16PadValue, model, data))
651 {
652 return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
653 }
654
655 descriptor.m_PadValue = f16PadValue;
656 }
657 else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
658 {
659 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, descriptor.m_PadValue, model, data))
660 {
661 return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
662 }
663 }
664 else if (operandType0 == OperandType::TENSOR_QUANT8_ASYMM && operandType2 == OperandType::INT32)
665 {
666 int32_t quantizedPadValue = 0;
667 if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 2, quantizedPadValue, model, data))
668 {
669 return Fail("%s: Could not read input 2 (INT32)", __func__);
670 }
671
672 descriptor.m_PadValue = armnn::Dequantize(quantizedPadValue,
673 inputInfo.GetQuantizationScale(),
674 inputInfo.GetQuantizationOffset());
675 }
676 else
677 {
678 return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
679 }
680
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100681 bool isSupported = false;
682 FORWARD_LAYER_SUPPORT_FUNC(__func__,
683 IsPadSupported,
684 data.m_Backends,
685 isSupported,
686 inputInfo,
687 outputInfo,
688 descriptor);
689 if (!isSupported)
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100690 {
691 return false;
692 }
693
694 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
695 assert(layer != nullptr);
696 input.Connect(layer->GetInputSlot(0));
697 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
698
Sadik Armagan310d8ff2019-07-11 10:53:38 +0100699 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
700 0,
701 *layer,
702 model,
703 data,
704 armnn::Optional<armnn::TensorInfo>(outputInfo));
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100705}
706
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100707bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
708{
709 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
710 LayerInputHandle alpha = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
711
712 if (!input.IsValid() || !alpha.IsValid())
713 {
714 return Fail("%s: Operation has invalid inputs", __func__);
715 }
716
717 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
718
719 if (!output)
720 {
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100721 return Fail("%s: Could not read output", __func__);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100722 }
723
724 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
725 const armnn::TensorInfo& alphaInfo = alpha.GetTensorInfo();
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100726
727 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +0100728 if (IsDynamicOutput(outputInfo))
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100729 {
730 ALOGD("Output shape not set, will infer from inputs");
731 outputInfo.SetShape(InferPreluOutputShape(inputInfo.GetShape(), alphaInfo.GetShape()));
732 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100733
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100734 bool isSupported = false;
735 FORWARD_LAYER_SUPPORT_FUNC(__func__,
736 IsPreluSupported,
737 data.m_Backends,
738 isSupported,
739 inputInfo,
740 alphaInfo,
741 outputInfo);
742 if (!isSupported)
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100743 {
744 return false;
745 }
746
747 armnn::IConnectableLayer* const layer = data.m_Network->AddPreluLayer();
748
749 if (!layer)
750 {
751 return Fail("%s: AddPreluLayer failed", __func__);
752 }
753
Matteo Martincigh0bd89a82019-07-02 16:53:10 +0100754 BroadcastTensor(input, alpha, layer, *data.m_Network);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100755
Aron Virginas-Tarf03fcf02019-07-09 17:44:24 +0100756 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
757 0,
758 *layer,
759 model,
760 data,
761 armnn::Optional<armnn::TensorInfo>(outputInfo));
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100762}
763
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100764bool HalPolicy::ConvertResize(const Operation& operation,
765 const Model& model,
766 ConversionData& data,
767 armnn::ResizeMethod resizeMethod)
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100768{
769 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
770 if (!input.IsValid())
771 {
772 return Fail("%s: Could not read input 0", __func__);
773 }
774
775 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
776 if (!output)
777 {
778 return Fail("%s: Could not read output 0", __func__);
779 }
780
781 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
Aron Virginas-Tarbe5d3562019-07-16 11:32:29 +0100782 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100783
784 armnn::ResizeDescriptor descriptor;
Aron Virginas-Tarfb2fa292019-07-04 11:59:48 +0100785 descriptor.m_Method = resizeMethod;
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100786 descriptor.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 3, model, data);
787
788 OperandType operandType1;
789 OperandType operandType2;
790
791 if (!GetOperandType<hal_1_2::HalPolicy>(operation, 1, model, operandType1) ||
792 !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
793 {
794 return Fail("%s: Operation has invalid inputs", __func__);
795 }
796
797 if (operandType1 != operandType2)
798 {
799 return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
800 }
801
802 if (operandType1 == OperandType::INT32)
803 {
804 // Case 1: resizing by shape
805 int32_t targetWidth = 0;
806 int32_t targetHeight = 0;
807
808 if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 1, targetWidth, model, data) ||
809 !GetInputInt32<hal_1_2::HalPolicy>(operation, 2, targetHeight, model, data))
810 {
811 return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
812 }
813
814 if (targetWidth < 0 || targetHeight < 0)
815 {
816 return Fail("%s: Operation has invalid inputs for resizing by shape. "
817 "Target width/height cannot be < 0", __func__);
818 }
819
820 descriptor.m_TargetWidth = static_cast<uint32_t>(targetWidth);
Teresa Charlin9843c012019-07-19 12:18:35 +0100821 descriptor.m_TargetHeight = static_cast<uint32_t>(targetHeight);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100822 }
823 else if (operandType1 == OperandType::FLOAT32)
824 {
825 // Case 2: resizing by scale
826 float widthScale = 1.0f;
827 float heightScale = 1.0f;
828
829 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, widthScale, model, data) ||
830 !GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, heightScale, model, data))
831 {
832 return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
833 }
834
835 const armnn::TensorShape& inputShape = inputInfo.GetShape();
836 armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
837
838 float width = inputShape[dataLayoutIndexed.GetWidthIndex()];
839 float height = inputShape[dataLayoutIndexed.GetHeightIndex()];
840
841 descriptor.m_TargetWidth = std::floor(width * widthScale);
842 descriptor.m_TargetHeight = std::floor(height * heightScale);
843 }
844 else
845 {
846 // NOTE: FLOAT16 scales are not supported
847 return false;
848 }
849
Aron Virginas-Tarbe5d3562019-07-16 11:32:29 +0100850 if (IsDynamicOutput(outputInfo))
851 {
852 try
853 {
854 ALOGD("Output shape not set, will infer from inputs");
855 outputInfo.SetShape(InferResizeOutputShape(inputInfo.GetShape(), descriptor));
856 }
857 catch (armnn::Exception& e)
858 {
859 return Fail("%s: Could not infer dynamic output shape: %s", __func__, e.what());
860 }
861 }
862
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100863 bool isSupported = false;
864 FORWARD_LAYER_SUPPORT_FUNC(__func__,
865 IsResizeSupported,
866 data.m_Backends,
867 isSupported,
868 inputInfo,
869 outputInfo,
870 descriptor);
Aron Virginas-Tarbe5d3562019-07-16 11:32:29 +0100871
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100872 if (!isSupported)
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100873 {
874 return false;
875 }
876
877 armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);
878
879 assert(layer != nullptr);
880
881 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
882 input.Connect(layer->GetInputSlot(0));
883
Aron Virginas-Tarbe5d3562019-07-16 11:32:29 +0100884 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation,
885 0,
886 *layer,
887 model,
888 data,
889 armnn::Optional<armnn::TensorInfo>(outputInfo));
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100890}
891
Keith Davisa6bc52f2019-06-26 09:39:49 +0100892bool HalPolicy::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
893{
894 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
895
896 if (!input.IsValid() )
897 {
898 return Fail("%s: Operation has invalid inputs", __func__);
899 }
900
901 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
902 unsigned int rank = inputInfo.GetNumDimensions();
903
904 if (rank != 4)
905 {
906 return Fail("%s: Only inputs with rank 4 are supported", __func__);
907 }
908
909 armnn::SpaceToDepthDescriptor desc;
910
911 GetInputScalar<hal_1_2::HalPolicy>(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
912
913 if (desc.m_BlockSize <= 1)
914 {
915 return Fail("%s: Block size must be at least 1 in all dimensions");
916 }
917
918 desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 2, model, data);
919
920 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
921 if (!output)
922 {
923 return Fail("%s: Could not read output 0", __func__);
924 }
925
926 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100927
928 bool isSupported = false;
929 FORWARD_LAYER_SUPPORT_FUNC(__func__,
930 IsSpaceToDepthSupported,
931 data.m_Backends,
932 isSupported,
933 inputInfo,
934 outputInfo,
935 desc);
936 if (!isSupported)
Keith Davisa6bc52f2019-06-26 09:39:49 +0100937 {
938 return false;
939 }
940
941 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
942 assert(layer != nullptr);
943 input.Connect(layer->GetInputSlot(0));
944
945 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
946}
947
Mike Kellyb5fdf382019-06-11 16:35:25 +0100948} // namespace hal_1_2
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100949} // namespace armnn_driver