blob: 99cc98023a4f8b743a822d3ea64b27e6f1abcdc4 [file] [log] [blame]
Mike Kellyb5fdf382019-06-11 16:35:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "HalPolicy.hpp"
7
8#include "../1.0/HalPolicy.hpp"
9#include "../1.1/HalPolicy.hpp"
10
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +010011#include <DataLayoutIndexed.hpp>
12
13#include <cmath>
14
Mike Kellyb5fdf382019-06-11 16:35:25 +010015namespace armnn_driver
16{
17namespace hal_1_2
18{
19
20bool HandledByV1_0(V1_2::OperationType operationType)
21{
22 switch (static_cast<V1_0::OperationType>(operationType))
23 {
24 case V1_0::OperationType::ADD:
25 case V1_0::OperationType::AVERAGE_POOL_2D:
26 case V1_0::OperationType::CONCATENATION:
27 case V1_0::OperationType::DEPTH_TO_SPACE:
28 case V1_0::OperationType::DEQUANTIZE:
29 case V1_0::OperationType::EMBEDDING_LOOKUP:
30 case V1_0::OperationType::FLOOR:
31 case V1_0::OperationType::FULLY_CONNECTED:
32 case V1_0::OperationType::HASHTABLE_LOOKUP:
33 case V1_0::OperationType::L2_NORMALIZATION:
34 case V1_0::OperationType::L2_POOL_2D:
35 case V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
36 case V1_0::OperationType::LOGISTIC:
37 case V1_0::OperationType::LSH_PROJECTION:
38 case V1_0::OperationType::LSTM:
39 case V1_0::OperationType::MAX_POOL_2D:
40 case V1_0::OperationType::MUL:
41 case V1_0::OperationType::RELU:
42 case V1_0::OperationType::RELU1:
43 case V1_0::OperationType::RELU6:
44 case V1_0::OperationType::RESHAPE:
45 case V1_0::OperationType::RESIZE_BILINEAR:
46 case V1_0::OperationType::RNN:
47 case V1_0::OperationType::SOFTMAX:
48 case V1_0::OperationType::SPACE_TO_DEPTH:
49 case V1_0::OperationType::SVDF:
50 case V1_0::OperationType::TANH:
51 case V1_0::OperationType::OEM_OPERATION:
52 return true;
53 default:
54 return false;
55 }
56}
57
58bool HandledByV1_1(V1_2::OperationType operationType)
59{
60 if (HandledByV1_0(operationType))
61 {
62 return true;
63 }
64 switch (static_cast<V1_1::OperationType>(operationType))
65 {
66 case V1_1::OperationType::BATCH_TO_SPACE_ND:
67 case V1_1::OperationType::DIV:
68 case V1_1::OperationType::MEAN:
69 case V1_1::OperationType::PAD:
70 case V1_1::OperationType::SPACE_TO_BATCH_ND:
71 case V1_1::OperationType::SQUEEZE:
72 case V1_1::OperationType::STRIDED_SLICE:
73 case V1_1::OperationType::SUB:
74 case V1_1::OperationType::TRANSPOSE:
75 return true;
76 default:
77 return false;
78 }
79}
80
81bool HandledByV1_0(const V1_2::Operation& operation)
82{
83 return HandledByV1_0(operation.type);
84}
85
86bool HandledByV1_1(const V1_2::Operation& operation)
87{
88 return HandledByV1_1(operation.type);
89}
90
91V1_0::OperationType CastToV1_0(V1_2::OperationType type)
92{
93 return static_cast<V1_0::OperationType>(type);
94}
95
96V1_1::OperationType CastToV1_1(V1_2::OperationType type)
97{
98 return static_cast<V1_1::OperationType>(type);
99}
100
101V1_0::Operation ConvertToV1_0(const V1_2::Operation& operation)
102{
103 V1_0::Operation op;
104 op.type = CastToV1_0(operation.type);
105 op.inputs = operation.inputs;
106 op.outputs = operation.outputs;
107 return op;
108}
109
110V1_1::Operation ConvertToV1_1(const V1_2::Operation& operation)
111{
112 V1_1::Operation op;
113 op.type = CastToV1_1(operation.type);
114 op.inputs = operation.inputs;
115 op.outputs = operation.outputs;
116 return op;
117}
118
119bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
120{
121 if (HandledByV1_0(operation) && compliantWithV1_0(model))
122 {
123 hal_1_0::HalPolicy::Operation v10Operation = ConvertToV1_0(operation);
124 hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);
125
126 return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
127 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100128
129 if (HandledByV1_1(operation) && compliantWithV1_1(model))
Mike Kellyb5fdf382019-06-11 16:35:25 +0100130 {
131 hal_1_1::HalPolicy::Operation v11Operation = ConvertToV1_1(operation);
132 hal_1_1::HalPolicy::Model v11Model = convertToV1_1(model);
133
134 return hal_1_1::HalPolicy::ConvertOperation(v11Operation, v11Model, data);
135 }
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100136
Mike Kellyb5fdf382019-06-11 16:35:25 +0100137 switch (operation.type)
138 {
139 case V1_2::OperationType::CONV_2D:
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100140 return ConvertConv2d(operation, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +0100141 case V1_2::OperationType::DEPTHWISE_CONV_2D:
Aron Virginas-Tar24e699d2019-06-17 14:47:46 +0100142 return ConvertDepthwiseConv2d(operation, model, data);
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100143 case V1_2::OperationType::PRELU:
144 return ConvertPrelu(operation, model, data);
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100145 case V1_2::OperationType::RESIZE_NEAREST_NEIGHBOR:
146 return ConvertResizeNearestNeighbor(operation, model, data);
Mike Kellyb5fdf382019-06-11 16:35:25 +0100147 default:
148 return Fail("%s: Operation type %s not supported in ArmnnDriver",
149 __func__, toString(operation.type).c_str());
150 }
151}
152
// Converts an NNAPI CONV_2D operation (HAL 1.2 form, which adds optional
// data-layout and dilation inputs) into an ArmNN Convolution2d layer.
// Supports both the implicit-padding (padding scheme) and explicit-padding
// operand layouts. Returns true on success; false via Fail() otherwise.
bool HalPolicy::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    // Input 0: the tensor to convolve.
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    // Inputs 1 and 2: constant weights and bias tensors.
    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 1, model, data);
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Quantized models require bias scale == input scale * weights scale.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    // Determine whether padding is implicit or explicit:
    // 7 inputs is the minimal implicit-padding form; with 8+ inputs the form
    // is implicit iff input 7 is the BOOL data-layout flag (explicit padding
    // puts an INT32 stride there instead).
    // NOTE(review): GetInputOperand(...)->type is dereferenced without a null
    // check here; a malformed model could make GetInputOperand return null.
    bool implicitPadding = operation.inputs.size() == 7 ||
        (operation.inputs.size() >= 8 &&
        GetInputOperand<hal_1_2::HalPolicy>(operation, 7, model)->type == OperandType::BOOL);

    if (implicitPadding)
    {
        // Implicit padding: input 3 = padding scheme, 4/5 = strides,
        // 6 = fused activation, 8/9 = optional dilation factors.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 6, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 8, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        // Weights are indexed here as [ O, H, W, I ] (kernel height at dim 1,
        // width at dim 2), and the input as NHWC — TODO confirm against the
        // operand definitions.
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);

        // Input 7 (if present): optional data-layout flag.
        // NOTE(review): the padding above was computed using NHWC-style
        // indices before the layout is read here — verify behaviour for
        // NCHW models.
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 7, model, data);
    }
    else if (operation.inputs.size() >= 10)
    {
        // Explicit padding: inputs 3-6 = pad left/right/top/bottom,
        // 7/8 = strides, 9 = fused activation, 11/12 = optional dilation.
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 9, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 11, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
        // Input 10 (if present): optional data-layout flag.
        desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 10, model, data);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Reject the operation (returning false, not Fail) if no configured
    // backend can execute this convolution.
    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsConvolution2dSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc,
                                       weights.GetInfo(),
                                       biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    // Register the (possibly activation-wrapped) output slot for operand 0.
    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}
276
// Converts an NNAPI DEPTHWISE_CONV_2D operation (HAL 1.2 form) into an ArmNN
// DepthwiseConvolution2d layer. Handles both implicit- and explicit-padding
// operand layouts and reshapes/permutes the weights into ArmNN's expected
// [ M, I, H, W ] order. Returns true on success; false via Fail() otherwise.
bool HalPolicy::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
{
    // Input 0: the tensor to convolve.
    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const Operand* weightsOperand = GetInputOperand<hal_1_2::HalPolicy>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Determine whether padding is implicit or explicit:
    // 8 inputs is the minimal implicit-padding form; with 9+ inputs the form
    // is implicit iff input 8 is the BOOL data-layout flag.
    // NOTE(review): GetInputOperand(...)->type is dereferenced without a null
    // check here; a malformed model could make GetInputOperand return null.
    bool implicitPadding = operation.inputs.size() == 8 ||
        (operation.inputs.size() >= 9 &&
        GetInputOperand<hal_1_2::HalPolicy>(operation, 8, model)->type == OperandType::BOOL);

    // Look ahead to find the optional DataLayout, if present
    const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
    desc.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, dataLayoutFlagIndex, model, data);

    // Resolve the channel/width/height dimension indices for the chosen layout.
    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();

    // Reinterpret weight data as [ H, W, I, M ]
    // (M = depth multiplier, recovered as (I * M) / I from the last dimension).
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[channelsIndex],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation,
                                                                  1,
                                                                  model,
                                                                  data,
                                                                  HWIMToMIHW,
                                                                  &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin =
        ConvertOperationInputToConstTensorPin<hal_1_2::HalPolicy>(operation, 2, model, data);

    if (!weightsPin.IsValid())
    {
        return Fail("%s: Operation has invalid weights", __func__);
    }

    if (!biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid biases", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Quantized models require bias scale == input scale * weights scale.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (implicitPadding)
    {
        // Implicit padding: input 3 = padding scheme, 4/5 = strides,
        // 7 = fused activation (input 6 is the depth multiplier, consumed
        // via the weights reshape above), 9/10 = optional dilation factors.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<hal_1_2::HalPolicy>(operation, 3, paddingScheme, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 7, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 9, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
        }

        // Weights are now [ M, I, H, W ] after the swizzle above, so the
        // kernel width is dim 3 and the kernel height is dim 2.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else if (operation.inputs.size() >= 11)
    {
        // explicit padding
        // Inputs 3-6 = pad left/right/top/bottom, 7/8 = strides,
        // 10 = fused activation, 12/13 = optional dilation factors.
        if (!GetInputScalar<hal_1_2::HalPolicy>(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
            !GetInputScalar<hal_1_2::HalPolicy>(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
            !GetInputActivationFunction<hal_1_2::HalPolicy>(operation, 10, activation, model, data) ||
            !GetOptionalConvolutionDilationParams<hal_1_2::HalPolicy>(operation, 12, desc, model, data))
        {
            return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
        }
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    // Reject the operation (returning false, not Fail) if no configured
    // backend can execute this depthwise convolution.
    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsDepthwiseConvolutionSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc,
                                       weights.GetInfo(),
                                       biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    // Register the (possibly activation-wrapped) output slot for operand 0.
    return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *endLayer, model, data);
}
430
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100431bool HalPolicy::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
432{
433 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
434 LayerInputHandle alpha = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 1, model, data);
435
436 if (!input.IsValid() || !alpha.IsValid())
437 {
438 return Fail("%s: Operation has invalid inputs", __func__);
439 }
440
441 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
442
443 if (!output)
444 {
445 return Fail("%s: Could not read output 0", __func__);
446 }
447
448 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
449 const armnn::TensorInfo& alphaInfo = alpha.GetTensorInfo();
450 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
451
452 if (!IsLayerSupportedForAnyBackend(__func__,
453 armnn::IsPreluSupported,
454 data.m_Backends,
455 inputInfo,
456 alphaInfo,
457 outputInfo))
458 {
459 return false;
460 }
461
462 armnn::IConnectableLayer* const layer = data.m_Network->AddPreluLayer();
463
464 if (!layer)
465 {
466 return Fail("%s: AddPreluLayer failed", __func__);
467 }
468
469 input.Connect(layer->GetInputSlot(0));
470 alpha.Connect(layer->GetInputSlot(1));
471
472 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
473}
474
Aron Virginas-Tar7a6d11b2019-07-03 15:27:08 +0100475bool HalPolicy::ConvertResizeNearestNeighbor(const Operation& operation, const Model& model, ConversionData& data)
476{
477 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_2::HalPolicy>(operation, 0, model, data);
478 if (!input.IsValid())
479 {
480 return Fail("%s: Could not read input 0", __func__);
481 }
482
483 const Operand* output = GetOutputOperand<hal_1_2::HalPolicy>(operation, 0, model);
484 if (!output)
485 {
486 return Fail("%s: Could not read output 0", __func__);
487 }
488
489 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
490 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
491
492 armnn::ResizeDescriptor descriptor;
493 descriptor.m_Method = armnn::ResizeMethod::NearestNeighbor;
494 descriptor.m_DataLayout = OptionalDataLayout<hal_1_2::HalPolicy>(operation, 3, model, data);
495
496 OperandType operandType1;
497 OperandType operandType2;
498
499 if (!GetOperandType<hal_1_2::HalPolicy>(operation, 1, model, operandType1) ||
500 !GetOperandType<hal_1_2::HalPolicy>(operation, 2, model, operandType2))
501 {
502 return Fail("%s: Operation has invalid inputs", __func__);
503 }
504
505 if (operandType1 != operandType2)
506 {
507 return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
508 }
509
510 if (operandType1 == OperandType::INT32)
511 {
512 // Case 1: resizing by shape
513 int32_t targetWidth = 0;
514 int32_t targetHeight = 0;
515
516 if (!GetInputInt32<hal_1_2::HalPolicy>(operation, 1, targetWidth, model, data) ||
517 !GetInputInt32<hal_1_2::HalPolicy>(operation, 2, targetHeight, model, data))
518 {
519 return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
520 }
521
522 if (targetWidth < 0 || targetHeight < 0)
523 {
524 return Fail("%s: Operation has invalid inputs for resizing by shape. "
525 "Target width/height cannot be < 0", __func__);
526 }
527
528 descriptor.m_TargetWidth = static_cast<uint32_t>(targetWidth);
529 descriptor.m_TargetWidth = static_cast<uint32_t>(targetHeight);
530 }
531 else if (operandType1 == OperandType::FLOAT32)
532 {
533 // Case 2: resizing by scale
534 float widthScale = 1.0f;
535 float heightScale = 1.0f;
536
537 if (!GetInputFloat32<hal_1_2::HalPolicy>(operation, 1, widthScale, model, data) ||
538 !GetInputFloat32<hal_1_2::HalPolicy>(operation, 2, heightScale, model, data))
539 {
540 return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
541 }
542
543 const armnn::TensorShape& inputShape = inputInfo.GetShape();
544 armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
545
546 float width = inputShape[dataLayoutIndexed.GetWidthIndex()];
547 float height = inputShape[dataLayoutIndexed.GetHeightIndex()];
548
549 descriptor.m_TargetWidth = std::floor(width * widthScale);
550 descriptor.m_TargetHeight = std::floor(height * heightScale);
551 }
552 else
553 {
554 // NOTE: FLOAT16 scales are not supported
555 return false;
556 }
557
558 if (!IsLayerSupportedForAnyBackend(__func__,
559 armnn::IsResizeSupported,
560 data.m_Backends,
561 inputInfo,
562 outputInfo,
563 descriptor))
564 {
565 return false;
566 }
567
568 armnn::IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);
569
570 assert(layer != nullptr);
571
572 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
573 input.Connect(layer->GetInputSlot(0));
574
575 return SetupAndTrackLayerOutputSlot<hal_1_2::HalPolicy>(operation, 0, *layer, model, data);
576}
577
Mike Kellyb5fdf382019-06-11 16:35:25 +0100578} // namespace hal_1_2
Matteo Martincigh17ffff32019-06-27 14:12:55 +0100579} // namespace armnn_driver