//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "HalPolicy.hpp"

#include "../1.0/HalPolicy.hpp"

namespace armnn_driver
{
namespace hal_1_1
{

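// Entry point for HAL 1.1 operation conversion: operations that are expressible in the 1.0 API are
// down-converted and delegated to hal_1_0::HalPolicy; the remaining 1.1-only operations are handled
// by the converters below.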
bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
{
    if (compliantWithV1_0(operation))
    {
        hal_1_0::HalPolicy::Operation v10Operation = convertToV1_0(operation);
        hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);

        return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
    }
    else
    {
        switch (operation.type)
        {
            case V1_1::OperationType::DIV:
                return ConvertDiv(operation, model, data);
            case V1_1::OperationType::SUB:
                return ConvertSub(operation, model, data);
            case V1_1::OperationType::MEAN:
                return ConvertMean(operation, model, data);
            case V1_1::OperationType::PAD:
                return ConvertPad(operation, model, data);
            case V1_1::OperationType::SPACE_TO_BATCH_ND:
                return ConvertSpaceToBatchNd(operation, model, data);
            case V1_1::OperationType::SQUEEZE:
                return ConvertSqueeze(operation, model, data);
            case V1_1::OperationType::TRANSPOSE:
                return ConvertTranspose(operation, model, data);
            case V1_1::OperationType::BATCH_TO_SPACE_ND:
                return ConvertBatchToSpaceNd(operation, model, data);
            default:
                return Fail("%s: Operation type %s not supported in ArmnnDriver",
                            __func__, toString(operation.type).c_str());
        }
    }
}

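// Converts an Android NN DIV operation into an ArmNN Division layer,
// broadcasting the two inputs and applying the optional fused activation.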
bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2
    // and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);

    if (!IsLayerSupported(__func__,
                          armnn::IsDivisionSupported,
                          data.m_Compute,
                          input0.GetTensorInfo(),
                          input1.GetTensorInfo(),
                          outInfo))
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);

    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();

    if (endLayer)
    {
        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
        return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
    }

    return Fail("%s: ProcessActivation failed", __func__);
}

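// Converts an Android NN SUB operation into an ArmNN Subtraction layer,
// broadcasting the two inputs and applying the optional fused activation.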
bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2
    // and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);

    if (!IsLayerSupported(__func__,
                          armnn::IsSubtractionSupported,
                          data.m_Compute,
                          input0.GetTensorInfo(),
                          input1.GetTensorInfo(),
                          outInfo))
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);

    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();

    if (endLayer)
    {
        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
        return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
    }

    return Fail("%s: ProcessActivation failed", __func__);
}

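// Converts an Android NN MEAN operation into an ArmNN Mean layer.
// Input 1 holds the reduction axes and input 2 the "keep dims" flag.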
bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* axisOperand = GetInputOperand(operation, 1, model);
    if (!axisOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    std::vector<int32_t> axis;
    if (!GetTensorInt32Values(*axisOperand, axis, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // Convert the axis to unsigned int and remove duplicates.
    unsigned int rank = inputInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });

    // Get the "keep dims" flag.
    int32_t keepDims = 0;
    if (!GetInputInt32(operation, 2, keepDims, model, data))
    {
        return Fail("%s: Could not read input 2", __func__);
    }

    armnn::MeanDescriptor descriptor;
    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    descriptor.m_KeepDims = keepDims > 0;

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (!IsLayerSupported(__func__,
                          armnn::IsMeanSupported,
                          data.m_Compute,
                          inputInfo,
                          outputInfo,
                          descriptor))
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

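// Converts an Android NN PAD operation into an ArmNN Pad layer.
// Input 1 is a [rank, 2] tensor holding the before/after padding for each input dimension.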
bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    const Operand* paddingsOperand = GetInputOperand(operation, 1, model);

    if (!paddingsOperand)
    {
        return Fail("%s: Could not read paddings operand", __func__);
    }

    unsigned int rank = inputInfo.GetNumDimensions();
    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
    {
        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
    }

    std::vector<int32_t> paddings;
    GetTensorInt32Values(*paddingsOperand, paddings, model, data);

    // add padding for each dimension of input tensor.
    armnn::PadDescriptor descriptor;
    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
    {
        int paddingBeforeInput = paddings[i];
        int paddingAfterInput = paddings[i + 1];
        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
        {
            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
        }
        descriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (!IsLayerSupported(__func__,
                          armnn::IsPadSupported,
                          data.m_Compute,
                          inputInfo,
                          outputInfo,
                          descriptor))
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

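// Converts an Android NN SPACE_TO_BATCH_ND operation into an ArmNN SpaceToBatchNd layer.
// Expects a rank-4 NHWC input; input 1 holds the block shape and input 2 the paddings.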
bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    unsigned int spatialDim = rank - 2;

    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }

    armnn::SpaceToBatchNdDescriptor descriptor;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;

    const Operand* blockShapeOperand = GetInputOperand(operation, 1, model);
    const Operand* paddingsOperand = GetInputOperand(operation, 2, model);

    armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
    if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
    {
        return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
    }

    std::vector<int32_t> blockShape;
    GetTensorInt32Values(*blockShapeOperand, blockShape, model, data);
    for (unsigned int i = 0; i < blockShape.size(); i++)
    {
        if (blockShape[i] < 1)
        {
            return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
        }

        descriptor.m_BlockShape.push_back((unsigned int) blockShape[i]);
    }

    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
    {
        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
    }

    std::vector<int32_t> paddings;
    GetTensorInt32Values(*paddingsOperand, paddings, model, data);
    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
    {
        int paddingBeforeInput = paddings[i];
        int paddingAfterInput = paddings[i + 1];
        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
        {
            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
        }

        descriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (!IsLayerSupported(__func__,
                          armnn::IsSpaceToBatchNdSupported,
                          data.m_Compute,
                          inputInfo,
                          outputInfo,
                          descriptor))
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

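// Converts an Android NN SQUEEZE operation into an ArmNN Reshape layer that drops the size-1
// dimensions listed in the optional axis operand (or all size-1 dimensions if it is omitted).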
bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }

    // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
    // if the operand index is out of bounds.
    const Operand* axisOperand = GetInputOperand(operation, 1, model, false);

    const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };

    std::vector<int32_t> axis;
    if (!axisOperand)
    {
        axis.assign(dimensionSequence, dimensionSequence + rank);
    }
    else
    {
        GetTensorInt32Values(*axisOperand, axis, model, data);
    }

    std::vector<uint32_t> outputDims;
    for (unsigned int i = 0; i < rank; i++)
    {
        bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
        auto currentDimension = inputInfo.GetShape()[i];
        if (skipSqueeze || currentDimension != 1)
        {
            outputDims.push_back(currentDimension);
        }
    }

    armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());

    armnn::TensorInfo outputInfo = inputInfo;
    outputInfo.SetShape(outShape);

    armnn::ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = outputInfo.GetShape();

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    if (!IsLayerSupported(__func__,
                          armnn::IsReshapeSupported,
                          data.m_Compute,
                          inputInfo))
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

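// Converts an Android NN TRANSPOSE operation into an ArmNN Permute layer.
// Only the [0, 3, 1, 2], [0, 2, 3, 1] and [3, 2, 0, 1] permutations are accepted.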
bool HalPolicy::ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }

    // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
    // if the operand index is out of bounds.
    const Operand* permOperand = GetInputOperand(operation, 1, model, false);

    std::vector<int32_t> perm(rank);
    if (!permOperand)
    {
        // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
        for (unsigned int i = rank; i > 0; i--)
        {
            perm[rank - i] = boost::numeric_cast<int>(i - 1);
        }
    }
    else
    {
        GetTensorInt32Values(*permOperand, perm, model, data);
    }

    std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);

    auto permutationVector = armnn::PermutationVector(outputDims.data(), outputDims.size());
    if (!permutationVector.IsEqual(NHWCToArmNN)
        && !permutationVector.IsEqual(ArmNNToNHWC)
        && !permutationVector.IsEqual({ 3, 2, 0, 1 }))
    {
        return Fail("%s: Only [0, 3, 1, 2], [0, 2, 3, 1] and [3, 2, 0, 1] permutations are supported.", __func__);
    }

    armnn::PermuteDescriptor permuteDesc;
    permuteDesc.m_DimMappings = permutationVector;

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (!IsLayerSupported(__func__,
                          armnn::IsPermuteSupported,
                          data.m_Compute,
                          inputInfo,
                          outputInfo,
                          permuteDesc))
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPermuteLayer(permuteDesc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

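// Converts an Android NN BATCH_TO_SPACE_ND operation into an ArmNN BatchToSpaceNd layer.
// Crops are fixed to zero as the Android NN API does not expose them.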
bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* blockOperand = GetInputOperand(operation, 1, model);
    if (!blockOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    // Convert the block operand to int32
    std::vector<int32_t> block;
    if (!GetTensorInt32Values(*blockOperand, block, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
    }

    if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
    {
        return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
                    " greater than or equal to 1", __func__);
    }

    armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
    batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
    batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;

    // Setting crops to 0,0 0,0 as it is not supported in Android NN API
    batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (!IsLayerSupported(__func__,
                          armnn::IsBatchToSpaceNdSupported,
                          data.m_Compute,
                          inputInfo,
                          outputInfo,
                          batchToSpaceNdDesc))
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

} // namespace hal_1_1
} // namespace armnn_driver