//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
5
6#include "HalPolicy.hpp"
7
8#include "../1.0/HalPolicy.hpp"
9
10namespace armnn_driver
11{
12namespace hal_1_1
13{
14
15bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
16{
17 if (compliantWithV1_0(operation))
18 {
19 hal_1_0::HalPolicy::Operation v10Operation = convertToV1_0(operation);
20 hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);
21
22 return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
23 }
24 else
25 {
26 switch (operation.type)
27 {
28 case V1_1::OperationType::DIV:
29 return ConvertDiv(operation, model, data);
David Beck38e12942018-09-12 16:02:24 +010030 case V1_1::OperationType::SUB:
31 return ConvertSub(operation, model, data);
narpra013c052562018-09-17 14:25:04 +010032 case V1_1::OperationType::MEAN:
33 return ConvertMean(operation, model, data);
Nina Drozd62a4a9f2018-10-01 14:20:25 +010034 case V1_1::OperationType::PAD:
35 return ConvertPad(operation, model, data);
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +000036 case V1_1::OperationType::SPACE_TO_BATCH_ND:
37 return ConvertSpaceToBatchNd(operation, model, data);
saoste01b8471482018-10-10 09:44:51 +010038 case V1_1::OperationType::SQUEEZE:
39 return ConvertSqueeze(operation, model, data);
Sadik Armagan758eee82018-11-15 15:34:49 +000040 case V1_1::OperationType::STRIDED_SLICE:
41 return ConvertStridedSlice(operation, model, data);
saoste01fe463152018-10-18 17:49:56 +010042 case V1_1::OperationType::TRANSPOSE:
43 return ConvertTranspose(operation, model, data);
Éanna Ó Catháin2cd99b92018-11-14 14:33:52 +000044 case V1_1::OperationType::BATCH_TO_SPACE_ND:
45 return ConvertBatchToSpaceNd(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +010046 default:
47 return Fail("%s: Operation type %s not supported in ArmnnDriver",
48 __func__, toString(operation.type).c_str());
49 }
50 }
51}
52
53bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
54{
55 LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
56 LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
57
58 if (!input0.IsValid() || !input1.IsValid())
59 {
60 return Fail("%s: Operation has invalid inputs", __func__);
61 }
62
63 // The FuseActivation parameter is always the input index 2
64 // and it should be optional
65 ActivationFn activationFunction;
66 if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
67 {
68 return Fail("%s: Operation has invalid inputs", __func__);
69 }
70
71 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
72 if (!outputOperand)
73 {
74 return false;
75 }
76
77 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
78
79 if (!IsLayerSupported(__func__,
80 armnn::IsDivisionSupported,
81 data.m_Compute,
82 input0.GetTensorInfo(),
83 input1.GetTensorInfo(),
84 outInfo))
85 {
86 return false;
87 }
88
89 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
90 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
91
92 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
93 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
94
95 if (endLayer)
96 {
97 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
98 return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
99 }
100
101 return Fail("%s: ProcessActivation failed", __func__);
102}
103
David Beck38e12942018-09-12 16:02:24 +0100104bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
105{
106 LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
107 LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
108
109 if (!input0.IsValid() || !input1.IsValid())
110 {
111 return Fail("%s: Operation has invalid inputs", __func__);
112 }
113
114 // The FuseActivation parameter is always the input index 2
115 // and it should be optional
116 ActivationFn activationFunction;
117 if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
118 {
119 return Fail("%s: Operation has invalid inputs", __func__);
120 }
121
122 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
123 if (!outputOperand)
124 {
125 return false;
126 }
127
128 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
129
130 if (!IsLayerSupported(__func__,
131 armnn::IsSubtractionSupported,
132 data.m_Compute,
133 input0.GetTensorInfo(),
134 input1.GetTensorInfo(),
135 outInfo))
136 {
137 return false;
138 }
139
140 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
141 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
142
143 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
144 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
145
146 if (endLayer)
147 {
148 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
149 return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
150 }
151
152 return Fail("%s: ProcessActivation failed", __func__);
153}
154
narpra013c052562018-09-17 14:25:04 +0100155bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
156{
157 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
narpra013c052562018-09-17 14:25:04 +0100158 if (!input.IsValid())
159 {
160 return Fail("%s: Operation has invalid inputs", __func__);
161 }
162
Matteo Martincighae622b72018-10-23 18:25:38 +0100163 const Operand* axisOperand = GetInputOperand(operation, 1, model);
164 if (!axisOperand)
165 {
166 return Fail("%s: Could not read input 1", __func__);
167 }
168
169 std::vector<int32_t> axis;
170 if (!GetTensorInt32Values(*axisOperand, axis, model, data))
171 {
172 return Fail("%s: Input 1 has invalid values", __func__);
173 }
174
175 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
176
177 // Convert the axis to unsigned int and remove duplicates.
178 unsigned int rank = inputInfo.GetNumDimensions();
179 std::set<unsigned int> uniqueAxis;
180 std::transform(axis.begin(), axis.end(),
181 std::inserter(uniqueAxis, uniqueAxis.begin()),
182 [rank](int i) -> unsigned int { return (i + rank) % rank; });
183
184 // Get the "keep dims" flag.
185 int32_t keepDims = 0;
186 if (!GetInputInt32(operation, 2, keepDims, model, data))
187 {
188 return Fail("%s: Could not read input 2", __func__);
189 }
narpra013c052562018-09-17 14:25:04 +0100190
191 armnn::MeanDescriptor descriptor;
Matteo Martincighae622b72018-10-23 18:25:38 +0100192 descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
193 descriptor.m_KeepDims = keepDims > 0;
narpra013c052562018-09-17 14:25:04 +0100194
195 const Operand* output = GetOutputOperand(operation, 0, model);
196 if (!output)
197 {
198 return Fail("%s: Could not read output 0", __func__);
199 }
200
201 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
202
203 if (!IsLayerSupported(__func__,
204 armnn::IsMeanSupported,
205 data.m_Compute,
206 inputInfo,
207 outputInfo,
208 descriptor))
209 {
210 return false;
211 }
212
213 armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
narpra0196bedf02018-09-26 16:57:28 +0100214 assert(layer != nullptr);
215 input.Connect(layer->GetInputSlot(0));
narpra013c052562018-09-17 14:25:04 +0100216
217 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
218}
219
Nina Drozd62a4a9f2018-10-01 14:20:25 +0100220bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
221{
222 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
223
224 if (!input.IsValid())
225 {
226 return Fail("%s: Operation has invalid inputs", __func__);
227 }
228
229 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
230
231 const Operand* paddingsOperand = GetInputOperand(operation, 1, model);
232
233 if (!paddingsOperand)
234 {
235 return Fail("%s: Could not read paddings operand", __func__);
236 }
237
238 unsigned int rank = inputInfo.GetNumDimensions();
239 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
Éanna Ó Catháin074f1ec2019-01-14 16:18:49 +0000240 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
Nina Drozd62a4a9f2018-10-01 14:20:25 +0100241 {
242 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
243 }
244
245 std::vector<int32_t> paddings;
246 GetTensorInt32Values(*paddingsOperand, paddings, model, data);
247
248 // add padding for each dimension of input tensor.
249 armnn::PadDescriptor descriptor;
250 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
251 {
252 int paddingBeforeInput = paddings[i];
253 int paddingAfterInput = paddings[i + 1];
254 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
255 {
256 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
257 }
258 descriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
259 }
260
261 const Operand* output = GetOutputOperand(operation, 0, model);
262 if (!output)
263 {
264 return Fail("%s: Could not read output 0", __func__);
265 }
266
267 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
268
269 if (!IsLayerSupported(__func__,
270 armnn::IsPadSupported,
271 data.m_Compute,
272 inputInfo,
273 outputInfo,
274 descriptor))
275 {
276 return false;
277 }
278
279 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
280 assert(layer != nullptr);
281 input.Connect(layer->GetInputSlot(0));
282 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
283
284 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
285}
286
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000287bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
288{
289 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
290
291 if (!input.IsValid())
292 {
293 return Fail("%s: Operation has invalid inputs", __func__);
294 }
295
296 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
297 unsigned int rank = inputInfo.GetNumDimensions();
298 unsigned int spatialDim = rank - 2;
299
300 if (rank != 4)
301 {
302 Fail("%s: Only inputs with rank 4 are supported", __func__);
303 }
304
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000305 const Operand* blockShapeOperand = GetInputOperand(operation, 1, model);
306 const Operand* paddingsOperand = GetInputOperand(operation, 2, model);
307
308 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
309 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
310 {
311 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
312 }
313
314 std::vector<int32_t> blockShape;
315 GetTensorInt32Values(*blockShapeOperand, blockShape, model, data);
Sadik Armagan8bef7b32018-12-20 14:14:12 +0000316 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000317 {
Sadik Armagan8bef7b32018-12-20 14:14:12 +0000318 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000319 }
320
321 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
322 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
323 {
324 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
325 }
326
Sadik Armagan8bef7b32018-12-20 14:14:12 +0000327 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000328 std::vector<int32_t> paddings;
329 GetTensorInt32Values(*paddingsOperand, paddings, model, data);
330 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
331 {
332 int paddingBeforeInput = paddings[i];
333 int paddingAfterInput = paddings[i + 1];
334 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
335 {
336 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
337 }
338
Sadik Armagan8bef7b32018-12-20 14:14:12 +0000339 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000340 }
341
Sadik Armagan8bef7b32018-12-20 14:14:12 +0000342 armnn::SpaceToBatchNdDescriptor descriptor;
343 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
344 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
345 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
346
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000347 const Operand* output = GetOutputOperand(operation, 0, model);
348 if (!output)
349 {
350 return Fail("%s: Could not read output 0", __func__);
351 }
352
353 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
354 if (!IsLayerSupported(__func__,
355 armnn::IsSpaceToBatchNdSupported,
356 data.m_Compute,
357 inputInfo,
358 outputInfo,
359 descriptor))
360 {
361 return false;
362 }
363
364 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
365 assert(layer != nullptr);
366 input.Connect(layer->GetInputSlot(0));
367
368 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
369}
370
saoste01b8471482018-10-10 09:44:51 +0100371bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
372{
saoste01b8471482018-10-10 09:44:51 +0100373 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
374
375 if (!input.IsValid())
376 {
377 return Fail("%s: Operation has invalid inputs", __func__);
378 }
379
380 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
381
382 unsigned int rank = inputInfo.GetNumDimensions();
saoste01fe463152018-10-18 17:49:56 +0100383 if (rank > 4)
saoste01b8471482018-10-10 09:44:51 +0100384 {
saoste01fe463152018-10-18 17:49:56 +0100385 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
saoste01b8471482018-10-10 09:44:51 +0100386 }
387
388 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
389 // if the operand index is out of bounds.
390 const Operand* axisOperand = GetInputOperand(operation, 1, model, false);
391
saoste01fe463152018-10-18 17:49:56 +0100392 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
393
saoste01b8471482018-10-10 09:44:51 +0100394 std::vector<int32_t> axis;
saoste01fe463152018-10-18 17:49:56 +0100395 if (!axisOperand)
saoste01b8471482018-10-10 09:44:51 +0100396 {
397 axis.assign(dimensionSequence,
saoste01fe463152018-10-18 17:49:56 +0100398 dimensionSequence + rank);
saoste01b8471482018-10-10 09:44:51 +0100399 }
400 else
401 {
402 GetTensorInt32Values(*axisOperand, axis, model, data);
403 }
404
saoste01b8471482018-10-10 09:44:51 +0100405
saoste01a893efa2018-10-13 11:56:12 +0100406 std::vector<uint32_t> outputDims;
saoste01fe463152018-10-18 17:49:56 +0100407 for (unsigned int i = 0; i < rank; i++)
saoste01a893efa2018-10-13 11:56:12 +0100408 {
409 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
410 auto currentDimension = inputInfo.GetShape()[i];
saoste01b8471482018-10-10 09:44:51 +0100411 if (skipSqueeze || currentDimension != 1)
412 {
413 outputDims.push_back(currentDimension);
414 }
415 }
416
saoste01fe463152018-10-18 17:49:56 +0100417 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
saoste01b8471482018-10-10 09:44:51 +0100418
419 armnn::TensorInfo outputInfo = inputInfo;
420 outputInfo.SetShape(outShape);
421
422 armnn::ReshapeDescriptor reshapeDesc;
423 reshapeDesc.m_TargetShape = outputInfo.GetShape();
424
425 const Operand* output = GetOutputOperand(operation, 0, model);
426 if (!output)
427 {
428 return Fail("%s: Could not read output 0", __func__);
429 }
430
431 if (!IsLayerSupported(__func__,
432 armnn::IsReshapeSupported,
433 data.m_Compute,
Matteo Martincigh265d1ad2019-01-08 18:14:53 +0000434 inputInfo,
435 reshapeDesc))
saoste01b8471482018-10-10 09:44:51 +0100436 {
437 return false;
438 }
439
440 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
441 assert(layer != nullptr);
442 input.Connect(layer->GetInputSlot(0));
saoste01fe463152018-10-18 17:49:56 +0100443
444 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
445}
446
Sadik Armagan758eee82018-11-15 15:34:49 +0000447bool HalPolicy::ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
448{
449 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
450 if (!input.IsValid())
451 {
452 return Fail("%s: Operation has invalid inputs", __func__);
453 }
454 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
455
456 unsigned int rank = inputInfo.GetNumDimensions();
457 if (rank > 4)
458 {
459 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
460 }
461
462 const Operand* beginOperand = GetInputOperand(operation, 1, model);
463 const Operand* endOperand = GetInputOperand(operation, 2, model);
464 const Operand* stridesOperand = GetInputOperand(operation, 3, model);
465
466 std::vector<int32_t> beginValues;
467 std::vector<int32_t> endValues;
468 std::vector<int32_t> stridesValues;
469
470 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
471 auto ValidateInputOperands = [&] (const Operand& operand, std::vector<int32_t>& operandValues)
472 {
473 if (!GetTensorInt32Values(operand, operandValues, model, data))
474 {
475 return false;
476 }
477
478 if (operandValues.size() != rank)
479 {
480 return false;
481 }
482
483 return true;
484 };
485
486 if (!ValidateInputOperands(*beginOperand, beginValues)
487 || !ValidateInputOperands(*endOperand, endValues)
488 || !ValidateInputOperands(*stridesOperand, stridesValues))
489 {
490 return Fail("%s: Operation has invalid input operand", __func__);
491 }
492
493 // Stride cannot have value '0'
494 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
495 {
496 return Fail("%s: Stride must be non-zero value.", __func__);
497 }
498
499 armnn::StridedSliceDescriptor descriptor;
500 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
501 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
502 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
503 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
504
505 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
506 if (!GetInputInt32(operation, 4, descriptor.m_BeginMask, model, data)
507 || !GetInputInt32(operation, 5, descriptor.m_EndMask, model, data)
508 || !GetInputInt32(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
509 {
510 return Fail("%s: Operation has invalid inputs", __func__);
511 }
512
513 const Operand* output = GetOutputOperand(operation, 0, model);
514 if (!output)
515 {
516 return Fail("%s: Could not read output 0", __func__);
517 }
518 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
519
520 if (!IsLayerSupported(__func__,
521 armnn::IsStridedSliceSupported,
522 data.m_Compute,
523 inputInfo,
524 outputInfo,
525 descriptor))
526 {
527 return false;
528 }
529
530 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
531 assert(layer != nullptr);
532 input.Connect(layer->GetInputSlot(0));
533
534 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
535}
536
saoste01fe463152018-10-18 17:49:56 +0100537bool HalPolicy::ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
538{
539 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
540
541 if (!input.IsValid())
542 {
543 return Fail("%s: Operation has invalid inputs", __func__);
544 }
545
546 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
547
548 unsigned int rank = inputInfo.GetNumDimensions();
549 if (rank > 4)
550 {
551 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
552 }
553
554 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
555 // if the operand index is out of bounds.
556 const Operand* permOperand = GetInputOperand(operation, 1, model, false);
557
558 std::vector<int32_t> perm(rank);
559 if (!permOperand)
560 {
561 // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
562 for (unsigned int i = rank; i > 0; i--)
563 {
564 perm[rank - i] = boost::numeric_cast<int> (i - 1);
565 }
566 }
567 else
568 {
569 GetTensorInt32Values(*permOperand, perm, model, data);
570 }
571
572 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
573
574 auto permutationVector = armnn::PermutationVector(outputDims.data(), outputDims.size());
575 if (!permutationVector.IsEqual(NHWCToArmNN)
576 && !permutationVector.IsEqual(ArmNNToNHWC)
577 && !permutationVector.IsEqual({ 3, 2, 0, 1 }))
578 {
579 return Fail("%s: Only [0, 3, 1, 2], [0, 2, 3, 1] and [3, 2, 0, 1] permutations are supported.", __func__);
580 }
581
582 armnn::PermuteDescriptor permuteDesc;
583 permuteDesc.m_DimMappings = permutationVector;
584
585 const Operand* output = GetOutputOperand(operation, 0, model);
586 if (!output)
587 {
588 return Fail("%s: Could not read output 0", __func__);
589 }
590
591 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
592
593 if (!IsLayerSupported(__func__,
594 armnn::IsPermuteSupported,
595 data.m_Compute,
596 inputInfo,
597 outputInfo,
598 permuteDesc))
599 {
600 return false;
601 }
602
603 armnn::IConnectableLayer* const layer = data.m_Network->AddPermuteLayer(permuteDesc);
604 assert(layer != nullptr);
605 input.Connect(layer->GetInputSlot(0));
saoste01b8471482018-10-10 09:44:51 +0100606
607 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
608}
609
Éanna Ó Catháin2cd99b92018-11-14 14:33:52 +0000610bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data)
611{
612 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
613 if (!input.IsValid())
614 {
615 return Fail("%s: Operation has invalid inputs", __func__);
616 }
617
618 const Operand* blockOperand = GetInputOperand(operation, 1, model);
619 if (!blockOperand)
620 {
621 return Fail("%s: Could not read input 1", __func__);
622 }
623
624 // Convert the block operand to int32
625 std::vector<int32_t> block;
626 if (!GetTensorInt32Values(*blockOperand, block, model, data))
627 {
628 return Fail("%s: Input 1 has invalid values", __func__);
629 }
630
631 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
632
633 unsigned int rank = inputInfo.GetNumDimensions();
634 if (rank != 4)
635 {
636 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
637 }
638
639 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
640 {
641 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
642 " greater than or equal to 1", __func__);
643 }
644
645 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
646 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
647 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
648
649 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
650 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
651
652 const Operand* output = GetOutputOperand(operation, 0, model);
653 if (!output)
654 {
655 return Fail("%s: Could not read output 0", __func__);
656 }
657
658 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
659
660 if (!IsLayerSupported(__func__,
661 armnn::IsBatchToSpaceNdSupported,
662 data.m_Compute,
663 inputInfo,
664 outputInfo,
665 batchToSpaceNdDesc))
666 {
667 return false;
668 }
669
670 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
671 assert(layer != nullptr);
672 input.Connect(layer->GetInputSlot(0));
673
674 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
675}
676
677
arovir01b0717b52018-09-05 17:03:25 +0100678} // namespace hal_1_1
Matteo Martincigh265d1ad2019-01-08 18:14:53 +0000679} // namespace armnn_driver