//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "HalPolicy.hpp"

#include "../1.0/HalPolicy.hpp"

namespace armnn_driver
{
namespace hal_1_1
{

bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
{
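    // Operations that can be expressed in the 1.0 HAL are converted to their 1.0
    // representation and delegated to the 1.0 policy; all other 1.1 operations are handled below.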
    if (compliantWithV1_0(operation))
    {
        hal_1_0::HalPolicy::Operation v10Operation = convertToV1_0(operation);
        hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);

        return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
    }
    else
    {
        switch (operation.type)
        {
            case V1_1::OperationType::DIV:
                return ConvertDiv(operation, model, data);
            case V1_1::OperationType::SUB:
                return ConvertSub(operation, model, data);
            case V1_1::OperationType::MEAN:
                return ConvertMean(operation, model, data);
            case V1_1::OperationType::PAD:
                return ConvertPad(operation, model, data);
            case V1_1::OperationType::SPACE_TO_BATCH_ND:
                return ConvertSpaceToBatchNd(operation, model, data);
            case V1_1::OperationType::SQUEEZE:
                return ConvertSqueeze(operation, model, data);
            case V1_1::OperationType::STRIDED_SLICE:
                return ConvertStridedSlice(operation, model, data);
            case V1_1::OperationType::TRANSPOSE:
                return ConvertTranspose(operation, model, data);
            case V1_1::OperationType::BATCH_TO_SPACE_ND:
                return ConvertBatchToSpaceNd(operation, model, data);
            default:
                return Fail("%s: Operation type %s not supported in ArmnnDriver",
                            __func__, toString(operation.type).c_str());
        }
    }
}

bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2
    // and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);

    if (!IsLayerSupported(__func__,
                          armnn::IsDivisionSupported,
                          data.m_Compute,
                          input0.GetTensorInfo(),
                          input1.GetTensorInfo(),
                          outInfo))
    {
        return false;
    }

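    // ProcessActivation appends the optional fused activation after the division layer and
    // returns the last layer of the sequence, which provides the operation's output.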
    armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);

    if (endLayer)
    {
        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
        return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
    }

    return Fail("%s: ProcessActivation failed", __func__);
}

bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2
    // and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);

    if (!IsLayerSupported(__func__,
                          armnn::IsSubtractionSupported,
                          data.m_Compute,
                          input0.GetTensorInfo(),
                          input1.GetTensorInfo(),
                          outInfo))
    {
        return false;
    }

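    // Same pattern as ConvertDiv: add the subtraction layer, append the optional fused
    // activation, and connect both inputs through BroadcastTensor.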
    armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);

    if (endLayer)
    {
        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
        return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
    }

    return Fail("%s: ProcessActivation failed", __func__);
}

bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* axisOperand = GetInputOperand(operation, 1, model);
    if (!axisOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    std::vector<int32_t> axis;
    if (!GetTensorInt32Values(*axisOperand, axis, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // Convert the axis to unsigned int and remove duplicates.
    unsigned int rank = inputInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });

    // Get the "keep dims" flag.
    int32_t keepDims = 0;
    if (!GetInputInt32(operation, 2, keepDims, model, data))
    {
        return Fail("%s: Could not read input 2", __func__);
    }

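    // Populate the Mean descriptor from the normalised axes and the keep-dims flag.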
    armnn::MeanDescriptor descriptor;
    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    descriptor.m_KeepDims = keepDims > 0;

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (!IsLayerSupported(__func__,
                          armnn::IsMeanSupported,
                          data.m_Compute,
                          inputInfo,
                          outputInfo,
                          descriptor))
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    const Operand* paddingsOperand = GetInputOperand(operation, 1, model);

    if (!paddingsOperand)
    {
        return Fail("%s: Could not read paddings operand", __func__);
    }

    unsigned int rank = inputInfo.GetNumDimensions();
    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
    // The paddings operand must be a 2-D tensor of shape [rank, 2].
    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
    {
        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
    }

    std::vector<int32_t> paddings;
    GetTensorInt32Values(*paddingsOperand, paddings, model, data);

    // add padding for each dimension of input tensor.
    armnn::PadDescriptor descriptor;
    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
    {
        int paddingBeforeInput = paddings[i];
        int paddingAfterInput = paddings[i + 1];
        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
        {
            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
        }
        descriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (!IsLayerSupported(__func__,
                          armnn::IsPadSupported,
                          data.m_Compute,
                          inputInfo,
                          outputInfo,
                          descriptor))
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    unsigned int spatialDim = rank - 2;

    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }

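    // The conversion uses the NHWC data layout; the block shape and paddings are read from
    // inputs 1 and 2 respectively.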
    armnn::SpaceToBatchNdDescriptor descriptor;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;

    const Operand* blockShapeOperand = GetInputOperand(operation, 1, model);
    const Operand* paddingsOperand = GetInputOperand(operation, 2, model);

    armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
    if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
    {
        return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
    }

    std::vector<int32_t> blockShape;
    GetTensorInt32Values(*blockShapeOperand, blockShape, model, data);
    for (unsigned int i = 0; i < blockShape.size(); i++)
    {
        if (blockShape[i] < 1)
        {
            return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
        }

        descriptor.m_BlockShape.push_back((unsigned int) blockShape[i]);
    }

    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
    {
        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
    }

    std::vector<int32_t> paddings;
    GetTensorInt32Values(*paddingsOperand, paddings, model, data);
    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
    {
        int paddingBeforeInput = paddings[i];
        int paddingAfterInput = paddings[i + 1];
        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
        {
            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
        }

        descriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (!IsLayerSupported(__func__,
                          armnn::IsSpaceToBatchNdSupported,
                          data.m_Compute,
                          inputInfo,
                          outputInfo,
                          descriptor))
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }

    // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
    // if the operand index is out of bounds.
    const Operand* axisOperand = GetInputOperand(operation, 1, model, false);

    const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };

    std::vector<int32_t> axis;
    if (!axisOperand)
    {
        axis.assign(dimensionSequence,
                    dimensionSequence + rank);
    }
    else
    {
        GetTensorInt32Values(*axisOperand, axis, model, data);
    }

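    // Build the squeezed output shape: dimensions not listed in 'axis' are always kept,
    // while listed dimensions are kept only if their size is not 1.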
    std::vector<uint32_t> outputDims;
    for (unsigned int i = 0; i < rank; i++)
    {
        bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
        auto currentDimension = inputInfo.GetShape()[i];
        if (skipSqueeze || currentDimension != 1)
        {
            outputDims.push_back(currentDimension);
        }
    }

    armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());

    armnn::TensorInfo outputInfo = inputInfo;
    outputInfo.SetShape(outShape);

    armnn::ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = outputInfo.GetShape();

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    if (!IsLayerSupported(__func__,
                          armnn::IsReshapeSupported,
                          data.m_Compute,
                          inputInfo))
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }
    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }

    const Operand* beginOperand = GetInputOperand(operation, 1, model);
    const Operand* endOperand = GetInputOperand(operation, 2, model);
    const Operand* stridesOperand = GetInputOperand(operation, 3, model);

    std::vector<int32_t> beginValues;
    std::vector<int32_t> endValues;
    std::vector<int32_t> stridesValues;

    // The lengths of the beginOperand, endOperand and stridesOperand must match the rank of the input
    auto ValidateInputOperands = [&] (const Operand& operand, std::vector<int32_t>& operandValues)
    {
        if (!GetTensorInt32Values(operand, operandValues, model, data))
        {
            return false;
        }

        if (operandValues.size() != rank)
        {
            return false;
        }

        return true;
    };

    if (!ValidateInputOperands(*beginOperand, beginValues)
        || !ValidateInputOperands(*endOperand, endValues)
        || !ValidateInputOperands(*stridesOperand, stridesValues))
    {
        return Fail("%s: Operation has invalid input operand", __func__);
    }

    // Stride cannot have value '0'
    if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
    {
        return Fail("%s: Stride must be non-zero value.", __func__);
    }

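    // Populate the strided slice descriptor: begin, end and stride values are specified per
    // input dimension, and the conversion uses the NHWC data layout.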
    armnn::StridedSliceDescriptor descriptor;
    descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
    descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
    descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;

    // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
    if (!GetInputInt32(operation, 4, descriptor.m_BeginMask, model, data)
        || !GetInputInt32(operation, 5, descriptor.m_EndMask, model, data)
        || !GetInputInt32(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (!IsLayerSupported(__func__,
                          armnn::IsStridedSliceSupported,
                          data.m_Compute,
                          inputInfo,
                          outputInfo,
                          descriptor))
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }

    // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
    // if the operand index is out of bounds.
    const Operand* permOperand = GetInputOperand(operation, 1, model, false);

    std::vector<int32_t> perm(rank);
    if (!permOperand)
    {
        // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
        for (unsigned int i = rank; i > 0; i--)
        {
            perm[rank - i] = boost::numeric_cast<int>(i - 1);
        }
    }
    else
    {
        GetTensorInt32Values(*permOperand, perm, model, data);
    }

    std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);

    auto permutationVector = armnn::PermutationVector(outputDims.data(), outputDims.size());
    if (!permutationVector.IsEqual(NHWCToArmNN)
        && !permutationVector.IsEqual(ArmNNToNHWC)
        && !permutationVector.IsEqual({ 3, 2, 0, 1 }))
    {
        return Fail("%s: Only [0, 3, 1, 2], [0, 2, 3, 1] and [3, 2, 0, 1] permutations are supported.", __func__);
    }

    armnn::PermuteDescriptor permuteDesc;
    permuteDesc.m_DimMappings = permutationVector;

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (!IsLayerSupported(__func__,
                          armnn::IsPermuteSupported,
                          data.m_Compute,
                          inputInfo,
                          outputInfo,
                          permuteDesc))
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPermuteLayer(permuteDesc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* blockOperand = GetInputOperand(operation, 1, model);
    if (!blockOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    // Convert the block operand to int32
    std::vector<int32_t> block;
    if (!GetTensorInt32Values(*blockOperand, block, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
    }

    if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
    {
        return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
                    " greater than or equal to 1", __func__);
    }

    armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
    batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
    batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;

    // Set the crops to {0, 0}, {0, 0}, as crops are not supported by the Android NN API.
    batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (!IsLayerSupported(__func__,
                          armnn::IsBatchToSpaceNdSupported,
                          data.m_Compute,
                          inputInfo,
                          outputInfo,
                          batchToSpaceNdDesc))
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

} // namespace hal_1_1
} // namespace armnn_driver