//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "HalPolicy.hpp"

#include "../1.0/HalPolicy.hpp"

namespace
{
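// V1.1 operation types that are identical to their V1.0 counterparts; operations of
// these types can be handled by forwarding them to the V1.0 HalPolicy.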
static std::vector<V1_0::OperationType> opsEquivalentInV10({
    V1_0::OperationType::ADD,
    V1_0::OperationType::AVERAGE_POOL_2D,
    V1_0::OperationType::CONCATENATION,
    V1_0::OperationType::CONV_2D,
    V1_0::OperationType::DEPTHWISE_CONV_2D,
    V1_0::OperationType::FLOOR,
    V1_0::OperationType::FULLY_CONNECTED,
    V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION,
    V1_0::OperationType::LOGISTIC,
    V1_0::OperationType::LSTM,
    V1_0::OperationType::L2_NORMALIZATION,
    V1_0::OperationType::L2_POOL_2D,
    V1_0::OperationType::MAX_POOL_2D,
    V1_0::OperationType::MUL,
    V1_0::OperationType::RELU,
    V1_0::OperationType::RELU1,
    V1_0::OperationType::RELU6,
    V1_0::OperationType::SOFTMAX,
    V1_0::OperationType::TANH,
    V1_0::OperationType::RESHAPE,
    V1_0::OperationType::RESIZE_BILINEAR,
});

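// Returns true if the given V1.1 operation has an unchanged V1.0 equivalent and can
// therefore be handled by the V1.0 HalPolicy.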
bool CompliantWithVersion10(const V1_1::Operation & operation)
{
    std::vector<V1_0::OperationType>::iterator it;
    it = std::find(opsEquivalentInV10.begin(), opsEquivalentInV10.end(),
                   static_cast<V1_0::OperationType>(operation.type));

    if (it != opsEquivalentInV10.end())
    {
        return true;
    }
    return false;
}

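// Reinterprets a V1.1 operation as a V1.0 operation; only valid for operation types
// listed in opsEquivalentInV10.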
V1_0::Operation ConvertOperationToVersion10(const V1_1::Operation & operation)
{
    V1_0::Operation v10Operation;
    v10Operation.type = static_cast<V1_0::OperationType>(operation.type);
    v10Operation.inputs = operation.inputs;
    v10Operation.outputs = operation.outputs;
    return v10Operation;
}
}

namespace armnn_driver
{
namespace hal_1_1
{

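// Entry point for V1.1 operation conversion: operations that are unchanged from V1.0
// are delegated to the V1.0 HalPolicy, while the V1.1-only operations are dispatched
// to the Convert* helpers below.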
bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
{
    if (CompliantWithVersion10(operation))
    {
        hal_1_0::HalPolicy::Operation v10Operation = ConvertOperationToVersion10(operation);
        hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);

        return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
    }
    else
    {
        switch (operation.type)
        {
            case V1_1::OperationType::DIV:
                return ConvertDiv(operation, model, data);
            case V1_1::OperationType::SUB:
                return ConvertSub(operation, model, data);
            case V1_1::OperationType::MEAN:
                return ConvertMean(operation, model, data);
            case V1_1::OperationType::PAD:
                return ConvertPad(operation, model, data);
            case V1_1::OperationType::SPACE_TO_BATCH_ND:
                return ConvertSpaceToBatchNd(operation, model, data);
            case V1_1::OperationType::SQUEEZE:
                return ConvertSqueeze(operation, model, data);
            case V1_1::OperationType::STRIDED_SLICE:
                return ConvertStridedSlice(operation, model, data);
            case V1_1::OperationType::TRANSPOSE:
                return ConvertTranspose(operation, model, data);
            case V1_1::OperationType::BATCH_TO_SPACE_ND:
                return ConvertBatchToSpaceNd(operation, model, data);
            default:
                return Fail("%s: Operation type %s not supported in ArmnnDriver",
                            __func__, toString(operation.type).c_str());
        }
    }
}

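// Converts an NNAPI DIV operation into an ArmNN division layer, with an optional fused activation.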
bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2
    // and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsDivisionSupported,
                                       data.m_Backends,
                                       input0.GetTensorInfo(),
                                       input1.GetTensorInfo(),
                                       outInfo))
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);

    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();

    if (endLayer)
    {
        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
        return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
    }

    return Fail("%s: ProcessActivation failed", __func__);
}

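// Converts an NNAPI SUB operation into an ArmNN subtraction layer, with an optional fused activation.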
bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2
    // and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsSubtractionSupported,
                                       data.m_Backends,
                                       input0.GetTensorInfo(),
                                       input1.GetTensorInfo(),
                                       outInfo))
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);

    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();

    if (endLayer)
    {
        BroadcastTensor(input0, input1, startLayer, *data.m_Network);
        return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
    }

    return Fail("%s: ProcessActivation failed", __func__);
}

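// Converts an NNAPI MEAN operation into an ArmNN mean layer, reducing over the given axes.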
bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* axisOperand = GetInputOperand(operation, 1, model);
    if (!axisOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    std::vector<int32_t> axis;
    if (!GetTensorInt32Values(*axisOperand, axis, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // Convert the axis to unsigned int and remove duplicates.
    unsigned int rank = inputInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });

    // Get the "keep dims" flag.
    int32_t keepDims = 0;
    if (!GetInputInt32(operation, 2, keepDims, model, data))
    {
        return Fail("%s: Could not read input 2", __func__);
    }

    armnn::MeanDescriptor descriptor;
    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    descriptor.m_KeepDims = keepDims > 0;

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsMeanSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       descriptor))
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

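// Converts an NNAPI PAD operation into an ArmNN pad layer using the [rank, 2] paddings operand.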
bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    const Operand* paddingsOperand = GetInputOperand(operation, 1, model);

    if (!paddingsOperand)
    {
        return Fail("%s: Could not read paddings operand", __func__);
    }

    unsigned int rank = inputInfo.GetNumDimensions();
    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != rank * 2)
    {
        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, rank);
    }

    std::vector<int32_t> paddings;
    GetTensorInt32Values(*paddingsOperand, paddings, model, data);

    // Add padding for each dimension of the input tensor.
    armnn::PadDescriptor descriptor;
    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
    {
        int paddingBeforeInput = paddings[i];
        int paddingAfterInput = paddings[i + 1];
        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
        {
            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
        }
        descriptor.m_PadList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsPadSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       descriptor))
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

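// Converts an NNAPI SPACE_TO_BATCH_ND operation (NHWC, rank-4 inputs only) into an ArmNN
// SpaceToBatchNd layer.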
bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    unsigned int rank = inputInfo.GetNumDimensions();
    unsigned int spatialDim = rank - 2;

    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank 4 are supported", __func__);
    }

    const Operand* blockShapeOperand = GetInputOperand(operation, 1, model);
    const Operand* paddingsOperand = GetInputOperand(operation, 2, model);

    armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
    if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
    {
        return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
    }

    std::vector<int32_t> blockShape;
    GetTensorInt32Values(*blockShapeOperand, blockShape, model, data);
    if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
    {
        return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
    }

    armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
    if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
    {
        return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
    }

    std::vector<std::pair<unsigned int, unsigned int>> paddingList;
    std::vector<int32_t> paddings;
    GetTensorInt32Values(*paddingsOperand, paddings, model, data);
    for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
    {
        int paddingBeforeInput = paddings[i];
        int paddingAfterInput = paddings[i + 1];
        if (paddingBeforeInput < 0 || paddingAfterInput < 0)
        {
            return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
        }

        paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
    }

    armnn::SpaceToBatchNdDescriptor descriptor;
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;
    descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
    descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsSpaceToBatchNdSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       descriptor))
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

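// Converts an NNAPI SQUEEZE operation into an ArmNN reshape layer that drops the
// requested dimensions of size 1.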
bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }

    // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
    // if the operand index is out of bounds.
    const Operand* axisOperand = GetInputOperand(operation, 1, model, false);

    const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };

    std::vector<int32_t> axis;
    if (!axisOperand)
    {
        axis.assign(dimensionSequence,
                    dimensionSequence + rank);
    }
    else
    {
        GetTensorInt32Values(*axisOperand, axis, model, data);
    }

    std::vector<uint32_t> outputDims;
    for (unsigned int i = 0; i < rank; i++)
    {
        bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
        auto currentDimension = inputInfo.GetShape()[i];
        if (skipSqueeze || currentDimension != 1)
        {
            outputDims.push_back(currentDimension);
        }
    }

    armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());

    armnn::TensorInfo outputInfo = inputInfo;
    outputInfo.SetShape(outShape);

    armnn::ReshapeDescriptor reshapeDesc;
    reshapeDesc.m_TargetShape = outputInfo.GetShape();

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsReshapeSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       reshapeDesc))
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

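// Converts an NNAPI STRIDED_SLICE operation into an ArmNN strided slice layer.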
bool HalPolicy::ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }
    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }

    const Operand* beginOperand = GetInputOperand(operation, 1, model);
    const Operand* endOperand = GetInputOperand(operation, 2, model);
    const Operand* stridesOperand = GetInputOperand(operation, 3, model);

    std::vector<int32_t> beginValues;
    std::vector<int32_t> endValues;
    std::vector<int32_t> stridesValues;

    // The beginOperand, endOperand and stridesOperand must each have as many elements as the rank of the input
    auto ValidateInputOperands = [&] (const Operand& operand, std::vector<int32_t>& operandValues)
    {
        if (!GetTensorInt32Values(operand, operandValues, model, data))
        {
            return false;
        }

        if (operandValues.size() != rank)
        {
            return false;
        }

        return true;
    };

    if (!ValidateInputOperands(*beginOperand, beginValues)
        || !ValidateInputOperands(*endOperand, endValues)
        || !ValidateInputOperands(*stridesOperand, stridesValues))
    {
        return Fail("%s: Operation has invalid input operand", __func__);
    }

    // Stride cannot have value '0'
    if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
    {
        return Fail("%s: Stride must be non-zero value.", __func__);
    }

    armnn::StridedSliceDescriptor descriptor;
    descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
    descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
    descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
    descriptor.m_DataLayout = armnn::DataLayout::NHWC;

    // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
    if (!GetInputInt32(operation, 4, descriptor.m_BeginMask, model, data)
        || !GetInputInt32(operation, 5, descriptor.m_EndMask, model, data)
        || !GetInputInt32(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsStridedSliceSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       descriptor))
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

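// Converts an NNAPI TRANSPOSE operation into an ArmNN permute layer; only a small set
// of permutations is supported.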
bool HalPolicy::ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank > 4)
    {
        return Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
    }

    // NOTE: perm is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
    // if the operand index is out of bounds.
    const Operand* permOperand = GetInputOperand(operation, 1, model, false);

    std::vector<int32_t> perm(rank);
    if (!permOperand)
    {
        // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
        for (unsigned int i = rank; i > 0; i--)
        {
            perm[rank - i] = boost::numeric_cast<int> (i - 1);
        }
    }
    else
    {
        GetTensorInt32Values(*permOperand, perm, model, data);
    }

    std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);

    auto permutationVector = armnn::PermutationVector(outputDims.data(), outputDims.size());
    if (!permutationVector.IsEqual(NHWCToArmNN)
        && !permutationVector.IsEqual(ArmNNToNHWC)
        && !permutationVector.IsEqual({ 3, 2, 0, 1 }))
    {
        return Fail("%s: Only [0, 3, 1, 2], [0, 2, 3, 1] and [3, 2, 0, 1] permutations are supported.", __func__);
    }

    armnn::PermuteDescriptor permuteDesc;
    permuteDesc.m_DimMappings = permutationVector;

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsPermuteSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       permuteDesc))
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddPermuteLayer(permuteDesc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

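// Converts an NNAPI BATCH_TO_SPACE_ND operation (NHWC, rank-4 inputs, no crops) into an
// ArmNN BatchToSpaceNd layer.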
bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* blockOperand = GetInputOperand(operation, 1, model);
    if (!blockOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    // Convert the block operand to int32
    std::vector<int32_t> block;
    if (!GetTensorInt32Values(*blockOperand, block, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    unsigned int rank = inputInfo.GetNumDimensions();
    if (rank != 4)
    {
        return Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
    }

    if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
    {
        return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
                    " greater than or equal to 1", __func__);
    }

    armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
    batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
    batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;

    // Setting crops to 0,0 0,0 as it is not supported in Android NN API
    batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsBatchToSpaceNdSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       batchToSpaceNdDesc))
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

} // namespace hal_1_1
} // namespace armnn_driver