blob: 78f157dd8f8d3dfb0fc4a2f908f497cb598bc1a6 [file] [log] [blame]
//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#include "HalPolicy.hpp"

#include "../1.0/HalPolicy.hpp"
Éanna Ó Catháin2fc21f72019-05-13 11:01:33 +010010namespace
11{
12static std::vector<V1_0::OperationType> opsEquivalentInV10({
13 V1_0::OperationType::ADD,
14 V1_0::OperationType::AVERAGE_POOL_2D,
15 V1_0::OperationType::CONCATENATION,
16 V1_0::OperationType::CONV_2D,
17 V1_0::OperationType::DEPTHWISE_CONV_2D,
David Monahand5bfae12019-05-30 12:07:44 +010018 V1_0::OperationType::DEQUANTIZE,
Éanna Ó Catháin2fc21f72019-05-13 11:01:33 +010019 V1_0::OperationType::FLOOR,
20 V1_0::OperationType::FULLY_CONNECTED,
21 V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION,
22 V1_0::OperationType::LOGISTIC,
23 V1_0::OperationType::LSTM,
24 V1_0::OperationType::L2_NORMALIZATION,
25 V1_0::OperationType::L2_POOL_2D,
26 V1_0::OperationType::MAX_POOL_2D,
27 V1_0::OperationType::MUL,
28 V1_0::OperationType::RELU,
29 V1_0::OperationType::RELU1,
30 V1_0::OperationType::RELU6,
31 V1_0::OperationType::SOFTMAX,
Keith Davisa6bc52f2019-06-26 09:39:49 +010032 V1_0::OperationType::SPACE_TO_DEPTH,
Éanna Ó Catháin2fc21f72019-05-13 11:01:33 +010033 V1_0::OperationType::TANH,
34 V1_0::OperationType::RESHAPE,
35 V1_0::OperationType::RESIZE_BILINEAR,
36});
37
38bool CompliantWithVersion10(const V1_1::Operation & operation)
39{
40 std::vector<V1_0::OperationType>::iterator it;
41 it = std::find(opsEquivalentInV10.begin(), opsEquivalentInV10.end(),
42 static_cast<V1_0::OperationType>(operation.type));
43
44 if(it != opsEquivalentInV10.end())
45 {
46 return true;
47 }
48 return false;
49}
50
51V1_0::Operation ConvertOperationToVersion10(const V1_1::Operation & operation)
52{
53 V1_0::Operation v10Operation;
54 v10Operation.type = static_cast<V1_0::OperationType>(operation.type);
55 v10Operation.inputs = operation.inputs;
56 v10Operation.outputs = operation.outputs;
57 return v10Operation;
58}
59}
60
namespace armnn_driver
{
namespace hal_1_1
{
65
66bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
67{
Éanna Ó Catháin2fc21f72019-05-13 11:01:33 +010068 if (CompliantWithVersion10(operation))
arovir01b0717b52018-09-05 17:03:25 +010069 {
Éanna Ó Catháin2fc21f72019-05-13 11:01:33 +010070 hal_1_0::HalPolicy::Operation v10Operation = ConvertOperationToVersion10(operation);
arovir01b0717b52018-09-05 17:03:25 +010071 hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);
72
73 return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
74 }
75 else
76 {
77 switch (operation.type)
78 {
79 case V1_1::OperationType::DIV:
80 return ConvertDiv(operation, model, data);
David Beck38e12942018-09-12 16:02:24 +010081 case V1_1::OperationType::SUB:
82 return ConvertSub(operation, model, data);
narpra013c052562018-09-17 14:25:04 +010083 case V1_1::OperationType::MEAN:
84 return ConvertMean(operation, model, data);
Nina Drozd62a4a9f2018-10-01 14:20:25 +010085 case V1_1::OperationType::PAD:
86 return ConvertPad(operation, model, data);
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +000087 case V1_1::OperationType::SPACE_TO_BATCH_ND:
88 return ConvertSpaceToBatchNd(operation, model, data);
saoste01b8471482018-10-10 09:44:51 +010089 case V1_1::OperationType::SQUEEZE:
90 return ConvertSqueeze(operation, model, data);
Sadik Armagan758eee82018-11-15 15:34:49 +000091 case V1_1::OperationType::STRIDED_SLICE:
92 return ConvertStridedSlice(operation, model, data);
saoste01fe463152018-10-18 17:49:56 +010093 case V1_1::OperationType::TRANSPOSE:
94 return ConvertTranspose(operation, model, data);
Éanna Ó Catháin2cd99b92018-11-14 14:33:52 +000095 case V1_1::OperationType::BATCH_TO_SPACE_ND:
96 return ConvertBatchToSpaceNd(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +010097 default:
98 return Fail("%s: Operation type %s not supported in ArmnnDriver",
99 __func__, toString(operation.type).c_str());
100 }
101 }
102}
103
104bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
105{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100106 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
107 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 1, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100108
109 if (!input0.IsValid() || !input1.IsValid())
110 {
111 return Fail("%s: Operation has invalid inputs", __func__);
112 }
113
114 // The FuseActivation parameter is always the input index 2
115 // and it should be optional
116 ActivationFn activationFunction;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100117 if (!GetOptionalInputActivation<hal_1_1::HalPolicy>(operation, 2, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100118 {
119 return Fail("%s: Operation has invalid inputs", __func__);
120 }
121
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100122 const Operand* outputOperand = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100123 if (!outputOperand)
124 {
125 return false;
126 }
127
128 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
129
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100130 if (!IsLayerSupportedForAnyBackend(__func__,
131 armnn::IsDivisionSupported,
132 data.m_Backends,
133 input0.GetTensorInfo(),
134 input1.GetTensorInfo(),
135 outInfo))
arovir01b0717b52018-09-05 17:03:25 +0100136 {
137 return false;
138 }
139
140 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
141 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
142
143 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
144 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
145
146 if (endLayer)
147 {
148 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100149 return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *endLayer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100150 }
151
152 return Fail("%s: ProcessActivation failed", __func__);
153}
154
David Beck38e12942018-09-12 16:02:24 +0100155bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
156{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100157 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
158 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 1, model, data);
David Beck38e12942018-09-12 16:02:24 +0100159
160 if (!input0.IsValid() || !input1.IsValid())
161 {
162 return Fail("%s: Operation has invalid inputs", __func__);
163 }
164
165 // The FuseActivation parameter is always the input index 2
166 // and it should be optional
167 ActivationFn activationFunction;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100168 if (!GetOptionalInputActivation<hal_1_1::HalPolicy>(operation, 2, activationFunction, model, data))
David Beck38e12942018-09-12 16:02:24 +0100169 {
170 return Fail("%s: Operation has invalid inputs", __func__);
171 }
172
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100173 const Operand* outputOperand = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
David Beck38e12942018-09-12 16:02:24 +0100174 if (!outputOperand)
175 {
176 return false;
177 }
178
179 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
180
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100181 if (!IsLayerSupportedForAnyBackend(__func__,
182 armnn::IsSubtractionSupported,
183 data.m_Backends,
184 input0.GetTensorInfo(),
185 input1.GetTensorInfo(),
186 outInfo))
David Beck38e12942018-09-12 16:02:24 +0100187 {
188 return false;
189 }
190
191 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
192 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer, data);
193
194 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
195 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
196
197 if (endLayer)
198 {
199 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100200 return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *endLayer, model, data);
David Beck38e12942018-09-12 16:02:24 +0100201 }
202
203 return Fail("%s: ProcessActivation failed", __func__);
204}
205
narpra013c052562018-09-17 14:25:04 +0100206bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
207{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100208 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
narpra013c052562018-09-17 14:25:04 +0100209 if (!input.IsValid())
210 {
211 return Fail("%s: Operation has invalid inputs", __func__);
212 }
213
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100214 const Operand* axisOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
Matteo Martincighae622b72018-10-23 18:25:38 +0100215 if (!axisOperand)
216 {
217 return Fail("%s: Could not read input 1", __func__);
218 }
219
220 std::vector<int32_t> axis;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100221 if (!GetTensorInt32Values<hal_1_1::HalPolicy>(*axisOperand, axis, model, data))
Matteo Martincighae622b72018-10-23 18:25:38 +0100222 {
223 return Fail("%s: Input 1 has invalid values", __func__);
224 }
225
226 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
227
228 // Convert the axis to unsigned int and remove duplicates.
229 unsigned int rank = inputInfo.GetNumDimensions();
230 std::set<unsigned int> uniqueAxis;
231 std::transform(axis.begin(), axis.end(),
232 std::inserter(uniqueAxis, uniqueAxis.begin()),
233 [rank](int i) -> unsigned int { return (i + rank) % rank; });
234
235 // Get the "keep dims" flag.
236 int32_t keepDims = 0;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100237 if (!GetInputInt32<hal_1_1::HalPolicy>(operation, 2, keepDims, model, data))
Matteo Martincighae622b72018-10-23 18:25:38 +0100238 {
239 return Fail("%s: Could not read input 2", __func__);
240 }
narpra013c052562018-09-17 14:25:04 +0100241
242 armnn::MeanDescriptor descriptor;
Matteo Martincighae622b72018-10-23 18:25:38 +0100243 descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
244 descriptor.m_KeepDims = keepDims > 0;
narpra013c052562018-09-17 14:25:04 +0100245
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100246 const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
narpra013c052562018-09-17 14:25:04 +0100247 if (!output)
248 {
249 return Fail("%s: Could not read output 0", __func__);
250 }
251
252 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
253
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100254 if (!IsLayerSupportedForAnyBackend(__func__,
255 armnn::IsMeanSupported,
256 data.m_Backends,
257 inputInfo,
258 outputInfo,
259 descriptor))
narpra013c052562018-09-17 14:25:04 +0100260 {
261 return false;
262 }
263
264 armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
narpra0196bedf02018-09-26 16:57:28 +0100265 assert(layer != nullptr);
266 input.Connect(layer->GetInputSlot(0));
narpra013c052562018-09-17 14:25:04 +0100267
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100268 return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
narpra013c052562018-09-17 14:25:04 +0100269}
270
Nina Drozd62a4a9f2018-10-01 14:20:25 +0100271bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
272{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100273 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
Nina Drozd62a4a9f2018-10-01 14:20:25 +0100274 if (!input.IsValid())
275 {
276 return Fail("%s: Operation has invalid inputs", __func__);
277 }
278
279 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
Nina Drozd62a4a9f2018-10-01 14:20:25 +0100280 unsigned int rank = inputInfo.GetNumDimensions();
Nina Drozd62a4a9f2018-10-01 14:20:25 +0100281
Nina Drozd62a4a9f2018-10-01 14:20:25 +0100282 armnn::PadDescriptor descriptor;
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100283 if (!ConvertPaddings<hal_1_1::HalPolicy>(operation, model, data, rank, descriptor))
Nina Drozd62a4a9f2018-10-01 14:20:25 +0100284 {
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100285 return Fail("%s: Could not convert paddings", __func__);
Nina Drozd62a4a9f2018-10-01 14:20:25 +0100286 }
287
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100288 const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
Nina Drozd62a4a9f2018-10-01 14:20:25 +0100289 if (!output)
290 {
Aron Virginas-Tarcb8ac842019-07-05 15:47:07 +0100291 return Fail("%s: Could not read output", __func__);
Nina Drozd62a4a9f2018-10-01 14:20:25 +0100292 }
293
294 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
295
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100296 if (!IsLayerSupportedForAnyBackend(__func__,
297 armnn::IsPadSupported,
298 data.m_Backends,
299 inputInfo,
300 outputInfo,
301 descriptor))
Nina Drozd62a4a9f2018-10-01 14:20:25 +0100302 {
303 return false;
304 }
305
306 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
307 assert(layer != nullptr);
308 input.Connect(layer->GetInputSlot(0));
309 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
310
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100311 return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
Nina Drozd62a4a9f2018-10-01 14:20:25 +0100312}
313
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000314bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
315{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100316 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000317
318 if (!input.IsValid())
319 {
320 return Fail("%s: Operation has invalid inputs", __func__);
321 }
322
323 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
324 unsigned int rank = inputInfo.GetNumDimensions();
325 unsigned int spatialDim = rank - 2;
326
327 if (rank != 4)
328 {
329 Fail("%s: Only inputs with rank 4 are supported", __func__);
330 }
331
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100332 const Operand* blockShapeOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
333 const Operand* paddingsOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 2, model);
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000334
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100335 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000336 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
337 {
338 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
339 }
340
341 std::vector<int32_t> blockShape;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100342 GetTensorInt32Values<hal_1_1::HalPolicy>(*blockShapeOperand, blockShape, model, data);
Sadik Armagan8bef7b32018-12-20 14:14:12 +0000343 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000344 {
Sadik Armagan8bef7b32018-12-20 14:14:12 +0000345 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000346 }
347
348 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
349 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
350 {
351 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
352 }
353
Sadik Armagan8bef7b32018-12-20 14:14:12 +0000354 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000355 std::vector<int32_t> paddings;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100356 GetTensorInt32Values<hal_1_1::HalPolicy>(*paddingsOperand, paddings, model, data);
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000357 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
358 {
359 int paddingBeforeInput = paddings[i];
360 int paddingAfterInput = paddings[i + 1];
361 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
362 {
363 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
364 }
365
Sadik Armagan8bef7b32018-12-20 14:14:12 +0000366 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000367 }
368
Sadik Armagan8bef7b32018-12-20 14:14:12 +0000369 armnn::SpaceToBatchNdDescriptor descriptor;
370 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
371 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
372 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
373
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100374 const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000375 if (!output)
376 {
377 return Fail("%s: Could not read output 0", __func__);
378 }
379
380 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100381 if (!IsLayerSupportedForAnyBackend(__func__,
382 armnn::IsSpaceToBatchNdSupported,
383 data.m_Backends,
384 inputInfo,
385 outputInfo,
386 descriptor))
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000387 {
388 return false;
389 }
390
391 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
392 assert(layer != nullptr);
393 input.Connect(layer->GetInputSlot(0));
394
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100395 return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000396}
397
saoste01b8471482018-10-10 09:44:51 +0100398bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
399{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100400 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
saoste01b8471482018-10-10 09:44:51 +0100401
402 if (!input.IsValid())
403 {
404 return Fail("%s: Operation has invalid inputs", __func__);
405 }
406
407 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
408
409 unsigned int rank = inputInfo.GetNumDimensions();
saoste01fe463152018-10-18 17:49:56 +0100410 if (rank > 4)
saoste01b8471482018-10-10 09:44:51 +0100411 {
saoste01fe463152018-10-18 17:49:56 +0100412 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
saoste01b8471482018-10-10 09:44:51 +0100413 }
414
415 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
416 // if the operand index is out of bounds.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100417 const Operand* axisOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model, false);
saoste01b8471482018-10-10 09:44:51 +0100418
saoste01fe463152018-10-18 17:49:56 +0100419 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
420
saoste01b8471482018-10-10 09:44:51 +0100421 std::vector<int32_t> axis;
saoste01fe463152018-10-18 17:49:56 +0100422 if (!axisOperand)
saoste01b8471482018-10-10 09:44:51 +0100423 {
424 axis.assign(dimensionSequence,
saoste01fe463152018-10-18 17:49:56 +0100425 dimensionSequence + rank);
saoste01b8471482018-10-10 09:44:51 +0100426 }
427 else
428 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100429 GetTensorInt32Values<hal_1_1::HalPolicy>(*axisOperand, axis, model, data);
saoste01b8471482018-10-10 09:44:51 +0100430 }
431
saoste01b8471482018-10-10 09:44:51 +0100432
saoste01a893efa2018-10-13 11:56:12 +0100433 std::vector<uint32_t> outputDims;
saoste01fe463152018-10-18 17:49:56 +0100434 for (unsigned int i = 0; i < rank; i++)
saoste01a893efa2018-10-13 11:56:12 +0100435 {
436 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
437 auto currentDimension = inputInfo.GetShape()[i];
saoste01b8471482018-10-10 09:44:51 +0100438 if (skipSqueeze || currentDimension != 1)
439 {
440 outputDims.push_back(currentDimension);
441 }
442 }
443
saoste01fe463152018-10-18 17:49:56 +0100444 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
saoste01b8471482018-10-10 09:44:51 +0100445
446 armnn::TensorInfo outputInfo = inputInfo;
447 outputInfo.SetShape(outShape);
448
449 armnn::ReshapeDescriptor reshapeDesc;
450 reshapeDesc.m_TargetShape = outputInfo.GetShape();
451
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100452 const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
saoste01b8471482018-10-10 09:44:51 +0100453 if (!output)
454 {
455 return Fail("%s: Could not read output 0", __func__);
456 }
457
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100458 if (!IsLayerSupportedForAnyBackend(__func__,
459 armnn::IsReshapeSupported,
460 data.m_Backends,
461 inputInfo,
462 reshapeDesc))
saoste01b8471482018-10-10 09:44:51 +0100463 {
464 return false;
465 }
466
467 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
468 assert(layer != nullptr);
469 input.Connect(layer->GetInputSlot(0));
saoste01fe463152018-10-18 17:49:56 +0100470
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100471 return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
saoste01fe463152018-10-18 17:49:56 +0100472}
473
Sadik Armagan758eee82018-11-15 15:34:49 +0000474bool HalPolicy::ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
475{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100476 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
Sadik Armagan758eee82018-11-15 15:34:49 +0000477 if (!input.IsValid())
478 {
479 return Fail("%s: Operation has invalid inputs", __func__);
480 }
481 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
482
483 unsigned int rank = inputInfo.GetNumDimensions();
484 if (rank > 4)
485 {
486 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
487 }
488
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100489 const Operand* beginOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
490 const Operand* endOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 2, model);
491 const Operand* stridesOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 3, model);
Sadik Armagan758eee82018-11-15 15:34:49 +0000492
493 std::vector<int32_t> beginValues;
494 std::vector<int32_t> endValues;
495 std::vector<int32_t> stridesValues;
496
497 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
498 auto ValidateInputOperands = [&] (const Operand& operand, std::vector<int32_t>& operandValues)
499 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100500 if (!GetTensorInt32Values<hal_1_1::HalPolicy>(operand, operandValues, model, data))
Sadik Armagan758eee82018-11-15 15:34:49 +0000501 {
502 return false;
503 }
504
505 if (operandValues.size() != rank)
506 {
507 return false;
508 }
509
510 return true;
511 };
512
513 if (!ValidateInputOperands(*beginOperand, beginValues)
514 || !ValidateInputOperands(*endOperand, endValues)
515 || !ValidateInputOperands(*stridesOperand, stridesValues))
516 {
517 return Fail("%s: Operation has invalid input operand", __func__);
518 }
519
520 // Stride cannot have value '0'
521 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
522 {
523 return Fail("%s: Stride must be non-zero value.", __func__);
524 }
525
526 armnn::StridedSliceDescriptor descriptor;
527 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
528 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
529 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
530 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
531
532 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100533 if (!GetInputInt32<hal_1_1::HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
534 !GetInputInt32<hal_1_1::HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
535 !GetInputInt32<hal_1_1::HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
Sadik Armagan758eee82018-11-15 15:34:49 +0000536 {
537 return Fail("%s: Operation has invalid inputs", __func__);
538 }
539
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100540 const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
Sadik Armagan758eee82018-11-15 15:34:49 +0000541 if (!output)
542 {
543 return Fail("%s: Could not read output 0", __func__);
544 }
545 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
546
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100547 if (!IsLayerSupportedForAnyBackend(__func__,
548 armnn::IsStridedSliceSupported,
549 data.m_Backends,
550 inputInfo,
551 outputInfo,
552 descriptor))
Sadik Armagan758eee82018-11-15 15:34:49 +0000553 {
554 return false;
555 }
556
557 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
558 assert(layer != nullptr);
559 input.Connect(layer->GetInputSlot(0));
560
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100561 return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
Sadik Armagan758eee82018-11-15 15:34:49 +0000562}
563
saoste01fe463152018-10-18 17:49:56 +0100564bool HalPolicy::ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
565{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100566 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
saoste01fe463152018-10-18 17:49:56 +0100567
568 if (!input.IsValid())
569 {
570 return Fail("%s: Operation has invalid inputs", __func__);
571 }
572
573 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
574
575 unsigned int rank = inputInfo.GetNumDimensions();
576 if (rank > 4)
577 {
578 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
579 }
580
581 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
582 // if the operand index is out of bounds.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100583 const Operand* permOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model, false);
saoste01fe463152018-10-18 17:49:56 +0100584
585 std::vector<int32_t> perm(rank);
586 if (!permOperand)
587 {
588 // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
589 for (unsigned int i = rank; i > 0; i--)
590 {
591 perm[rank - i] = boost::numeric_cast<int> (i - 1);
592 }
593 }
594 else
595 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100596 GetTensorInt32Values<hal_1_1::HalPolicy>(*permOperand, perm, model, data);
saoste01fe463152018-10-18 17:49:56 +0100597 }
598
599 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
600
601 auto permutationVector = armnn::PermutationVector(outputDims.data(), outputDims.size());
602 if (!permutationVector.IsEqual(NHWCToArmNN)
603 && !permutationVector.IsEqual(ArmNNToNHWC)
604 && !permutationVector.IsEqual({ 3, 2, 0, 1 }))
605 {
606 return Fail("%s: Only [0, 3, 1, 2], [0, 2, 3, 1] and [3, 2, 0, 1] permutations are supported.", __func__);
607 }
608
609 armnn::PermuteDescriptor permuteDesc;
610 permuteDesc.m_DimMappings = permutationVector;
611
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100612 const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
saoste01fe463152018-10-18 17:49:56 +0100613 if (!output)
614 {
615 return Fail("%s: Could not read output 0", __func__);
616 }
617
618 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
619
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100620 if (!IsLayerSupportedForAnyBackend(__func__,
621 armnn::IsPermuteSupported,
622 data.m_Backends,
623 inputInfo,
624 outputInfo,
625 permuteDesc))
saoste01fe463152018-10-18 17:49:56 +0100626 {
627 return false;
628 }
629
630 armnn::IConnectableLayer* const layer = data.m_Network->AddPermuteLayer(permuteDesc);
631 assert(layer != nullptr);
632 input.Connect(layer->GetInputSlot(0));
saoste01b8471482018-10-10 09:44:51 +0100633
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100634 return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
saoste01b8471482018-10-10 09:44:51 +0100635}
636
Éanna Ó Catháin2cd99b92018-11-14 14:33:52 +0000637bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data)
638{
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100639 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
Éanna Ó Catháin2cd99b92018-11-14 14:33:52 +0000640 if (!input.IsValid())
641 {
642 return Fail("%s: Operation has invalid inputs", __func__);
643 }
644
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100645 const Operand* blockOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
Éanna Ó Catháin2cd99b92018-11-14 14:33:52 +0000646 if (!blockOperand)
647 {
648 return Fail("%s: Could not read input 1", __func__);
649 }
650
651 // Convert the block operand to int32
652 std::vector<int32_t> block;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100653 if (!GetTensorInt32Values<hal_1_1::HalPolicy>(*blockOperand, block, model, data))
Éanna Ó Catháin2cd99b92018-11-14 14:33:52 +0000654 {
655 return Fail("%s: Input 1 has invalid values", __func__);
656 }
657
658 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
659
660 unsigned int rank = inputInfo.GetNumDimensions();
661 if (rank != 4)
662 {
663 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
664 }
665
666 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
667 {
668 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
669 " greater than or equal to 1", __func__);
670 }
671
672 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
673 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
674 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
675
676 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
677 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
678
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100679 const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
Éanna Ó Catháin2cd99b92018-11-14 14:33:52 +0000680 if (!output)
681 {
682 return Fail("%s: Could not read output 0", __func__);
683 }
684
685 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
686
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100687 if (!IsLayerSupportedForAnyBackend(__func__,
688 armnn::IsBatchToSpaceNdSupported,
689 data.m_Backends,
690 inputInfo,
691 outputInfo,
692 batchToSpaceNdDesc))
Éanna Ó Catháin2cd99b92018-11-14 14:33:52 +0000693 {
694 return false;
695 }
696
697 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
698 assert(layer != nullptr);
699 input.Connect(layer->GetInputSlot(0));
700
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100701 return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
Éanna Ó Catháin2cd99b92018-11-14 14:33:52 +0000702}
703
} // namespace hal_1_1
} // namespace armnn_driver