blob: ab8224a0c487f6fadf2fc41f118b8b15d3adc5cc [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "HalPolicy.hpp"
7
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +01008#include "Utils.hpp"
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +01009
arovir01b0717b52018-09-05 17:03:25 +010010#include "../1.0/HalPolicy.hpp"
11
Éanna Ó Catháin2fc21f72019-05-13 11:01:33 +010012namespace
13{
14static std::vector<V1_0::OperationType> opsEquivalentInV10({
15 V1_0::OperationType::ADD,
16 V1_0::OperationType::AVERAGE_POOL_2D,
17 V1_0::OperationType::CONCATENATION,
18 V1_0::OperationType::CONV_2D,
19 V1_0::OperationType::DEPTHWISE_CONV_2D,
David Monahand5bfae12019-05-30 12:07:44 +010020 V1_0::OperationType::DEQUANTIZE,
Éanna Ó Catháin2fc21f72019-05-13 11:01:33 +010021 V1_0::OperationType::FLOOR,
22 V1_0::OperationType::FULLY_CONNECTED,
23 V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION,
24 V1_0::OperationType::LOGISTIC,
25 V1_0::OperationType::LSTM,
26 V1_0::OperationType::L2_NORMALIZATION,
27 V1_0::OperationType::L2_POOL_2D,
28 V1_0::OperationType::MAX_POOL_2D,
29 V1_0::OperationType::MUL,
30 V1_0::OperationType::RELU,
31 V1_0::OperationType::RELU1,
32 V1_0::OperationType::RELU6,
33 V1_0::OperationType::SOFTMAX,
Keith Davisa6bc52f2019-06-26 09:39:49 +010034 V1_0::OperationType::SPACE_TO_DEPTH,
Éanna Ó Catháin2fc21f72019-05-13 11:01:33 +010035 V1_0::OperationType::TANH,
36 V1_0::OperationType::RESHAPE,
37 V1_0::OperationType::RESIZE_BILINEAR,
38});
39
40bool CompliantWithVersion10(const V1_1::Operation & operation)
41{
42 std::vector<V1_0::OperationType>::iterator it;
43 it = std::find(opsEquivalentInV10.begin(), opsEquivalentInV10.end(),
44 static_cast<V1_0::OperationType>(operation.type));
45
46 if(it != opsEquivalentInV10.end())
47 {
48 return true;
49 }
50 return false;
51}
52
53V1_0::Operation ConvertOperationToVersion10(const V1_1::Operation & operation)
54{
55 V1_0::Operation v10Operation;
56 v10Operation.type = static_cast<V1_0::OperationType>(operation.type);
57 v10Operation.inputs = operation.inputs;
58 v10Operation.outputs = operation.outputs;
59 return v10Operation;
60}
61}
62
arovir01b0717b52018-09-05 17:03:25 +010063namespace armnn_driver
64{
65namespace hal_1_1
66{
67
68bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
69{
Éanna Ó Catháin2fc21f72019-05-13 11:01:33 +010070 if (CompliantWithVersion10(operation))
arovir01b0717b52018-09-05 17:03:25 +010071 {
Éanna Ó Catháin2fc21f72019-05-13 11:01:33 +010072 hal_1_0::HalPolicy::Operation v10Operation = ConvertOperationToVersion10(operation);
arovir01b0717b52018-09-05 17:03:25 +010073 hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);
74
75 return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
76 }
77 else
78 {
79 switch (operation.type)
80 {
81 case V1_1::OperationType::DIV:
82 return ConvertDiv(operation, model, data);
David Beck38e12942018-09-12 16:02:24 +010083 case V1_1::OperationType::SUB:
84 return ConvertSub(operation, model, data);
narpra013c052562018-09-17 14:25:04 +010085 case V1_1::OperationType::MEAN:
86 return ConvertMean(operation, model, data);
Nina Drozd62a4a9f2018-10-01 14:20:25 +010087 case V1_1::OperationType::PAD:
Aron Virginas-Tarc921f6b2019-07-25 10:14:33 +010088 return ConvertPad(operation, model, data);
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +000089 case V1_1::OperationType::SPACE_TO_BATCH_ND:
90 return ConvertSpaceToBatchNd(operation, model, data);
saoste01b8471482018-10-10 09:44:51 +010091 case V1_1::OperationType::SQUEEZE:
92 return ConvertSqueeze(operation, model, data);
Sadik Armagan758eee82018-11-15 15:34:49 +000093 case V1_1::OperationType::STRIDED_SLICE:
94 return ConvertStridedSlice(operation, model, data);
saoste01fe463152018-10-18 17:49:56 +010095 case V1_1::OperationType::TRANSPOSE:
96 return ConvertTranspose(operation, model, data);
Éanna Ó Catháin2cd99b92018-11-14 14:33:52 +000097 case V1_1::OperationType::BATCH_TO_SPACE_ND:
98 return ConvertBatchToSpaceNd(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +010099 default:
100 return Fail("%s: Operation type %s not supported in ArmnnDriver",
101 __func__, toString(operation.type).c_str());
102 }
103 }
104}
105
106bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
107{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100108 ALOGV("hal_1_1::HalPolicy::ConvertDiv()");
109
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100110 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
111 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 1, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100112
113 if (!input0.IsValid() || !input1.IsValid())
114 {
115 return Fail("%s: Operation has invalid inputs", __func__);
116 }
117
118 // The FuseActivation parameter is always the input index 2
119 // and it should be optional
120 ActivationFn activationFunction;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100121 if (!GetOptionalInputActivation<hal_1_1::HalPolicy>(operation, 2, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100122 {
123 return Fail("%s: Operation has invalid inputs", __func__);
124 }
125
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100126 const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
127 if (!output)
arovir01b0717b52018-09-05 17:03:25 +0100128 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100129 return Fail("%s: Could not read output 0", __func__);
arovir01b0717b52018-09-05 17:03:25 +0100130 }
131
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100132 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
133 if (IsDynamicTensor(outputInfo))
134 {
135 return Fail("%s: Dynamic output tensors are not supported", __func__);
136 }
arovir01b0717b52018-09-05 17:03:25 +0100137
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100138 bool isSupported = false;
139 FORWARD_LAYER_SUPPORT_FUNC(__func__,
140 IsDivisionSupported,
141 data.m_Backends,
142 isSupported,
143 input0.GetTensorInfo(),
144 input1.GetTensorInfo(),
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100145 outputInfo);
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100146 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +0100147 {
148 return false;
149 }
150
151 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100152 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
arovir01b0717b52018-09-05 17:03:25 +0100153
154 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
155 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
156
157 if (endLayer)
158 {
159 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100160 return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *endLayer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100161 }
162
163 return Fail("%s: ProcessActivation failed", __func__);
164}
165
David Beck38e12942018-09-12 16:02:24 +0100166bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
167{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100168 ALOGV("hal_1_1::HalPolicy::ConvertSub()");
169
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100170 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
171 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 1, model, data);
David Beck38e12942018-09-12 16:02:24 +0100172
173 if (!input0.IsValid() || !input1.IsValid())
174 {
175 return Fail("%s: Operation has invalid inputs", __func__);
176 }
177
178 // The FuseActivation parameter is always the input index 2
179 // and it should be optional
180 ActivationFn activationFunction;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100181 if (!GetOptionalInputActivation<hal_1_1::HalPolicy>(operation, 2, activationFunction, model, data))
David Beck38e12942018-09-12 16:02:24 +0100182 {
183 return Fail("%s: Operation has invalid inputs", __func__);
184 }
185
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100186 const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
187 if (!output)
David Beck38e12942018-09-12 16:02:24 +0100188 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100189 return Fail("%s: Could not read output 0", __func__);
David Beck38e12942018-09-12 16:02:24 +0100190 }
191
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100192 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +0100193 if (IsDynamicTensor(outputInfo))
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +0100194 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100195 return Fail("%s: Dynamic output tensors are not supported", __func__);
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +0100196 }
David Beck38e12942018-09-12 16:02:24 +0100197
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100198 bool isSupported = false;
199 FORWARD_LAYER_SUPPORT_FUNC(__func__,
200 IsSubtractionSupported,
201 data.m_Backends,
202 isSupported,
203 input0.GetTensorInfo(),
204 input1.GetTensorInfo(),
205 outputInfo);
206 if (!isSupported)
David Beck38e12942018-09-12 16:02:24 +0100207 {
208 return false;
209 }
210
211 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +0100212 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
David Beck38e12942018-09-12 16:02:24 +0100213
214 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
215 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
216
217 if (endLayer)
218 {
219 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100220 return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *endLayer, model, data);
David Beck38e12942018-09-12 16:02:24 +0100221 }
222
223 return Fail("%s: ProcessActivation failed", __func__);
224}
225
// Converts a HAL 1.1 MEAN operation into an ArmNN Mean layer.
// Inputs: 0 = tensor to reduce, 1 = axes to reduce over, 2 = "keep dims" flag.
bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_1::HalPolicy::ConvertMean()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Input 1 holds the reduction axes as an int32 tensor.
    const Operand* axisOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
    if (!axisOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    std::vector<int32_t> axis;
    if (!GetTensorInt32Values<hal_1_1::HalPolicy>(*axisOperand, axis, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // Convert the axis to unsigned int and remove duplicates.
    // (i + rank) % rank maps negative axis values into [0, rank); the std::set
    // both de-duplicates and sorts the result.
    unsigned int rank = inputInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });

    // Get the "keep dims" flag.
    int32_t keepDims = 0;
    if (!GetInputInt32<hal_1_1::HalPolicy>(operation, 2, keepDims, model, data))
    {
        return Fail("%s: Could not read input 2", __func__);
    }

    armnn::MeanDescriptor descriptor;
    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    descriptor.m_KeepDims = keepDims > 0;

    // Check that at least one configured backend supports this Mean layer.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMeanSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
}
299
Aron Virginas-Tarc921f6b2019-07-25 10:14:33 +0100300bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
301{
302 ALOGV("hal_1_1::HalPolicy::ConvertPad()");
303 return ::ConvertPad<hal_1_1::HalPolicy>(operation, model, data);
304}
305
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000306bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
307{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100308 ALOGV("hal_1_1::HalPolicy::ConvertSpaceToBatchNd()");
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000309
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100310 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000311 if (!input.IsValid())
312 {
313 return Fail("%s: Operation has invalid inputs", __func__);
314 }
315
316 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
317 unsigned int rank = inputInfo.GetNumDimensions();
318 unsigned int spatialDim = rank - 2;
319
320 if (rank != 4)
321 {
322 Fail("%s: Only inputs with rank 4 are supported", __func__);
323 }
324
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100325 const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
326 if (!output)
327 {
328 return Fail("%s: Could not read output 0", __func__);
329 }
330
331 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
332 if (IsDynamicTensor(outputInfo))
333 {
334 return Fail("%s: Dynamic output tensors are not supported", __func__);
335 }
336
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100337 const Operand* blockShapeOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
338 const Operand* paddingsOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 2, model);
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000339
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100340 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000341 if (blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
342 {
343 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
344 }
345
346 std::vector<int32_t> blockShape;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100347 GetTensorInt32Values<hal_1_1::HalPolicy>(*blockShapeOperand, blockShape, model, data);
Sadik Armagan8bef7b32018-12-20 14:14:12 +0000348 if (std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i){ return i < 1; }))
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000349 {
Sadik Armagan8bef7b32018-12-20 14:14:12 +0000350 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000351 }
352
353 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
354 if (paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
355 {
356 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
357 }
358
Sadik Armagan8bef7b32018-12-20 14:14:12 +0000359 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000360 std::vector<int32_t> paddings;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100361 GetTensorInt32Values<hal_1_1::HalPolicy>(*paddingsOperand, paddings, model, data);
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000362 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
363 {
364 int paddingBeforeInput = paddings[i];
365 int paddingAfterInput = paddings[i + 1];
366 if (paddingBeforeInput < 0 || paddingAfterInput < 0)
367 {
368 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
369 }
370
Sadik Armagan8bef7b32018-12-20 14:14:12 +0000371 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000372 }
373
Sadik Armagan8bef7b32018-12-20 14:14:12 +0000374 armnn::SpaceToBatchNdDescriptor descriptor;
375 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
376 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
377 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
378
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100379 bool isSupported = false;
380 FORWARD_LAYER_SUPPORT_FUNC(__func__,
381 IsSpaceToBatchNdSupported,
382 data.m_Backends,
383 isSupported,
384 inputInfo,
385 outputInfo,
386 descriptor);
387 if (!isSupported)
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000388 {
389 return false;
390 }
391
392 armnn::IConnectableLayer* const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
393 assert(layer != nullptr);
394 input.Connect(layer->GetInputSlot(0));
395
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100396 return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000397}
398
saoste01b8471482018-10-10 09:44:51 +0100399bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
400{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100401 ALOGV("hal_1_1::HalPolicy::ConvertSqueeze()");
saoste01b8471482018-10-10 09:44:51 +0100402
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100403 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
saoste01b8471482018-10-10 09:44:51 +0100404 if (!input.IsValid())
405 {
406 return Fail("%s: Operation has invalid inputs", __func__);
407 }
408
409 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
saoste01b8471482018-10-10 09:44:51 +0100410 unsigned int rank = inputInfo.GetNumDimensions();
saoste01fe463152018-10-18 17:49:56 +0100411 if (rank > 4)
saoste01b8471482018-10-10 09:44:51 +0100412 {
saoste01fe463152018-10-18 17:49:56 +0100413 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
saoste01b8471482018-10-10 09:44:51 +0100414 }
415
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100416 const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
417 if (!output)
418 {
419 return Fail("%s: Could not read output 0", __func__);
420 }
421
422 if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
423 {
424 return Fail("%s: Dynamic output tensors are not supported", __func__);
425 }
426
saoste01b8471482018-10-10 09:44:51 +0100427 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
428 // if the operand index is out of bounds.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100429 const Operand* axisOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model, false);
saoste01b8471482018-10-10 09:44:51 +0100430
saoste01fe463152018-10-18 17:49:56 +0100431 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
432
saoste01b8471482018-10-10 09:44:51 +0100433 std::vector<int32_t> axis;
saoste01fe463152018-10-18 17:49:56 +0100434 if (!axisOperand)
saoste01b8471482018-10-10 09:44:51 +0100435 {
436 axis.assign(dimensionSequence,
saoste01fe463152018-10-18 17:49:56 +0100437 dimensionSequence + rank);
saoste01b8471482018-10-10 09:44:51 +0100438 }
439 else
440 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100441 GetTensorInt32Values<hal_1_1::HalPolicy>(*axisOperand, axis, model, data);
saoste01b8471482018-10-10 09:44:51 +0100442 }
443
saoste01b8471482018-10-10 09:44:51 +0100444
saoste01a893efa2018-10-13 11:56:12 +0100445 std::vector<uint32_t> outputDims;
saoste01fe463152018-10-18 17:49:56 +0100446 for (unsigned int i = 0; i < rank; i++)
saoste01a893efa2018-10-13 11:56:12 +0100447 {
448 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
449 auto currentDimension = inputInfo.GetShape()[i];
saoste01b8471482018-10-10 09:44:51 +0100450 if (skipSqueeze || currentDimension != 1)
451 {
452 outputDims.push_back(currentDimension);
453 }
454 }
455
saoste01fe463152018-10-18 17:49:56 +0100456 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
saoste01b8471482018-10-10 09:44:51 +0100457
458 armnn::TensorInfo outputInfo = inputInfo;
459 outputInfo.SetShape(outShape);
460
461 armnn::ReshapeDescriptor reshapeDesc;
462 reshapeDesc.m_TargetShape = outputInfo.GetShape();
463
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100464 bool isSupported = false;
465 FORWARD_LAYER_SUPPORT_FUNC(__func__,
466 IsReshapeSupported,
467 data.m_Backends,
468 isSupported,
469 inputInfo,
470 reshapeDesc);
471 if (!isSupported)
saoste01b8471482018-10-10 09:44:51 +0100472 {
473 return false;
474 }
475
476 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
477 assert(layer != nullptr);
478 input.Connect(layer->GetInputSlot(0));
saoste01fe463152018-10-18 17:49:56 +0100479
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100480 return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
saoste01fe463152018-10-18 17:49:56 +0100481}
482
Sadik Armagan758eee82018-11-15 15:34:49 +0000483bool HalPolicy::ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
484{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100485 ALOGV("hal_1_1::HalPolicy::ConvertStridedSlice()");
486
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100487 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
Sadik Armagan758eee82018-11-15 15:34:49 +0000488 if (!input.IsValid())
489 {
490 return Fail("%s: Operation has invalid inputs", __func__);
491 }
Sadik Armagan758eee82018-11-15 15:34:49 +0000492
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100493 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
Sadik Armagan758eee82018-11-15 15:34:49 +0000494 unsigned int rank = inputInfo.GetNumDimensions();
495 if (rank > 4)
496 {
497 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
498 }
499
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100500 const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
501 if (!output)
502 {
503 return Fail("%s: Could not read output 0", __func__);
504 }
505
506 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
507 if (IsDynamicTensor(outputInfo))
508 {
509 return Fail("%s: Dynamic output tensors are not supported", __func__);
510 }
511
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100512 const Operand* beginOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
513 const Operand* endOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 2, model);
514 const Operand* stridesOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 3, model);
Sadik Armagan758eee82018-11-15 15:34:49 +0000515
516 std::vector<int32_t> beginValues;
517 std::vector<int32_t> endValues;
518 std::vector<int32_t> stridesValues;
519
520 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
521 auto ValidateInputOperands = [&] (const Operand& operand, std::vector<int32_t>& operandValues)
522 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100523 if (!GetTensorInt32Values<hal_1_1::HalPolicy>(operand, operandValues, model, data))
Sadik Armagan758eee82018-11-15 15:34:49 +0000524 {
525 return false;
526 }
527
528 if (operandValues.size() != rank)
529 {
530 return false;
531 }
532
533 return true;
534 };
535
536 if (!ValidateInputOperands(*beginOperand, beginValues)
537 || !ValidateInputOperands(*endOperand, endValues)
538 || !ValidateInputOperands(*stridesOperand, stridesValues))
539 {
540 return Fail("%s: Operation has invalid input operand", __func__);
541 }
542
543 // Stride cannot have value '0'
544 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
545 {
546 return Fail("%s: Stride must be non-zero value.", __func__);
547 }
548
549 armnn::StridedSliceDescriptor descriptor;
550 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
551 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
552 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
553 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
554
555 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100556 if (!GetInputInt32<hal_1_1::HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
557 !GetInputInt32<hal_1_1::HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
558 !GetInputInt32<hal_1_1::HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
Sadik Armagan758eee82018-11-15 15:34:49 +0000559 {
560 return Fail("%s: Operation has invalid inputs", __func__);
561 }
562
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100563 bool isSupported = false;
564 FORWARD_LAYER_SUPPORT_FUNC(__func__,
565 IsStridedSliceSupported,
566 data.m_Backends,
567 isSupported,
568 inputInfo,
569 outputInfo,
570 descriptor);
571 if (!isSupported)
Sadik Armagan758eee82018-11-15 15:34:49 +0000572 {
573 return false;
574 }
575
576 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
577 assert(layer != nullptr);
578 input.Connect(layer->GetInputSlot(0));
579
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100580 return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
Sadik Armagan758eee82018-11-15 15:34:49 +0000581}
582
saoste01fe463152018-10-18 17:49:56 +0100583bool HalPolicy::ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
584{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100585 ALOGV("hal_1_1::HalPolicy::ConvertTranspose()");
saoste01fe463152018-10-18 17:49:56 +0100586
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100587 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
saoste01fe463152018-10-18 17:49:56 +0100588 if (!input.IsValid())
589 {
590 return Fail("%s: Operation has invalid inputs", __func__);
591 }
592
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100593 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
saoste01fe463152018-10-18 17:49:56 +0100594 unsigned int rank = inputInfo.GetNumDimensions();
595 if (rank > 4)
596 {
597 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
598 }
599
600 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
601 // if the operand index is out of bounds.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100602 const Operand* permOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model, false);
saoste01fe463152018-10-18 17:49:56 +0100603
604 std::vector<int32_t> perm(rank);
605 if (!permOperand)
606 {
607 // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
608 for (unsigned int i = rank; i > 0; i--)
609 {
610 perm[rank - i] = boost::numeric_cast<int> (i - 1);
611 }
612 }
613 else
614 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100615 GetTensorInt32Values<hal_1_1::HalPolicy>(*permOperand, perm, model, data);
saoste01fe463152018-10-18 17:49:56 +0100616 }
617
618 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
619
620 auto permutationVector = armnn::PermutationVector(outputDims.data(), outputDims.size());
621 if (!permutationVector.IsEqual(NHWCToArmNN)
622 && !permutationVector.IsEqual(ArmNNToNHWC)
623 && !permutationVector.IsEqual({ 3, 2, 0, 1 }))
624 {
625 return Fail("%s: Only [0, 3, 1, 2], [0, 2, 3, 1] and [3, 2, 0, 1] permutations are supported.", __func__);
626 }
627
628 armnn::PermuteDescriptor permuteDesc;
629 permuteDesc.m_DimMappings = permutationVector;
630
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100631 const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
saoste01fe463152018-10-18 17:49:56 +0100632 if (!output)
633 {
634 return Fail("%s: Could not read output 0", __func__);
635 }
636
637 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
638
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100639 bool isSupported = false;
640 FORWARD_LAYER_SUPPORT_FUNC(__func__,
641 IsPermuteSupported,
642 data.m_Backends,
643 isSupported,
644 inputInfo,
645 outputInfo,
646 permuteDesc);
647 if (!isSupported)
saoste01fe463152018-10-18 17:49:56 +0100648 {
649 return false;
650 }
651
652 armnn::IConnectableLayer* const layer = data.m_Network->AddPermuteLayer(permuteDesc);
653 assert(layer != nullptr);
654 input.Connect(layer->GetInputSlot(0));
saoste01b8471482018-10-10 09:44:51 +0100655
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100656 return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
saoste01b8471482018-10-10 09:44:51 +0100657}
658
Éanna Ó Catháin2cd99b92018-11-14 14:33:52 +0000659bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data)
660{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100661 ALOGV("hal_1_1::HalPolicy::ConvertBatchToSpaceNd()");
662
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100663 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
Éanna Ó Catháin2cd99b92018-11-14 14:33:52 +0000664 if (!input.IsValid())
665 {
666 return Fail("%s: Operation has invalid inputs", __func__);
667 }
668
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100669 const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
670 if (!output)
671 {
672 return Fail("%s: Could not read output 0", __func__);
673 }
674
675 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
676 if (IsDynamicTensor(outputInfo))
677 {
678 return Fail("%s: Dynamic output tensors are not supported", __func__);
679 }
680
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100681 const Operand* blockOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
Éanna Ó Catháin2cd99b92018-11-14 14:33:52 +0000682 if (!blockOperand)
683 {
684 return Fail("%s: Could not read input 1", __func__);
685 }
686
687 // Convert the block operand to int32
688 std::vector<int32_t> block;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100689 if (!GetTensorInt32Values<hal_1_1::HalPolicy>(*blockOperand, block, model, data))
Éanna Ó Catháin2cd99b92018-11-14 14:33:52 +0000690 {
691 return Fail("%s: Input 1 has invalid values", __func__);
692 }
693
694 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
695
696 unsigned int rank = inputInfo.GetNumDimensions();
697 if (rank != 4)
698 {
699 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
700 }
701
702 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
703 {
704 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
705 " greater than or equal to 1", __func__);
706 }
707
708 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
709 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
710 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
711
712 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
713 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
714
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100715 bool isSupported = false;
716 FORWARD_LAYER_SUPPORT_FUNC(__func__,
717 IsBatchToSpaceNdSupported,
718 data.m_Backends,
719 isSupported,
720 inputInfo,
721 outputInfo,
722 batchToSpaceNdDesc);
723 if (!isSupported)
Éanna Ó Catháin2cd99b92018-11-14 14:33:52 +0000724 {
725 return false;
726 }
727
728 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
729 assert(layer != nullptr);
730 input.Connect(layer->GetInputSlot(0));
731
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100732 return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
Éanna Ó Catháin2cd99b92018-11-14 14:33:52 +0000733}
734
arovir01b0717b52018-09-05 17:03:25 +0100735} // namespace hal_1_1
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100736} // namespace armnn_driver