blob: e75b5c2af3210bbbd4937b94f76f297b8d6726c0 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "HalPolicy.hpp"
7
Aron Virginas-Tar573a8fa2019-07-23 14:01:37 +01008#include "Utils.hpp"
Aron Virginas-Tar366e0a62019-07-10 13:01:41 +01009
arovir01b0717b52018-09-05 17:03:25 +010010#include "../1.0/HalPolicy.hpp"
11
Éanna Ó Catháin2fc21f72019-05-13 11:01:33 +010012namespace
13{
14static std::vector<V1_0::OperationType> opsEquivalentInV10({
15 V1_0::OperationType::ADD,
16 V1_0::OperationType::AVERAGE_POOL_2D,
17 V1_0::OperationType::CONCATENATION,
18 V1_0::OperationType::CONV_2D,
19 V1_0::OperationType::DEPTHWISE_CONV_2D,
David Monahand5bfae12019-05-30 12:07:44 +010020 V1_0::OperationType::DEQUANTIZE,
Éanna Ó Catháin2fc21f72019-05-13 11:01:33 +010021 V1_0::OperationType::FLOOR,
22 V1_0::OperationType::FULLY_CONNECTED,
23 V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION,
24 V1_0::OperationType::LOGISTIC,
25 V1_0::OperationType::LSTM,
26 V1_0::OperationType::L2_NORMALIZATION,
27 V1_0::OperationType::L2_POOL_2D,
28 V1_0::OperationType::MAX_POOL_2D,
29 V1_0::OperationType::MUL,
30 V1_0::OperationType::RELU,
31 V1_0::OperationType::RELU1,
32 V1_0::OperationType::RELU6,
33 V1_0::OperationType::SOFTMAX,
Keith Davisa6bc52f2019-06-26 09:39:49 +010034 V1_0::OperationType::SPACE_TO_DEPTH,
Éanna Ó Catháin2fc21f72019-05-13 11:01:33 +010035 V1_0::OperationType::TANH,
36 V1_0::OperationType::RESHAPE,
37 V1_0::OperationType::RESIZE_BILINEAR,
38});
39
40bool CompliantWithVersion10(const V1_1::Operation & operation)
41{
42 std::vector<V1_0::OperationType>::iterator it;
43 it = std::find(opsEquivalentInV10.begin(), opsEquivalentInV10.end(),
44 static_cast<V1_0::OperationType>(operation.type));
45
46 if(it != opsEquivalentInV10.end())
47 {
48 return true;
49 }
50 return false;
51}
52
53V1_0::Operation ConvertOperationToVersion10(const V1_1::Operation & operation)
54{
55 V1_0::Operation v10Operation;
56 v10Operation.type = static_cast<V1_0::OperationType>(operation.type);
57 v10Operation.inputs = operation.inputs;
58 v10Operation.outputs = operation.outputs;
59 return v10Operation;
60}
61}
62
arovir01b0717b52018-09-05 17:03:25 +010063namespace armnn_driver
64{
65namespace hal_1_1
66{
67
68bool HalPolicy::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
69{
Éanna Ó Catháin2fc21f72019-05-13 11:01:33 +010070 if (CompliantWithVersion10(operation))
arovir01b0717b52018-09-05 17:03:25 +010071 {
Éanna Ó Catháin2fc21f72019-05-13 11:01:33 +010072 hal_1_0::HalPolicy::Operation v10Operation = ConvertOperationToVersion10(operation);
arovir01b0717b52018-09-05 17:03:25 +010073 hal_1_0::HalPolicy::Model v10Model = convertToV1_0(model);
74
75 return hal_1_0::HalPolicy::ConvertOperation(v10Operation, v10Model, data);
76 }
77 else
78 {
79 switch (operation.type)
80 {
81 case V1_1::OperationType::DIV:
82 return ConvertDiv(operation, model, data);
David Beck38e12942018-09-12 16:02:24 +010083 case V1_1::OperationType::SUB:
84 return ConvertSub(operation, model, data);
narpra013c052562018-09-17 14:25:04 +010085 case V1_1::OperationType::MEAN:
86 return ConvertMean(operation, model, data);
Nina Drozd62a4a9f2018-10-01 14:20:25 +010087 case V1_1::OperationType::PAD:
Aron Virginas-Tarc921f6b2019-07-25 10:14:33 +010088 return ConvertPad(operation, model, data);
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +000089 case V1_1::OperationType::SPACE_TO_BATCH_ND:
90 return ConvertSpaceToBatchNd(operation, model, data);
saoste01b8471482018-10-10 09:44:51 +010091 case V1_1::OperationType::SQUEEZE:
92 return ConvertSqueeze(operation, model, data);
Sadik Armagan758eee82018-11-15 15:34:49 +000093 case V1_1::OperationType::STRIDED_SLICE:
94 return ConvertStridedSlice(operation, model, data);
saoste01fe463152018-10-18 17:49:56 +010095 case V1_1::OperationType::TRANSPOSE:
96 return ConvertTranspose(operation, model, data);
Éanna Ó Catháin2cd99b92018-11-14 14:33:52 +000097 case V1_1::OperationType::BATCH_TO_SPACE_ND:
98 return ConvertBatchToSpaceNd(operation, model, data);
arovir01b0717b52018-09-05 17:03:25 +010099 default:
100 return Fail("%s: Operation type %s not supported in ArmnnDriver",
101 __func__, toString(operation.type).c_str());
102 }
103 }
104}
105
106bool HalPolicy::ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
107{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100108 ALOGV("hal_1_1::HalPolicy::ConvertDiv()");
109
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100110 LayerInputHandle input0 = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
111 LayerInputHandle input1 = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 1, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100112
113 if (!input0.IsValid() || !input1.IsValid())
114 {
115 return Fail("%s: Operation has invalid inputs", __func__);
116 }
117
118 // The FuseActivation parameter is always the input index 2
119 // and it should be optional
120 ActivationFn activationFunction;
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100121 if (!GetOptionalInputActivation<hal_1_1::HalPolicy>(operation, 2, activationFunction, model, data))
arovir01b0717b52018-09-05 17:03:25 +0100122 {
123 return Fail("%s: Operation has invalid inputs", __func__);
124 }
125
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100126 const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
127 if (!output)
arovir01b0717b52018-09-05 17:03:25 +0100128 {
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100129 return Fail("%s: Could not read output 0", __func__);
arovir01b0717b52018-09-05 17:03:25 +0100130 }
131
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100132 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
133 if (IsDynamicTensor(outputInfo))
134 {
135 return Fail("%s: Dynamic output tensors are not supported", __func__);
136 }
arovir01b0717b52018-09-05 17:03:25 +0100137
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100138 bool isSupported = false;
139 FORWARD_LAYER_SUPPORT_FUNC(__func__,
140 IsDivisionSupported,
141 data.m_Backends,
142 isSupported,
143 input0.GetTensorInfo(),
144 input1.GetTensorInfo(),
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100145 outputInfo);
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100146 if (!isSupported)
arovir01b0717b52018-09-05 17:03:25 +0100147 {
148 return false;
149 }
150
151 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100152 armnn::IConnectableLayer* const endLayer = ProcessActivation(outputInfo, activationFunction, startLayer, data);
arovir01b0717b52018-09-05 17:03:25 +0100153
154 const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
155 const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();
156
157 if (endLayer)
158 {
159 BroadcastTensor(input0, input1, startLayer, *data.m_Network);
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100160 return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *endLayer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100161 }
162
163 return Fail("%s: ProcessActivation failed", __func__);
164}
165
David Beck38e12942018-09-12 16:02:24 +0100166bool HalPolicy::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
167{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100168 ALOGV("hal_1_1::HalPolicy::ConvertSub()");
Mike Kelly0a879362019-07-29 16:56:31 +0100169 return ::ConvertSub<hal_1_1::HalPolicy>(operation, model, data);
David Beck38e12942018-09-12 16:02:24 +0100170}
171
// Converts an NNAPI MEAN operation into an ArmNN Mean layer.
// Inputs: 0 = tensor to reduce, 1 = axis tensor (int32), 2 = keep_dims scalar.
// Returns false (via Fail) on any invalid/unsupported input.
bool HalPolicy::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
{
    ALOGV("hal_1_1::HalPolicy::ConvertMean()");

    LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
    if (IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Input 1 holds the reduction axes.
    const Operand* axisOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
    if (!axisOperand)
    {
        return Fail("%s: Could not read input 1", __func__);
    }

    std::vector<int32_t> axis;
    if (!GetTensorInt32Values<hal_1_1::HalPolicy>(*axisOperand, axis, model, data))
    {
        return Fail("%s: Input 1 has invalid values", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();

    // Convert the axis to unsigned int and remove duplicates.
    // (i + rank) % rank maps negative axes (>= -rank) onto their positive
    // equivalents; std::set drops duplicates and yields sorted order.
    // NOTE(review): rank == 0 would make "% rank" undefined — presumably a
    // scalar input can't reach here; confirm upstream validation.
    unsigned int rank = inputInfo.GetNumDimensions();
    std::set<unsigned int> uniqueAxis;
    std::transform(axis.begin(), axis.end(),
                   std::inserter(uniqueAxis, uniqueAxis.begin()),
                   [rank](int i) -> unsigned int { return (i + rank) % rank; });

    // Get the "keep dims" flag.
    int32_t keepDims = 0;
    if (!GetInputInt32<hal_1_1::HalPolicy>(operation, 2, keepDims, model, data))
    {
        return Fail("%s: Could not read input 2", __func__);
    }

    armnn::MeanDescriptor descriptor;
    descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
    descriptor.m_KeepDims = keepDims > 0;

    // Query backend support before mutating the network.
    bool isSupported = false;
    FORWARD_LAYER_SUPPORT_FUNC(__func__,
                               IsMeanSupported,
                               data.m_Backends,
                               isSupported,
                               inputInfo,
                               outputInfo,
                               descriptor);
    if (!isSupported)
    {
        return false;
    }

    armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
}
245
Aron Virginas-Tarc921f6b2019-07-25 10:14:33 +0100246bool HalPolicy::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
247{
248 ALOGV("hal_1_1::HalPolicy::ConvertPad()");
249 return ::ConvertPad<hal_1_1::HalPolicy>(operation, model, data);
250}
251
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000252bool HalPolicy::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
253{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100254 ALOGV("hal_1_1::HalPolicy::ConvertSpaceToBatchNd()");
Finn Williamsd74c5052019-07-30 17:06:00 +0100255 return ::ConvertSpaceToBatchNd<hal_1_1::HalPolicy>(operation, model, data);
Nattapat Chaimanowong81a68342018-11-05 14:04:47 +0000256}
257
saoste01b8471482018-10-10 09:44:51 +0100258bool HalPolicy::ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
259{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100260 ALOGV("hal_1_1::HalPolicy::ConvertSqueeze()");
saoste01b8471482018-10-10 09:44:51 +0100261
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100262 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
saoste01b8471482018-10-10 09:44:51 +0100263 if (!input.IsValid())
264 {
265 return Fail("%s: Operation has invalid inputs", __func__);
266 }
267
268 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
saoste01b8471482018-10-10 09:44:51 +0100269 unsigned int rank = inputInfo.GetNumDimensions();
saoste01fe463152018-10-18 17:49:56 +0100270 if (rank > 4)
saoste01b8471482018-10-10 09:44:51 +0100271 {
saoste01fe463152018-10-18 17:49:56 +0100272 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
saoste01b8471482018-10-10 09:44:51 +0100273 }
274
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100275 const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
276 if (!output)
277 {
278 return Fail("%s: Could not read output 0", __func__);
279 }
280
281 if (IsDynamicTensor(GetTensorInfoForOperand(*output)))
282 {
283 return Fail("%s: Dynamic output tensors are not supported", __func__);
284 }
285
saoste01b8471482018-10-10 09:44:51 +0100286 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
287 // if the operand index is out of bounds.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100288 const Operand* axisOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model, false);
saoste01b8471482018-10-10 09:44:51 +0100289
saoste01fe463152018-10-18 17:49:56 +0100290 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
291
saoste01b8471482018-10-10 09:44:51 +0100292 std::vector<int32_t> axis;
saoste01fe463152018-10-18 17:49:56 +0100293 if (!axisOperand)
saoste01b8471482018-10-10 09:44:51 +0100294 {
295 axis.assign(dimensionSequence,
saoste01fe463152018-10-18 17:49:56 +0100296 dimensionSequence + rank);
saoste01b8471482018-10-10 09:44:51 +0100297 }
298 else
299 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100300 GetTensorInt32Values<hal_1_1::HalPolicy>(*axisOperand, axis, model, data);
saoste01b8471482018-10-10 09:44:51 +0100301 }
302
saoste01b8471482018-10-10 09:44:51 +0100303
saoste01a893efa2018-10-13 11:56:12 +0100304 std::vector<uint32_t> outputDims;
saoste01fe463152018-10-18 17:49:56 +0100305 for (unsigned int i = 0; i < rank; i++)
saoste01a893efa2018-10-13 11:56:12 +0100306 {
307 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
308 auto currentDimension = inputInfo.GetShape()[i];
saoste01b8471482018-10-10 09:44:51 +0100309 if (skipSqueeze || currentDimension != 1)
310 {
311 outputDims.push_back(currentDimension);
312 }
313 }
314
saoste01fe463152018-10-18 17:49:56 +0100315 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
saoste01b8471482018-10-10 09:44:51 +0100316
317 armnn::TensorInfo outputInfo = inputInfo;
318 outputInfo.SetShape(outShape);
319
320 armnn::ReshapeDescriptor reshapeDesc;
321 reshapeDesc.m_TargetShape = outputInfo.GetShape();
322
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100323 bool isSupported = false;
324 FORWARD_LAYER_SUPPORT_FUNC(__func__,
325 IsReshapeSupported,
326 data.m_Backends,
327 isSupported,
328 inputInfo,
329 reshapeDesc);
330 if (!isSupported)
saoste01b8471482018-10-10 09:44:51 +0100331 {
332 return false;
333 }
334
335 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
336 assert(layer != nullptr);
337 input.Connect(layer->GetInputSlot(0));
saoste01fe463152018-10-18 17:49:56 +0100338
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100339 return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
saoste01fe463152018-10-18 17:49:56 +0100340}
341
Sadik Armagan758eee82018-11-15 15:34:49 +0000342bool HalPolicy::ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
343{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100344 ALOGV("hal_1_1::HalPolicy::ConvertStridedSlice()");
345
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100346 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
Sadik Armagan758eee82018-11-15 15:34:49 +0000347 if (!input.IsValid())
348 {
349 return Fail("%s: Operation has invalid inputs", __func__);
350 }
Sadik Armagan758eee82018-11-15 15:34:49 +0000351
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100352 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
Sadik Armagan758eee82018-11-15 15:34:49 +0000353 unsigned int rank = inputInfo.GetNumDimensions();
354 if (rank > 4)
355 {
356 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
357 }
358
Aron Virginas-Tarb7421e52019-07-26 13:14:39 +0100359 const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
360 if (!output)
361 {
362 return Fail("%s: Could not read output 0", __func__);
363 }
364
365 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
366 if (IsDynamicTensor(outputInfo))
367 {
368 return Fail("%s: Dynamic output tensors are not supported", __func__);
369 }
370
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100371 const Operand* beginOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model);
372 const Operand* endOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 2, model);
373 const Operand* stridesOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 3, model);
Sadik Armagan758eee82018-11-15 15:34:49 +0000374
375 std::vector<int32_t> beginValues;
376 std::vector<int32_t> endValues;
377 std::vector<int32_t> stridesValues;
378
379 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
380 auto ValidateInputOperands = [&] (const Operand& operand, std::vector<int32_t>& operandValues)
381 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100382 if (!GetTensorInt32Values<hal_1_1::HalPolicy>(operand, operandValues, model, data))
Sadik Armagan758eee82018-11-15 15:34:49 +0000383 {
384 return false;
385 }
386
387 if (operandValues.size() != rank)
388 {
389 return false;
390 }
391
392 return true;
393 };
394
395 if (!ValidateInputOperands(*beginOperand, beginValues)
396 || !ValidateInputOperands(*endOperand, endValues)
397 || !ValidateInputOperands(*stridesOperand, stridesValues))
398 {
399 return Fail("%s: Operation has invalid input operand", __func__);
400 }
401
402 // Stride cannot have value '0'
403 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
404 {
405 return Fail("%s: Stride must be non-zero value.", __func__);
406 }
407
408 armnn::StridedSliceDescriptor descriptor;
409 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
410 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
411 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
412 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
413
414 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100415 if (!GetInputInt32<hal_1_1::HalPolicy>(operation, 4, descriptor.m_BeginMask, model, data) ||
416 !GetInputInt32<hal_1_1::HalPolicy>(operation, 5, descriptor.m_EndMask, model, data) ||
417 !GetInputInt32<hal_1_1::HalPolicy>(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
Sadik Armagan758eee82018-11-15 15:34:49 +0000418 {
419 return Fail("%s: Operation has invalid inputs", __func__);
420 }
421
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100422 bool isSupported = false;
423 FORWARD_LAYER_SUPPORT_FUNC(__func__,
424 IsStridedSliceSupported,
425 data.m_Backends,
426 isSupported,
427 inputInfo,
428 outputInfo,
429 descriptor);
430 if (!isSupported)
Sadik Armagan758eee82018-11-15 15:34:49 +0000431 {
432 return false;
433 }
434
435 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
436 assert(layer != nullptr);
437 input.Connect(layer->GetInputSlot(0));
438
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100439 return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
Sadik Armagan758eee82018-11-15 15:34:49 +0000440}
441
saoste01fe463152018-10-18 17:49:56 +0100442bool HalPolicy::ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
443{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100444 ALOGV("hal_1_1::HalPolicy::ConvertTranspose()");
saoste01fe463152018-10-18 17:49:56 +0100445
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100446 LayerInputHandle input = ConvertToLayerInputHandle<hal_1_1::HalPolicy>(operation, 0, model, data);
saoste01fe463152018-10-18 17:49:56 +0100447 if (!input.IsValid())
448 {
449 return Fail("%s: Operation has invalid inputs", __func__);
450 }
451
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100452 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
saoste01fe463152018-10-18 17:49:56 +0100453 unsigned int rank = inputInfo.GetNumDimensions();
454 if (rank > 4)
455 {
456 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
457 }
458
459 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
460 // if the operand index is out of bounds.
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100461 const Operand* permOperand = GetInputOperand<hal_1_1::HalPolicy>(operation, 1, model, false);
saoste01fe463152018-10-18 17:49:56 +0100462
463 std::vector<int32_t> perm(rank);
464 if (!permOperand)
465 {
466 // NOTE: If perm is not given, it is set to (n-1...0), where n is the rank of the tensor
467 for (unsigned int i = rank; i > 0; i--)
468 {
469 perm[rank - i] = boost::numeric_cast<int> (i - 1);
470 }
471 }
472 else
473 {
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100474 GetTensorInt32Values<hal_1_1::HalPolicy>(*permOperand, perm, model, data);
saoste01fe463152018-10-18 17:49:56 +0100475 }
476
477 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
478
479 auto permutationVector = armnn::PermutationVector(outputDims.data(), outputDims.size());
480 if (!permutationVector.IsEqual(NHWCToArmNN)
481 && !permutationVector.IsEqual(ArmNNToNHWC)
482 && !permutationVector.IsEqual({ 3, 2, 0, 1 }))
483 {
484 return Fail("%s: Only [0, 3, 1, 2], [0, 2, 3, 1] and [3, 2, 0, 1] permutations are supported.", __func__);
485 }
486
487 armnn::PermuteDescriptor permuteDesc;
488 permuteDesc.m_DimMappings = permutationVector;
489
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100490 const Operand* output = GetOutputOperand<hal_1_1::HalPolicy>(operation, 0, model);
saoste01fe463152018-10-18 17:49:56 +0100491 if (!output)
492 {
493 return Fail("%s: Could not read output 0", __func__);
494 }
495
496 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
497
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100498 bool isSupported = false;
499 FORWARD_LAYER_SUPPORT_FUNC(__func__,
500 IsPermuteSupported,
501 data.m_Backends,
502 isSupported,
503 inputInfo,
504 outputInfo,
505 permuteDesc);
506 if (!isSupported)
saoste01fe463152018-10-18 17:49:56 +0100507 {
508 return false;
509 }
510
511 armnn::IConnectableLayer* const layer = data.m_Network->AddPermuteLayer(permuteDesc);
512 assert(layer != nullptr);
513 input.Connect(layer->GetInputSlot(0));
saoste01b8471482018-10-10 09:44:51 +0100514
Aron Virginas-Tarcd700e42019-06-14 14:54:52 +0100515 return SetupAndTrackLayerOutputSlot<hal_1_1::HalPolicy>(operation, 0, *layer, model, data);
saoste01b8471482018-10-10 09:44:51 +0100516}
517
Éanna Ó Catháin2cd99b92018-11-14 14:33:52 +0000518bool HalPolicy::ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data)
519{
Aron Virginas-Tar29404fb2019-07-24 13:55:31 +0100520 ALOGV("hal_1_1::HalPolicy::ConvertBatchToSpaceNd()");
Finn Williams23b87b32019-07-30 11:44:05 +0100521 return ::ConvertBatchToSpaceNd<hal_1_1::HalPolicy>(operation, model, data);
Éanna Ó Catháin2cd99b92018-11-14 14:33:52 +0000522}
523
arovir01b0717b52018-09-05 17:03:25 +0100524} // namespace hal_1_1
Ferran Balaguerd30093c2019-07-09 17:04:47 +0100525} // namespace armnn_driver