blob: be052a6faa0a442cbc085ca5e30b8bbc80dbfd69 [file] [log] [blame]
Sadik Armagan8f397a12022-06-17 15:38:22 +01001//
2// Copyright © 2022 Arm Ltd and Contributors. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#include "Converter.hpp"
7#include <half/half.hpp>
8#include <armnnUtils/TensorUtils.hpp>
9
10namespace armnn_driver
11{
12
13using namespace android::nn;
14using Half = half_float::half;
15
16namespace
17{
18
} // anonymous namespace
20
21bool Converter::ConvertOperation(const Operation& operation, const Model& model, ConversionData& data)
22{
23 switch (operation.type)
24 {
25 case OperationType::ABS:
26 return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Abs);
27 case OperationType::ADD:
28 return ConvertAdd(operation, model, data);
29 case OperationType::ARGMAX:
30 return ConvertArgMinMax(operation, model, data, ArgMinMaxFunction::Max);
31 case OperationType::ARGMIN:
32 return ConvertArgMinMax(operation, model, data, ArgMinMaxFunction::Min);
33 case OperationType::AVERAGE_POOL_2D:
34 return ConvertAveragePool2d(operation, model, data);
Kevin May9636a9b2022-09-21 15:41:41 +010035 case OperationType::BATCH_MATMUL:
36 return ConvertBatchMatMul(operation, model, data);
Sadik Armagan8f397a12022-06-17 15:38:22 +010037 case OperationType::BATCH_TO_SPACE_ND:
38 return ConvertBatchToSpaceNd(operation, model, data);
39 case OperationType::CAST:
40 return ConvertCast(operation, model, data);
41 case OperationType::CONCATENATION:
42 return ConvertConcatenation(operation, model, data);
43 case OperationType::CONV_2D:
44 return ConvertConv2d(operation, model, data);
45 case OperationType::DEPTH_TO_SPACE:
46 return ConvertDepthToSpace(operation, model, data);
47 case OperationType::DEPTHWISE_CONV_2D:
48 return ConvertDepthwiseConv2d(operation, model, data);
49 case OperationType::DEQUANTIZE:
50 return ConvertDequantize(operation, model, data);
51 case OperationType::DIV:
52 return ConvertDiv(operation, model, data);
53 case OperationType::ELU:
54 return ConvertElu(operation, model, data);
55 case OperationType::EQUAL:
56 return ConvertComparison(operation, model, data, ComparisonOperation::Equal);
57 case OperationType::EXP:
58 return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Exp);
59 case OperationType::EXPAND_DIMS:
60 return ConvertExpandDims(operation, model, data);
61 case OperationType::FILL:
62 return ConvertFill(operation, model, data);
63 case OperationType::FLOOR:
64 return ConvertFloor(operation, model, data);
65 case OperationType::FULLY_CONNECTED:
66 return ConvertFullyConnected(operation, model, data);
67 case OperationType::GATHER:
68 return ConvertGather(operation, model, data);
69 case OperationType::GREATER:
70 return ConvertComparison(operation, model, data, ComparisonOperation::Greater);
71 case OperationType::GREATER_EQUAL:
72 return ConvertComparison(operation, model, data, ComparisonOperation::GreaterOrEqual);
73 case OperationType::GROUPED_CONV_2D:
74 return ConvertGroupedConv2d(operation, model, data);
75 case OperationType::HARD_SWISH:
76 return ConvertHardSwish(operation, model, data);
77 case OperationType::INSTANCE_NORMALIZATION:
78 return ConvertInstanceNormalization(operation, model, data);
79 case OperationType::L2_NORMALIZATION:
80 return ConvertL2Normalization(operation, model, data);
81 case OperationType::L2_POOL_2D:
82 return ConvertL2Pool2d(operation, model, data);
83 case OperationType::LESS:
84 return ConvertComparison(operation, model, data, ComparisonOperation::Less);
85 case OperationType::LESS_EQUAL:
86 return ConvertComparison(operation, model, data, ComparisonOperation::LessOrEqual);
87 case OperationType::LOCAL_RESPONSE_NORMALIZATION:
88 return ConvertLocalResponseNormalization(operation, model, data);
89 case OperationType::LOG:
90 return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Log);
91 case OperationType::LOGICAL_AND:
92 return ConvertLogicalBinary(operation, model, data, LogicalBinaryOperation::LogicalAnd);
93 case OperationType::LOGICAL_NOT:
94 return ConvertElementwiseUnary(operation, model, data, UnaryOperation::LogicalNot);
95 case OperationType::LOGICAL_OR:
96 return ConvertLogicalBinary(operation, model, data, LogicalBinaryOperation::LogicalOr);
97 case OperationType::LOGISTIC:
98 return ConvertLogistic(operation, model, data);
99 case OperationType::LOG_SOFTMAX:
100 return ConvertLogSoftmax(operation, model, data);
101 case OperationType::LSTM:
102 return ConvertLstm(operation, model, data);
103 case OperationType::MAX_POOL_2D:
104 return ConvertMaxPool2d(operation, model, data);
105 case OperationType::MAXIMUM:
106 return ConvertMaximum(operation, model, data);
107 case OperationType::MEAN:
108 return ConvertMean(operation, model, data);
109 case OperationType::MINIMUM:
110 return ConvertMinimum(operation, model, data);
111 case OperationType::MUL:
112 return ConvertMul(operation, model, data);
113 case OperationType::NEG:
114 return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Neg);
115 case OperationType::NOT_EQUAL:
116 return ConvertComparison(operation, model, data, ComparisonOperation::NotEqual);
117 case OperationType::PAD:
118 return ConvertPad(operation, model, data);
119 case OperationType::PAD_V2:
120 return ConvertPadV2(operation, model, data);
121 case OperationType::PRELU:
122 return ConvertPrelu(operation, model, data);
123 case OperationType::QUANTIZE:
124 return ConvertQuantize(operation, model, data);
125 case OperationType::QUANTIZED_LSTM:
126 return ConvertQuantizedLstm(operation, model, data);
127 case OperationType::QUANTIZED_16BIT_LSTM:
128 return ConvertQuantized16BitLstm(operation, model, data);
129 case OperationType::RANK:
130 return ConvertRank(operation, model, data);
131 case OperationType::REDUCE_MAX:
132 return ConvertReduce(operation, model, data, armnn::ReduceOperation::Max);
133 case OperationType::REDUCE_MIN:
134 return ConvertReduce(operation, model, data, armnn::ReduceOperation::Min);
135 case OperationType::REDUCE_SUM:
136 return ConvertReduce(operation, model, data, armnn::ReduceOperation::Sum);
137 case OperationType::RELU:
138 return ConvertReLu(operation, model, data);
139 case OperationType::RELU1:
140 return ConvertReLu1(operation, model, data);
141 case OperationType::RELU6:
142 return ConvertReLu6(operation, model, data);
143 case OperationType::RESHAPE:
144 return ConvertReshape(operation, model, data);
145 case OperationType::RESIZE_BILINEAR:
146 return ConvertResize(operation, model, data, ResizeMethod::Bilinear);
147 case OperationType::RESIZE_NEAREST_NEIGHBOR:
148 return ConvertResize(operation, model, data, ResizeMethod::NearestNeighbor);
149 case OperationType::RSQRT:
150 return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Rsqrt);
151 case OperationType::SIN:
152 return ConvertElementwiseUnary(operation, model, data, UnaryOperation::Sin);
153 case OperationType::SOFTMAX:
154 return ConvertSoftmax(operation, model, data);
155 case OperationType::SPACE_TO_BATCH_ND :
156 return ConvertSpaceToBatchNd(operation, model, data);
157 case OperationType::SPACE_TO_DEPTH:
158 return ConvertSpaceToDepth(operation, model, data);
159 case OperationType::SQRT:
160 return ConvertSqrt(operation, model, data);
161 case OperationType::SQUEEZE:
162 return ConvertSqueeze(operation, model, data);
163 case OperationType::STRIDED_SLICE:
164 return ConvertStridedSlice(operation, model, data);
165 case OperationType::SUB:
166 return ConvertSub(operation, model, data);
167 case OperationType::TRANSPOSE:
168 return ConvertTranspose(operation, model, data);
169 case OperationType::TRANSPOSE_CONV_2D:
170 return ConvertTransposeConv2d(operation, model, data);
171 case OperationType::TANH:
172 return ConvertTanH(operation, model, data);
173 default:
174 VLOG(DRIVER) << "Operation type: " << operation.type << "is not supported in ArmnnDriver";
175 return false;
176 }
177}
178
179bool Converter::ConvertAdd(const Operation& operation, const Model& model, ConversionData& data)
180{
181 VLOG(DRIVER) << "Converter::ConvertAdd()";
182 LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
183 LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
184
185 if (!input0.IsValid() || !input1.IsValid())
186 {
187 return Fail("%s: Operation has invalid inputs", __func__);
188 }
189
190 // The FuseActivation parameter is always the input index 2, and it should be optional
191 ActivationFn activationFunction;
192 if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
193 {
194 return Fail("%s: Operation has invalid inputs", __func__);
195 }
196
197 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
198 if (!outputOperand)
199 {
200 return false;
201 }
202
203 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
204 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
205
206 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
207
208 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +0100209 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +0100210 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
211 {
212 FORWARD_LAYER_SUPPORT_FUNC(__func__,
213 IsAdditionSupported,
214 data.m_Backends,
215 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +0100216 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +0100217 inputInfo0,
218 inputInfo1,
219 outputInfo);
220 };
221
222 if(!IsDynamicTensor(outputInfo))
223 {
224 validateFunc(outputInfo, isSupported);
225 }
226 else
227 {
228 isSupported = AreDynamicTensorsSupported();
229 }
230
231 if (!isSupported)
232 {
233 return false;
234 }
235
236 armnn::IConnectableLayer* const startLayer = data.m_Network->AddAdditionLayer();
Cathal Corbett53837672022-09-01 11:34:37 +0100237 startLayer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +0100238
239 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
240 if (!isReshapeSupported)
241 {
242 return false;
243 }
244
245 return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
246 data, nullptr, validateFunc, activationFunction);
247}
248
249bool Converter::ConvertArgMinMax(const Operation& operation,
250 const Model& model,
251 ConversionData& data,
252 armnn::ArgMinMaxFunction argMinMaxFunction)
253{
254 VLOG(DRIVER) << "Converter::ConvertArgMinMax()";
255 VLOG(DRIVER) << "argMinMaxFunction = " << GetArgMinMaxFunctionAsCString(argMinMaxFunction);
256
257 LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
258
259 if (!input0.IsValid())
260 {
261 return Fail("%s: Operation has invalid inputs", __func__);
262 }
263
264 int32_t axis;
265 if (!GetInputScalar(operation, 1, OperandType::INT32, axis, model, data))
266 {
267 return Fail("%s: Operation has invalid inputs. Failed to read axis.", __func__);
268 }
269
270 const armnn::TensorInfo& inputInfo = input0.GetTensorInfo();
271 int rank = static_cast<int>(inputInfo.GetNumDimensions());
272
273 if (((axis < -rank) && (axis < 0)) || ((axis >= rank) && (axis > 0)))
274 {
275 // Square bracket denotes inclusive n while parenthesis denotes exclusive n
276 // E.g. Rank 4 tensor can have axis in range [-4, 3)
277 // -1 == 3, -2 == 2, -3 == 1, -4 == 0
278 return Fail("%s: Axis must be in range [-n, n)", __func__);
279 }
280
281 const Operand* output = GetOutputOperand(operation, 0, model);
282 if (!output)
283 {
284 return Fail("%s: Could not read output 0", __func__);
285 }
286
287 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
288
289 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
290
291 armnn::ArgMinMaxDescriptor descriptor;
292 descriptor.m_Function = argMinMaxFunction;
293 descriptor.m_Axis = axis;
294
295 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +0100296 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +0100297 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
298 {
299 FORWARD_LAYER_SUPPORT_FUNC(__func__,
300 IsArgMinMaxSupported,
301 data.m_Backends,
302 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +0100303 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +0100304 inputInfo0,
305 outputInfo,
306 descriptor);
307 };
308
309 if(IsDynamicTensor(outputInfo))
310 {
311 isSupported = AreDynamicTensorsSupported();
312 }
313 else
314 {
315 validateFunc(outputInfo, isSupported);
316 }
317
318 if (!isSupported)
319 {
320 return false;
321 }
322
323 armnn::IConnectableLayer* layer = data.m_Network->AddArgMinMaxLayer(descriptor);
Cathal Corbett53837672022-09-01 11:34:37 +0100324 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +0100325 assert(layer != nullptr);
326
327 input0.Connect(layer->GetInputSlot(0));
328
329 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
330}
331
/// Converts an NNAPI AVERAGE_POOL_2D operation by delegating to the shared
/// 2D-pooling conversion path with the Average pooling algorithm.
/// __func__ is forwarded so failure messages name this entry point.
bool Converter::ConvertAveragePool2d(const Operation& operation, const Model& model, ConversionData& data)
{
    VLOG(DRIVER) << "Converter::ConvertAveragePool2d()";
    return ConvertPooling2d(operation, __func__, PoolingAlgorithm::Average, model, data);
}
337
Kevin May9636a9b2022-09-21 15:41:41 +0100338bool Converter::ConvertBatchMatMul(const Operation& operation, const Model& model, ConversionData& data)
339{
340 VLOG(DRIVER) << "Converter::ConvertBatchMatMul()";
341 LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
342 LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
343
344 if (!input0.IsValid() || !input1.IsValid())
345 {
346 return Fail("%s: Operation has invalid inputs", __func__);
347 }
348
349 const armnn::TensorInfo& inputInfo0 = input0.GetTensorInfo();
350 const armnn::TensorInfo& inputInfo1 = input1.GetTensorInfo();
351
352 unsigned int rankInput0 = inputInfo0.GetNumDimensions();
353 if (rankInput0 > 4 || rankInput0 < 2)
354 {
355 Fail("%s: Only inputs with rank at least 2 and up to 4 are supported", __func__);
356 }
357
358 unsigned int rankInput1 = inputInfo1.GetNumDimensions();
359 if (rankInput1 > 4 || rankInput1 < 2)
360 {
361 Fail("%s: Only inputs with rank at least 2 and up to 4 are supported", __func__);
362 }
363
364 // Determine data type of input tensor 0
365 OperandType input0Type;
366 if (!GetOperandType(operation, 0, model, input0Type))
367 {
368 return Fail("%s: Operation has invalid inputs", __func__);
369 }
370
371 // Determine data type of input tensor 0
372 OperandType input1Type;
373 if (!GetOperandType(operation, 0, model, input1Type))
374 {
375 return Fail("%s: Operation has invalid inputs", __func__);
376 }
377
378 if (input0Type != input1Type)
379 {
380 return Fail("%s: Operation has invalid inputs (Inputs must have same OperandCode)", __func__);
381 }
382
383 const Operand* output = GetOutputOperand(operation, 0, model);
384 if (!output)
385 {
386 return Fail("%s: Could not read output 0", __func__);
387 }
388
389 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
390
391 armnn::BatchMatMulDescriptor batchMatMulDesc;
392
393 // Inputs 2 and 3 are adjoint in Android NeuralNetworks, but they perform transpose.
394 // This is why we are linking them with transpose parameters in the descriptor
395 batchMatMulDesc.m_TransposeX = GetOptionalBool(operation, 2, model, data);
396 batchMatMulDesc.m_TransposeY = GetOptionalBool(operation, 3, model, data);
397
398 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +0100399 armnn::BackendId setBackend;
Kevin May9636a9b2022-09-21 15:41:41 +0100400 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
401 {
402 FORWARD_LAYER_SUPPORT_FUNC(__func__,
403 IsBatchMatMulSupported,
404 data.m_Backends,
405 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +0100406 setBackend,
Kevin May9636a9b2022-09-21 15:41:41 +0100407 inputInfo0,
408 inputInfo1,
409 outputInfo,
410 batchMatMulDesc);
411 };
412
413 if(!IsDynamicTensor(outputInfo))
414 {
415 validateFunc(outputInfo, isSupported);
416 }
417 else
418 {
419 isSupported = AreDynamicTensorsSupported();
420 }
421
422
423 if (!isSupported)
424 {
425 return false;
426 }
427
428 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchMatMulLayer(batchMatMulDesc);
Cathal Corbett53837672022-09-01 11:34:37 +0100429 layer->SetBackendId(setBackend);
Kevin May9636a9b2022-09-21 15:41:41 +0100430 assert(layer != nullptr);
431 input0.Connect(layer->GetInputSlot(0));
432 input1.Connect(layer->GetInputSlot(1));
433
434 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
435}
436
Sadik Armagan8f397a12022-06-17 15:38:22 +0100437bool Converter::ConvertBatchToSpaceNd(const Operation& operation, const Model& model, ConversionData& data)
438{
439 VLOG(DRIVER) << "Converter::ConvertBatchToSpaceNd()";
440 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
441 if (!input.IsValid())
442 {
443 return Fail("%s: Operation has invalid inputs", __func__);
444 }
445
446 const Operand* output = GetOutputOperand(operation, 0, model);
447 if (!output)
448 {
449 return Fail("%s: Could not read output 0", __func__);
450 }
451
452 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
453
454 const Operand* blockOperand = GetInputOperand(operation, 1, model);
455 if (!blockOperand)
456 {
457 return Fail("%s: Could not read input 1", __func__);
458 }
459
460 // Convert the block operand to int32
461 std::vector<int32_t> block;
462 if (!GetTensorInt32Values(*blockOperand, block, model, data))
463 {
464 return Fail("%s: Input 1 has invalid values", __func__);
465 }
466
467 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
468
469 unsigned int rank = inputInfo.GetNumDimensions();
470 if (rank != 4)
471 {
472 Fail("%s: Only inputs with rank equal to 4 are supported", __func__);
473 }
474
475 if (std::any_of(block.cbegin(), block.cend(), [](int32_t i){ return i < 1; }))
476 {
477 return Fail("%s: Block sizes for each spatial dimension of the input tensor must be"
478 " greater than or equal to 1", __func__);
479 }
480
481 armnn::BatchToSpaceNdDescriptor batchToSpaceNdDesc;
482 batchToSpaceNdDesc.m_BlockShape.assign(block.cbegin(), block.cend());
483 batchToSpaceNdDesc.m_DataLayout = armnn::DataLayout::NHWC;
484
485 if (Is12OrLaterOperand(*output))
486 {
487 batchToSpaceNdDesc.m_DataLayout = OptionalDataLayout(operation, 2, model, data);
488 }
489 // Setting crops to 0,0 0,0 as it is not supported in Android NN API
490 batchToSpaceNdDesc.m_Crops = {{0, 0}, {0, 0}};
491
492 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +0100493 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +0100494 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
495 {
496 FORWARD_LAYER_SUPPORT_FUNC(__func__,
497 IsBatchToSpaceNdSupported,
498 data.m_Backends,
499 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +0100500 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +0100501 inputInfo,
502 outputInfo,
503 batchToSpaceNdDesc);
504 };
505
506 if(!IsDynamicTensor(outputInfo))
507 {
508 validateFunc(outputInfo, isSupported);
509 }
510 else
511 {
512 isSupported = AreDynamicTensorsSupported();
513 }
514
515
516 if (!isSupported)
517 {
518 return false;
519 }
520
521 armnn::IConnectableLayer* const layer = data.m_Network->AddBatchToSpaceNdLayer(batchToSpaceNdDesc);
Cathal Corbett53837672022-09-01 11:34:37 +0100522 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +0100523 assert(layer != nullptr);
524 input.Connect(layer->GetInputSlot(0));
525
526 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
527}
528
529bool Converter::ConvertCast(const Operation& operation, const Model& model, ConversionData& data)
530{
531 VLOG(DRIVER) << "Converter::ConvertCast()";
532
533 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
534
535 if (!input.IsValid())
536 {
537 return Fail("%s: Operation has invalid inputs", __func__);
538 }
539
540 const Operand* output = GetOutputOperand(operation, 0, model);
541 if (!output)
542 {
543 return Fail("%s: Could not read output 0", __func__);
544 }
545
546 const TensorInfo& inputInfo = input.GetTensorInfo();
547 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
548
549 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +0100550 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +0100551 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
552 {
553 FORWARD_LAYER_SUPPORT_FUNC(__func__,
554 IsCastSupported,
555 data.m_Backends,
556 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +0100557 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +0100558 inputInfo,
559 outputInfo);
560 };
561
562 if(!IsDynamicTensor(outputInfo))
563 {
564 validateFunc(outputInfo, isSupported);
565 }
566 else
567 {
568 isSupported = AreDynamicTensorsSupported();
569 }
570
571 if (!isSupported)
572 {
573 return false;
574 }
575
576 IConnectableLayer* layer = data.m_Network->AddCastLayer();
Cathal Corbett53837672022-09-01 11:34:37 +0100577 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +0100578 assert(layer != nullptr);
579 input.Connect(layer->GetInputSlot(0));
580
581 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
582}
583
584bool Converter::ConvertComparison(const Operation& operation,
585 const Model& model,
586 ConversionData& data,
587 ComparisonOperation comparisonOperation)
588{
589 VLOG(DRIVER) << "Converter::ConvertComparison()";
590 VLOG(DRIVER) << "comparisonOperation = " << GetComparisonOperationAsCString(comparisonOperation);
591
592 LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
593 LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
594
595 if (!(input0.IsValid() && input1.IsValid()))
596 {
597 return Fail("%s: Operation has invalid inputs", __func__);
598 }
599
600 const Operand* output = GetOutputOperand(operation, 0, model);
601 if (!output)
602 {
603 return Fail("%s: Could not read output 0", __func__);
604 }
605
606 const TensorInfo& inputInfo0 = input0.GetTensorInfo();
607 const TensorInfo& inputInfo1 = input1.GetTensorInfo();
608 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
609
610 ComparisonDescriptor descriptor(comparisonOperation);
611
612 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +0100613 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +0100614 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
615 {
616 FORWARD_LAYER_SUPPORT_FUNC(__func__,
617 IsComparisonSupported,
618 data.m_Backends,
619 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +0100620 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +0100621 inputInfo0,
622 inputInfo1,
623 outputInfo,
624 descriptor);
625 };
626
627 if(!IsDynamicTensor(outputInfo))
628 {
629 validateFunc(outputInfo, isSupported);
630 }
631 else
632 {
633 isSupported = AreDynamicTensorsSupported();
634 }
635
636 if (!isSupported)
637 {
638 return false;
639 }
640
641 IConnectableLayer* layer = data.m_Network->AddComparisonLayer(descriptor);
Cathal Corbett53837672022-09-01 11:34:37 +0100642 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +0100643 assert(layer != nullptr);
644
645 bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
646 if (!isReshapeSupported)
647 {
648 return false;
649 }
650
651 if(IsDynamicTensor(outputInfo))
652 {
653 input0.Connect(layer->GetInputSlot(0));
654 input1.Connect(layer->GetInputSlot(1));
655 }
656
657 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
658}
659
660
661bool Converter::ConvertConcatenation(const Operation& operation, const Model& model, ConversionData& data)
662{
663 VLOG(DRIVER) << "Converter::ConvertConcatenation()";
664
665 // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
666 if (operation.inputs.size() <= 1)
667 {
668 return Fail("%s: Operation has insufficient arguments", __func__);
669 }
670
671 // Get inputs and outputs
672 const std::size_t numInputTensors = operation.inputs.size() - 1;
673
674 int32_t concatDim;
675 if (!GetInputScalar(operation, numInputTensors, OperandType::INT32, concatDim, model, data))
676 {
677 return Fail("%s: Operation has invalid inputs", __func__);
678 }
679
680 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
681 if (!outputOperand)
682 {
683 return Fail("%s: Operation has no outputs", __func__);
684 }
685
686 armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
687 armnn::TensorShape outputShape = outputInfo.GetShape();
688 const bool isDynamicTensor = IsDynamicTensor(outputInfo);
689 //
690 // handle negative concat dims along the lines of tensorflow as described here:
691 // https://www.tensorflow.org/api_docs/python/tf/concat
692 // "negative axis refers to axis + rank(values)-th dimension"
693 //
694 if (concatDim < 0)
695 {
696 concatDim += outputShape.GetNumDimensions();
697 }
698
699 if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
700 {
701 return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
702 }
703
704 std::vector<LayerInputHandle> inputHandles;
705 std::vector<armnn::TensorShape> inputShapes;
706
707 inputHandles.reserve(numInputTensors);
708 inputShapes.reserve(numInputTensors);
709
710 bool inputsHaveBeenReshaped = false;
711 unsigned int tensorDimensionsAdded = 0;
712 for (uint32_t i = 0; i < numInputTensors; ++i)
713 {
714 const Operand* operand = GetInputOperand(operation, i, model);
715 if (!operand)
716 {
717 return Fail("%s: Operation has invalid inputs", __func__);
718 }
719
720 LayerInputHandle operandInputHandle = ConvertToLayerInputHandle(operation, i, model, data);
721 if (!operandInputHandle.IsValid())
722 {
723 return Fail("%s: Operation has invalid inputs", __func__);
724 }
725
726 armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
727 if (operandShape.GetNumDimensions() == 0)
728 {
729 return Fail("%s: Operands with rank 0 are not supported", __func__);
730 }
731
732 if (RequiresReshape(operandShape))
733 {
734 inputsHaveBeenReshaped = true;
735
736 armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();
737
738 // Expand the tensor to three dimensions
739 if (operandShape.GetNumDimensions() == 2)
740 {
741 reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
742 tensorDimensionsAdded = 1;
743 }
744 else
745 {
746 reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
747 tensorDimensionsAdded = 2;
748 }
749
750 armnn::ReshapeDescriptor reshapeDescriptor;
751 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
752
753 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +0100754 armnn::BackendId setBackendReshape;
Sadik Armagan8f397a12022-06-17 15:38:22 +0100755 FORWARD_LAYER_SUPPORT_FUNC(__func__,
756 IsReshapeSupported,
757 data.m_Backends,
758 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +0100759 setBackendReshape,
Sadik Armagan8f397a12022-06-17 15:38:22 +0100760 operandInputHandle.GetTensorInfo(),
761 reshapeInfo,
762 reshapeDescriptor);
763
764 if (!isSupported)
765 {
766 return false;
767 }
768 armnn::IConnectableLayer& newReshape = AddReshapeLayer(*data.m_Network, operandInputHandle, reshapeInfo);
Cathal Corbett53837672022-09-01 11:34:37 +0100769 newReshape.SetBackendId(setBackendReshape);
Sadik Armagan8f397a12022-06-17 15:38:22 +0100770
771 // Point to the reshape operation rather then the input operation
772 operandShape = reshapeInfo.GetShape();
773 operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
774 }
775
776 inputShapes.emplace_back(operandShape);
777 inputHandles.emplace_back(operandInputHandle);
778
779 if (!inputHandles.back().IsValid())
780 {
781 return Fail("%s: Operation has invalid inputs", __func__);
782 }
783 }
784
785 ARMNN_ASSERT(inputShapes.size() == inputHandles.size());
786
787 if (inputsHaveBeenReshaped)
788 {
789 // Adjust the concatenation dimension by the amount of dimensions added (if any)
790 concatDim += tensorDimensionsAdded;
791
792 // Add extra dimensions to the output shape to reflect the addition of the reshape layers
793 if (tensorDimensionsAdded == 1)
794 {
795 if (IsDynamicTensor(outputInfo))
796 {
797 outputShape = armnn::TensorShape({1, 0, 0}, {true, false, false});
798 }
799 else
800 {
801 outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
802 }
803 }
804 else if (tensorDimensionsAdded == 2)
805 {
806 if (IsDynamicTensor(outputInfo))
807 {
808 outputShape = armnn::TensorShape({1, 1, 0}, {true, true, false});
809 }
810 else
811 {
812 outputShape = armnn::TensorShape({1, 1, outputShape[0]});
813 }
814 }
815 }
816
817 // Check if permutations is required and get the pair of permutations required for the concatenation.
818 // Permutation is required when the concat dimension is 2 for a 4D tensor or 1 for a 3D tensor.
819 std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
820 std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
821 bool needPermute = CreateConcatPermutationParameters(inputShapes[0].GetNumDimensions(),
822 concatDim,
823 permutationPair);
824
825 // Only relevant to static tensors as dynamic output tensors will be transposed as a result of inferring from input
826 if (!isDynamicTensor)
827 {
828 if (needPermute)
829 {
830 outputShape = armnnUtils::TransposeTensorShape(outputShape, permutationPair.first);
831 }
832
833 outputInfo.SetShape(outputShape);
834 }
835 // this is no-op for identity swizzles, otherwise it replaces both
836 // the handles and shapes with the swizzled layer output handles and shapes
837 if (!TransposeInputTensors(data, inputHandles, inputShapes, permutationPair.first))
838 {
839 return false;
840 }
841
842 // Create an armnn concat layer descriptor - this will also perform validation on the input shapes
843 armnn::OriginsDescriptor concatDescriptor;
844
845 try
846 {
847 // The concat descriptor is always created across the only supported concat dimension
848 // which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
849 concatDescriptor = armnn::CreateDescriptorForConcatenation(inputShapes.begin(),
850 inputShapes.end(),
851 concatDim);
852 } catch (std::exception& error)
853 {
854 return Fail("%s: Error preparing concat descriptor. %s", __func__, error.what());
855 }
856
857 // Validate the output shape is correct given the input shapes based on the
858 // only valid concat dimension which is 0, 1 or 3 for a 4-D tensor, or 0 or 2 for a 3-D tensor.
859 if (!isDynamicTensor)
860 {
861 if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
862 {
863 return Fail("%s: Error validating the output shape for concat", __func__);
864 }
865 }
866
867 std::vector<const armnn::TensorInfo*> inputTensorInfos;
868 std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
869 [](const LayerInputHandle& h)->const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
870
871 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +0100872 armnn::BackendId setBackendConcat;
Sadik Armagan8f397a12022-06-17 15:38:22 +0100873 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported){
Cathal Corbett53837672022-09-01 11:34:37 +0100874 FORWARD_LAYER_SUPPORT_FUNC(__func__,
875 IsConcatSupported,
876 data.m_Backends,
877 isSupported,
878 setBackendConcat,
879 inputTensorInfos,
880 outputInfo,
881 concatDescriptor);
Sadik Armagan8f397a12022-06-17 15:38:22 +0100882 };
883
884 if (!isDynamicTensor)
885 {
886 validateFunc(outputInfo, isSupported);
887 }
888 else
889 {
890 isSupported = AreDynamicTensorsSupported();
891 }
892
893 if (!isSupported)
894 {
895 return false;
896 }
897
898 armnn::IConnectableLayer* layer = data.m_Network->AddConcatLayer(concatDescriptor);
Cathal Corbett53837672022-09-01 11:34:37 +0100899 layer->SetBackendId(setBackendConcat);
Sadik Armagan8f397a12022-06-17 15:38:22 +0100900 assert(layer != nullptr);
901 layer->GetOutputSlot(0).SetTensorInfo(outputInfo);
902 // Connect inputs to the layer
903 const int numInputSlots = layer->GetNumInputSlots();
904 assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
905 for (int i = 0; i < numInputSlots; ++i)
906 {
907 // connect the input directly to the merge (concat) layer
908 inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
909 }
910
911 // Transpose the output shape
912 auto transposeOutputShape = [&](){
913 armnn::TransposeDescriptor transposeDesc;
914 transposeDesc.m_DimMappings = permutationPair.second;
915 armnn::TensorInfo inputTransposeInfo = layer->GetOutputSlot(0).GetTensorInfo();
916 armnn::TensorInfo outputTransposeInfo = armnnUtils::TransposeTensorShape(inputTransposeInfo,
917 permutationPair.second);
918 isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +0100919 armnn::BackendId setBackendTranspose;
Sadik Armagan8f397a12022-06-17 15:38:22 +0100920 FORWARD_LAYER_SUPPORT_FUNC(__func__,
921 IsTransposeSupported,
922 data.m_Backends,
923 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +0100924 setBackendTranspose,
Sadik Armagan8f397a12022-06-17 15:38:22 +0100925 inputTransposeInfo,
926 outputTransposeInfo,
927 transposeDesc);
928 if (!isSupported)
929 {
930 return false;
931 }
932 // Add permutation layer and connect the output to it, the permutation becomes the output layer
933 armnn::IConnectableLayer& deswizzleLayer = AddTransposeLayer(*data.m_Network, layer->GetOutputSlot(0),
934 permutationPair.second);
Cathal Corbett53837672022-09-01 11:34:37 +0100935 deswizzleLayer.SetBackendId(setBackendTranspose);
Sadik Armagan8f397a12022-06-17 15:38:22 +0100936 layer = &deswizzleLayer;
937
938 return true;
939 };
940
941 if (needPermute && !isDynamicTensor)
942 {
943 transposeOutputShape();
944 }
945
946 if (inputsHaveBeenReshaped)
947 {
948 if (isDynamicTensor)
949 {
950 // Infer the output shapes of concat if outputs are type 1 dynamic
951 ARMNN_ASSERT(layer->GetOutputSlot(0).IsTensorInfoSet());
952 if (!ValidateConcatOutputShape(inputShapes,
953 layer->GetOutputSlot(0).GetTensorInfo().GetShape(),
954 concatDim))
955 {
956 return Fail("%s: Error validating the output shape for concat", __func__);
957 }
958 transposeOutputShape();
959 }
960
961 armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();
962 // Undo the reshape knowing the amount of dimensions added
963 if (tensorDimensionsAdded == 1)
964 {
965 afterConcatInfo.SetShape(
966 armnn::TensorShape({afterConcatInfo.GetShape()[1], afterConcatInfo.GetShape()[2]}));
967 }
968 else if (tensorDimensionsAdded == 2)
969 {
970 afterConcatInfo.SetShape(armnn::TensorShape({afterConcatInfo.GetShape()[2]}));
971 }
972
973 armnn::ReshapeDescriptor reshapeDescriptor;
974 reshapeDescriptor.m_TargetShape = afterConcatInfo.GetShape();
975 armnn::TensorInfo concatInfo = layer->GetOutputSlot(0).GetTensorInfo();
976
977 isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +0100978 armnn::BackendId setBackendReshape2;
Sadik Armagan8f397a12022-06-17 15:38:22 +0100979 auto validateReshapeFunc = [&](const armnn::TensorInfo& afterConcatInfo, bool& isSupported){
980 FORWARD_LAYER_SUPPORT_FUNC(__func__,
981 IsReshapeSupported,
982 data.m_Backends,
983 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +0100984 setBackendReshape2,
Sadik Armagan8f397a12022-06-17 15:38:22 +0100985 concatInfo,
986 afterConcatInfo,
987 reshapeDescriptor);
988 };
989
990 if (!IsDynamicTensor(afterConcatInfo))
991 {
992 validateReshapeFunc(afterConcatInfo, isSupported);
993 }
994 else
995 {
996 isSupported = AreDynamicTensorsSupported();
997 }
998
999 if (!isSupported)
1000 {
1001 return false;
1002 }
1003 layer = &AddReshapeLayer(*data.m_Network, layer->GetOutputSlot(0), afterConcatInfo);
Cathal Corbett53837672022-09-01 11:34:37 +01001004 layer->SetBackendId(setBackendReshape2);
Sadik Armagan8f397a12022-06-17 15:38:22 +01001005 return SetupAndTrackLayerOutputSlot(operation,
1006 0,
1007 *layer,
1008 model,
1009 data,
1010 nullptr,
1011 validateReshapeFunc);
1012 }
1013
1014 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
1015}
1016
1017bool Converter::ConvertConv2d(const Operation& operation, const Model& model, ConversionData& data)
1018{
1019 VLOG(DRIVER) << "Converter::ConvertConv2d()";
1020
1021 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1022 if (!input.IsValid())
1023 {
1024 return Fail("%s: Operation has invalid inputs", __func__);
1025 }
1026
1027 const Operand* output = GetOutputOperand(operation, 0, model);
1028 if (!output)
1029 {
1030 return Fail("%s: Could not read output 0", __func__);
1031 }
1032
1033 const TensorInfo& inputInfo = input.GetTensorInfo();
1034 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1035
1036 Convolution2dDescriptor desc;
1037 desc.m_DataLayout = DataLayout::NHWC;
1038
1039 // Determine whether padding is implicit or explicit
1040 bool implicitPadding = operation.inputs.size() == 7
1041 || (operation.inputs.size() >= 8
1042 && GetInputOperand(operation, 7, model)->type == OperandType::BOOL);
1043
1044 if (implicitPadding)
1045 {
1046 desc.m_DataLayout = OptionalDataLayout(operation, 7, model, data);
1047 }
1048 else if (operation.inputs.size() >= 10)
1049 {
1050 desc.m_DataLayout = OptionalDataLayout(operation, 10, model, data);
1051 }
1052
1053 const PermutationVector OHWIToOIHW = {0, 2, 3, 1};
1054
1055 // ArmNN does not currently support non-fixed weights or bias
1056 // The NNAPI filter is always OHWI [depth_out, filter_height, filter_width, depth_in] but ArmNN expects the
1057 // filter's height and width indices to match the input's height and width indices so we permute it to OIHW if
1058 // the DataLayout is NCHW
1059
1060 if (!IsWeightsValid(operation, 1, model) && desc.m_DataLayout == DataLayout::NCHW)
1061 {
1062 return Fail("%s: Operation has unsupported weights OperandLifeTime", __func__);
1063 }
1064
1065 LayerInputHandle weightsInput = (desc.m_DataLayout == DataLayout::NCHW)
Sadik Armagan1e276f32022-07-19 12:37:20 +01001066 ? ConvertToLayerInputHandle(operation, 1, model, data, OHWIToOIHW, &input)
1067 : ConvertToLayerInputHandle(operation, 1, model, data, g_DontPermute, &input);
Sadik Armagan8f397a12022-06-17 15:38:22 +01001068
1069 if (!weightsInput.IsValid())
1070 {
1071 return Fail("%s: Operation has invalid inputs", __func__);
1072 }
1073
Sadik Armagan1e276f32022-07-19 12:37:20 +01001074 LayerInputHandle biasInput = ConvertToLayerInputHandle(operation, 2, model, data, g_DontPermute, &input); // 1D
Sadik Armagan8f397a12022-06-17 15:38:22 +01001075 if (!biasInput.IsValid())
1076 {
1077 return Fail("%s: Operation has invalid inputs", __func__);
1078 }
1079
1080 biasInput.SanitizeQuantizationScale(weightsInput, input);
1081 armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
1082 armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
1083
1084 ActivationFn activation;
1085 if (implicitPadding)
1086 {
1087 ::android::nn::PaddingScheme paddingScheme;
1088 if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data)
1089 || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data)
1090 || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data)
1091 || !GetInputActivationFunction(operation, 6, activation, model, data)
1092 || !GetOptionalConvolutionDilationParams(operation, 8, desc, model, data))
1093 {
1094 return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
1095 }
1096
1097 armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
1098 unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
1099 unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
1100 const uint32_t kernelX = weightsInfo.GetShape()[widthIndex];
1101 const uint32_t kernelY = weightsInfo.GetShape()[heightIndex];
1102 const uint32_t inputX = inputInfo.GetShape()[widthIndex];
1103 const uint32_t inputY = inputInfo.GetShape()[heightIndex];
1104
1105 CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
1106 CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
1107
1108 }
1109 else if (operation.inputs.size() >= 10)
1110 {
1111 // explicit padding
1112 if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data)
1113 || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data)
1114 || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data)
1115 || !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data)
1116 || !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data)
1117 || !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data)
1118 || !GetInputActivationFunction(operation, 9, activation, model, data)
1119 || !GetOptionalConvolutionDilationParams(operation, 11, desc, model, data))
1120 {
1121 return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
1122 }
1123 }
1124 else
1125 {
1126 return Fail("%s: Unsupported number of operation inputs", __func__);
1127 }
1128
1129 desc.m_BiasEnabled = true;
1130 Optional<TensorInfo> biases(biasInfo);
1131
Sadik Armaganb0161572022-08-03 11:27:05 +01001132 bool requiresValidation = true;
1133 const Operand* weightsOperand = GetInputOperand(operation, 1, model);
1134 const Operand* biasOperand = GetInputOperand(operation, 2, model);
1135 if (IsConnectedToDequantize(weightsInput.GetOutputSlot())
1136 || IsConnectedToDequantize(biasInput.GetOutputSlot()))
Sadik Armagan8f397a12022-06-17 15:38:22 +01001137 {
Sadik Armaganb0161572022-08-03 11:27:05 +01001138 // Do not require validation for now. There will be an optimization step
1139 // [ConvertConstDequantisationLayersToConstLayers] will convert layers to Constant layers
1140 // then at the end of the optimization there will be layer supported validation.
1141 requiresValidation = false;
1142 VLOG(DRIVER) << "Converter::ConvertConv2d(): Weights and Biases are as INPUTS.";
1143 }
1144
Cathal Corbett53837672022-09-01 11:34:37 +01001145 armnn::BackendId setBackend;
Sadik Armaganb0161572022-08-03 11:27:05 +01001146 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) {
Sadik Armagan8f397a12022-06-17 15:38:22 +01001147 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1148 IsConvolution2dSupported,
1149 data.m_Backends,
1150 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01001151 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01001152 inputInfo,
1153 outputInfo,
1154 desc,
1155 weightsInfo,
1156 biases);
1157 };
1158
Sadik Armaganb0161572022-08-03 11:27:05 +01001159 if (requiresValidation)
Sadik Armagan8f397a12022-06-17 15:38:22 +01001160 {
Sadik Armaganb0161572022-08-03 11:27:05 +01001161 VLOG(DRIVER) << "Converter::ConvertConv2d(): Requires Validation!";
1162 bool isSupported = false;
1163 if (!IsDynamicTensor(outputInfo))
1164 {
1165 validateFunc(outputInfo, isSupported);
1166 }
1167 else
1168 {
1169 isSupported = AreDynamicTensorsSupported();
1170 }
Sadik Armagan8f397a12022-06-17 15:38:22 +01001171
Sadik Armaganb0161572022-08-03 11:27:05 +01001172 if (!isSupported)
1173 {
1174 return false;
1175 }
Sadik Armagan8f397a12022-06-17 15:38:22 +01001176 }
1177
1178 armnn::IConnectableLayer* startLayer = data.m_Network->AddConvolution2dLayer(desc);
Cathal Corbett53837672022-09-01 11:34:37 +01001179 startLayer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01001180
1181 if (!startLayer)
1182 {
1183 return Fail("%s: AddConvolution2dLayer failed", __func__);
1184 }
1185
1186 input.Connect(startLayer->GetInputSlot(0));
1187 weightsInput.Connect(startLayer->GetInputSlot(1));
1188 biasInput.Connect(startLayer->GetInputSlot(2));
1189
1190 return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model, data, nullptr, validateFunc, activation);
1191}
1192
1193bool Converter::ConvertDepthToSpace(const Operation& operation, const Model& model, ConversionData& data)
1194{
1195 VLOG(DRIVER) << "Converter::ConvertDepthToSpace()";
1196
1197 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1198 if (!input.IsValid() )
1199 {
1200 return Fail("%s: Operation has invalid inputs", __func__);
1201 }
1202
1203 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1204 unsigned int rank = inputInfo.GetNumDimensions();
1205 if (rank != 4)
1206 {
1207 return Fail("%s: Only inputs with rank 4 are supported", __func__);
1208 }
1209
1210 const Operand* output = GetOutputOperand(operation, 0, model);
1211 if (!output)
1212 {
1213 return Fail("%s: Could not read output 0", __func__);
1214 }
1215
1216 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1217
1218 armnn::DepthToSpaceDescriptor descriptor;
1219
1220 GetInputScalar(operation, 1, OperandType::INT32, descriptor.m_BlockSize, model, data);
1221 if (descriptor.m_BlockSize <= 1)
1222 {
1223 return Fail("%s: Block size must be at least 1 in all dimensions");
1224 }
1225
1226 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
1227 if (Is12OrLaterOperand(*output))
1228 {
1229 descriptor.m_DataLayout = OptionalDataLayout(operation, 2, model, data);
1230 }
1231
1232 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01001233 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01001234 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1235 {
1236 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1237 IsDepthToSpaceSupported,
1238 data.m_Backends,
1239 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01001240 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01001241 inputInfo,
1242 outputInfo,
1243 descriptor);
1244 };
1245
1246 if(!IsDynamicTensor(outputInfo))
1247 {
1248 validateFunc(outputInfo, isSupported);
1249 }
1250 else
1251 {
1252 isSupported = AreDynamicTensorsSupported();
1253 }
1254
1255 if (!isSupported)
1256 {
1257 return false;
1258 }
1259
1260 armnn::IConnectableLayer* const layer = data.m_Network->AddDepthToSpaceLayer(descriptor);
Cathal Corbett53837672022-09-01 11:34:37 +01001261 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01001262 assert(layer != nullptr);
1263 input.Connect(layer->GetInputSlot(0));
1264
1265 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
1266}
1267
1268bool Converter::ConvertDepthwiseConv2d(const Operation& operation, const Model& model, ConversionData& data)
1269{
1270 VLOG(DRIVER) << "Converter::ConvertDepthwiseConv2d()";
1271
1272 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1273
1274 if (!input.IsValid())
1275 {
1276 return Fail("%s: Operation has invalid inputs", __func__);
1277 }
1278
1279 const Operand* output = GetOutputOperand(operation, 0, model);
1280
1281 if (!output)
1282 {
1283 return Fail("%s: Could not read output 0", __func__);
1284 }
1285
1286 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1287 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1288
1289 // ArmNN does not currently support non-fixed weights or bias
1290 // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
1291 const Operand* weightsOperand = GetInputOperand(operation, 1, model);
1292
1293 if (!weightsOperand)
1294 {
1295 return Fail("%s: Could not read weights", __func__);
1296 }
1297 // Basic sanity check on the weights shape.
1298 // ANEURALNETWORKS_DEPTHWISE_CONV_2D specifies a 4-D tensor, of shape
1299 // [1, filter_height, filter_width, depth_out]
1300 if (weightsOperand->dimensions[0] != 1)
1301 {
1302 return Fail("%s: Filter operand dimension 0 is invalid, should be 1", __func__);
1303 }
1304
1305 armnn::DepthwiseConvolution2dDescriptor desc;
1306 desc.m_DataLayout = armnn::DataLayout::NHWC;
1307
1308 // Determine whether padding is implicit or explicit
1309 bool implicitPadding = operation.inputs.size() == 8
1310 || (operation.inputs.size() >= 9
1311 && GetInputOperand(operation, 8, model)->type == OperandType::BOOL);
1312
1313 // Look ahead to find the optional DataLayout, if present
1314 const uint32_t dataLayoutFlagIndex = implicitPadding ? 8 : 11;
1315 desc.m_DataLayout = OptionalDataLayout(operation, dataLayoutFlagIndex, model, data);
1316
1317 armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
1318 unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
1319 unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
1320
Sadik Armagan1e276f32022-07-19 12:37:20 +01001321 LayerInputHandle weightsInput = ConvertToLayerInputHandle(operation, 1, model, data, g_DontPermute, &input);
Sadik Armagan8f397a12022-06-17 15:38:22 +01001322 if (!weightsInput.IsValid())
1323 {
1324 return Fail("%s: Operation has invalid inputs", __func__);
1325 }
1326
1327 const Operand* biasOperand = GetInputOperand(operation, 2, model);
1328 if (!biasOperand)
1329 {
1330 return Fail("%s: Could not read bias", __func__);
1331 }
1332
Sadik Armagan1e276f32022-07-19 12:37:20 +01001333 LayerInputHandle biasInput = ConvertToLayerInputHandle(operation, 2, model, data, g_DontPermute, &input); // 1D
Sadik Armagan8f397a12022-06-17 15:38:22 +01001334 if (!biasInput.IsValid())
1335 {
1336 return Fail("%s: Operation has invalid inputs", __func__);
1337 }
1338
1339 biasInput.SanitizeQuantizationScale(weightsInput, input);
1340 armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
1341 armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
1342
1343 ActivationFn activation;
1344 if (implicitPadding)
1345 {
1346 ::android::nn::PaddingScheme paddingScheme;
1347 if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data)
1348 || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data)
1349 || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data)
1350 || !GetInputActivationFunction(operation, 7, activation, model, data)
1351 || !GetOptionalConvolutionDilationParams(operation, 9, desc, model, data))
1352 {
1353 return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
1354 }
1355
1356 const uint32_t kernelX = weightsInfo.GetShape()[2];
1357 const uint32_t kernelY = weightsInfo.GetShape()[1];
1358 const uint32_t inputX = inputInfo.GetShape()[widthIndex];
1359 const uint32_t inputY = inputInfo.GetShape()[heightIndex];
1360
1361 CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_DilationX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
1362 CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_DilationY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
1363 }
1364 else if (operation.inputs.size() >= 11)
1365 {
1366 // explicit padding
1367 if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data)
1368 || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data)
1369 || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data)
1370 || !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data)
1371 || !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data)
1372 || !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data)
1373 || !GetInputActivationFunction(operation, 10, activation, model, data)
1374 || !GetOptionalConvolutionDilationParams(operation, 12, desc, model, data))
1375 {
1376 return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
1377 }
1378 }
1379 else
1380 {
1381 return Fail("%s: Unsupported number of operation inputs", __func__);
1382 }
1383
1384 desc.m_BiasEnabled = true;
1385 Optional<TensorInfo> biases(biasInfo);
1386
Sadik Armaganb0161572022-08-03 11:27:05 +01001387 bool requiresValidation = true;
1388 if (IsConnectedToDequantize(weightsInput.GetOutputSlot()) || IsConnectedToDequantize(biasInput.GetOutputSlot()))
Sadik Armagan8f397a12022-06-17 15:38:22 +01001389 {
Sadik Armaganb0161572022-08-03 11:27:05 +01001390 // Do not require validation for now. There will be an optimization step
1391 // [ConvertConstDequantisationLayersToConstLayers] will convert layers to Constant layers
1392 // then at the end of the optimization there will be layer supported validation.
1393 requiresValidation = false;
1394 VLOG(DRIVER) << "Converter::ConvertDepthwiseConv2d(): Weights and Biases are as INPUTS.";
1395 }
1396
Cathal Corbett53837672022-09-01 11:34:37 +01001397 armnn::BackendId setBackend;
Sadik Armaganb0161572022-08-03 11:27:05 +01001398 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported) {
Sadik Armagan8f397a12022-06-17 15:38:22 +01001399 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1400 IsDepthwiseConvolutionSupported,
1401 data.m_Backends,
1402 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01001403 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01001404 inputInfo,
1405 outputInfo,
1406 desc,
1407 weightsInfo,
1408 biases);
1409 };
1410
Sadik Armaganb0161572022-08-03 11:27:05 +01001411 if (requiresValidation)
Sadik Armagan8f397a12022-06-17 15:38:22 +01001412 {
Sadik Armaganb0161572022-08-03 11:27:05 +01001413 VLOG(DRIVER) << "Converter::ConvertDepthwiseConv2d(): Requires Validation!";
1414 bool isSupported = false;
1415 if (!IsDynamicTensor(outputInfo))
1416 {
1417 validateFunc(outputInfo, isSupported);
1418 }
1419 else
1420 {
1421 isSupported = AreDynamicTensorsSupported();
1422 }
Sadik Armagan8f397a12022-06-17 15:38:22 +01001423
Sadik Armaganb0161572022-08-03 11:27:05 +01001424 if (!isSupported)
1425 {
1426 return false;
1427 }
Sadik Armagan8f397a12022-06-17 15:38:22 +01001428 }
1429
1430 armnn::IConnectableLayer* startLayer = data.m_Network->AddDepthwiseConvolution2dLayer(desc);
Cathal Corbett53837672022-09-01 11:34:37 +01001431 startLayer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01001432
1433 if (!startLayer)
1434 {
1435 return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
1436 }
1437
1438 input.Connect(startLayer->GetInputSlot(0));
1439
1440 // Connect weights and bias inputs
1441 weightsInput.Connect(startLayer->GetInputSlot(1));
1442 biasInput.Connect(startLayer->GetInputSlot(2));
1443
1444 return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model, data, nullptr, validateFunc, activation);
1445}
1446
1447bool Converter::ConvertDequantize(const Operation& operation, const Model& model, ConversionData& data)
1448{
1449 VLOG(DRIVER) << "Converter::ConvertDequantize()";
1450
1451 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1452 if (!input.IsValid())
1453 {
1454 return Fail("%s: Operation has invalid input", __func__);
1455 }
1456
1457 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1458 const armnn::Optional<unsigned int>& quantizationDim = inputInfo.GetQuantizationDim();
1459 if (quantizationDim.has_value() && quantizationDim.value() != 0)
1460 {
1461 return Fail("%s: Operation has quantization dimension different than 0", __func__);
1462 }
1463
1464 const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
1465 if (!outputOperand)
1466 {
1467 return Fail("%s: Operation has invalid outputs", __func__);
1468 }
1469
1470 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
1471
1472 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01001473 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01001474 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1475 {
1476 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1477 IsDequantizeSupported,
1478 data.m_Backends,
1479 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01001480 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01001481 inputInfo,
1482 outputInfo);
1483 };
1484
1485 if(IsDynamicTensor(outputInfo))
1486 {
1487 isSupported = AreDynamicTensorsSupported();
1488 }
1489 else
1490 {
1491 validateFunc(outputInfo, isSupported);
1492 }
1493
1494 if (!isSupported)
1495 {
1496 return false;
1497 }
1498
1499 armnn::IConnectableLayer* const layer = data.m_Network->AddDequantizeLayer();
Cathal Corbett53837672022-09-01 11:34:37 +01001500 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01001501 assert(layer != nullptr);
1502 input.Connect(layer->GetInputSlot(0));
1503
1504 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
1505}
1506
1507bool Converter::ConvertDiv(const Operation& operation, const Model& model, ConversionData& data)
1508{
1509 VLOG(DRIVER) << "Converter::ConvertDiv()";
1510
1511 LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
1512 LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
1513
1514 if (!input0.IsValid() || !input1.IsValid())
1515 {
1516 return Fail("%s: Operation has invalid inputs", __func__);
1517 }
1518
1519 // The FuseActivation parameter is always the input index 2
1520 // and it should be optional
1521 ActivationFn activationFunction;
1522 if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
1523 {
1524 return Fail("%s: Operation has invalid inputs", __func__);
1525 }
1526
1527 const Operand* output = GetOutputOperand(operation, 0, model);
1528 if (!output)
1529 {
1530 return Fail("%s: Could not read output 0", __func__);
1531 }
1532
1533 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1534
1535 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01001536 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01001537 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1538 {
1539 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1540 IsDivisionSupported,
1541 data.m_Backends,
1542 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01001543 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01001544 input0.GetTensorInfo(),
1545 input1.GetTensorInfo(),
1546 outputInfo);
1547 };
1548
1549 if(!IsDynamicTensor(outputInfo))
1550 {
1551 validateFunc(outputInfo, isSupported);
1552 }
1553 else
1554 {
1555 isSupported = AreDynamicTensorsSupported();
1556 }
1557
1558 if (!isSupported)
1559 {
1560 return false;
1561 }
1562
1563 armnn::IConnectableLayer* const startLayer = data.m_Network->AddDivisionLayer();
Cathal Corbett53837672022-09-01 11:34:37 +01001564 startLayer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01001565
1566 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
1567 if (!isReshapeSupported)
1568 {
1569 return false;
1570 }
1571
1572 return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
1573 data, nullptr, validateFunc, activationFunction);
1574}
1575
1576bool Converter::ConvertElementwiseUnary(const Operation& operation,
1577 const Model& model,
1578 ConversionData& data,
1579 UnaryOperation unaryOperation)
1580{
1581 VLOG(DRIVER) << "Converter::ConvertElementwiseUnary()";
1582 VLOG(DRIVER) << "unaryOperation = " << GetUnaryOperationAsCString(unaryOperation);
1583
1584 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1585
1586 if (!input.IsValid())
1587 {
1588 return Fail("%s: Operation has invalid input", __func__);
1589 }
1590
1591 const Operand* output = GetOutputOperand(operation, 0, model);
1592 if (!output)
1593 {
1594 return Fail("%s: Could not read output 0", __func__);
1595 }
1596
1597 const TensorInfo& inputInfo = input.GetTensorInfo();
1598 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1599
1600 ElementwiseUnaryDescriptor descriptor(unaryOperation);
1601
1602 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01001603 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01001604 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1605 {
1606 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1607 IsElementwiseUnarySupported,
1608 data.m_Backends,
1609 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01001610 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01001611 inputInfo,
1612 outputInfo,
1613 descriptor);
1614 };
1615
1616 if(!IsDynamicTensor(outputInfo))
1617 {
1618 validateFunc(outputInfo, isSupported);
1619 }
1620 else
1621 {
1622 isSupported = AreDynamicTensorsSupported();
1623 }
1624
1625 if (!isSupported)
1626 {
1627 return false;
1628 }
1629
1630 IConnectableLayer* layer = data.m_Network->AddElementwiseUnaryLayer(descriptor);
Cathal Corbett53837672022-09-01 11:34:37 +01001631 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01001632 assert(layer != nullptr);
1633 input.Connect(layer->GetInputSlot(0));
1634
1635 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
1636}
1637
1638bool Converter::ConvertElu(const Operation& operation, const Model& model, ConversionData& data)
1639{
1640 VLOG(DRIVER) << "Converter::ConvertElu()";
1641
1642 LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
1643 if (!input0.IsValid())
1644 {
1645 return Fail("%s: Operation has invalid inputs", __func__);
1646 }
1647
1648 // Determine data type of input tensor
1649 OperandType inputType;
1650 if (!GetOperandType(operation, 0, model, inputType))
1651 {
1652 return Fail("%s: Operation has invalid inputs", __func__);
1653 }
1654
1655 ActivationDescriptor desc;
1656 desc.m_Function = ActivationFunction::Elu;
1657
1658 // Read alpha
1659 if (inputType == OperandType::TENSOR_FLOAT16)
1660 {
1661 Half alpha;
1662
1663 if (!GetInputScalar(operation, 1, OperandType::FLOAT16, alpha, model, data))
1664 {
1665 return Fail("%s: Operation has invalid inputs (FLOAT16)", __func__);
1666 }
1667
1668 desc.m_A = static_cast<float>(alpha);
1669 }
1670 else if (inputType == OperandType::TENSOR_FLOAT32)
1671 {
1672 if (!GetInputScalar(operation, 1, OperandType::FLOAT32, desc.m_A, model, data))
1673 {
1674 return Fail("%s: Operation has invalid inputs (FLOAT32)", __func__);
1675 }
1676 }
1677 else
1678 {
1679 return Fail("%s: Unsupported input tensor type: %d", __func__, inputType);
1680 }
1681
1682 return ::ConvertToActivation(operation, __func__, desc, model, data);
1683}
1684
1685bool Converter::ConvertExpandDims(const Operation& operation, const Model& model, ConversionData& data)
1686{
1687 VLOG(DRIVER) << "Converter::ConvertExpandDims()";
1688
1689 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1690
1691 if (!input.IsValid())
1692 {
1693 return Fail("%s: Operation has invalid input", __func__);
1694 }
1695
1696 const Operand* output = GetOutputOperand(operation, 0, model);
1697 if (!output)
1698 {
1699 return Fail("%s: Operation has invalid output", __func__);
1700 }
1701
1702 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1703
1704 int32_t axis;
1705 if (!GetInputScalar(operation, 1, OperandType::INT32, axis, model, data))
1706 {
1707 return Fail("%s: failed to get axis input value", __func__);
1708 }
1709
1710 TensorShape targetShape;
1711
1712 try
1713 {
1714 targetShape = armnnUtils::ExpandDims(input.GetTensorInfo().GetShape(), axis);
1715 }
1716 catch (const std::exception& e)
1717 {
1718 return Fail("%s: %s", __func__, e.what());
1719 }
1720
1721 ReshapeDescriptor reshapeDescriptor;
1722 reshapeDescriptor.m_TargetShape = targetShape;
1723
1724 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01001725 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01001726 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1727 {
1728 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1729 IsReshapeSupported,
1730 data.m_Backends,
1731 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01001732 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01001733 input.GetTensorInfo(),
1734 outputInfo,
1735 reshapeDescriptor);
1736 };
1737
1738 if(!IsDynamicTensor(outputInfo))
1739 {
1740 if (targetShape != outputInfo.GetShape())
1741 {
1742 return Fail("%s: Shape of the output operand does not match the resolved expanded shape", __func__);
1743 }
1744 validateFunc(outputInfo, isSupported);
1745 }
1746 else
1747 {
1748 isSupported = AreDynamicTensorsSupported();
1749 }
1750
1751 if (!isSupported)
1752 {
1753 return false;
1754 }
1755
1756 IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
Cathal Corbett53837672022-09-01 11:34:37 +01001757 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01001758 assert(layer != nullptr);
1759 input.Connect(layer->GetInputSlot(0));
1760
1761 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
1762}
1763
1764bool Converter::ConvertFill(const Operation& operation, const Model& model, ConversionData& data)
1765{
1766 VLOG(DRIVER) << "Converter::ConvertFill()";
1767 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1768 if (!input.IsValid())
1769 {
1770 return Fail("%s: Operation has invalid inputs", __func__);
1771 }
1772
1773 const Operand* output = GetOutputOperand(operation, 0, model);
1774 if (!output)
1775 {
1776 return Fail("%s: Could not read output", __func__);
1777 }
1778
1779 const TensorInfo& inputInfo = input.GetTensorInfo();
1780 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1781 if (IsDynamicTensor(outputInfo))
1782 {
1783 return Fail("%s: Dynamic output tensors are not supported", __func__);
1784 }
1785
1786 // Determine data type of output tensor
1787 OperandType outputType = output->type;
1788 FillDescriptor descriptor;
1789 // Read the scalar fill value
1790 if (outputType == OperandType::TENSOR_FLOAT16)
1791 {
1792 Half value;
1793
1794 if (!GetInputScalar(operation, 1, OperandType::FLOAT16, value, model, data))
1795 {
1796 return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
1797 }
1798
1799 descriptor.m_Value = static_cast<float>(value);
1800 }
1801 else if (outputType == OperandType::TENSOR_FLOAT32)
1802 {
1803 if (!GetInputScalar(operation, 1, OperandType::FLOAT32, descriptor.m_Value, model, data))
1804 {
1805 return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
1806 }
1807 }
1808 else if (outputType == OperandType::TENSOR_INT32)
1809 {
1810 int32_t value;
1811
1812 if (!GetInputScalar(operation, 1, OperandType::INT32, value, model, data))
1813 {
1814 return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
1815 }
1816
1817 descriptor.m_Value = static_cast<float>(value);
1818 }
1819 else
1820 {
1821 return Fail("%s: Unsupported input tensor type: %d", __func__, outputType);
1822 }
1823
1824 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01001825 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01001826 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1827 IsFillSupported,
1828 data.m_Backends,
1829 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01001830 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01001831 inputInfo,
1832 outputInfo,
1833 descriptor);
1834 if (!isSupported)
1835 {
1836 return false;
1837 }
1838
1839 IConnectableLayer* const layer = data.m_Network->AddFillLayer(descriptor);
Cathal Corbett53837672022-09-01 11:34:37 +01001840 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01001841 assert(layer != nullptr);
1842 input.Connect(layer->GetInputSlot(0));
1843
1844 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
1845}
1846
1847bool Converter::ConvertFloor(const Operation& operation, const Model& model, ConversionData& data)
1848{
1849 VLOG(DRIVER) << "Converter::ConvertFloor()";
1850 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
1851 if (!input.IsValid())
1852 {
1853 return Fail("%s: Operation has invalid inputs", __func__);
1854 }
1855
1856 const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
1857 if (!outputOperand)
1858 {
1859 return Fail("%s: Operation has invalid outputs", __func__);
1860 }
1861
1862 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
1863
1864 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01001865 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01001866 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
1867 {
1868 FORWARD_LAYER_SUPPORT_FUNC(__func__,
1869 IsFloorSupported,
1870 data.m_Backends,
1871 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01001872 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01001873 input.GetTensorInfo(),
1874 outputInfo);
1875 };
1876
1877 if(!IsDynamicTensor(outputInfo))
1878 {
1879 validateFunc(outputInfo, isSupported);
1880 }
1881 else
1882 {
1883 isSupported = AreDynamicTensorsSupported();
1884 }
1885
1886 if (!isSupported)
1887 {
1888 return false;
1889 }
1890
1891 armnn::IConnectableLayer* layer = data.m_Network->AddFloorLayer();
Cathal Corbett53837672022-09-01 11:34:37 +01001892 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01001893 assert(layer != nullptr);
1894 input.Connect(layer->GetInputSlot(0));
1895
1896 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
1897}
1898
// Converts an NNAPI FULLY_CONNECTED operation into an Arm NN FullyConnected layer.
// Inputs with rank > 2 are flattened to 2D via an inserted Reshape layer; weights
// (input 1) and bias (input 2) are connected as input slots 1 and 2 of the layer,
// whether they come from constant operands or runtime tensors.
bool Converter::ConvertFullyConnected(const Operation& operation, const Model& model, ConversionData& data)
{
    VLOG(DRIVER) << "Converter::ConvertFullyConnected()";
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    LayerInputHandle weightsInput = LayerInputHandle();
    const Operand* weightsOperand = GetInputOperand(operation, 1, model);
    if (!weightsOperand)
    {
        return Fail("%s: Could not read weights", __func__);
    }

    // If weights are constant a separate constant layer will be created to store data.
    // Otherwise handle non const weights as inputs.
    weightsInput = ConvertToLayerInputHandle(operation, 1, model, data);
    if (!weightsInput.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    LayerInputHandle biasInput = LayerInputHandle();
    const Operand* biasOperand = GetInputOperand(operation, 2, model);
    if (!biasOperand)
    {
        return Fail("%s: Could not read bias", __func__);
    }

    // If bias are constant a separate constant layer will be created to store data.
    // Otherwise handle non const bias as inputs.
    biasInput = ConvertToLayerInputHandle(operation, 2, model, data); // 1D
    if (!biasInput.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::TensorInfo weightsInfo = weightsInput.GetTensorInfo();
    armnn::TensorInfo reshapedInfo = inputInfo;
    try
    {
        // Compute the 2D shape the input must be flattened to for FullyConnected;
        // throws if the input and weights shapes are incompatible.
        reshapedInfo.SetShape(FlattenFullyConnectedInput(inputInfo.GetShape(), weightsInfo.GetShape()));
    }
    catch (const std::exception& e)
    {
        return Fail("%s: %s", __func__, e.what());
    }

    // Ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    armnn::TensorInfo biasInfo = biasInput.GetTensorInfo();
    SanitizeBiasQuantizationScale(biasInfo, weightsInfo, reshapedInfo);

    // Fused activation (input 3) is applied by SetupAndTrackLayerOutputSlot below.
    ActivationFn activationFunction;
    if (!GetInputActivationFunction(operation, 3, activationFunction, model, data))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;
    desc.m_ConstantWeights = IsOperandConstant(*weightsOperand);

    bool isSupported = false;
    armnn::BackendId setBackend;
    // validateFunc captures reshapedInfo/weightsInfo/biasInfo/desc by reference; it is
    // also invoked later for dynamic tensors once the output shape has been inferred.
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        // Reject early if the expected output shape disagrees with the operand's shape.
        if (!VerifyFullyConnectedShapes(reshapedInfo.GetShape(),
                                        weightsInfo.GetShape(),
                                        outputInfo.GetShape(),
                                        desc.m_TransposeWeightMatrix))
        {
            isSupported = false;
            Fail("%s: Expected outputShape does not match actual outputShape", __func__);
            return;
        }

        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsFullyConnectedSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   reshapedInfo,
                                   outputInfo,
                                   weightsInfo,
                                   biasInfo,
                                   desc);
    };

    if(!IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    // Add FullyConnected layer. Weights and bias will be connected as constant layers or non const inputs.
    armnn::IConnectableLayer* startLayer = data.m_Network->AddFullyConnectedLayer(desc);
    startLayer->SetBackendId(setBackend);

    if (inputInfo.GetNumDimensions() > 2U)
    {
        // Rank > 2: flatten the input to the 2D shape computed above via a Reshape layer
        // inserted between the input and the FullyConnected layer.
        armnn::ReshapeDescriptor reshapeDescriptor;
        reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

        armnn::IConnectableLayer* reshapeLayer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
        assert(reshapeLayer != nullptr);
        input.Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
    }
    else
    {
        input.Connect(startLayer->GetInputSlot(0));
    }

    // Connect weights and bias inputs
    weightsInput.Connect(startLayer->GetInputSlot(1));
    biasInput.Connect(startLayer->GetInputSlot(2));

    return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
                                        data, nullptr, validateFunc, activationFunction);
}
2040
2041bool Converter::ConvertGather(const Operation& operation, const Model& model, ConversionData& data)
2042{
2043 VLOG(DRIVER) << "Converter::ConvertGather()";
2044
2045 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2046 if (!input.IsValid())
2047 {
2048 return Fail("%s: Operation has invalid input", __func__);
2049 }
2050 auto inputDimensions = input.GetTensorInfo().GetNumDimensions();
2051
2052 LayerInputHandle indices = ConvertToLayerInputHandle(operation, 2, model, data);
2053 if (!indices.IsValid())
2054 {
2055 return Fail("%s: Operation has invalid indices", __func__);
2056 }
2057 auto indicesDimensions = indices.GetTensorInfo().GetNumDimensions();
2058
2059 const Operand* output = GetOutputOperand(operation, 0, model);
2060 if (!output)
2061 {
2062 return Fail("%s: Operation has invalid output", __func__);
2063 }
2064 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2065 auto outputDimensions = outputInfo.GetNumDimensions();
2066 if (outputDimensions != inputDimensions + indicesDimensions - 1)
2067 {
2068 return Fail("%s: Operation has invalid output dimensions: %d. Output must be an (%d + %d - 1)-D tensor",
2069 __func__, outputDimensions, inputDimensions, indicesDimensions);
2070 }
2071
2072 int32_t axis;
2073 if (!GetInputScalar(operation, 1, OperandType::INT32, axis, model, data))
2074 {
2075 return Fail("%s: Operation has invalid or unsupported axis operand", __func__);
2076 }
2077 if (((axis < -inputDimensions) && (axis < 0)) || ((axis >= inputDimensions) && (axis > 0)))
2078 {
2079 return Fail("%s: Operation has invalid axis: %d. It is out of bounds [-%d, %d))", __func__, axis,
2080 inputDimensions, inputDimensions);
2081 }
2082
2083 GatherDescriptor desc;
2084 desc.m_Axis = axis;
2085
2086 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01002087 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01002088 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2089 {
2090 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2091 IsGatherSupported,
2092 data.m_Backends,
2093 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01002094 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01002095 input.GetTensorInfo(),
2096 indices.GetTensorInfo(),
2097 outputInfo,
2098 desc);
2099 };
2100
2101 if(!IsDynamicTensor(outputInfo))
2102 {
2103 validateFunc(outputInfo, isSupported);
2104 }
2105 else
2106 {
2107 isSupported = AreDynamicTensorsSupported();
2108 }
2109
2110 if (!isSupported)
2111 {
2112 return false;
2113 }
2114
2115 IConnectableLayer* layer = data.m_Network->AddGatherLayer(desc);
Cathal Corbett53837672022-09-01 11:34:37 +01002116 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01002117 assert(layer != nullptr);
2118 input.Connect(layer->GetInputSlot(0));
2119 indices.Connect(layer->GetInputSlot(1));
2120
2121 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2122}
2123
2124bool Converter::ConvertGroupedConv2d(const Operation& operation, const Model& model, ConversionData& data)
2125{
2126 VLOG(DRIVER) << "Converter::ConvertGroupedConv2d()";
2127 //
2128 // Parse data
2129 //
2130 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2131 if (!input.IsValid())
2132 {
2133 return Fail("%s: Operation has invalid inputs", __func__);
2134 }
2135 const TensorInfo& inputInfo = input.GetTensorInfo();
2136
2137 const Operand* output = GetOutputOperand(operation, 0, model);
2138 if (!output)
2139 {
2140 return Fail("%s: Could not read output 0", __func__);
2141 }
2142 TensorInfo outputInfo = GetTensorInfoForOperand(*output);
2143
2144 // Look ahead to determine data layout
2145 DataLayout dataLayout = DataLayout::NHWC;
2146 if (operation.inputs.size() == 12)
2147 {
2148 dataLayout = OptionalDataLayout(operation, 11, model, data);
2149 }
2150 else
2151 {
2152 dataLayout = OptionalDataLayout(operation, 8, model, data);
2153 }
2154
2155 // NOTE:
2156 // NNAPI weights are always OHWI, i.e. [depth_out, filter_height, filter_width, depth_group],
2157 // but Arm NN expects the filter's height and width indices to match the input's height and
2158 // width indices so when the DataLayout is NCHW, we need to permute the weights to OIHW
2159 const PermutationVector ohwiToOihw = { 0u, 2u, 3u, 1u };
2160 const ConstTensorPin weightsPin = (dataLayout == DataLayout::NCHW) ?
2161 ConvertOperationInputToConstTensorPin(operation, 1,
2162 model, data, ohwiToOihw) :
2163 ConvertOperationInputToConstTensorPin(operation, 1, model, data);
2164 const ConstTensorPin biasesPin =
2165 ConvertOperationInputToConstTensorPin(operation, 2, model, data);
2166 if (!weightsPin.IsValid() || !biasesPin.IsValid())
2167 {
2168 return Fail("%s: Operation has invalid inputs", __func__);
2169 }
2170
2171 ConstTensor weights = weightsPin.GetConstTensor();
2172 ConstTensor biases = biasesPin.GetConstTensor();
2173 SanitizeBiasQuantizationScale(biases.GetInfo(), weights.GetInfo(), inputInfo);
2174
2175 const TensorShape& inputShape = inputInfo.GetShape();
2176 const TensorShape& outputShape = outputInfo.GetShape();
2177 const TensorShape& weightsShape = weights.GetShape();
2178 const TensorShape& biasesShape = biases.GetShape();
2179
2180 armnnUtils::DataLayoutIndexed dataLayoutIndexed(dataLayout);
2181 const unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
2182 const unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
2183 const unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
2184
2185 Convolution2dDescriptor desc;
2186 desc.m_DataLayout = dataLayout;
2187 desc.m_BiasEnabled = true;
2188
2189 int numGroups;
2190 ActivationFn activation;
2191
2192 if (operation.inputs.size() == 12)
2193 {
2194 if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
2195 !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
2196 !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
2197 !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
2198 !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
2199 !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
2200 !GetInputScalar(operation, 9, OperandType::INT32, numGroups, model, data) ||
2201 !GetInputActivationFunction(operation, 10, activation, model, data))
2202 {
2203 return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
2204 }
2205
2206 }
2207 else if (operation.inputs.size() == 9)
2208 {
2209 ::android::nn::PaddingScheme paddingScheme;
2210 if (!GetInputPaddingScheme(operation, 3, paddingScheme, model, data) ||
2211 !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX, model, data) ||
2212 !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY, model, data) ||
2213 !GetInputScalar(operation, 6, OperandType::INT32, numGroups, model, data) ||
2214 !GetInputActivationFunction(operation, 7, activation, model, data))
2215 {
2216 return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
2217 }
2218
2219 const uint32_t inputX = inputInfo.GetShape()[widthIndex];
2220 const uint32_t inputY = inputInfo.GetShape()[heightIndex];
2221
2222 const uint32_t kernelX = weightsShape[widthIndex];
2223 const uint32_t kernelY = weightsShape[heightIndex];
2224
2225 CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
2226 CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
2227 }
2228 else
2229 {
2230 return Fail("%s: Unsupported number of operation inputs", __func__);
2231 }
2232
2233 // Equivalent to outputShape[channelsIndex], but we can't know the outputShape in the case of dynamic tensors
2234 const unsigned int outputChannels = weightsShape[0];
2235
2236 const unsigned int channelsPerGroup = weightsShape[channelsIndex];
2237 const unsigned int channelMultiplier = outputChannels / numGroups;
2238
2239 //
2240 // Validate all relevant inputs
2241 //
2242 if (numGroups <= 0)
2243 {
2244 return Fail("%s: Number of groups must be greater than 0. Got: %d", __func__, numGroups);
2245 }
2246
2247 if (outputChannels % numGroups != 0u)
2248 {
2249 return Fail("%s: Output channels must be divisible by the number of groups", __func__);
2250 }
2251
2252 //
2253 // Set up Splitter layer
2254 //
2255 unsigned int splitterDimSizes[4] = { inputShape[0], inputShape[1], inputShape[2], inputShape[3] };
2256 splitterDimSizes[channelsIndex] /= numGroups; // split in depth
2257
2258 TensorInfo splitterOutputInfo(4,
2259 splitterDimSizes,
2260 inputInfo.GetDataType(),
2261 inputInfo.GetQuantizationScale(),
2262 inputInfo.GetQuantizationOffset());
2263
2264 std::vector<std::reference_wrapper<TensorInfo>> splitterOutputInfos(numGroups, std::ref(splitterOutputInfo));
2265
2266 ViewsDescriptor splitterDesc(numGroups);
2267 for (unsigned int group = 0u; group < numGroups; ++group)
2268 {
2269 splitterDesc.SetViewOriginCoord(group, channelsIndex, splitterDimSizes[channelsIndex] * group);
2270 for (unsigned int dimIdx = 0u; dimIdx < 4u; dimIdx++)
2271 {
2272 splitterDesc.SetViewSize(group, dimIdx, splitterDimSizes[dimIdx]);
2273 }
2274 }
2275
2276 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01002277 armnn::BackendId setBackendSplit;
Sadik Armagan8f397a12022-06-17 15:38:22 +01002278 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2279 IsSplitterSupported,
2280 data.m_Backends,
2281 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01002282 setBackendSplit,
Sadik Armagan8f397a12022-06-17 15:38:22 +01002283 inputInfo,
2284 splitterOutputInfos,
2285 splitterDesc);
2286 if (!isSupported)
2287 {
2288 return false;
2289 }
2290
2291 IConnectableLayer* splitterLayer = data.m_Network->AddSplitterLayer(splitterDesc);
Cathal Corbett53837672022-09-01 11:34:37 +01002292 splitterLayer->SetBackendId(setBackendSplit);
Sadik Armagan8f397a12022-06-17 15:38:22 +01002293 if (!splitterLayer)
2294 {
2295 return Fail("%s: Failed to add SplitterLayer", __func__);
2296 }
2297
2298 input.Connect(splitterLayer->GetInputSlot(0));
2299 for (unsigned int group = 0u; group < splitterLayer->GetNumOutputSlots(); ++group)
2300 {
2301 splitterLayer->GetOutputSlot(group).SetTensorInfo(splitterOutputInfo);
2302 }
2303
2304 //
2305 // Set up Convolution2d layers for each group
2306 //
2307
2308 // Set up group tensor shapes
2309 TensorShape groupInputShape(inputShape);
2310 groupInputShape[channelsIndex] = channelsPerGroup;
2311
2312 TensorShape groupWeightsShape(weightsShape);
2313 groupWeightsShape[0] /= channelMultiplier * numGroups;
2314
2315 TensorShape groupBiasesShape({ 1 });
2316
2317 // Set up group tensor infos
2318 TensorInfo groupInputInfo(inputInfo);
2319 groupInputInfo.SetShape(groupInputShape);
2320
2321 const TensorInfo& weightsInfo = weights.GetInfo();
2322 TensorInfo groupWeightsInfo(weightsInfo);
2323 groupWeightsInfo.SetShape(groupWeightsShape);
2324
2325 const TensorInfo& biasesInfo = biases.GetInfo();
2326 TensorInfo groupBiasesInfo(biasesInfo);
2327 groupBiasesInfo.SetShape(groupBiasesShape);
2328
2329 TensorInfo groupOutputInfo(outputInfo);
2330
2331 TensorShape groupOutputShape(outputShape);
2332 const bool isDynamic = IsDynamicTensor(outputInfo);
2333 if (!isDynamic)
2334 {
2335 groupOutputShape[channelsIndex] = 1;
2336 }
2337 groupOutputInfo.SetShape(groupOutputShape);
2338
2339 const unsigned int weightsDataTypeSize = GetDataTypeSize(groupWeightsInfo.GetDataType());
2340 const unsigned int biasesDataTypeSize = GetDataTypeSize(groupBiasesInfo.GetDataType());
2341
2342 std::vector<IConnectableLayer*> convLayers(numGroups * channelMultiplier, nullptr);
2343 for (unsigned int group = 0u; group < numGroups; ++group)
2344 {
2345 for (unsigned int m = 0u; m < channelMultiplier; ++m)
2346 {
2347 auto index = group * channelMultiplier + m;
2348
2349 const unsigned int weightsDataOffset = groupWeightsShape.GetNumElements() * index * weightsDataTypeSize;
2350 const unsigned int biasesDataOffset = groupBiasesShape.GetNumElements() * index * biasesDataTypeSize;
2351
2352 if (weightsInfo.HasPerAxisQuantization())
2353 {
2354 // Extract per-axis quantization scales for group weights
2355 const std::vector<float>& weightsQuantScales = weightsInfo.GetQuantizationScales();
2356 groupWeightsInfo.SetQuantizationScales(
2357 std::vector<float>(weightsQuantScales.begin() + index,
2358 weightsQuantScales.begin() + index + groupWeightsShape[0]));
2359
2360 // Extract per-axis quantization scales for group biases
2361 const std::vector<float>& biasesQuantScales = biasesInfo.GetQuantizationScales();
2362 groupBiasesInfo.SetQuantizationScales(
2363 std::vector<float>(biasesQuantScales.begin() + index,
2364 biasesQuantScales.begin() + index + groupWeightsShape[0]));
2365 }
2366
2367 // Extract weights and biases data for current group convolution
2368 ConstTensor groupWeights(groupWeightsInfo,
2369 static_cast<const void *>(reinterpret_cast<const char *>(weights.GetMemoryArea()) +
2370 weightsDataOffset));
2371 ConstTensor groupBiases(groupBiasesInfo,
2372 static_cast<const void *>(reinterpret_cast<const char *>(biases.GetMemoryArea()) +
2373 biasesDataOffset));
2374
2375 isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01002376 armnn::BackendId setBackendConv;
Sadik Armagan8f397a12022-06-17 15:38:22 +01002377 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2378 {
2379 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2380 IsConvolution2dSupported,
2381 data.m_Backends,
2382 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01002383 setBackendConv,
Sadik Armagan8f397a12022-06-17 15:38:22 +01002384 groupInputInfo,
2385 outputInfo,
2386 desc,
2387 groupWeightsInfo,
2388 Optional<TensorInfo>(groupBiasesInfo));
2389 };
2390
2391 if(!isDynamic)
2392 {
2393 validateFunc(groupOutputInfo, isSupported);
2394 }
2395 else
2396 {
2397 isSupported = AreDynamicTensorsSupported();
2398 }
2399
2400 if (!isSupported)
2401 {
2402 return false;
2403 }
Teresa Charlind9360332022-08-30 14:27:10 +01002404
2405 IConnectableLayer* weightsLayer = data.m_Network->AddConstantLayer(groupWeights);
2406 IConnectableLayer* biasLayer = data.m_Network->AddConstantLayer(groupBiases);
2407 IConnectableLayer* convLayer = data.m_Network->AddConvolution2dLayer(desc);
2408
Cathal Corbett53837672022-09-01 11:34:37 +01002409 convLayer->SetBackendId(setBackendConv);
2410
Sadik Armagan8f397a12022-06-17 15:38:22 +01002411 if (!convLayer)
2412 {
2413 return Fail("%s: AddConvolution2dLayer failed", __func__);
2414 }
2415
2416 splitterLayer->GetOutputSlot(group).Connect(convLayer->GetInputSlot(0));
Teresa Charlind9360332022-08-30 14:27:10 +01002417 weightsLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(1));
2418 biasLayer->GetOutputSlot(0).Connect(convLayer->GetInputSlot(2));
2419
2420 weightsLayer->GetOutputSlot(0).SetTensorInfo(groupWeightsInfo);
2421 biasLayer->GetOutputSlot(0).SetTensorInfo(groupBiasesInfo);
Sadik Armagan8f397a12022-06-17 15:38:22 +01002422 convLayer->GetOutputSlot(0).SetTensorInfo(groupOutputInfo);
2423
2424 if(isDynamic)
2425 {
2426 convLayer->GetOutputSlot(0).IsTensorInfoSet();
2427
2428 validateFunc(convLayer->GetOutputSlot(0).GetTensorInfo(), isSupported);
2429
2430 outputInfo = convLayer->GetOutputSlot(0).GetTensorInfo();
2431
2432 if (!isSupported)
2433 {
2434 return false;
2435 }
2436 }
2437
2438 convLayers[index] = convLayer;
2439 }
2440 }
2441
2442 //
2443 // Set up Concat layer
2444 //
2445 ConcatDescriptor concatDescriptor;
2446 // Equivalent to outputShape[channelsIndex], but we can't know the outputShape in the case of dynamic tensors
2447 concatDescriptor = ConcatDescriptor(weightsShape[0]);
2448 for (unsigned int group = 0u; group < numGroups; ++group)
2449 {
2450 for (unsigned int m = 0u; m < channelMultiplier; ++m)
2451 {
2452 auto index = group * channelMultiplier + m;
2453 concatDescriptor.SetViewOriginCoord(index, channelsIndex, index);
2454 concatDescriptor.SetConcatAxis(channelsIndex);
2455 }
2456 }
2457
2458 isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01002459 armnn::BackendId setBackendConcat;
Sadik Armagan8f397a12022-06-17 15:38:22 +01002460 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2461 IsConcatSupported,
2462 data.m_Backends,
2463 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01002464 setBackendConcat,
Sadik Armagan8f397a12022-06-17 15:38:22 +01002465 std::vector<const TensorInfo*>(numGroups * channelMultiplier, &groupOutputInfo),
2466 outputInfo,
2467 concatDescriptor);
2468
2469 if (!isSupported)
2470 {
2471 return false;
2472 }
2473
2474 IConnectableLayer* concatLayer = data.m_Network->AddConcatLayer(concatDescriptor);
Cathal Corbett53837672022-09-01 11:34:37 +01002475 concatLayer->SetBackendId(setBackendConcat);
Sadik Armagan8f397a12022-06-17 15:38:22 +01002476 if (!concatLayer)
2477 {
2478 return Fail("%s: AddConcatLayer failed", __func__);
2479 }
2480
2481 for (unsigned int group = 0u; group < numGroups; ++group)
2482 {
2483 for (unsigned int m = 0u; m < channelMultiplier; ++m)
2484 {
2485 auto index = group * channelMultiplier + m;
2486 convLayers[index]->GetOutputSlot(0).Connect(concatLayer->GetInputSlot(index));
2487 }
2488 }
2489 concatLayer->GetOutputSlot(0).SetTensorInfo(outputInfo);
2490
2491 return SetupAndTrackLayerOutputSlot(operation, 0, *concatLayer, model,
2492 data, nullptr, nullptr, activation);
2493}
2494
2495bool Converter::ConvertHardSwish(const Operation& operation, const Model& model, ConversionData& data)
2496{
2497 VLOG(DRIVER) << "Converter::ConvertHardSwish()";
2498 ActivationDescriptor desc;
2499 desc.m_Function = ActivationFunction::HardSwish;
2500
2501 return ::ConvertToActivation(operation, __func__, desc, model, data);
2502}
2503
2504bool Converter::ConvertInstanceNormalization(const Operation& operation, const Model& model, ConversionData& data)
2505{
2506 VLOG(DRIVER) << "Converter::ConvertInstanceNormalization()";
2507
2508 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2509 if (!input.IsValid())
2510 {
2511 return Fail("%s: Operation has an invalid input 0", __func__);
2512 }
2513
2514 const Operand* output = GetOutputOperand(operation, 0, model);
2515 if (!output)
2516 {
2517 return Fail("%s: Operation has an invalid output", __func__);
2518 }
2519
2520 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2521
2522 // Determine data type of input tensor
2523 OperandType inputType;
2524 if (!GetOperandType(operation, 0, model, inputType))
2525 {
2526 return Fail("%s: Operation has invalid inputs", __func__);
2527 }
2528
2529 InstanceNormalizationDescriptor desc;
2530
2531 // Read gamma, beta & epsilon
2532 if (inputType == OperandType::TENSOR_FLOAT16)
2533 {
2534 Half fp16Gamma;
2535 Half fp16Beta;
2536 Half fp16Epsilon;
2537
2538 if (!GetInputScalar(operation, 1, OperandType::FLOAT16, fp16Gamma, model, data) ||
2539 !GetInputScalar(operation, 2, OperandType::FLOAT16, fp16Beta, model, data) ||
2540 !GetInputScalar(operation, 3, OperandType::FLOAT16, fp16Epsilon, model, data))
2541 {
2542 return Fail("%s: Operation has invalid inputs (FLOAT16)", __func__);
2543 }
2544
2545 desc.m_Gamma = static_cast<float>(fp16Gamma);
2546 desc.m_Beta = static_cast<float>(fp16Beta);
2547 desc.m_Eps = static_cast<float>(fp16Epsilon);
2548 }
2549 else if (inputType == OperandType::TENSOR_FLOAT32)
2550 {
2551 if (!GetInputScalar(operation, 1, OperandType::FLOAT32, desc.m_Gamma, model, data) ||
2552 !GetInputScalar(operation, 2, OperandType::FLOAT32, desc.m_Beta, model, data) ||
2553 !GetInputScalar(operation, 3, OperandType::FLOAT32, desc.m_Eps, model, data))
2554 {
2555 return Fail("%s: Operation has invalid inputs (FLOAT32)", __func__);
2556 }
2557 }
2558 else
2559 {
2560 return Fail("%s: Unsupported input tensor type: %d", __func__, inputType);
2561 }
2562
2563 desc.m_DataLayout = OptionalDataLayout(operation, 4, model, data);
2564
2565 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01002566 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01002567 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2568 {
2569 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2570 IsInstanceNormalizationSupported,
2571 data.m_Backends,
2572 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01002573 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01002574 input.GetTensorInfo(),
2575 outputInfo,
2576 desc);
2577 };
2578
2579 if(IsDynamicTensor(outputInfo))
2580 {
2581 isSupported = AreDynamicTensorsSupported();
2582 }
2583 else
2584 {
2585 validateFunc(outputInfo, isSupported);
2586 }
2587
2588 if (!isSupported)
2589 {
2590 return false;
2591 }
2592
2593 IConnectableLayer* layer = data.m_Network->AddInstanceNormalizationLayer(desc);
Cathal Corbett53837672022-09-01 11:34:37 +01002594 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01002595 input.Connect(layer->GetInputSlot(0));
2596
2597 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2598}
2599
2600bool Converter::ConvertL2Normalization(const Operation& operation, const Model& model, ConversionData& data)
2601{
2602 VLOG(DRIVER) << "Converter::ConvertL2Normalization()";
2603
2604 if (operation.inputs.size() != 1)
2605 {
2606 return Fail("%s: Optional inputs are not supported", __func__);
2607 }
2608
2609 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2610 if (!input.IsValid())
2611 {
2612 return Fail("%s: Operation has invalid inputs", __func__);
2613 }
2614
2615 const Operand* output = GetOutputOperand(operation, 0, model);
2616 if (!output)
2617 {
2618 return Fail("%s: Could not read output 0", __func__);
2619 }
2620
2621 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2622 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2623
2624 if (outputInfo.GetNumDimensions() != 4u)
2625 {
2626 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2627 }
2628
2629 armnn::L2NormalizationDescriptor desc;
2630 desc.m_DataLayout = armnn::DataLayout::NHWC;
2631
2632 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01002633 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01002634 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2635 {
2636 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2637 IsL2NormalizationSupported,
2638 data.m_Backends,
2639 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01002640 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01002641 inputInfo,
2642 outputInfo,
2643 desc);
2644 };
2645
2646 if(!IsDynamicTensor(outputInfo))
2647 {
2648 validateFunc(outputInfo, isSupported);
2649 }
2650 else
2651 {
2652 isSupported = AreDynamicTensorsSupported();
2653 }
2654
2655 if (!isSupported)
2656 {
2657 return false;
2658 }
2659
2660 armnn::IConnectableLayer* layer = data.m_Network->AddL2NormalizationLayer(desc);
Cathal Corbett53837672022-09-01 11:34:37 +01002661 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01002662 assert(layer != nullptr);
2663 input.Connect(layer->GetInputSlot(0));
2664
2665 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2666}
2667
2668bool Converter::ConvertL2Pool2d(const Operation& operation, const Model& model, ConversionData& data)
2669{
2670 VLOG(DRIVER) << "Converter::ConvertL2Pool2d()";
2671 return ConvertPooling2d(operation, __func__, PoolingAlgorithm::L2, model, data);
2672}
2673
2674bool Converter::ConvertLocalResponseNormalization(const Operation& operation,
2675 const Model& model,
2676 ConversionData& data)
2677{
2678 VLOG(DRIVER) << "Converter::ConvertLocalResponseNormalization()";
2679
2680 if (operation.inputs.size() != 5)
2681 {
2682 return Fail("%s: Optional inputs are not supported", __func__);
2683 }
2684
2685 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2686 if (!input.IsValid())
2687 {
2688 return Fail("%s: Operation has invalid inputs", __func__);
2689 }
2690
2691 const Operand* output = GetOutputOperand(operation, 0, model);
2692 if (!output)
2693 {
2694 return Fail("%s: Could not read output 0", __func__);
2695 }
2696
2697 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
2698 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2699
2700 if (outputInfo.GetNumDimensions() != 4u)
2701 {
2702 return Fail("%s: Tensor Rank other than 4 is not supported", __func__);
2703 }
2704
2705 armnn::NormalizationDescriptor descriptor;
2706 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
2707 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
2708 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
2709
2710 if (!input.IsValid() ||
2711 !GetInputScalar(operation, 1, OperandType::INT32, descriptor.m_NormSize, model, data) ||
2712 !GetInputFloat32(operation, 2, descriptor.m_K, model, data) ||
2713 !GetInputFloat32(operation, 3, descriptor.m_Alpha, model, data) ||
2714 !GetInputFloat32(operation, 4, descriptor.m_Beta, model, data))
2715 {
2716 return Fail("%s: Operation has invalid inputs", __func__);
2717 }
2718
2719 // ArmNN expects normSize to be the full size of the normalization
2720 // window rather than the radius as in AndroidNN.
2721 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
2722
2723 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01002724 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01002725 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2726 {
2727 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2728 IsNormalizationSupported,
2729 data.m_Backends,
2730 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01002731 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01002732 inputInfo,
2733 outputInfo,
2734 descriptor);
2735 };
2736
2737 if(!IsDynamicTensor(outputInfo))
2738 {
2739 validateFunc(outputInfo, isSupported);
2740 }
2741 else
2742 {
2743 isSupported = AreDynamicTensorsSupported();
2744 }
2745
2746 if (!isSupported)
2747 {
2748 return false;
2749 }
2750
2751
2752 armnn::IConnectableLayer* layer = data.m_Network->AddNormalizationLayer(descriptor);
Cathal Corbett53837672022-09-01 11:34:37 +01002753 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01002754 assert(layer != nullptr);
2755 input.Connect(layer->GetInputSlot(0));
2756
2757 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2758}
2759
2760bool Converter::ConvertLogicalBinary(const Operation& operation,
2761 const Model& model,
2762 ConversionData& data,
2763 armnn::LogicalBinaryOperation logicalOperation)
2764{
2765 VLOG(DRIVER) << "Converter::ConvertLogicalBinary()";
2766 VLOG(DRIVER) << "ConvertLogicalBinary()";
2767 VLOG(DRIVER) << "logicalOperation = " << GetLogicalBinaryOperationAsCString(logicalOperation);
2768
2769 LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
2770 LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
2771
2772 if (!(input0.IsValid() && input1.IsValid()))
2773 {
2774 return Fail("%s: Operation has invalid inputs", __func__);
2775 }
2776
2777 const Operand* output = GetOutputOperand(operation, 0, model);
2778 if (!output)
2779 {
2780 return Fail("%s: Could not read output 0", __func__);
2781 }
2782
2783 const TensorInfo& inputInfo0 = input0.GetTensorInfo();
2784 const TensorInfo& inputInfo1 = input1.GetTensorInfo();
2785 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2786
2787 LogicalBinaryDescriptor descriptor(logicalOperation);
2788
2789 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01002790 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01002791 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2792 {
2793 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2794 IsLogicalBinarySupported,
2795 data.m_Backends,
2796 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01002797 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01002798 inputInfo0,
2799 inputInfo1,
2800 outputInfo,
2801 descriptor);
2802 };
2803
2804 if(!IsDynamicTensor(outputInfo))
2805 {
2806 validateFunc(outputInfo, isSupported);
2807 }
2808 else
2809 {
2810 isSupported = AreDynamicTensorsSupported();
2811 }
2812
2813 if (!isSupported)
2814 {
2815 return false;
2816 }
2817
2818 IConnectableLayer* layer = data.m_Network->AddLogicalBinaryLayer(descriptor);
Cathal Corbett53837672022-09-01 11:34:37 +01002819 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01002820 assert(layer != nullptr);
2821
2822 bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
2823 if (!isReshapeSupported)
2824 {
2825 return false;
2826 }
2827
2828 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2829}
2830
2831bool Converter::ConvertLogistic(const Operation& operation, const Model& model, ConversionData& data)
2832{
2833 VLOG(DRIVER) << "Converter::ConvertLogistic()";
2834 armnn::ActivationDescriptor desc;
2835 desc.m_Function = armnn::ActivationFunction::Sigmoid;
2836
2837 return ConvertToActivation(operation, __func__, desc, model, data);
2838}
2839
2840bool Converter::ConvertLogSoftmax(const Operation& operation, const Model& model, ConversionData& data)
2841{
2842 VLOG(DRIVER) << "Converter::ConvertLogSoftmax()";
2843
2844 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2845 if (!input.IsValid())
2846 {
2847 return Fail("%s: Failed to read input 0", __func__);
2848 }
2849
2850 const Operand* output = GetOutputOperand(operation, 0, model);
2851 if (!output)
2852 {
2853 return Fail("%s: Failed to read output", __func__);
2854 }
2855
2856 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
2857
2858 // Determine data type of input tensor
2859 OperandType inputType;
2860 if (!GetOperandType(operation, 0, model, inputType))
2861 {
2862 return Fail("%s: Operation has invalid inputs", __func__);
2863 }
2864
2865 LogSoftmaxDescriptor descriptor;
2866
2867 // Read beta
2868 if (inputType == OperandType::TENSOR_FLOAT16)
2869 {
2870 Half fp16Beta;
2871 if (!GetInputScalar(operation, 1, OperandType::FLOAT16, fp16Beta, model, data))
2872 {
2873 return Fail("%s: Failed to read input 1 (FLOAT16)", __func__);
2874 }
2875
2876 descriptor.m_Beta = static_cast<float>(fp16Beta);
2877 }
2878 else if (inputType == OperandType::TENSOR_FLOAT32)
2879 {
2880 if (!GetInputScalar(operation, 1, OperandType::FLOAT32, descriptor.m_Beta, model, data))
2881 {
2882 return Fail("%s: Failed to read input 1 (FLOAT32)", __func__);
2883 }
2884 }
2885 else
2886 {
2887 return Fail("%s: Unsupported input tensor type: %d", __func__, inputType);
2888 }
2889
2890 // Read axis
2891 if (!GetInputInt32(operation, 2, descriptor.m_Axis, model, data))
2892 {
2893 return Fail("%s: Failed to read input 2", __func__);
2894 }
2895
2896 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01002897 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01002898 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
2899 {
2900 FORWARD_LAYER_SUPPORT_FUNC(__func__,
2901 IsLogSoftmaxSupported,
2902 data.m_Backends,
2903 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01002904 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01002905 input.GetTensorInfo(),
2906 outputInfo,
2907 descriptor);
2908 };
2909
2910 if(IsDynamicTensor(outputInfo))
2911 {
2912 isSupported = AreDynamicTensorsSupported();
2913 }
2914 else
2915 {
2916 validateFunc(outputInfo, isSupported);
2917 }
2918
2919 if (!isSupported)
2920 {
2921 return false;
2922 }
2923
2924 IConnectableLayer* layer = data.m_Network->AddLogSoftmaxLayer(descriptor);
Cathal Corbett53837672022-09-01 11:34:37 +01002925 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01002926 if (!layer)
2927 {
2928 return Fail("%s: AddLogSoftmaxLayer() returned nullptr", __func__);
2929 }
2930
2931 input.Connect(layer->GetInputSlot(0));
2932
2933 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
2934}
2935
2936bool Converter::ConvertLstm(const Operation& operation, const Model& model, ConversionData& data)
2937{
2938 VLOG(DRIVER) << "Converter::ConvertLstm()";
2939
2940 // Inputs:
2941 // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
2942 // “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
2943 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
2944 if (!input.IsValid())
2945 {
2946 return Fail("%s: Could not read input 0: input", __func__);
2947 }
2948 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
2949 LayerInputHandle outputStateIn = ConvertToLayerInputHandle(operation, 18, model, data);
2950 if (!outputStateIn.IsValid())
2951 {
2952 return Fail("%s: Could not read input 18: outputStateIn", __func__);
2953 }
2954 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
2955 LayerInputHandle cellStateIn = ConvertToLayerInputHandle(operation, 19, model, data);
2956 if (!cellStateIn.IsValid())
2957 {
2958 return Fail("%s: Could not read input 19: cellStateIn", __func__);
2959 }
2960
2961 // Get the mandatory input tensors:
2962 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2963 // [num_units, input_size].
2964 const ConstTensorPin inputToForgetWeightsPin =
2965 (DequantizeAndMakeConstTensorPin(operation, model, data, 2));
2966 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2967 // [num_units, input_size].
2968 const ConstTensorPin inputToCellWeightsPin =
2969 (DequantizeAndMakeConstTensorPin(operation, model, data, 3));
2970 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2971 // [num_units, input_size].
2972 const ConstTensorPin inputToOutputWeightsPin =
2973 (DequantizeAndMakeConstTensorPin(operation, model, data, 4));
2974 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2975 // [num_units, output_size].
2976 const ConstTensorPin recurrentToForgetWeightsPin =
2977 (DequantizeAndMakeConstTensorPin(operation, model, data, 6));
2978 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2979 // [num_units, output_size].
2980 const ConstTensorPin recurrentToCellWeightsPin =
2981 (DequantizeAndMakeConstTensorPin(operation, model, data, 7));
2982 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
2983 // [num_units, output_size].
2984 const ConstTensorPin recurrentToOutputWeightsPin =
2985 (DequantizeAndMakeConstTensorPin(operation, model, data, 8));
2986 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2987 const ConstTensorPin forgetGateBiasPin =
2988 ConvertOperationInputToConstTensorPin(operation, 13, model, data);
2989 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2990 const ConstTensorPin cellBiasPin =
2991 ConvertOperationInputToConstTensorPin(operation, 14, model, data);
2992 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
2993 const ConstTensorPin outputGateBiasPin =
2994 ConvertOperationInputToConstTensorPin(operation, 15, model, data);
2995
2996 if (!inputToForgetWeightsPin.IsValid() ||
2997 !inputToCellWeightsPin.IsValid() ||
2998 !inputToOutputWeightsPin.IsValid() ||
2999 !recurrentToForgetWeightsPin.IsValid() ||
3000 !recurrentToCellWeightsPin.IsValid() ||
3001 !recurrentToOutputWeightsPin.IsValid() ||
3002 !forgetGateBiasPin.IsValid() ||
3003 !cellBiasPin.IsValid() ||
3004 !outputGateBiasPin.IsValid())
3005 {
3006 return Fail("%s: Operation has invalid tensor inputs", __func__);
3007 }
3008
3009 // Get the optional input tensors:
3010 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
3011 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
3012 const ConstTensorPin inputToInputWeightsPin =
3013 (DequantizeAndMakeConstTensorPin(operation, model, data, 1, true));
3014 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
3015 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
3016 // “num_units”), or the second dimension of the “projection_weights”, if defined.
3017 const ConstTensorPin recurrentToInputWeightsPin =
3018 (DequantizeAndMakeConstTensorPin(operation, model, data, 5, true));
3019 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
3020 const ConstTensorPin cellToInputWeightsPin =
3021 (DequantizeAndMakeConstTensorPin(operation, model, data, 9, true));
3022 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
3023 const ConstTensorPin cellToForgetWeightsPin =
3024 (DequantizeAndMakeConstTensorPin(operation, model, data, 10, true));
3025 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
3026 const ConstTensorPin cellToOutputWeightsPin =
3027 (DequantizeAndMakeConstTensorPin(operation, model, data, 11, true));
3028 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
3029 const ConstTensorPin inputGateBiasPin =
3030 ConvertOperationInputToConstTensorPin(operation,
3031 12,
3032 model,
3033 data,
3034 g_DontPermute,
3035 nullptr,
3036 true);
3037
3038 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
3039 // [output_size, num_units].
3040 const ConstTensorPin projectionWeightsPin =
3041 (DequantizeAndMakeConstTensorPin(operation, model, data, 16, true));
3042 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
3043 const ConstTensorPin projectionBiasPin =
3044 ConvertOperationInputToConstTensorPin(operation,
3045 17,
3046 model,
3047 data,
3048 g_DontPermute,
3049 nullptr,
3050 true);
3051
3052 if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
3053 (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
3054 (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
3055 (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
3056 (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
3057 (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
3058 (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
3059 (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
3060 {
3061 return Fail("%s: Operation has invalid tensor inputs", __func__);
3062 }
3063
3064 // Get the mandatory input scalars (actually 1-D tensors of size 1):
3065 // 20: The activation function: A value indicating the activation function:
3066 // 0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
3067 // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
3068 // If set to 0.0 then clipping is disabled.
3069 // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
3070 // [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
3071 ActivationFn activation = ActivationFn::kActivationNone;
3072 float cellClip;
3073 float projClip;
3074 if (!GetInputActivationFunctionFromTensor(operation, 20, activation, model, data) ||
3075 !GetInputScalar(operation, 21, OperandType::FLOAT32, cellClip, model, data) ||
3076 !GetInputScalar(operation, 22, OperandType::FLOAT32, projClip, model, data))
3077 {
3078 return Fail("%s: Operation has invalid scalar inputs", __func__);
3079 }
3080
3081 // Get the normalization tensors
3082 // 23: The input layer normalization weights. A 1-D tensor of shape [num_units].
3083 // Used to rescale normalized inputs to activation at input gate.
3084 const ConstTensorPin inputLayerNormWeightsPin
3085 (DequantizeAndMakeConstTensorPin(operation, model, data, 23, true));
3086
3087 // 24: The forget layer normalization weights. A 1-D tensor of shape [num_units].
3088 // Used to rescale normalized inputs to activation at forget gate.
3089 const ConstTensorPin forgetLayerNormWeightsPin =
3090 ConvertOperationInputToConstTensorPin(operation,
3091 24,
3092 model,
3093 data,
3094 g_DontPermute,
3095 nullptr,
3096 true);
3097
3098 // 25: The cell layer normalization weights. A 1-D tensor of shape [num_units].
3099 // Used to rescale normalized inputs to activation at cell gate.
3100 const ConstTensorPin cellLayerNormWeightsPin =
3101 ConvertOperationInputToConstTensorPin(operation,
3102 25,
3103 model,
3104 data,
3105 g_DontPermute,
3106 nullptr,
3107 true);
3108
3109 // 26: The output layer normalization weights. A 1-D tensor of shape [num_units].
3110 // Used to rescale normalized inputs to activation at output gate.
3111 const ConstTensorPin outputLayerNormWeightsPin =
3112 ConvertOperationInputToConstTensorPin(operation,
3113 26,
3114 model,
3115 data,
3116 g_DontPermute,
3117 nullptr,
3118 true);
3119
3120 // Outputs:
3121 // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
3122 // with CIFG, or [batch_size, num_units * 3] without CIFG.
3123 const Operand* scratchBuffer = GetOutputOperand(operation, 0, model);
3124 if (!scratchBuffer)
3125 {
3126 return Fail("%s: Could not read output 0: scratchBuffer", __func__);
3127 }
3128 // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
3129 const Operand* outputStateOut = GetOutputOperand(operation, 1, model);
3130 if (!outputStateOut)
3131 {
3132 return Fail("%s: Could not read output 1: outputStateOut", __func__);
3133 }
3134 // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
3135 const Operand* cellStateOut = GetOutputOperand(operation, 2, model);
3136 if (!cellStateOut)
3137 {
3138 return Fail("%s: Could not read output 2: cellStateOut", __func__);
3139 }
3140 // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
3141 // effectively the same as the current “output state (out)” value.
3142 const Operand* output = GetOutputOperand(operation, 3, model);
3143 if (!output)
3144 {
3145 return Fail("%s: Could not read output 3: output", __func__);
3146 }
3147
3148 // set the params structure for the AddLstmLayer call
3149 LstmInputParams params;
3150 params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
3151 params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
3152 params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
3153 params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
3154 params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
3155 params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
3156 params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
3157 params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
3158 params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
3159 params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
3160 params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
3161 params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
3162 params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
3163 params.m_CellBias = cellBiasPin.GetConstTensorPtr();
3164 params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
3165 params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
3166 params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
3167 params.m_InputLayerNormWeights = inputLayerNormWeightsPin.GetConstTensorPtr();
3168 params.m_ForgetLayerNormWeights = forgetLayerNormWeightsPin.GetConstTensorPtr();
3169 params.m_CellLayerNormWeights = cellLayerNormWeightsPin.GetConstTensorPtr();
3170 params.m_OutputLayerNormWeights = outputLayerNormWeightsPin.GetConstTensorPtr();
3171
3172 // set the layer descriptor
3173 LstmDescriptor desc;
3174 desc.m_ActivationFunc = activation;
3175 desc.m_ClippingThresCell = cellClip;
3176 desc.m_ClippingThresProj = projClip;
3177 desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
3178 params.m_RecurrentToInputWeights == nullptr ||
3179 params.m_InputGateBias == nullptr);
3180 desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
3181 params.m_CellToOutputWeights != nullptr);
3182 desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
3183 desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr ||
3184 params.m_ForgetLayerNormWeights != nullptr ||
3185 params.m_CellLayerNormWeights != nullptr ||
3186 params.m_OutputLayerNormWeights != nullptr);
3187
3188 // validate the optional input groups
3189 if (desc.m_CifgEnabled &&
3190 (params.m_InputToInputWeights != nullptr ||
3191 params.m_RecurrentToInputWeights != nullptr ||
3192 params.m_InputGateBias != nullptr))
3193 {
3194 return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
3195 " and input gate bias must be provided", __func__);
3196 }
3197
3198 if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
3199 {
3200 return Fail("%s: projection bias should not be provided without projection weights", __func__);
3201 }
3202
3203 if (desc.m_PeepholeEnabled &&
3204 (params.m_CellToForgetWeights == nullptr ||
3205 params.m_CellToOutputWeights == nullptr ||
3206 (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
3207 {
3208 return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
3209 " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
3210 }
3211
3212 if (desc.m_LayerNormEnabled &&
3213 (params.m_ForgetLayerNormWeights == nullptr ||
3214 params.m_CellLayerNormWeights == nullptr ||
3215 params.m_OutputLayerNormWeights == nullptr ||
3216 (!desc.m_CifgEnabled && params.m_InputLayerNormWeights == nullptr)))
3217 {
3218 return Fail("%s: All, or none, of forget-norm weights, cell-norm weights and output-norm weights must be"
3219 " provided and, if CIFG is not enabled, input-norm weights must also be provided", __func__);
3220 }
3221
3222 // Check if the layer is supported
3223 // Inputs
3224 const TensorInfo& inputInfo = input.GetTensorInfo();
3225 const TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
3226 const TensorInfo& cellStateInInfo = cellStateIn.GetTensorInfo();
3227
3228 // Outputs
3229 const TensorInfo& scratchBufferInfo = GetTensorInfoForOperand(*scratchBuffer);
3230 const TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
3231 const TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
3232 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3233
3234 // Basic parameters
3235 LstmInputParamsInfo paramsInfo;
3236 paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
3237 paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
3238 paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
3239 paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
3240 paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
3241 paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
3242 paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
3243 paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
3244 paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
3245
3246 // Optional parameters
3247 if (!desc.m_CifgEnabled)
3248 {
3249 paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
3250 paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
3251 if (params.m_CellToInputWeights != nullptr)
3252 {
3253 paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
3254 }
3255 paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
3256 }
3257
3258 if (desc.m_ProjectionEnabled)
3259 {
3260 paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
3261 if (params.m_ProjectionBias != nullptr)
3262 {
3263 paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
3264 }
3265 }
3266
3267 if (desc.m_PeepholeEnabled)
3268 {
3269 paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
3270 paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
3271 }
3272
3273 if (desc.m_LayerNormEnabled)
3274 {
3275 if(!desc.m_CifgEnabled)
3276 {
3277 paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
3278 }
3279 paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
3280 paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
3281 paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
3282 }
3283
3284 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01003285 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01003286 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3287 {
3288 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3289 IsLstmSupported,
3290 data.m_Backends,
3291 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01003292 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01003293 inputInfo,
3294 outputStateInInfo,
3295 cellStateInInfo,
3296 scratchBufferInfo,
3297 outputStateOutInfo,
3298 cellStateOutInfo,
3299 outputInfo,
3300 desc,
3301 paramsInfo);
3302 };
3303
3304 bool isDynamic = false;
3305 if (!IsDynamicTensor(outputStateOutInfo) &&
3306 !IsDynamicTensor(scratchBufferInfo) &&
3307 !IsDynamicTensor(cellStateOutInfo) &&
3308 !IsDynamicTensor(outputInfo))
3309 {
3310 validateFunc(outputInfo, isSupported);
3311 }
3312 else
3313 {
3314 isDynamic = true;
3315 isSupported = AreDynamicTensorsSupported();
3316 }
3317
3318 if (!isSupported)
3319 {
3320 return false;
3321 }
3322
3323 // Add the layer
3324 IConnectableLayer* layer = data.m_Network->AddLstmLayer(desc, params, "Lstm");
Cathal Corbett53837672022-09-01 11:34:37 +01003325 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01003326
3327 input.Connect(layer->GetInputSlot(0));
3328 outputStateIn.Connect(layer->GetInputSlot(1));
3329 cellStateIn.Connect(layer->GetInputSlot(2));
3330
3331 if (!isDynamic)
3332 {
3333 return (
3334 SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
3335 SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data) &&
3336 SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data) &&
3337 SetupAndTrackLayerOutputSlot(operation, 3, *layer, 3, model, data));
3338 }
3339 else
3340 {
3341 return (
3342 SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
3343 SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data) &&
3344 SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data) &&
3345 SetupAndTrackLayerOutputSlot(
3346 operation, 3, *layer, 3, model, data, nullptr, validateFunc, ActivationFn::kActivationNone, true));
3347 }
3348
3349}
3350
3351bool Converter::ConvertMaxPool2d(const Operation& operation, const Model& model, ConversionData& data)
3352{
3353 VLOG(DRIVER) << "Converter::ConvertMaxPool2d()";
3354 return ConvertPooling2d(operation, __func__, PoolingAlgorithm::Max, model, data);
3355}
3356
3357bool Converter::ConvertMaximum(const Operation& operation, const Model& model, ConversionData& data)
3358{
3359 VLOG(DRIVER) << "Converter::ConvertMaximum()";
3360
3361 LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
3362 LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
3363
3364 if (!input0.IsValid() || !input1.IsValid())
3365 {
3366 return Fail("%s: Operation has invalid inputs", __func__);
3367 }
3368
3369 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
3370 if (!outputOperand)
3371 {
3372 return Fail("%s: Could not read output", __func__);
3373 }
3374
3375 const TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
3376
3377 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01003378 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01003379 auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
3380 {
3381 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3382 IsMaximumSupported,
3383 data.m_Backends,
3384 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01003385 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01003386 input0.GetTensorInfo(),
3387 input1.GetTensorInfo(),
3388 outInfo);
3389 };
3390
3391 if(IsDynamicTensor(outInfo))
3392 {
3393 isSupported = AreDynamicTensorsSupported();
3394 }
3395 else
3396 {
3397 validateFunc(outInfo, isSupported);
3398 }
3399
3400 if (!isSupported)
3401 {
3402 return false;
3403 }
3404
3405 IConnectableLayer* layer = data.m_Network->AddMaximumLayer();
Cathal Corbett53837672022-09-01 11:34:37 +01003406 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01003407 assert(layer != nullptr);
3408 bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
3409 if (!isReshapeSupported)
3410 {
3411 return false;
3412 }
3413
3414 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3415}
3416
3417bool Converter::ConvertMean(const Operation& operation, const Model& model, ConversionData& data)
3418{
3419 VLOG(DRIVER) << "Converter::ConvertMean()";
3420
3421 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3422 if (!input.IsValid())
3423 {
3424 return Fail("%s: Operation has invalid inputs", __func__);
3425 }
3426
3427 const Operand* output = GetOutputOperand(operation, 0, model);
3428 if (!output)
3429 {
3430 return Fail("%s: Could not read output 0", __func__);
3431 }
3432
3433 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3434
3435 const Operand* axisOperand = GetInputOperand(operation, 1, model);
3436 if (!axisOperand)
3437 {
3438 return Fail("%s: Could not read input 1", __func__);
3439 }
3440
3441 std::vector<int32_t> axis;
3442 if (!GetTensorInt32Values(*axisOperand, axis, model, data))
3443 {
3444 return Fail("%s: Input 1 has invalid values", __func__);
3445 }
3446
3447 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3448
3449 // Convert the axis to unsigned int and remove duplicates.
3450 unsigned int rank = inputInfo.GetNumDimensions();
3451 std::set<unsigned int> uniqueAxis;
3452 std::transform(axis.begin(), axis.end(),
3453 std::inserter(uniqueAxis, uniqueAxis.begin()),
3454 [rank](int i) -> unsigned int { return (i + rank) % rank; });
3455
3456 // Get the "keep dims" flag.
3457 int32_t keepDims = 0;
3458 if (!GetInputInt32(operation, 2, keepDims, model, data))
3459 {
3460 return Fail("%s: Could not read input 2", __func__);
3461 }
3462
3463 armnn::MeanDescriptor descriptor;
3464 descriptor.m_Axis.assign(uniqueAxis.begin(), uniqueAxis.end());
3465 descriptor.m_KeepDims = keepDims > 0;
3466
3467 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01003468 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01003469 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3470 {
3471 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3472 IsMeanSupported,
3473 data.m_Backends,
3474 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01003475 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01003476 inputInfo,
3477 outputInfo,
3478 descriptor);
3479 };
3480
3481 if(!IsDynamicTensor(outputInfo))
3482 {
3483 validateFunc(outputInfo, isSupported);
3484 }
3485 else
3486 {
3487 isSupported = AreDynamicTensorsSupported();
3488 }
3489
3490 if (!isSupported)
3491 {
3492 return false;
3493 }
3494
3495 armnn::IConnectableLayer* const layer = data.m_Network->AddMeanLayer(descriptor);
Cathal Corbett53837672022-09-01 11:34:37 +01003496 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01003497 assert(layer != nullptr);
3498 input.Connect(layer->GetInputSlot(0));
3499
3500 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3501}
3502
3503bool Converter::ConvertMinimum(const Operation& operation, const Model& model, ConversionData& data)
3504{
3505 VLOG(DRIVER) << "Converter::ConvertMinimum()";
3506
3507 LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
3508 LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
3509
3510 if (!input0.IsValid() || !input1.IsValid())
3511 {
3512 return Fail("%s: Operation has invalid inputs", __func__);
3513 }
3514
3515 const Operand* output = GetOutputOperand(operation, 0, model);
3516 if (!output)
3517 {
3518 return Fail("%s: Could not read output 0", __func__);
3519 }
3520
3521 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3522
3523 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01003524 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01003525 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3526 {
3527 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3528 IsMinimumSupported,
3529 data.m_Backends,
3530 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01003531 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01003532 input0.GetTensorInfo(),
3533 input1.GetTensorInfo(),
3534 outputInfo);
3535 };
3536
3537 if(IsDynamicTensor(outputInfo))
3538 {
3539 isSupported = AreDynamicTensorsSupported();
3540 }
3541 else
3542 {
3543 validateFunc(outputInfo, isSupported);
3544 }
3545
3546 if (!isSupported)
3547 {
3548 return false;
3549 }
3550
3551 IConnectableLayer* const layer = data.m_Network->AddMinimumLayer();
Cathal Corbett53837672022-09-01 11:34:37 +01003552 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01003553 assert(layer != nullptr);
3554 bool isReshapeSupported = BroadcastTensor(input0, input1, layer, data);
3555 if (!isReshapeSupported)
3556 {
3557 return false;
3558 }
3559
3560 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3561}
3562
3563bool Converter::ConvertMul(const Operation& operation, const Model& model, ConversionData& data)
3564{
3565 VLOG(DRIVER) << "Converter::ConvertMul()";
3566
3567 LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
3568 LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
3569
3570 if (!input0.IsValid() || !input1.IsValid())
3571 {
3572 return Fail("%s: Operation has invalid inputs", __func__);
3573 }
3574
3575 // The FuseActivation parameter is always the input index 2
3576 // and it should be optional
3577 ActivationFn activationFunction;
3578 if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
3579 {
3580 return Fail("%s: Operation has invalid inputs", __func__);
3581 }
3582
3583 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
3584
3585 if (outputOperand == nullptr)
3586 {
3587 return false;
3588 }
3589
3590 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
3591
3592 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01003593 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01003594 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3595 {
3596 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3597 IsMultiplicationSupported,
3598 data.m_Backends,
3599 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01003600 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01003601 input0.GetTensorInfo(),
3602 input1.GetTensorInfo(),
3603 outputInfo);
3604 };
3605
3606 if(!IsDynamicTensor(outputInfo))
3607 {
3608 validateFunc(outputInfo, isSupported);
3609 }
3610 else
3611 {
3612 isSupported = AreDynamicTensorsSupported();
3613 }
3614
3615 if (!isSupported)
3616 {
3617 return false;
3618 }
3619
3620 armnn::IConnectableLayer* const startLayer = data.m_Network->AddMultiplicationLayer();
Cathal Corbett53837672022-09-01 11:34:37 +01003621 startLayer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01003622
3623 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
3624 if (!isReshapeSupported)
3625 {
3626 return false;
3627 }
3628
3629 return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
3630 data, nullptr, validateFunc, activationFunction);
3631}
3632
3633bool Converter::ConvertPad(const Operation& operation, const Model& model, ConversionData& data)
3634{
3635 VLOG(DRIVER) << "Converter::ConvertPad()";
3636
3637 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3638 if (!input.IsValid())
3639 {
3640 return Fail("%s: Operation has invalid inputs", __func__);
3641 }
3642
3643 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
3644 unsigned int rank = inputInfo.GetNumDimensions();
3645
3646 armnn::PadDescriptor descriptor;
3647 if (!ConvertPaddings(operation, model, data, rank, descriptor))
3648 {
3649 return Fail("%s: Could not convert paddings", __func__);
3650 }
3651
3652 // For a ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED tensor,
3653 // the scale and zeroPoint must be the same as input0
3654 // Before Android Q, the pad value for ANEURALNETWORKS_TENSOR_QUANT8_ASYMM was undefined. Since Android Q the pad
3655 // value must be "logical zero" we set it to be equal to the QuantizationOffset so effectively it ends up as
3656 // (QuantizationOffset - QuantizationOffset) * scale = 0.
3657 if (inputInfo.GetDataType() == armnn::DataType::QAsymmU8 || inputInfo.GetDataType() == armnn::DataType::QAsymmS8)
3658 {
3659 descriptor.m_PadValue = inputInfo.GetQuantizationOffset();
3660 }
3661
3662 const Operand* output = GetOutputOperand(operation, 0, model);
3663 if (!output)
3664 {
3665 return Fail("%s: Could not read output", __func__);
3666 }
3667
3668 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3669
3670 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01003671 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01003672 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3673 {
3674 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3675 IsPadSupported,
3676 data.m_Backends,
3677 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01003678 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01003679 inputInfo,
3680 outputInfo,
3681 descriptor);
3682 };
3683
3684 if(!IsDynamicTensor(outputInfo))
3685 {
3686 validateFunc(outputInfo, isSupported);
3687 }
3688 else
3689 {
3690 isSupported = AreDynamicTensorsSupported();
3691 }
3692
3693 if (!isSupported)
3694 {
3695 return false;
3696 }
3697
3698 armnn::IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
Cathal Corbett53837672022-09-01 11:34:37 +01003699 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01003700 assert(layer != nullptr);
3701 input.Connect(layer->GetInputSlot(0));
3702
3703 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3704}
3705
3706bool Converter::ConvertPadV2(const Operation& operation, const Model& model, ConversionData& data)
3707{
3708 VLOG(DRIVER) << "Converter::ConvertPadV2()";
3709
3710 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3711 if (!input.IsValid())
3712 {
3713 return Fail("%s: Could not read input 0", __func__);
3714 }
3715
3716 const Operand* output = GetOutputOperand(operation, 0, model);
3717 if (!output)
3718 {
3719 return Fail("%s: Could not read output", __func__);
3720 }
3721
3722 const TensorInfo& inputInfo = input.GetTensorInfo();
3723 unsigned int rank = inputInfo.GetNumDimensions();
3724
3725 PadDescriptor descriptor;
3726 if (!ConvertPaddings(operation, model, data, rank, descriptor))
3727 {
3728 return Fail("%s: Could not convert paddings", __func__);
3729 }
3730
3731 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3732
3733 // Determine type of padding value
3734 OperandType operandType0;
3735 OperandType operandType2;
3736
3737 if (!GetOperandType(operation, 0, model, operandType0) ||
3738 !GetOperandType(operation, 2, model, operandType2))
3739 {
3740 return Fail("%s: Operation has invalid inputs", __func__);
3741 }
3742
3743 // Read value to use for padding
3744 if (operandType0 == OperandType::TENSOR_FLOAT16 && operandType2 == OperandType::FLOAT16)
3745 {
3746 Half f16PadValue;
3747 if (!GetInputScalar(operation, 2, operandType2, f16PadValue, model, data))
3748 {
3749 return Fail("%s: Could not read input 2 (FLOAT16)", __func__);
3750 }
3751
3752 descriptor.m_PadValue = f16PadValue;
3753 }
3754 else if (operandType0 == OperandType::TENSOR_FLOAT32 && operandType2 == OperandType::FLOAT32)
3755 {
3756 if (!GetInputFloat32(operation, 2, descriptor.m_PadValue, model, data))
3757 {
3758 return Fail("%s: Could not read input 2 (FLOAT32)", __func__);
3759 }
3760 }
3761 else if (isQuantizedOperand(operandType0) && operandType2 == OperandType::INT32)
3762 {
3763 int32_t intPadValue = 0;
3764 if (!GetInputInt32(operation, 2, intPadValue, model, data))
3765 {
3766 return Fail("%s: Could not read input 2 (INT32)", __func__);
3767 }
3768 descriptor.m_PadValue = intPadValue;
3769 }
3770 else
3771 {
3772 return Fail("%s: Operation has invalid inputs: type mismatch", __func__);
3773 }
3774
3775 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01003776 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01003777 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3778 {
3779 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3780 IsPadSupported,
3781 data.m_Backends,
3782 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01003783 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01003784 inputInfo,
3785 outputInfo,
3786 descriptor);
3787 };
3788
3789 if(IsDynamicTensor(outputInfo))
3790 {
3791 isSupported = AreDynamicTensorsSupported();
3792 }
3793 else
3794 {
3795 validateFunc(outputInfo, isSupported);
3796 }
3797
3798 if (!isSupported)
3799 {
3800 return false;
3801 }
3802
3803 IConnectableLayer* const layer = data.m_Network->AddPadLayer(descriptor);
Cathal Corbett53837672022-09-01 11:34:37 +01003804 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01003805 assert(layer != nullptr);
3806 input.Connect(layer->GetInputSlot(0));
3807
3808 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3809}
3810
3811bool Converter::ConvertPrelu(const Operation& operation, const Model& model, ConversionData& data)
3812{
3813 VLOG(DRIVER) << "Converter::ConvertPrelu()";
3814
3815 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3816 LayerInputHandle alpha = ConvertToLayerInputHandle(operation, 1, model, data);
3817
3818 if (!input.IsValid() || !alpha.IsValid())
3819 {
3820 return Fail("%s: Operation has invalid inputs", __func__);
3821 }
3822
3823 const Operand* output = GetOutputOperand(operation, 0, model);
3824
3825 if (!output)
3826 {
3827 return Fail("%s: Could not read output", __func__);
3828 }
3829
3830 const TensorInfo& inputInfo = input.GetTensorInfo();
3831 const TensorInfo& alphaInfo = alpha.GetTensorInfo();
3832 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
3833
3834 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01003835 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01003836 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3837 {
3838 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3839 IsPreluSupported,
3840 data.m_Backends,
3841 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01003842 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01003843 inputInfo,
3844 alphaInfo,
3845 outputInfo);
3846 };
3847
3848 if(IsDynamicTensor(outputInfo))
3849 {
3850 isSupported = AreDynamicTensorsSupported();
3851 }
3852 else
3853 {
3854 validateFunc(outputInfo, isSupported);
3855 }
3856
3857 if (!isSupported)
3858 {
3859 return false;
3860 }
3861
3862 IConnectableLayer* const layer = data.m_Network->AddPreluLayer();
Cathal Corbett53837672022-09-01 11:34:37 +01003863 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01003864
3865 if (!layer)
3866 {
3867 return Fail("%s: AddPreluLayer failed", __func__);
3868 }
3869
3870 bool isReshapeSupported = BroadcastTensor(input, alpha, layer, data);
3871 if (!isReshapeSupported)
3872 {
3873 return false;
3874 }
3875
3876 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3877}
3878
3879bool Converter::ConvertQuantize(const Operation& operation, const Model& model, ConversionData& data)
3880{
3881 VLOG(DRIVER) << "Converter::ConvertQuantize()";
3882
3883 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3884 if (!input.IsValid())
3885 {
3886 return Fail("%s: Operation has invalid input", __func__);
3887 }
3888
3889 const Operand* const outputOperand = GetOutputOperand(operation, 0, model);
3890 if (!outputOperand)
3891 {
3892 return Fail("%s: Operation has invalid outputs", __func__);
3893 }
3894
3895 const TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
3896
3897 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01003898 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01003899 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
3900 {
3901 FORWARD_LAYER_SUPPORT_FUNC(__func__,
3902 IsQuantizeSupported,
3903 data.m_Backends,
3904 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01003905 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01003906 input.GetTensorInfo(),
3907 outputInfo);
3908 };
3909
3910 if(IsDynamicTensor(outputInfo))
3911 {
3912 isSupported = AreDynamicTensorsSupported();
3913 }
3914 else
3915 {
3916 validateFunc(outputInfo, isSupported);
3917 }
3918
3919 if (!isSupported)
3920 {
3921 return false;
3922 }
3923
3924 IConnectableLayer* const layer = data.m_Network->AddQuantizeLayer();
Cathal Corbett53837672022-09-01 11:34:37 +01003925 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01003926 assert(layer != nullptr);
3927 input.Connect(layer->GetInputSlot(0));
3928
3929 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
3930}
3931
3932bool Converter::ConvertQuantizedLstm(const Operation& operation, const Model& model, ConversionData& data)
3933{
3934 VLOG(DRIVER) << "Converter::ConvertQuantizedLstm()";
3935
3936 VLOG(DRIVER) << "ConvertQuantizedLstm()";
3937
3938 //Inputs:
3939 // 0: The input: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, inputSize]
3940 // specifying the input to the LSTM cell. Tensor is quantized with a fixed quantization range of -1, 127/128.
3941 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
3942 if (!input.IsValid())
3943 {
3944 return Fail("%s: Could not read input 0: input", __func__);
3945 }
3946
3947 // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_ASYMM, of shape [batch_size, output_size].
3948 LayerInputHandle outputStatePrevTimeStep = ConvertToLayerInputHandle(operation, 18, model, data);
3949 if (!outputStatePrevTimeStep.IsValid())
3950 {
3951 return Fail("%s: Could not read input 18: outputStatePrevTimeStep", __func__);
3952 }
3953
3954 // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape [batch_size, num_units].
3955 LayerInputHandle cellStatePrevTimeStep = ConvertToLayerInputHandle(operation, 19, model, data);
3956 if (!cellStatePrevTimeStep.IsValid())
3957 {
3958 return Fail("%s: Could not read input 19: cellStatePrevTimeStep", __func__);
3959 }
3960
3961 // Get the mandatory input tensors:
3962
3963 // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3964 // [num_units, input_size].
3965 const ConstTensorPin inputToForgetWeightsPin =
3966 ConvertOperationInputToConstTensorPin(operation, 2, model, data);
3967
3968 // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3969 // [num_units, input_size].
3970 const ConstTensorPin inputToCellWeightsPin =
3971 ConvertOperationInputToConstTensorPin(operation, 3, model, data);
3972
3973 // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3974 // [num_units, input_size].
3975 const ConstTensorPin inputToOutputWeightsPin =
3976 ConvertOperationInputToConstTensorPin(operation, 4, model, data);
3977
3978 // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3979 // [num_units, output_size].
3980 const ConstTensorPin recurrentToForgetWeightsPin =
3981 ConvertOperationInputToConstTensorPin(operation, 6, model, data);
3982
3983 // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3984 // [num_units, output_size].
3985 const ConstTensorPin recurrentToCellWeightsPin =
3986 ConvertOperationInputToConstTensorPin(operation, 7, model, data);
3987
3988 // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
3989 // [num_units, output_size].
3990 const ConstTensorPin recurrentToOutputWeightsPin =
3991 ConvertOperationInputToConstTensorPin(operation, 8, model, data);
3992
3993 // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [num_units].
3994 const ConstTensorPin forgetGateBiasPin =
3995 ConvertOperationInputToConstTensorPin(operation, 13, model, data);
3996
3997 // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [num_units].
3998 const ConstTensorPin cellBiasPin =
3999 ConvertOperationInputToConstTensorPin(operation, 14, model, data);
4000
4001 // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [num_units].
4002 const ConstTensorPin outputGateBiasPin =
4003 ConvertOperationInputToConstTensorPin(operation, 15, model, data);
4004
4005 if (!inputToForgetWeightsPin.IsValid() ||
4006 !inputToCellWeightsPin.IsValid() ||
4007 !inputToOutputWeightsPin.IsValid() ||
4008 !recurrentToForgetWeightsPin.IsValid() ||
4009 !recurrentToCellWeightsPin.IsValid() ||
4010 !recurrentToOutputWeightsPin.IsValid() ||
4011 !forgetGateBiasPin.IsValid() ||
4012 !cellBiasPin.IsValid() ||
4013 !outputGateBiasPin.IsValid())
4014 {
4015 return Fail("%s: Operation has invalid tensor inputs", __func__);
4016 }
4017
4018 // Get the optional input tensors:
4019
4020 // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
4021 // [num_units, input_size], where “num_units” corresponds to the number of cell units.
4022 const ConstTensorPin inputToInputWeightsPin =
4023 ConvertOperationInputToConstTensorPin(operation,
4024 1,
4025 model,
4026 data,
4027 g_DontPermute,
4028 nullptr,
4029 true);
4030
4031 // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
4032 // [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
4033 // “num_units”), or the second dimension of the “projection_weights”, if defined.
4034 const ConstTensorPin recurrentToInputWeightsPin =
4035 ConvertOperationInputToConstTensorPin(operation,
4036 5,
4037 model,
4038 data,
4039 g_DontPermute,
4040 nullptr,
4041 true);
4042
4043 // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape
4044 // [num_units].
4045 const ConstTensorPin cellToInputWeightsPin =
4046 ConvertOperationInputToConstTensorPin(operation,
4047 9,
4048 model,
4049 data,
4050 g_DontPermute,
4051 nullptr,
4052 true);
4053
4054 // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape
4055 // [num_units].
4056 const ConstTensorPin cellToForgetWeightsPin =
4057 ConvertOperationInputToConstTensorPin(operation,
4058 10,
4059 model,
4060 data,
4061 g_DontPermute,
4062 nullptr,
4063 true);
4064
4065 // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape
4066 // [num_units].
4067 const ConstTensorPin cellToOutputWeightsPin =
4068 ConvertOperationInputToConstTensorPin(operation,
4069 11,
4070 model,
4071 data,
4072 g_DontPermute,
4073 nullptr,
4074 true);
4075
4076 // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [num_units].
4077 const ConstTensorPin inputGateBiasPin =
4078 ConvertOperationInputToConstTensorPin(operation,
4079 12,
4080 model,
4081 data,
4082 g_DontPermute,
4083 nullptr,
4084 true);
4085
4086 // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_SYMM, of shape
4087 // [output_size, num_units].
4088 const ConstTensorPin projectionWeightsPin =
4089 ConvertOperationInputToConstTensorPin(operation,
4090 16,
4091 model,
4092 data,
4093 g_DontPermute,
4094 nullptr,
4095 true);
4096
4097 // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_INT32, of shape [output_size].
4098 const ConstTensorPin projectionBiasPin =
4099 ConvertOperationInputToConstTensorPin(operation,
4100 17,
4101 model,
4102 data,
4103 g_DontPermute,
4104 nullptr,
4105 true);
4106
4107 if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional())
4108 || (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional())
4109 || (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional())
4110 || (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional())
4111 || (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional())
4112 || (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional())
4113 || (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional())
4114 || (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
4115 {
4116 return Fail("%s: Operation has invalid tensor inputs", __func__);
4117 }
4118
4119
4120 // Get the optional normalization tensors
4121
4122 // 20: The input layer normalization weights. A 1-D tensor of shape [num_units] ANEURALNETWORKS_TENSOR_QUANT16_SYMM.
4123 // Used to rescale normalized inputs to activation at input gate.
4124 const ConstTensorPin inputLayerNormWeightsPin =
4125 ConvertOperationInputToConstTensorPin(operation,
4126 20,
4127 model,
4128 data,
4129 g_DontPermute,
4130 nullptr,
4131 true);
4132
4133 // 21: The forget layer normalization weights. A 1-D tensor of shape [num_units] ANEURALNETWORKS_TENSOR_QUANT16_SYMM
4134 // Used to rescale normalized inputs to activation at forget gate.
4135 const ConstTensorPin forgetLayerNormWeightsPin =
4136 ConvertOperationInputToConstTensorPin(operation,
4137 21,
4138 model,
4139 data,
4140 g_DontPermute,
4141 nullptr,
4142 true);
4143
4144 // 22: The cell layer normalization weights. A 1-D tensor of shape [num_units] ANEURALNETWORKS_TENSOR_QUANT16_SYMM.
4145 // Used to rescale normalized inputs to activation at cell gate.
4146 const ConstTensorPin cellLayerNormWeightsPin =
4147 ConvertOperationInputToConstTensorPin(operation,
4148 22,
4149 model,
4150 data,
4151 g_DontPermute,
4152 nullptr,
4153 true);
4154
4155 // 23: The output layer normalization weights. A 1-D tensor of shape [num_units].
4156 // Used to rescale normalized inputs to activation at output gate.
4157 const ConstTensorPin outputLayerNormWeightsPin =
4158 ConvertOperationInputToConstTensorPin(operation,
4159 23,
4160 model,
4161 data,
4162 g_DontPermute,
4163 nullptr,
4164 true);
4165
4166 if ((!inputLayerNormWeightsPin.IsValid() && !inputLayerNormWeightsPin.IsOptional())
4167 || (!forgetLayerNormWeightsPin.IsValid() && !forgetLayerNormWeightsPin.IsOptional())
4168 || (!cellLayerNormWeightsPin.IsValid() && !cellLayerNormWeightsPin.IsOptional())
4169 || (!outputLayerNormWeightsPin.IsValid() && !outputLayerNormWeightsPin.IsOptional()))
4170 {
4171 return Fail("%s: Operation has invalid tensor inputs", __func__);
4172 }
4173
4174 // Get the optional input scalars:
4175 // 24: The cell clip: If provided the cell state is clipped by this value prior to the cell output activation.
4176 // 25: The projection clip: If provided and projection is enabled, this is used for clipping the projected values.
4177
4178 // Get the mandatory input scalars:
4179 // 26: The scale of the intermediate result of matmul, i.e. input to layer normalization, at input gate.
4180 // 27: The scale of the intermediate result of matmul, i.e. input to layer normalization, at forget gate.
4181 // 28: The scale of the intermediate result of matmul, i.e. input to layer normalization, at cell gate.
4182 // 29: The scale of the intermediate result of matmul, i.e. input to layer normalization, at output gate.
4183 // 30: The zero point of the hidden state, i.e. input to projection.
4184 // 31: The scale of the hidden state, i.e. input to projection.
4185 float cellClip, projClip, matMulInputGate, matMulForgetGate, matMulCellGate, matMulOutputGate, projInputScale;
4186 int projInputZeroPoint;
4187
4188 if (!GetInputScalar(operation, 24, OperandType::FLOAT32, cellClip, model, data, true) ||
4189 !GetInputScalar(operation, 25, OperandType::FLOAT32, projClip, model, data, true) ||
4190 !GetInputScalar(operation, 26, OperandType::FLOAT32, matMulInputGate, model, data) ||
4191 !GetInputScalar(operation, 27, OperandType::FLOAT32, matMulForgetGate, model, data) ||
4192 !GetInputScalar(operation, 28, OperandType::FLOAT32, matMulCellGate, model, data) ||
4193 !GetInputScalar(operation, 29, OperandType::FLOAT32, matMulOutputGate, model, data) ||
4194 !GetInputScalar(operation, 30, OperandType::INT32, projInputZeroPoint, model, data) ||
4195 !GetInputScalar(operation, 31, OperandType::FLOAT32, projInputScale, model, data))
4196 {
4197 return Fail("%s: Operation has invalid scalar inputs", __func__);
4198 }
4199
4200 // Outputs:
4201 // 0: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, of shape [batch_size,
4202 // output_size].
4203 const Operand* outputStateOut = GetOutputOperand(operation, 0, model);
4204 if (!outputStateOut)
4205 {
4206 return Fail("%s: Could not read output 0: outputStateOut", __func__);
4207 }
4208
4209 // 1: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT16_SYMM, of shape [batch_size, num_units].
4210 const Operand* cellStateOut = GetOutputOperand(operation, 1, model);
4211 if (!cellStateOut)
4212 {
4213 return Fail("%s: Could not read output 1: cellStateOut", __func__);
4214 }
4215
4216 // 2: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_QUANT8_ASYMM_SIGNED, of shape [batch_size, output_size].
4217 // This is effectively the same as the current “output state (out)” value.
4218 const Operand* output = GetOutputOperand(operation, 2, model);
4219 if (!output)
4220 {
4221 return Fail("%s: Could not read output 2: output", __func__);
4222 }
4223
4224 // set the params structure for the AddLstmLayer call
4225 LstmInputParams params;
4226 params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
4227 params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
4228 params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
4229 params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
4230 params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
4231 params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
4232 params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
4233 params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
4234 params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
4235 params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
4236 params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
4237 params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
4238 params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
4239 params.m_CellBias = cellBiasPin.GetConstTensorPtr();
4240 params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
4241 params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
4242 params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();
4243 params.m_InputLayerNormWeights = inputLayerNormWeightsPin.GetConstTensorPtr();
4244 params.m_ForgetLayerNormWeights = forgetLayerNormWeightsPin.GetConstTensorPtr();
4245 params.m_CellLayerNormWeights = cellLayerNormWeightsPin.GetConstTensorPtr();
4246 params.m_OutputLayerNormWeights = outputLayerNormWeightsPin.GetConstTensorPtr();
4247
4248 // set the layer descriptor
4249 QLstmDescriptor desc;
4250 desc.m_CellClip = cellClip;
4251 desc.m_ProjectionClip = projClip;
4252 desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
4253 params.m_RecurrentToInputWeights == nullptr ||
4254 params.m_InputGateBias == nullptr);
4255 desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
4256 params.m_CellToOutputWeights != nullptr);
4257 desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);
4258 desc.m_LayerNormEnabled = (params.m_InputLayerNormWeights != nullptr ||
4259 params.m_ForgetLayerNormWeights != nullptr ||
4260 params.m_CellLayerNormWeights != nullptr ||
4261 params.m_OutputLayerNormWeights != nullptr);
4262 desc.m_InputIntermediateScale = matMulInputGate;
4263 desc.m_ForgetIntermediateScale = matMulForgetGate;
4264 desc.m_CellIntermediateScale = matMulCellGate;
4265 desc.m_OutputIntermediateScale = matMulOutputGate;
4266 desc.m_HiddenStateScale = projInputScale;
4267 desc.m_HiddenStateZeroPoint = projInputZeroPoint;
4268
4269 // validate the optional input groups
4270 if (desc.m_CifgEnabled &&
4271 (params.m_InputToInputWeights != nullptr ||
4272 params.m_RecurrentToInputWeights != nullptr ||
4273 params.m_InputGateBias != nullptr))
4274 {
4275 return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
4276 " and input gate bias must be provided", __func__);
4277 }
4278
4279 if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
4280 {
4281 return Fail("%s: projection bias should not be provided without projection weights", __func__);
4282 }
4283
4284 if (desc.m_PeepholeEnabled &&
4285 (params.m_CellToForgetWeights == nullptr ||
4286 params.m_CellToOutputWeights == nullptr ||
4287 (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
4288 {
4289 return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
4290 " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
4291 }
4292
4293 if (desc.m_LayerNormEnabled &&
4294 (params.m_ForgetLayerNormWeights == nullptr ||
4295 params.m_CellLayerNormWeights == nullptr ||
4296 params.m_OutputLayerNormWeights == nullptr ||
4297 (!desc.m_CifgEnabled && params.m_InputLayerNormWeights == nullptr)))
4298 {
4299 return Fail("%s: All, or none, of forget-norm weights, cell-norm weights and output-norm weights must be"
4300 " provided and, if CIFG is not enabled, input-norm weights must also be provided", __func__);
4301 }
4302
4303 // Basic parameters
4304 LstmInputParamsInfo paramsInfo;
4305 paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
4306 paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
4307 paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
4308 paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
4309 paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
4310 paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
4311 paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
4312 paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
4313 paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());
4314
4315 // Inputs
4316 const TensorInfo& inputInfo = input.GetTensorInfo();
4317 const TensorInfo& outputStatePrevTimeStepInfo = outputStatePrevTimeStep.GetTensorInfo();
4318 const TensorInfo& cellStatePrevTimeStepInfo = cellStatePrevTimeStep.GetTensorInfo();
4319
4320 // Outputs
4321 TensorInfo outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
4322 TensorInfo outputInfo = GetTensorInfoForOperand(*output);
4323 const TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
4324
4325 // Optional parameters
4326 if (!desc.m_CifgEnabled)
4327 {
4328 paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
4329 paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
4330 if (desc.m_PeepholeEnabled)
4331 {
4332 paramsInfo.m_CellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
4333 }
4334 paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
4335 }
4336
4337
4338 if (desc.m_ProjectionEnabled)
4339 {
4340 paramsInfo.m_ProjectionWeights = &(params.m_ProjectionWeights->GetInfo());
4341 if (params.m_ProjectionBias != nullptr)
4342 {
4343 paramsInfo.m_ProjectionBias = &(params.m_ProjectionBias->GetInfo());
4344 }
4345 }
4346 else
4347 {
4348 // If Projection is disabled, override non-const outputs to change the quant info with hidden params, then
4349 // create a new const TensorInfo based on this
4350 outputStateOutInfo.SetQuantizationScale(projInputScale);
4351 outputStateOutInfo.SetQuantizationOffset(projInputZeroPoint);
4352 outputInfo.SetQuantizationScale(projInputScale);
4353 outputInfo.SetQuantizationOffset(projInputZeroPoint);
4354 }
4355
4356 const TensorInfo constOutputStateOutInfo(outputStateOutInfo);
4357 const TensorInfo constOutputInfo(outputInfo);
4358
4359 if (desc.m_PeepholeEnabled)
4360 {
4361 paramsInfo.m_CellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
4362 paramsInfo.m_CellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
4363 }
4364
4365 if (desc.m_LayerNormEnabled)
4366 {
4367 if(!desc.m_CifgEnabled)
4368 {
4369 paramsInfo.m_InputLayerNormWeights = &(params.m_InputLayerNormWeights->GetInfo());
4370 }
4371 paramsInfo.m_ForgetLayerNormWeights = &(params.m_ForgetLayerNormWeights->GetInfo());
4372 paramsInfo.m_CellLayerNormWeights = &(params.m_CellLayerNormWeights->GetInfo());
4373 paramsInfo.m_OutputLayerNormWeights = &(params.m_OutputLayerNormWeights->GetInfo());
4374 }
4375
4376 // Check if the layer is supported
4377 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01004378 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01004379 auto validateFunc = [&](const armnn::TensorInfo& cellStateOutInfo, bool& isSupported)
4380 {
4381 FORWARD_LAYER_SUPPORT_FUNC(__func__,
4382 IsQLstmSupported,
4383 data.m_Backends,
4384 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01004385 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01004386 inputInfo,
4387 outputStatePrevTimeStepInfo,
4388 cellStatePrevTimeStepInfo,
4389 constOutputStateOutInfo,
4390 cellStateOutInfo,
4391 constOutputInfo,
4392 desc,
4393 paramsInfo);
4394 };
4395
4396 bool isDynamic = false;
4397 if (!IsDynamicTensor(constOutputStateOutInfo) &&
4398 !IsDynamicTensor(cellStateOutInfo) &&
4399 !IsDynamicTensor(constOutputInfo))
4400 {
4401 validateFunc(outputInfo, isSupported);
4402 }
4403 else
4404 {
4405 isDynamic = true;
4406 isSupported = AreDynamicTensorsSupported();
4407 }
4408
4409 if (!isSupported)
4410 {
4411 return false;
4412 }
4413
4414 // Add the layer
4415 IConnectableLayer* layer = data.m_Network->AddQLstmLayer(desc, params, "QLstm");
Cathal Corbett53837672022-09-01 11:34:37 +01004416 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01004417
4418 input.Connect(layer->GetInputSlot(0));
4419 outputStatePrevTimeStep.Connect(layer->GetInputSlot(1));
4420 cellStatePrevTimeStep.Connect(layer->GetInputSlot(2));
4421
4422 if (!isDynamic)
4423 {
4424 return ( SetupAndTrackLayerOutputSlot(
4425 operation, 0, *layer, 0, model, data, &constOutputStateOutInfo) &&
4426 SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data) &&
4427 SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data, &constOutputInfo));
4428 }
4429 else
4430 {
4431 return ( SetupAndTrackLayerOutputSlot(
4432 operation, 0, *layer, 0, model, data, &constOutputStateOutInfo) &&
4433 SetupAndTrackLayerOutputSlot(
4434 operation, 1, *layer, 1, model, data, nullptr, validateFunc,
4435 ActivationFn::kActivationNone, true) &&
4436 SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2, model, data, &constOutputInfo));
4437 }
4438}
4439
// Converts an ANEURALNETWORKS_QUANTIZED_16BIT_LSTM operation into an ArmNN QuantizedLstm layer.
// This LSTM variant takes 12 constant weight/bias tensors, all of which are required (see the
// validity check below). Returns true on success; on failure reports the reason via Fail().
bool Converter::ConvertQuantized16BitLstm(const Operation& operation, const Model& model, ConversionData& data)
{
    VLOG(DRIVER) << "Converter::ConvertQuantized16BitLstm()";
    VLOG(DRIVER) << "Policy::ConvertQuantized16BitLstm()";

    //Inputs:
    // 0: The input: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, inputSize]
    //    specifying the input to the LSTM cell. Tensor is quantized with a fixed quantization range of -1, 127/128.
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0: input", __func__);
    }

    //13: The previous cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape
    //    [numBatches, outputSize] specifying the cell state from the previous time step of the LSTM cell.
    //    It is quantized using a quantization range of -2^4, 2^4 * 32767/32768.
    LayerInputHandle previousCellStateIn = ConvertToLayerInputHandle(operation, 13, model, data);
    if (!previousCellStateIn.IsValid())
    {
        return Fail("%s: Could not read input 13: previousCellStateIn", __func__);
    }

    // 14: The previous output state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //     [numBatches, outputSize] specifying the output of the LSTM cell from previous time-step. Tensor
    //     is quantized with a fixed quantization range of -1, 127/128.
    LayerInputHandle previousOutputIn = ConvertToLayerInputHandle(operation, 14, model, data);
    if (!previousOutputIn.IsValid())
    {
        return Fail("%s: Could not read input 14: previousOutputIn", __func__);
    }

    // Get the input tensors:
    // 1: The input-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-input part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    const ConstTensorPin inputToInputWeightsPin =
        ConvertOperationInputToConstTensorPin(operation, 1, model, data);

    // 2: The input-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-forget part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    const ConstTensorPin inputToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin(operation, 2, model, data);

    // 3: The input-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-cell part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    const ConstTensorPin inputToCellWeightsPin =
        ConvertOperationInputToConstTensorPin(operation, 3, model, data);

    // 4: The input-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, inputSize] specifying input-to-output part of weights for fully-connected layer inside the
    //    LSTM cell. Quantization zero point and scale must be the same across all the weights.
    const ConstTensorPin inputToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin(operation, 4, model, data);

    // 5: The recurrent-to-input weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-input part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    const ConstTensorPin recurrentToInputWeightsPin =
        ConvertOperationInputToConstTensorPin(operation, 5, model, data);

    // 6: The recurrent-to-forget weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-forget part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    const ConstTensorPin recurrentToForgetWeightsPin =
        ConvertOperationInputToConstTensorPin(operation, 6, model, data);

    // 7: The recurrent-to-cell weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-cell part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    const ConstTensorPin recurrentToCellWeightsPin =
        ConvertOperationInputToConstTensorPin(operation, 7, model, data);

    // 8: The recurrent-to-output weights. A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape
    //    [outputSize, outputSize] specifying recurrent-to-output part of weights for fully-connected layer inside
    //    the LSTM cell. Quantization zero point and scale must be the same across all the weights.
    const ConstTensorPin recurrentToOutputWeightsPin =
        ConvertOperationInputToConstTensorPin(operation, 8, model, data);

    // 9: The input gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the
    //    bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
    //    of input and weights scales and zeroPoint equal to 0.
    const ConstTensorPin inputGateBiasPin =
        ConvertOperationInputToConstTensorPin(operation, 9, model, data);

    // 10: The forget gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
    //     the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
    //     of input and weights scales and zeroPoint equal to 0.
    const ConstTensorPin forgetGateBiasPin =
        ConvertOperationInputToConstTensorPin(operation, 10, model, data);

    // 11:The cell bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying the bias
    //    for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product of input
    //    and weights scales and zeroPoint equal to 0.
    const ConstTensorPin cellBiasPin =
        ConvertOperationInputToConstTensorPin(operation, 11, model, data);

    // 12:The output gate bias. A 1-D tensor of type ANEURALNETWORKS_TENSOR_INT32 and shape [outputSize] specifying
    //    the bias for the fully-connected layer inside the LSTM cell. Bias is quantized with scale being a product
    //    of input and weights scales and zeroPoint equal to 0.
    const ConstTensorPin outputGateBiasPin =
        ConvertOperationInputToConstTensorPin(operation, 12, model, data);

    // All 12 constant tensors are mandatory for this operation; reject the model if any is missing.
    if (!inputToInputWeightsPin.IsValid() ||
        !inputToForgetWeightsPin.IsValid() ||
        !inputToCellWeightsPin.IsValid() ||
        !inputToOutputWeightsPin.IsValid() ||
        !recurrentToInputWeightsPin.IsValid() ||
        !recurrentToForgetWeightsPin.IsValid() ||
        !recurrentToCellWeightsPin.IsValid() ||
        !recurrentToOutputWeightsPin.IsValid() ||
        !inputGateBiasPin.IsValid() ||
        !forgetGateBiasPin.IsValid() ||
        !cellBiasPin.IsValid() ||
        !outputGateBiasPin.IsValid())
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Outputs:
    // 0: The cell state: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT16_SYMM and shape [numBatches, outputSize]
    //    which contains a cell state from the current time step. Tensor is quantized using a quantization range
    //    of -2^4, 2^4 * 32767/32768.
    const Operand* cellStateOut = GetOutputOperand(operation, 0, model);
    if (!cellStateOut)
    {
        return Fail("%s: Could not read output 0: cellStateOut", __func__);
    }

    // 1: The output: A 2-D tensor of type ANEURALNETWORKS_TENSOR_QUANT8_ASYMM and shape [numBatches, outputSize] which
    //    contains the output value. Tensor is quantized with a fixed quantization range of -1, 127/128.
    const Operand* output = GetOutputOperand(operation, 1, model);
    if (!output)
    {
        return Fail("%s: Could not read output 1: output", __func__);
    }

    // Inputs
    const TensorInfo& inputInfo = input.GetTensorInfo();
    const TensorInfo& previousCellStateInInfo = previousCellStateIn.GetTensorInfo();
    const TensorInfo& previousOutputInInfo = previousOutputIn.GetTensorInfo();

    // Outputs
    const TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
    const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Dynamic tensors currently not supported
    if (IsDynamicTensor(cellStateOutInfo) || IsDynamicTensor(outputInfo))
    {
        return Fail("%s: Dynamic output tensors are not supported", __func__);
    }

    // Gather the constant tensors into the structure consumed by AddQuantizedLstmLayer.
    QuantizedLstmInputParams params;

    params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
    params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
    params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
    params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
    params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
    params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
    params.m_CellBias = cellBiasPin.GetConstTensorPtr();
    params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();

    // Mirror of 'params' holding TensorInfo pointers, used only for the backend support query.
    QuantizedLstmInputParamsInfo paramsInfo;
    paramsInfo.m_InputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
    paramsInfo.m_InputToForgetWeights = &(params.m_InputToForgetWeights->GetInfo());
    paramsInfo.m_InputToCellWeights = &(params.m_InputToCellWeights->GetInfo());
    paramsInfo.m_InputToOutputWeights = &(params.m_InputToOutputWeights->GetInfo());
    paramsInfo.m_RecurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
    paramsInfo.m_RecurrentToForgetWeights = &(params.m_RecurrentToForgetWeights->GetInfo());
    paramsInfo.m_RecurrentToCellWeights = &(params.m_RecurrentToCellWeights->GetInfo());
    paramsInfo.m_RecurrentToOutputWeights = &(params.m_RecurrentToOutputWeights->GetInfo());
    paramsInfo.m_InputGateBias = &(params.m_InputGateBias->GetInfo());
    paramsInfo.m_ForgetGateBias = &(params.m_ForgetGateBias->GetInfo());
    paramsInfo.m_CellBias = &(params.m_CellBias->GetInfo());
    paramsInfo.m_OutputGateBias = &(params.m_OutputGateBias->GetInfo());

    bool isSupported = false;
    armnn::BackendId setBackend;
    auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
    {
        FORWARD_LAYER_SUPPORT_FUNC(__func__,
                                   IsQuantizedLstmSupported,
                                   data.m_Backends,
                                   isSupported,
                                   setBackend,
                                   inputInfo,
                                   previousCellStateInInfo,
                                   previousOutputInInfo,
                                   cellStateOutInfo,
                                   outputInfo,
                                   paramsInfo);
    };

    // NOTE(review): dynamic output tensors were already rejected above, so the 'else' branch below
    // is currently unreachable and isDynamic is always false here — presumably kept for structural
    // parity with the other converters in this file; confirm before relying on the dynamic path.
    bool isDynamic = false;
    if (!IsDynamicTensor(cellStateOutInfo) &&
        !IsDynamicTensor(outputInfo))
    {
        validateFunc(outputInfo, isSupported);
    }
    else
    {
        isDynamic = true;
        isSupported = AreDynamicTensorsSupported();
    }

    if (!isSupported)
    {
        return false;
    }

    IConnectableLayer* const layer = data.m_Network->AddQuantizedLstmLayer(params, "QuantizedLstm");
    layer->SetBackendId(setBackend);
    input.Connect(layer->GetInputSlot(0));
    previousCellStateIn.Connect(layer->GetInputSlot(1));
    previousOutputIn.Connect(layer->GetInputSlot(2));

    if (!isDynamic)
    {
        return (SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
                SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1, model, data));
    }
    else
    {
        return (SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0, model, data) &&
                SetupAndTrackLayerOutputSlot(
                    operation, 1, *layer, 1, model, data, nullptr, validateFunc, ActivationFn::kActivationNone, true));
    }

}
4676
4677bool Converter::ConvertRank(const Operation& operation, const Model& model, ConversionData& data)
4678{
4679 VLOG(DRIVER) << "Converter::ConvertRank()";
4680
4681 const Operand* inputOperand = GetInputOperand(operation, 0, model);
4682 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
4683
4684 if (inputOperand == nullptr || outputOperand == nullptr)
4685 {
4686 return Fail("%s: Operation has invalid inputs", __func__);
4687 }
4688
4689 const Shape inputOperandShape = GetOperandShape(*inputOperand);
4690 const Shape outputOperandShape = GetOperandShape(*outputOperand);
4691
4692 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
4693 if (!input.IsValid())
4694 {
4695 return Fail("%s: Could not read input 0", __func__);
4696 }
4697
4698 armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
4699 if (IsDynamicTensor(outInfo))
4700 {
4701 return Fail("%s: Dynamic output tensors are not supported", __func__);
4702 }
4703
4704 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01004705 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01004706 FORWARD_LAYER_SUPPORT_FUNC(__func__,
4707 IsRankSupported,
4708 data.m_Backends,
4709 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01004710 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01004711 input.GetTensorInfo(),
4712 outInfo);
4713 if (!isSupported)
4714 {
4715 return false;
4716 }
4717
4718 armnn::IConnectableLayer* layer = data.m_Network->AddRankLayer();
Cathal Corbett53837672022-09-01 11:34:37 +01004719 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01004720 assert(layer != nullptr);
4721 input.Connect(layer->GetInputSlot(0));
4722
4723 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, &outInfo);
4724}
4725
4726bool Converter::ConvertReLu(const Operation& operation, const Model& model, ConversionData& data)
4727{
4728 VLOG(DRIVER) << "Converter::ConvertReLu()";
4729 armnn::ActivationDescriptor desc;
4730 desc.m_Function = armnn::ActivationFunction::ReLu;
4731
4732
4733 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
4734 if (!input.IsValid())
4735 {
4736 return Fail("%s: Input 0 is invalid", "operationName");
4737 }
4738
4739 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
4740 if (!outputOperand)
4741 {
4742 return false;
4743 }
4744
4745 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
4746
4747 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01004748 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01004749 auto validateFunc = [&](const armnn::TensorInfo& outInfo, bool& isSupported)
4750 {
4751 FORWARD_LAYER_SUPPORT_FUNC(__func__,
4752 IsActivationSupported,
4753 data.m_Backends,
4754 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01004755 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01004756 input.GetTensorInfo(),
4757 outInfo,
4758 desc);
4759 };
4760
4761 if(IsDynamicTensor(outInfo))
4762 {
4763 isSupported = AreDynamicTensorsSupported();
4764 }
4765 else
4766 {
4767 validateFunc(outInfo, isSupported);
4768 }
4769
4770 if (!isSupported)
4771 {
4772 return false;
4773 }
4774
4775 armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(desc);
Cathal Corbett53837672022-09-01 11:34:37 +01004776 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01004777 ARMNN_ASSERT(layer != nullptr);
4778 input.Connect(layer->GetInputSlot(0));
4779
4780 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
4781}
4782
4783bool Converter::ConvertReLu1(const Operation& operation, const Model& model, ConversionData& data)
4784{
4785 VLOG(DRIVER) << "Converter::ConvertReLu1()";
4786 armnn::ActivationDescriptor desc;
4787 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
4788 desc.m_A = 1.0f;
4789 desc.m_B = -1.0f;
4790
4791 return ConvertToActivation(operation, __func__, desc, model, data);
4792}
4793
4794bool Converter::ConvertReLu6(const Operation& operation, const Model& model, ConversionData& data)
4795{
4796 VLOG(DRIVER) << "Converter::ConvertReLu6()";
4797 armnn::ActivationDescriptor desc;
4798 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
4799 desc.m_A = 6.0f;
4800
4801 return ConvertToActivation(operation, __func__, desc, model, data);
4802}
4803
4804bool Converter::ConvertReshape(const Operation& operation, const Model& model, ConversionData& data)
4805{
4806 VLOG(DRIVER) << "Converter::ConvertReshape()";
4807
4808 const Operand* inputOperand = GetInputOperand(operation, 0, model);
4809 const Operand* requestedShapeOperand = GetInputOperand(operation, 1, model);
4810 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
4811
4812 if (inputOperand == nullptr
4813 || requestedShapeOperand == nullptr
4814 || outputOperand == nullptr)
4815 {
4816 return Fail("%s: Operation has invalid inputs", __func__);
4817 }
4818
4819 if (requestedShapeOperand->dimensions.size() != 1)
4820 {
4821 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
4822 __func__, requestedShapeOperand->dimensions.size());
4823 }
4824
4825 std::vector<int32_t> targetDimensions;
4826 if (!GetTensorInt32Values(*requestedShapeOperand, targetDimensions, model, data))
4827 {
4828 return Fail("%s: Could not read values of input 1", __func__);
4829 }
4830
4831 const Shape inputOperandShape = GetOperandShape(*inputOperand);
4832
4833 Shape requestedShape;
4834 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
4835 // function that resolves these values into a fully specified tensor shape.
4836 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
4837 {
4838 return Fail("%s: Failed to resolve the requested shape", __func__);
4839 }
4840
4841 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
4842 if (!input.IsValid())
4843 {
4844 return Fail("%s: Could not read input 0", __func__);
4845 }
4846
4847 armnn::ReshapeDescriptor reshapeDescriptor;
4848 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
4849 requestedShape.dimensions.data());
4850
4851 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
4852
4853 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01004854 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01004855 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
4856 {
4857 FORWARD_LAYER_SUPPORT_FUNC(__func__,
4858 IsReshapeSupported,
4859 data.m_Backends,
4860 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01004861 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01004862 input.GetTensorInfo(),
4863 outputInfo,
4864 reshapeDescriptor);
4865 };
4866
4867 if(!IsDynamicTensor(outputInfo))
4868 {
4869 validateFunc(outputInfo, isSupported);
4870 }
4871 else
4872 {
4873 isSupported = AreDynamicTensorsSupported();
4874 }
4875
4876 if (!isSupported)
4877 {
4878 return false;
4879 }
4880
4881 armnn::IConnectableLayer* layer = data.m_Network->AddReshapeLayer(reshapeDescriptor);
Cathal Corbett53837672022-09-01 11:34:37 +01004882 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01004883 assert(layer != nullptr);
4884 input.Connect(layer->GetInputSlot(0));
4885
4886 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
4887}
4888
4889bool Converter::ConvertResize(const Operation& operation,
4890 const Model& model,
4891 ConversionData& data,
4892 ResizeMethod resizeMethod)
4893{
4894 VLOG(DRIVER) << "Converter::ConvertResize()";
4895 VLOG(DRIVER) << "resizeMethod = " << GetResizeMethodAsCString(resizeMethod);
4896
4897 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
4898 if (!input.IsValid())
4899 {
4900 return Fail("%s: Could not read input 0", __func__);
4901 }
4902
4903 const Operand* output = GetOutputOperand(operation, 0, model);
4904 if (!output)
4905 {
4906 return Fail("%s: Could not read output 0", __func__);
4907 }
4908
4909 const TensorInfo& inputInfo = input.GetTensorInfo();
4910 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
4911
4912 ResizeDescriptor descriptor;
4913 descriptor.m_Method = resizeMethod;
4914 descriptor.m_DataLayout = OptionalDataLayout(operation, 3, model, data);
4915
4916 OperandType operandType1;
4917 OperandType operandType2;
4918
4919 if (!GetOperandType(operation, 1, model, operandType1) ||
4920 !GetOperandType(operation, 2, model, operandType2))
4921 {
4922 return Fail("%s: Operation has invalid inputs", __func__);
4923 }
4924
4925 if (operandType1 != operandType2)
4926 {
4927 return Fail("%s: Operation has invalid inputs. Type of input 1 and 2 should be the same", __func__);
4928 }
4929
4930 if (operandType1 == OperandType::INT32)
4931 {
4932 // Case 1: resizing by shape
4933 int32_t targetWidth = 0;
4934 int32_t targetHeight = 0;
4935
4936 if (!GetInputInt32(operation, 1, targetWidth, model, data) ||
4937 !GetInputInt32(operation, 2, targetHeight, model, data))
4938 {
4939 return Fail("%s: Operation has invalid inputs for resizing by shape", __func__);
4940 }
4941
4942 if (targetWidth < 0 || targetHeight < 0)
4943 {
4944 return Fail("%s: Operation has invalid inputs for resizing by shape. "
4945 "Target width/height cannot be < 0", __func__);
4946 }
4947
4948 descriptor.m_TargetWidth = static_cast<uint32_t>(targetWidth);
4949 descriptor.m_TargetHeight = static_cast<uint32_t>(targetHeight);
4950 }
4951 else if (operandType1 == OperandType::FLOAT32)
4952 {
4953 // Case 2: resizing by scale
4954 float widthScale = 1.0f;
4955 float heightScale = 1.0f;
4956
4957 if (!GetInputFloat32(operation, 1, widthScale, model, data) ||
4958 !GetInputFloat32(operation, 2, heightScale, model, data))
4959 {
4960 return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
4961 }
4962
4963 const TensorShape& inputShape = inputInfo.GetShape();
4964 armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
4965
4966 float width = inputShape[dataLayoutIndexed.GetWidthIndex()];
4967 float height = inputShape[dataLayoutIndexed.GetHeightIndex()];
4968
4969 descriptor.m_TargetWidth = std::floor(width * widthScale);
4970 descriptor.m_TargetHeight = std::floor(height * heightScale);
4971 }
4972 else if (operandType1 == OperandType::FLOAT16)
4973 {
4974 Half widthScale;
4975 Half heightScale;
4976
4977 if (!GetInputScalar(operation, 1, OperandType::FLOAT16, widthScale, model, data) ||
4978 !GetInputScalar(operation, 2, OperandType::FLOAT16, heightScale, model, data))
4979 {
4980 return Fail("%s: Operation has invalid inputs for resizing by scale", __func__);
4981 }
4982
4983 const TensorShape& inputShape = inputInfo.GetShape();
4984 armnnUtils::DataLayoutIndexed dataLayoutIndexed(descriptor.m_DataLayout);
4985
4986 Half width = static_cast<Half>(inputShape[dataLayoutIndexed.GetWidthIndex()]);
4987 Half height = static_cast<Half>(inputShape[dataLayoutIndexed.GetHeightIndex()]);
4988
4989 descriptor.m_TargetWidth = std::floor(width * widthScale);
4990 descriptor.m_TargetHeight = std::floor(height * heightScale);
4991 }
4992 else
4993 {
4994 return Fail("%s: Operand has invalid data type for resizing by scale", __func__);
4995 }
4996
4997 descriptor.m_AlignCorners = GetOptionalBool(operation, 4, model, data);
4998 descriptor.m_HalfPixelCenters = GetOptionalBool(operation, 5, model, data);
4999
5000 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01005001 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01005002 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5003 {
5004 FORWARD_LAYER_SUPPORT_FUNC(__func__,
5005 IsResizeSupported,
5006 data.m_Backends,
5007 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01005008 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01005009 inputInfo,
5010 outputInfo,
5011 descriptor);
5012 };
5013
5014 if(IsDynamicTensor(outputInfo))
5015 {
5016 isSupported = AreDynamicTensorsSupported();
5017 }
5018 else
5019 {
5020 validateFunc(outputInfo, isSupported);
5021 }
5022
5023 if (!isSupported)
5024 {
5025 return false;
5026 }
5027
5028 IConnectableLayer* layer = data.m_Network->AddResizeLayer(descriptor);
Cathal Corbett53837672022-09-01 11:34:37 +01005029 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01005030 assert(layer != nullptr);
5031 input.Connect(layer->GetInputSlot(0));
5032
5033 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
5034}
5035
5036bool Converter::ConvertSpaceToBatchNd(const Operation& operation, const Model& model, ConversionData& data)
5037{
5038 VLOG(DRIVER) << "Converter::ConvertSpaceToBatchNd()";
5039
5040 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5041 if(!input.IsValid())
5042 {
5043 return Fail("%s: Operation has invalid inputs", __func__);
5044 }
5045
5046 const armnn::TensorInfo &inputInfo = input.GetTensorInfo();
5047 unsigned int rank = inputInfo.GetNumDimensions();
5048 unsigned int spatialDim = rank - 2;
5049
5050 if(rank != 4)
5051 {
5052 Fail("%s: Only inputs with rank 4 are supported", __func__);
5053 }
5054
5055 const Operand *output = GetOutputOperand(operation, 0, model);
5056 if(!output)
5057 {
5058 return Fail("%s: Could not read output 0", __func__);
5059 }
5060
5061 const armnn::TensorInfo &outputInfo = GetTensorInfoForOperand(*output);
5062
5063 const Operand *blockShapeOperand = GetInputOperand(operation, 1, model);
5064 const Operand *paddingsOperand = GetInputOperand(operation, 2, model);
5065
5066 armnn::TensorShape blockShapeOperandShape = GetTensorShapeForOperand(*blockShapeOperand);
5067 if(blockShapeOperandShape.GetNumDimensions() != 1 || blockShapeOperandShape.GetNumElements() != spatialDim)
5068 {
5069 return Fail("%s: Operation has invalid block shape operand: expected shape [%d]", __func__, spatialDim);
5070 }
5071
5072 std::vector<int32_t> blockShape;
5073 if(!GetTensorInt32Values(*blockShapeOperand, blockShape, model, data))
5074 {
5075 return Fail("%s: Operation has an invalid or unsupported block size operand", __func__);
5076 }
5077 if(std::any_of(blockShape.cbegin(), blockShape.cend(), [](int32_t i)
5078 { return i < 1; }))
5079 {
5080 return Fail("%s: Block shape must be at least 1 in all dimensions.", __func__);
5081 }
5082
5083 armnn::TensorShape paddingsOperandShape = GetTensorShapeForOperand(*paddingsOperand);
5084 if(paddingsOperandShape.GetNumDimensions() != 2 || paddingsOperandShape.GetNumElements() != 2 * spatialDim)
5085 {
5086 return Fail("%s: Operation has invalid paddings operand: expected shape [%d, 2]", __func__, spatialDim);
5087 }
5088
5089 std::vector<std::pair<unsigned int, unsigned int>> paddingList;
5090 std::vector<int32_t> paddings;
5091 if(!GetTensorInt32Values(*paddingsOperand, paddings, model, data))
5092 {
5093 return Fail("%s: Operation has an invalid or unsupported paddings operand", __func__);
5094 }
5095 for (unsigned int i = 0; i < paddings.size() - 1; i += 2)
5096 {
5097 int paddingBeforeInput = paddings[i];
5098 int paddingAfterInput = paddings[i + 1];
5099 if(paddingBeforeInput < 0 || paddingAfterInput < 0)
5100 {
5101 return Fail("%s: Operation has invalid paddings operand, invalid padding values.", __func__);
5102 }
5103
5104 paddingList.emplace_back((unsigned int) paddingBeforeInput, (unsigned int) paddingAfterInput);
5105 }
5106
5107 armnn::SpaceToBatchNdDescriptor descriptor;
5108 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
5109 descriptor.m_BlockShape.assign(blockShape.cbegin(), blockShape.cend());
5110 descriptor.m_PadList.assign(paddingList.cbegin(), paddingList.cend());
5111
5112 if(Is12OrLaterOperand(*output))
5113 {
5114 descriptor.m_DataLayout = OptionalDataLayout(operation, 3, model, data);
5115 }
5116
5117 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01005118 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01005119 auto validateFunc = [&](const armnn::TensorInfo &outputInfo, bool &isSupported)
5120 {
5121 FORWARD_LAYER_SUPPORT_FUNC(__func__,
5122 IsSpaceToBatchNdSupported,
5123 data.m_Backends,
5124 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01005125 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01005126 inputInfo,
5127 outputInfo,
5128 descriptor);
5129 };
5130
5131 if(IsDynamicTensor(outputInfo))
5132 {
5133 isSupported = AreDynamicTensorsSupported();
5134 } else
5135 {
5136 validateFunc(outputInfo, isSupported);
5137 }
5138
5139 if(!isSupported)
5140 {
5141 return false;
5142 }
5143
5144 armnn::IConnectableLayer *const layer = data.m_Network->AddSpaceToBatchNdLayer(descriptor);
Cathal Corbett53837672022-09-01 11:34:37 +01005145 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01005146 assert(layer != nullptr);
5147 input.Connect(layer->GetInputSlot(0));
5148
5149 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
5150}
5151
5152bool Converter::ConvertSpaceToDepth(const Operation& operation, const Model& model, ConversionData& data)
5153{
5154 VLOG(DRIVER) << "Converter::ConvertSpaceToDepth()";
5155
5156 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5157 if (!input.IsValid() )
5158 {
5159 return Fail("%s: Operation has invalid inputs", __func__);
5160 }
5161
5162 const TensorInfo& inputInfo = input.GetTensorInfo();
5163 unsigned int rank = inputInfo.GetNumDimensions();
5164 if (rank != 4)
5165 {
5166 return Fail("%s: Only inputs with rank 4 are supported", __func__);
5167 }
5168
5169 const Operand* output = GetOutputOperand(operation, 0, model);
5170 if (!output)
5171 {
5172 return Fail("%s: Could not read output 0", __func__);
5173 }
5174
5175 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
5176
5177 SpaceToDepthDescriptor desc;
5178
5179 GetInputScalar(operation, 1, OperandType::INT32, desc.m_BlockSize, model, data);
5180
5181 if (desc.m_BlockSize <= 1)
5182 {
5183 return Fail("%s: Block size must be at least 1 in all dimensions");
5184 }
5185
5186 desc.m_DataLayout = OptionalDataLayout(operation, 2, model, data);
5187
5188 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01005189 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01005190 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5191 {
5192 FORWARD_LAYER_SUPPORT_FUNC(__func__,
5193 IsSpaceToDepthSupported,
5194 data.m_Backends,
5195 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01005196 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01005197 inputInfo,
5198 outputInfo,
5199 desc);
5200 };
5201
5202 if(IsDynamicTensor(outputInfo))
5203 {
5204 isSupported = AreDynamicTensorsSupported();
5205 }
5206 else
5207 {
5208 validateFunc(outputInfo, isSupported);
5209 }
5210
5211 if (!isSupported)
5212 {
5213 return false;
5214 }
5215
5216 IConnectableLayer* const layer = data.m_Network->AddSpaceToDepthLayer(desc);
Cathal Corbett53837672022-09-01 11:34:37 +01005217 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01005218 assert(layer != nullptr);
5219 input.Connect(layer->GetInputSlot(0));
5220
5221 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
5222}
5223
5224bool Converter::ConvertSoftmax(const Operation& operation, const Model& model, ConversionData& data)
5225{
5226 VLOG(DRIVER) << "Converter::ConvertSoftmax()";
5227
5228 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5229 if (!input.IsValid())
5230 {
5231 return Fail("%s: Operation has invalid inputs", __func__);
5232 }
5233
5234 const Operand* outputOperand = GetOutputOperand(operation, 0, model);
5235 if (!outputOperand)
5236 {
5237 return Fail("%s: Operation has no outputs", __func__);
5238 }
5239
5240 const TensorInfo& outputInfo = GetTensorInfoForOperand(*outputOperand);
5241
5242 SoftmaxDescriptor desc;
5243 OperandType outputType = outputOperand->type;
5244
5245 // Read beta value
5246 if (outputType == OperandType::TENSOR_FLOAT16)
5247 {
5248 Half value;
5249
5250 if (!GetInputScalar(operation, 1, OperandType::FLOAT16, value, model, data))
5251 {
5252 return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
5253 }
5254
5255 desc.m_Beta = static_cast<float>(value);
5256 }
5257 else
5258 {
5259 if (!GetInputFloat32(operation, 1, desc.m_Beta, model, data))
5260 {
5261 return Fail("%s: Operation has invalid inputs %d", __func__, outputType);
5262 }
5263 }
5264
5265 if (operation.inputs.size() > 2 && !GetInputScalar(operation,
5266 2,
5267 OperandType::INT32,
5268 desc.m_Axis,
5269 model,
5270 data))
5271 {
5272 return Fail("%s: Operation has invalid inputs", __func__);
5273 }
5274
5275 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01005276 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01005277 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5278 {
5279 FORWARD_LAYER_SUPPORT_FUNC(__func__,
5280 IsSoftmaxSupported,
5281 data.m_Backends,
5282 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01005283 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01005284 input.GetTensorInfo(),
5285 outputInfo,
5286 desc);
5287 };
5288
5289 if(IsDynamicTensor(outputInfo))
5290 {
5291 isSupported = AreDynamicTensorsSupported();
5292 }
5293 else
5294 {
5295 validateFunc(outputInfo, isSupported);
5296 }
5297
5298 if (!isSupported)
5299 {
5300 return false;
5301 }
5302
5303 IConnectableLayer* layer = data.m_Network->AddSoftmaxLayer(desc);
Cathal Corbett53837672022-09-01 11:34:37 +01005304 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01005305 assert(layer != nullptr);
5306 input.Connect(layer->GetInputSlot(0));
5307
5308 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
5309}
5310
5311bool Converter::ConvertSub(const Operation& operation, const Model& model, ConversionData& data)
5312{
5313 VLOG(DRIVER) << "Converter::ConvertSub()";
5314
5315 LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0, model, data);
5316 LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1, model, data);
5317
5318 if (!input0.IsValid() || !input1.IsValid())
5319 {
5320 return Fail("%s: Operation has invalid inputs", __func__);
5321 }
5322
5323 // The FuseActivation parameter is always the input index 2
5324 // and it should be optional
5325 ActivationFn activationFunction;
5326 if (!GetOptionalInputActivation(operation, 2, activationFunction, model, data))
5327 {
5328 return Fail("%s: Operation has invalid inputs", __func__);
5329 }
5330
5331 const Operand* output = GetOutputOperand(operation, 0, model);
5332 if (!output)
5333 {
5334 return Fail("%s: Could not read output 0", __func__);
5335 }
5336
5337 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
5338
5339 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01005340 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01005341 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5342 {
5343 FORWARD_LAYER_SUPPORT_FUNC(__func__,
5344 IsSubtractionSupported,
5345 data.m_Backends,
5346 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01005347 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01005348 input0.GetTensorInfo(),
5349 input1.GetTensorInfo(),
5350 outputInfo);
5351 };
5352
5353 if(IsDynamicTensor(outputInfo))
5354 {
5355 isSupported = AreDynamicTensorsSupported();
5356 }
5357 else
5358 {
5359 validateFunc(outputInfo, isSupported);
5360 }
5361
5362 if (!isSupported)
5363 {
5364 return false;
5365 }
5366
5367 armnn::IConnectableLayer* const startLayer = data.m_Network->AddSubtractionLayer();
Cathal Corbett53837672022-09-01 11:34:37 +01005368 startLayer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01005369
5370 bool isReshapeSupported = BroadcastTensor(input0, input1, startLayer, data);
5371 if (!isReshapeSupported)
5372 {
5373 return false;
5374 }
5375 return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
5376 data, nullptr, validateFunc, activationFunction);
5377}
5378
5379bool Converter::ConvertTanH(const Operation& operation, const Model& model, ConversionData& data)
5380{
5381 VLOG(DRIVER) << "Converter::ConvertTanH()";
5382
5383 armnn::ActivationDescriptor desc;
5384 desc.m_Function = armnn::ActivationFunction::TanH;
5385 desc.m_A = 1.0f; // android nn does not support tanH parameters
5386 desc.m_B = 1.0f; // set to 1.0f for unity scaling
5387
5388 return ConvertToActivation(operation, __func__, desc, model, data);
5389}
5390
5391bool Converter::ConvertTransposeConv2d(const Operation& operation, const Model& model, ConversionData& data)
5392{
5393 VLOG(DRIVER) << "Converter::ConvertTransposeConv2d()";
5394
5395 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5396
5397 if (!input.IsValid())
5398 {
5399 return Fail("%s: Operation has invalid inputs", __func__);
5400 }
5401
5402 const Operand* output = GetOutputOperand(operation, 0, model);
5403
5404 if (!output)
5405 {
5406 return Fail("%s: Could not read output 0", __func__);
5407 }
5408
5409 const TensorInfo& inputInfo = input.GetTensorInfo();
5410 const TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
5411
5412 // ArmNN does not currently support non-fixed weights or bias
5413 // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
5414 const Operand* weightsOperand = GetInputOperand(operation, 1, model);
5415
5416 if (weightsOperand == nullptr)
5417 {
5418 return Fail("%s: Operand is invalid", __func__);
5419 }
5420 TransposeConvolution2dDescriptor desc;
5421 desc.m_DataLayout = DataLayout::NHWC;
5422
5423 // Determine whether padding is implicit or explicit
5424 bool implicitPadding = operation.inputs.size() == 9;
5425
5426 if (implicitPadding )
5427 {
5428 desc.m_DataLayout = OptionalDataLayout(operation, 8, model, data);
5429 }
5430 else
5431 {
5432 desc.m_DataLayout = OptionalDataLayout(operation, 10, model, data);
5433 }
5434
5435 armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
5436 unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
5437 unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();
5438
5439 const PermutationVector OHWIToOIHW = {0, 2, 3, 1};
5440
5441 // The shape of the weight is [depth_out, filter_height, filter_width, depth_in].
5442 // We have to permute it to OIHW if the data layout is NCHW.
5443 const ConstTensorPin weightsPin = (desc.m_DataLayout == DataLayout::NCHW) ?
5444 ConvertOperationInputToConstTensorPin(operation, 1,
5445 model, data, OHWIToOIHW) :
5446 ConvertOperationInputToConstTensorPin(operation, 1, model, data);
5447
5448 // Bias is a 1D tensor
5449 const ConstTensorPin biasPin =
5450 ConvertOperationInputToConstTensorPin(operation, 2, model, data);
5451
5452 if (!weightsPin.IsValid())
5453 {
5454 return Fail("%s: Operation has invalid weights", __func__);
5455 }
5456
5457 if (!biasPin.IsValid())
5458 {
5459 return Fail("%s: Operation has invalid biases", __func__);
5460 }
5461
5462 ConstTensor weights = weightsPin.GetConstTensor();
5463 ConstTensor bias = biasPin.GetConstTensor();
5464 SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);
5465
5466 ActivationFn activation;
5467
5468 if (implicitPadding)
5469 {
5470 int32_t strideX{0};
5471 int32_t strideY{0};
5472 int32_t padLeft{0};
5473 int32_t padRight{0};
5474 int32_t padTop{0};
5475 int32_t padBottom{0};
5476
5477 ::android::nn::PaddingScheme paddingScheme;
5478 if (!GetInputPaddingScheme(operation, 4, paddingScheme, model, data) ||
5479 !GetInputScalar(operation, 5, OperandType::INT32, strideX, model, data) ||
5480 !GetInputScalar(operation, 6, OperandType::INT32, strideY, model, data) ||
5481 !GetInputActivationFunction(operation, 7, activation, model, data))
5482 {
5483 return Fail("%s: Operation has invalid inputs (implicit padding)", __func__);
5484 }
5485
5486 const uint32_t kernelX = weights.GetShape()[widthIndex];
5487 const uint32_t kernelY = weights.GetShape()[heightIndex];
5488
5489 // If output shape has been specified as a parameter then extract it and make it available.
5490 const Operand* outputShapeOperand = GetInputOperand(operation, 3, model, false);
5491 std::vector<int32_t> outputShape;
5492 if ((outputShapeOperand) && (GetTensorInt32Values(*outputShapeOperand, outputShape, model, data)))
5493 {
5494 // Change from signed to unsigned int to store in TransposeConvolution2dDescriptor.
5495 for (int dimension : outputShape)
5496 {
5497 desc.m_OutputShape.push_back(static_cast<unsigned int>(dimension));
5498 }
5499 desc.m_OutputShapeEnabled = true;
5500 }
5501
5502 uint32_t outputX;
5503 uint32_t outputY;
5504
5505 if (IsDynamicTensor(outputInfo))
5506 {
5507 if (outputShape.size() == 0)
5508 {
5509 return Fail("%s: Padding sizes cannot be inferred", __func__);
5510 }
5511
5512 outputX = outputShape[widthIndex];
5513 outputY = outputShape[heightIndex];
5514 }
5515 else
5516 {
5517 outputX = outputInfo.GetShape()[widthIndex];
5518 outputY = outputInfo.GetShape()[heightIndex];
5519 }
5520
5521 CalcPaddingTransposeConv(outputX, kernelX, strideX, padLeft, padRight, paddingScheme);
5522 CalcPaddingTransposeConv(outputY, kernelY, strideY, padTop, padBottom, paddingScheme);
5523
5524 // NOTE: The Android NN API allows for negative padding values in TransposeConv2d,
5525 // but Arm NN only supports values >= 0
5526 if (padLeft < 0 || padRight < 0 || padTop < 0 || padBottom < 0)
5527 {
5528 return Fail("%s: Negative padding values are not supported", __func__);
5529 }
5530
5531 desc.m_StrideX = armnn::numeric_cast<uint32_t>(strideX);
5532 desc.m_StrideY = armnn::numeric_cast<uint32_t>(strideY);
5533 desc.m_PadLeft = armnn::numeric_cast<uint32_t>(padLeft);
5534 desc.m_PadRight = armnn::numeric_cast<uint32_t>(padRight);
5535 desc.m_PadTop = armnn::numeric_cast<uint32_t>(padTop);
5536 desc.m_PadBottom = armnn::numeric_cast<uint32_t>(padBottom);
5537 }
5538 else if (operation.inputs.size() == 11)
5539 {
5540 // explicit padding
5541 if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft, model, data) ||
5542 !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight, model, data) ||
5543 !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop, model, data) ||
5544 !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom, model, data) ||
5545 !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX, model, data) ||
5546 !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY, model, data) ||
5547 !GetInputActivationFunction(operation, 9, activation, model, data))
5548 {
5549 return Fail("%s: Operation has invalid inputs (explicit padding)", __func__);
5550 }
5551 }
5552 else
5553 {
5554 return Fail("%s: Unsupported number of operation inputs", __func__);
5555 }
5556
5557 desc.m_BiasEnabled = true;
5558 Optional<TensorInfo> biases(bias.GetInfo());
5559
5560 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01005561 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01005562 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5563 {
5564 FORWARD_LAYER_SUPPORT_FUNC(__func__,
5565 IsTransposeConvolution2dSupported,
5566 data.m_Backends,
5567 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01005568 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01005569 inputInfo,
5570 outputInfo,
5571 desc,
5572 weights.GetInfo(),
5573 biases);
5574 };
5575
5576 if(IsDynamicTensor(outputInfo))
5577 {
5578 isSupported = AreDynamicTensorsSupported();
5579 }
5580 else
5581 {
5582 validateFunc(outputInfo, isSupported);
5583 }
5584 if (!isSupported)
5585 {
5586 return false;
5587 }
5588
5589 IConnectableLayer* startLayer =
5590 data.m_Network->AddTransposeConvolution2dLayer(desc, weights, Optional<ConstTensor>(bias));
Cathal Corbett53837672022-09-01 11:34:37 +01005591 startLayer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01005592 if (!startLayer)
5593 {
5594 return Fail("%s: AddTransposeConvolution2dLayer failed", __func__);
5595 }
5596
5597 input.Connect(startLayer->GetInputSlot(0));
5598
5599 return SetupAndTrackLayerOutputSlot(operation, 0, *startLayer, model,
5600 data, nullptr, validateFunc, activation);
5601}
5602
5603bool Converter::ConvertSqrt(const Operation& operation, const Model& model, ConversionData& data)
5604{
5605 VLOG(DRIVER) << "Converter::ConvertSqrt()";
5606 ActivationDescriptor desc;
5607 desc.m_Function = ActivationFunction::Sqrt;
5608
5609 return ::ConvertToActivation(operation, __func__, desc, model, data);
5610}
5611
5612bool Converter::ConvertSqueeze(const Operation& operation, const Model& model, ConversionData& data)
5613{
5614 VLOG(DRIVER) << "Converter::ConvertSqueeze()";
5615
5616 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5617 if (!input.IsValid())
5618 {
5619 return Fail("%s: Operation has invalid inputs", __func__);
5620 }
5621
5622 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
5623 unsigned int rank = inputInfo.GetNumDimensions();
5624 if (rank > 4)
5625 {
5626 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
5627 }
5628
5629 const Operand* output = GetOutputOperand(operation, 0, model);
5630 if (!output)
5631 {
5632 return Fail("%s: Could not read output 0", __func__);
5633 }
5634
5635 if (IsDynamicTensor(GetTensorInfoForOperand(*output)) && !(AreDynamicTensorsSupported()))
5636 {
5637 return Fail("%s: Dynamic output tensors are not supported", __func__);
5638 }
5639
5640 // NOTE: Axis is an optional parameter to SQUEEZE, therefore we do not want to generate a failure
5641 // if the operand index is out of bounds.
5642 const Operand* axisOperand = GetInputOperand(operation, 1, model, false);
5643
5644 const uint32_t dimensionSequence[] = { 0, 1, 2, 3 };
5645
5646 std::vector<int32_t> axis;
5647 if (!axisOperand)
5648 {
5649 axis.assign(dimensionSequence,
5650 dimensionSequence + rank);
5651 }
5652 else if (!GetTensorInt32Values(*axisOperand, axis, model, data))
5653 {
5654 return Fail("%s: Operation has an invalid or unsupported axis operand", __func__);
5655 }
5656
5657 std::vector<uint32_t> outputDims;
5658 for (unsigned int i = 0; i < rank; i++)
5659 {
5660 bool skipSqueeze = (std::find(axis.begin(), axis.end(), i) == axis.end());
5661 auto currentDimension = inputInfo.GetShape()[i];
5662 if (skipSqueeze || currentDimension != 1)
5663 {
5664 outputDims.push_back(currentDimension);
5665 }
5666 }
5667
5668 armnn::TensorShape outShape = armnn::TensorShape(outputDims.size(), outputDims.data());
5669
5670 armnn::TensorInfo outputInfo = inputInfo;
5671 outputInfo.SetShape(outShape);
5672
5673 armnn::ReshapeDescriptor reshapeDesc;
5674 reshapeDesc.m_TargetShape = outputInfo.GetShape();
5675
5676 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01005677 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01005678 FORWARD_LAYER_SUPPORT_FUNC(__func__,
5679 IsReshapeSupported,
5680 data.m_Backends,
5681 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01005682 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01005683 inputInfo,
5684 outputInfo,
5685 reshapeDesc);
5686
5687 if (!isSupported)
5688 {
5689 return false;
5690 }
5691
5692 armnn::IConnectableLayer* const layer = data.m_Network->AddReshapeLayer(reshapeDesc);
Cathal Corbett53837672022-09-01 11:34:37 +01005693 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01005694 assert(layer != nullptr);
5695 input.Connect(layer->GetInputSlot(0));
5696
5697 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
5698}
5699
5700bool Converter::ConvertStridedSlice(const Operation& operation, const Model& model, ConversionData& data)
5701{
5702 VLOG(DRIVER) << "Converter::ConvertStridedSlice()";
5703
5704 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5705 if (!input.IsValid())
5706 {
5707 return Fail("%s: Operation has invalid inputs", __func__);
5708 }
5709
5710 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
5711 unsigned int rank = inputInfo.GetNumDimensions();
5712 if (rank > 4)
5713 {
5714 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
5715 }
5716
5717 const Operand* output = GetOutputOperand(operation, 0, model);
5718 if (!output)
5719 {
5720 return Fail("%s: Could not read output 0", __func__);
5721 }
5722
5723 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
5724
5725 const Operand* beginOperand = GetInputOperand(operation, 1, model);
5726 const Operand* endOperand = GetInputOperand(operation, 2, model);
5727 const Operand* stridesOperand = GetInputOperand(operation, 3, model);
5728
5729 std::vector<int32_t> beginValues;
5730 std::vector<int32_t> endValues;
5731 std::vector<int32_t> stridesValues;
5732
5733 // The length of the beginOperand, endOperand and stridesOperand must be of a rank(input)
5734 auto ValidateInputOperands = [&] (const Operand& operand, std::vector<int32_t>& operandValues)
5735 {
5736 if (!GetTensorInt32Values(operand, operandValues, model, data))
5737 {
5738 return false;
5739 }
5740
5741 if (operandValues.size() != rank)
5742 {
5743 return false;
5744 }
5745
5746 return true;
5747 };
5748
5749 if (!ValidateInputOperands(*beginOperand, beginValues)
5750 || !ValidateInputOperands(*endOperand, endValues)
5751 || !ValidateInputOperands(*stridesOperand, stridesValues))
5752 {
5753 return Fail("%s: Operation has invalid input operand", __func__);
5754 }
5755
5756 // Stride cannot have value '0'
5757 if (std::any_of(stridesValues.cbegin(), stridesValues.cend(), [](int32_t i){ return i == 0; }))
5758 {
5759 return Fail("%s: Stride must be non-zero value.", __func__);
5760 }
5761
5762 armnn::StridedSliceDescriptor descriptor;
5763 descriptor.m_Begin.assign(beginValues.cbegin(), beginValues.cend());
5764 descriptor.m_End.assign(endValues.cbegin(), endValues.cend());
5765 descriptor.m_Stride.assign(stridesValues.cbegin(), stridesValues.cend());
5766 descriptor.m_DataLayout = armnn::DataLayout::NHWC;
5767
5768 // Get the "begin_mask", "end_mask", and "shrink_axis_mask" flags
5769 if (!GetInputInt32(operation, 4, descriptor.m_BeginMask, model, data) ||
5770 !GetInputInt32(operation, 5, descriptor.m_EndMask, model, data) ||
5771 !GetInputInt32(operation, 6, descriptor.m_ShrinkAxisMask, model, data))
5772 {
5773 return Fail("%s: Operation has invalid inputs", __func__);
5774 }
5775
5776 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01005777 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01005778 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5779 {
5780 FORWARD_LAYER_SUPPORT_FUNC(__func__,
5781 IsStridedSliceSupported,
5782 data.m_Backends,
5783 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01005784 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01005785 inputInfo,
5786 outputInfo,
5787 descriptor);
5788 };
5789
5790 if(IsDynamicTensor(outputInfo))
5791 {
5792 isSupported = AreDynamicTensorsSupported();
5793 }
5794 else
5795 {
5796 validateFunc(outputInfo, isSupported);
5797 }
5798
5799 if (!isSupported)
5800 {
5801 return false;
5802 }
5803
5804 // Check if slice can fit in a inferred output
5805 armnn::TensorShape inputShape = inputInfo.GetShape();
5806 for (unsigned int i = 0; i < inputShape.GetNumDimensions(); i++)
5807 {
5808 int stride = descriptor.m_Stride[i];
5809
5810 if (descriptor.m_ShrinkAxisMask & (1 << i))
5811 {
5812 // If the difference between the start point and the end point of the slice on an axis being shrunk
5813 // is greater than 1 then throw an error as the output will not be large enough to hold the slice
5814 if (((descriptor.m_Begin[i] - descriptor.m_End[i]) > 1)
5815 || ((descriptor.m_Begin[i] - descriptor.m_End[i]) < -1))
5816 {
5817 return Fail("%s: StridedSlice: Output will not be large enough to hold the slice", __func__);
5818 }
5819
5820 if(stride < 0)
5821 {
5822 return Fail("%s: StridedSlice: Stride can not be negative while ShrinkAxisMask is set.", __func__);
5823 }
5824 }
5825 }
5826
5827 armnn::IConnectableLayer* const layer = data.m_Network->AddStridedSliceLayer(descriptor);
Cathal Corbett53837672022-09-01 11:34:37 +01005828 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01005829 assert(layer != nullptr);
5830 input.Connect(layer->GetInputSlot(0));
5831
5832 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
5833}
5834
5835bool Converter::ConvertTranspose(const Operation& operation, const Model& model, ConversionData& data)
5836{
5837 VLOG(DRIVER) << "Converter::ConvertTranspose()";
5838
5839 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
5840 if (!input.IsValid())
5841 {
5842 return Fail("%s: Operation has invalid inputs", __func__);
5843 }
5844
5845 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
5846 unsigned int rank = inputInfo.GetNumDimensions();
5847 if (rank > 4)
5848 {
5849 Fail("%s: Inputs with rank greater than 4 are not supported", __func__);
5850 }
5851
5852 // NOTE: Axis is an optional parameter to TRANSPOSE, therefore we do not want to generate a failure
5853 // if the operand index is out of bounds.
5854 const Operand* permOperand = GetInputOperand(operation, 1, model, false);
5855
5856 std::vector<int32_t> perm(rank);
5857 if (!permOperand || (permOperand->lifetime == OperandLifeTime::NO_VALUE))
5858 {
5859 for (unsigned int i = rank; i > 0; i--)
5860 {
5861 perm[rank - i] = armnn::numeric_cast<int> (i - 1);
5862 }
5863 }
5864 else if (!GetTensorInt32Values(*permOperand, perm, model, data))
5865 {
5866 return Fail("%s: Operation has an invalid or unsupported permutation operand", __func__);
5867 }
5868
5869 std::vector<uint32_t> outputDims(perm.begin(), perm.begin() + rank);
5870
5871 armnn::TransposeDescriptor transposeDesc;
5872 transposeDesc.m_DimMappings = armnn::PermutationVector(outputDims.data(), outputDims.size());
5873
5874 const Operand* output = GetOutputOperand(operation, 0, model);
5875 if (!output)
5876 {
5877 return Fail("%s: Could not read output 0", __func__);
5878 }
5879
5880 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
5881
5882 bool isSupported = false;
Cathal Corbett53837672022-09-01 11:34:37 +01005883 armnn::BackendId setBackend;
Sadik Armagan8f397a12022-06-17 15:38:22 +01005884 auto validateFunc = [&](const armnn::TensorInfo& outputInfo, bool& isSupported)
5885 {
5886 FORWARD_LAYER_SUPPORT_FUNC(__func__,
5887 IsTransposeSupported,
5888 data.m_Backends,
5889 isSupported,
Cathal Corbett53837672022-09-01 11:34:37 +01005890 setBackend,
Sadik Armagan8f397a12022-06-17 15:38:22 +01005891 inputInfo,
5892 outputInfo,
5893 transposeDesc);
5894 };
5895
5896 if(IsDynamicTensor(outputInfo))
5897 {
5898 isSupported = AreDynamicTensorsSupported();
5899 }
5900 else
5901 {
5902 validateFunc(outputInfo, isSupported);
5903 }
5904
5905 if (!isSupported)
5906 {
5907 return false;
5908 }
5909
5910 armnn::IConnectableLayer* const layer = data.m_Network->AddTransposeLayer(transposeDesc);
Cathal Corbett53837672022-09-01 11:34:37 +01005911 layer->SetBackendId(setBackend);
Sadik Armagan8f397a12022-06-17 15:38:22 +01005912 assert(layer != nullptr);
5913 input.Connect(layer->GetInputSlot(0));
5914
5915 return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data, nullptr, validateFunc);
5916}
5917
5918} // namespace armnn_driver