//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/ArmNN.hpp>

#include "armnn/src/armnnUtils/Permute.hpp"
#include "Utils.hpp"

#include <ActivationFunctor.h>
#include <CpuExecutor.h>
#include <OperationsUtils.h>

#include <boost/assert.hpp>
#include <boost/core/ignore_unused.hpp>
#include <boost/test/tools/floating_point_comparison.hpp>

#include <log/log.h>

namespace armnn_driver
{

///
/// Helper classes
///

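// State shared across the conversion of a model: the compute backend to target, the ArmNN network being
// built, the output slot recorded for each operand converted so far, and the memory pools backing constant
// operand data.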
struct ConversionData
{
    ConversionData(armnn::Compute compute)
        : m_Compute(compute)
        , m_Network(nullptr, nullptr)
    {}

    const armnn::Compute m_Compute;
    armnn::INetworkPtr m_Network;
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};

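// Wraps the ArmNN output slot (and associated TensorInfo) that supplies the data for a layer input.
// An invalid handle signals that the input could not be resolved.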
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    bool IsValid() const;

    void Connect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};

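// Holds an ArmNN constant tensor created from a constant model operand, together with ownership of any
// swizzled copy of the data.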
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};

} // namespace armnn_driver

///
/// Utility functions
///

namespace
{

using namespace armnn_driver;
using namespace android::nn;

// Convenience function to log the reason for failing to convert a model.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}

// Convenience function to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: IsLayerSupported(__func__, Is*Supported, a, b, c, d, e)
template<typename IsLayerSupportedFunc, typename ... Args>
bool IsLayerSupported(const char* funcName, IsLayerSupportedFunc f, Args&&... args)
{
    std::vector<char> unsupportedReason(1024+1);
    bool isSupported = f(std::forward<Args>(args)..., unsupportedReason.data(), unsupportedReason.size()-1);
    if (isSupported)
    {
        return true;
    }
    else
    {
        std::string sUnsupportedReason(unsupportedReason.data());
        if (sUnsupportedReason.size() > 0)
        {
            ALOGD("%s: not supported by armnn: %s", funcName, sUnsupportedReason.c_str());
        }
        else
        {
            ALOGD("%s: not supported by armnn", funcName);
        }
        return false;
    }
}

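// Converts the dimensions of an AndroidNN operand into an armnn::TensorShape.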
armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}

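// Returns true if the operand type can be represented as an ArmNN tensor
// (float32, asymmetric quantized 8-bit, or int32 tensors).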
inline bool IsOperandTypeSupportedForTensors(OperandType type)
{
    return type == OperandType::TENSOR_FLOAT32 ||
           type == OperandType::TENSOR_QUANT8_ASYMM ||
           type == OperandType::TENSOR_INT32;
}

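// Connects the two inputs to startLayer, inserting a Reshape in front of the lower-rank input so that both
// inputs have the same number of dimensions (i.e. the broadcast is made explicit).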
void BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1, armnn::IConnectableLayer* startLayer,
                     armnn::INetwork& network)
{
    BOOST_ASSERT(startLayer != nullptr);
    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();

    if (inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
    {
        // If the numbers of dimensions do not match then we need to add degenerate dimensions
        // to the "smaller" tensor using a reshape:
        //     Small  Big
        //       |     |
        //    Reshape  |
        //        \   /
        //         Add
        bool input0IsBigger = inputTensorInfo0.GetNumDimensions() > inputTensorInfo1.GetNumDimensions();

        LayerInputHandle& smallTensorHandle = input0IsBigger ? input1 : input0;
        const armnn::TensorInfo& smallTensorDims = smallTensorHandle.GetTensorInfo();

        LayerInputHandle& bigTensorHandle = input0IsBigger ? input0 : input1;
        const armnn::TensorInfo& bigTensorDims = bigTensorHandle.GetTensorInfo();

        const unsigned int bigTensorDimsNumber = bigTensorDims.GetNumDimensions();
        std::vector<unsigned int> reshapedDims(bigTensorDimsNumber, 1);
        unsigned int sizeDifference = bigTensorDimsNumber - smallTensorDims.GetNumDimensions();
        for (unsigned i = sizeDifference; i < bigTensorDimsNumber; ++i)
        {
            reshapedDims[i] = smallTensorDims.GetShape()[i - sizeDifference];
        }
        armnn::TensorInfo reshapedInfo = smallTensorDims;
        reshapedInfo.SetShape(armnn::TensorShape{ static_cast<unsigned int>(reshapedDims.size()),
                                                  reshapedDims.data() });

        armnn::ReshapeDescriptor reshapeDesc;
        reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
        armnn::IConnectableLayer* const reshapeLayer = network.AddReshapeLayer(reshapeDesc);
        smallTensorHandle.Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

        // Connect the output of the new reshape layer and the original "big" input to the start layer
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        bigTensorHandle.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
}

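// Computes explicit head and tail padding for one dimension from the implicit AndroidNN padding scheme,
// given the input size, kernel size and stride.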
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}

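// Builds an android::nn::Shape from an operand's type, dimensions and quantization parameters.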
Shape GetOperandShape(const Operand& operand)
{
    Shape shape;
    shape.type = operand.type;
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However, for some of the AndroidNN tests the values don't exactly match, so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& inputInfo)
{
    const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
    if (biasInfo.GetQuantizationScale() != expectedBiasScale)
    {
        boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
        if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
        {
            ALOGW("Bias quantization scale has been modified to match input*weights");
            biasInfo.SetQuantizationScale(expectedBiasScale);
        }
    }
}

// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });

template<typename OSlot>
armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
                                          const armnn::PermutationVector& mappings)
{
    // Add swizzle layer
    armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);

    BOOST_ASSERT(layer != nullptr);

    // Connect input to swizzle layer
    input.Connect(layer->GetInputSlot(0));

    // Setup swizzled output
    const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
    layer->GetOutputSlot(0).SetTensorInfo(outInfo);

    return *layer;
}

void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
{
    // Add swizzle layer
    armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
    // Connect swizzled input to layer
    swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
}

armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
{
    // Add deswizzle layer
    armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
    return deswizzleLayer;
}

// Only suitable for input/output slot index 0; for other slots, use SwizzleIn and DeswizzleOut directly
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
                                                LayerInputHandle& input,
                                                armnn::IConnectableLayer& firstLayer,
                                                armnn::IConnectableLayer& lastLayer)
{
    SwizzleIn(network, input, firstLayer, 0);
    return DeswizzleOut(network, lastLayer, 0);
}

// Only suitable for input/output slot index 0; for other slots, use SwizzleIn and DeswizzleOut directly
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
                                                armnn::IConnectableLayer& layer)
{
    return SwizzleInDeswizzleOut(network, input, layer, layer);
}

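// Checks that the output shape is consistent with concatenating the given input shapes along concatDim.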
bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape>& inputShapes,
                               const armnn::TensorShape& outputShape,
                               uint32_t concatDim)
{
    // Validate the output shape is correct given the input shapes (which have just been validated)
    unsigned int numDimensions = inputShapes[0].GetNumDimensions();
    if (outputShape.GetNumDimensions() != numDimensions)
    {
        return Fail("%s: Output shape has wrong number of dimensions", __func__);
    }

    unsigned int outputSizeAlongConcatenatedDimension = 0;
    for (unsigned int i = 0; i < inputShapes.size(); i++)
    {
        outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
    }

    for (unsigned int i = 0; i < numDimensions; ++i)
    {
        if (i == concatDim)
        {
            if (outputShape[i] != outputSizeAlongConcatenatedDimension)
            {
                return Fail(
                    "%s: Invalid output shape for dimension %d (%d != %d)",
                    __func__,
                    i,
                    outputShape[i],
                    outputSizeAlongConcatenatedDimension);
            }
        }
        else
        {
            if (outputShape[i] != inputShapes[0][i])
            {
                return Fail("%s: Invalid output shape", __func__);
            }
        }
    }

    return true;
}

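// Returns true if the shape has fewer than 3 dimensions and the tensor therefore needs to be reshaped
// before use (e.g. for concatenation).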
bool RequiresReshape(armnn::TensorShape& inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}

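// Adds a Reshape layer to the network, connects the given input to it and sets reshapeInfo on its output slot.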
template<typename OSlot>
armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, OSlot& inputLayer,
                                          armnn::TensorInfo reshapeInfo)
{
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
    BOOST_ASSERT(reshapeLayer != nullptr);

    // Attach the input layer to the reshape layer
    inputLayer.Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);

    return *reshapeLayer;
}

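// Permutes each input with the given mapping (unless it is the identity permutation), replacing the input
// handles and shapes with their swizzled equivalents.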
void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i = 0; i < nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}

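// Chooses a concatenation dimension that ArmNN can handle via sub-tensors, together with the pair of
// permutations to apply before and after the concatenation so the result keeps the original layout.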
void CreatePermutationParameters(const unsigned int numberOfDimensions,
                                 int32_t& concatDimension,
                                 std::pair<armnn::PermutationVector, armnn::PermutationVector>& permutationPair)
{
    BOOST_ASSERT(numberOfDimensions >= 3);

    // ArmNN uses Compute Library subtensors to perform concatenation
    // This only works when concatenating along dimension 0 or 1 for a 4-D tensor,
    // or along dimension 0 for a 3-D tensor.
    if (numberOfDimensions == 4)
    {
        if (concatDimension == 3)
        {
            concatDimension = 1;
            permutationPair = std::make_pair(NHWCToArmNN, ArmNNToNHWC);
        }
        else if (concatDimension == 2)
        {
            concatDimension = 1;
            permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
        }
        else
        {
            permutationPair = std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
        }
    }
    else if (numberOfDimensions == 3)
    {
        if (concatDimension == 2)
        {
            concatDimension = 0;
            permutationPair = std::make_pair(RotateTensorRight, RotateTensorLeft);
        }
        else if (concatDimension == 1)
        {
            concatDimension = 0;
            permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        }
        else
        {
            permutationPair = std::make_pair(IdentityPermutation3D, IdentityPermutation3D);
        }
    }
}

} // anonymous namespace

namespace armnn_driver
{

/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed-in AndroidNN activation function requires it.
/// @return The end layer of the sequence of layers built for the given AndroidNN
/// activation function, or nullptr if an error occurred (e.g. unsupported activation).
/// Note that the end layer matches the input layer if no activation is required
/// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);

} // namespace armnn_driver

///
/// Utility templates
///

namespace armnn_driver
{

using namespace android::nn;

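// Returns the operand referenced by the operation's input at inputIndex, or nullptr (after logging) if the
// index is out of range.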
template<typename HalOperation, typename HalModel>
const Operand* GetInputOperand(const HalOperation& operation, uint32_t inputIndex, const HalModel& model)
{
    if (inputIndex >= operation.inputs.size())
    {
        Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        return nullptr;
    }

    BOOST_ASSERT(operation.inputs[inputIndex] < model.operands.size()); // Model should have been validated beforehand
    return &model.operands[operation.inputs[inputIndex]];
}

template<typename HalOperation, typename HalModel>
const Operand* GetOutputOperand(const HalOperation& operation, uint32_t outputIndex, const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.outputs[outputIndex] < model.operands.size());

    return &model.operands[operation.outputs[outputIndex]];
}

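// Wraps a constant operand (CONSTANT_COPY or CONSTANT_REFERENCE) in a ConstTensorPin, optionally permuting
// its dimensions or overriding its shape. Returns an invalid pin on failure; if 'optional' is set, a missing
// value yields an invalid-but-optional pin rather than an error.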
template<typename HalModel>
ConstTensorPin ConvertOperandToConstTensorPin(const Operand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    if (operand.lifetime != OperandLifeTime::CONSTANT_COPY && operand.lifetime != OperandLifeTime::CONSTANT_REFERENCE)
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress(operand, model, data);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}

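// Convenience wrapper: resolves the operation input at inputIndex and forwards it to
// ConvertOperandToConstTensorPin.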
template<typename HalOperation, typename HalModel>
ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
                                                     uint32_t inputIndex,
                                                     const HalModel& model,
                                                     const ConversionData& data,
                                                     const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                                     const armnn::TensorShape* overrideTensorShape = nullptr,
                                                     bool optional = false)
{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
        return ConstTensorPin();
    }
    return ConvertOperandToConstTensorPin(*operand,
                                          model,
                                          data,
                                          dimensionMappings,
                                          overrideTensorShape,
                                          optional);
}

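// Returns a read-only pointer to the operand's data: either into model.operandValues (CONSTANT_COPY) or into
// one of the memory pools (CONSTANT_REFERENCE). Returns nullptr for any other lifetime.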
template<typename HalModel>
const void* GetOperandValueReadOnlyAddress(const Operand& operand, const HalModel& model, const ConversionData& data)
{
    const void* valueStart = nullptr;

    switch (operand.lifetime)
    {
        case OperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case OperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}

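// Reads a scalar input of the expected operand type and size into outValue. Fails if the operand has the
// wrong type or length, or if its value cannot be read.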
template<typename HalOperation, typename HalModel, typename OutputType>
bool GetInputScalar(const HalOperation& operation,
                    uint32_t inputIndex,
                    OperandType type,
                    OutputType& outValue,
                    const HalModel& model,
                    const ConversionData& data)
{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    if (operand->type != type)
    {
        return Fail("%s: unexpected operand type: %s (should be %s)",
                    __func__, toString(operand->type).c_str(), toString(type).c_str());
    }

    if (operand->location.length != sizeof(OutputType))
    {
        return Fail("%s: incorrect operand location length: %i (should be %i)",
                    __func__, operand->location.length, sizeof(OutputType));
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress(*operand, model, data);
    if (!valueAddress)
    {
        return Fail("%s: failed to get address for operand", __func__);
    }

    outValue = *(static_cast<const OutputType*>(valueAddress));
    return true;
}

template<typename HalOperation, typename HalModel>
bool GetInputInt32(const HalOperation& operation,
                   uint32_t inputIndex,
                   int32_t& outValue,
                   const HalModel& model,
                   const ConversionData& data)
{
    return GetInputScalar(operation, inputIndex, OperandType::INT32, outValue, model, data);
}

template<typename HalOperation, typename HalModel>
bool GetInputFloat32(const HalOperation& operation,
                     uint32_t inputIndex,
                     float& outValue,
                     const HalModel& model,
                     const ConversionData& data)
{
    return GetInputScalar(operation, inputIndex, OperandType::FLOAT32, outValue, model, data);
}

template<typename HalOperation, typename HalModel>
bool GetInputActivationFunctionImpl(const HalOperation& operation,
                                    uint32_t inputIndex,
                                    OperandType type,
                                    ActivationFn& outActivationFunction,
                                    const HalModel& model,
                                    const ConversionData& data)
{
    if (type != OperandType::INT32 && type != OperandType::TENSOR_INT32)
    {
        return Fail("%s: unexpected operand type: %s (should be %s or %s)",
                    __func__,
                    toString(type).c_str(),
                    toString(OperandType::INT32).c_str(),
                    toString(OperandType::TENSOR_INT32).c_str());
    }

    int32_t activationFunctionAsInt;
    if (!GetInputScalar(operation, inputIndex, type, activationFunctionAsInt, model, data))
    {
        return Fail("%s: failed to get activation input value", __func__);
    }
    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
    return true;
}

template<typename HalOperation, typename HalModel>
bool GetInputActivationFunction(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& outActivationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    return GetInputActivationFunctionImpl(operation,
                                          inputIndex,
                                          OperandType::INT32,
                                          outActivationFunction,
                                          model,
                                          data);
}

template<typename HalOperation, typename HalModel>
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    return GetInputActivationFunctionImpl(operation,
                                          inputIndex,
                                          OperandType::INT32,
                                          outActivationFunction,
                                          model,
                                          data);
}

template<typename HalOperation, typename HalModel>
bool GetOptionalInputActivation(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& activationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    if (operation.inputs.size() <= inputIndex)
    {
        activationFunction = ActivationFn::kActivationNone;
    }
    else
    {
        if (!GetInputActivationFunction(operation, inputIndex, activationFunction, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    return true;
}

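// Copies the values of a constant TENSOR_INT32 operand into outValues, checking that the data length is a
// whole number of int32 elements.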
template<typename HalModel>
bool GetTensorInt32Values(const Operand& operand,
                          std::vector<int32_t>& outValues,
                          const HalModel& model,
                          const ConversionData& data)
{
    if (operand.type != OperandType::TENSOR_INT32)
    {
        return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
    }

    const void* startAddress = GetOperandValueReadOnlyAddress(operand, model, data);
    if (!startAddress)
    {
        return Fail("%s: failed to get operand address", __func__, operand.type);
    }

    // Check number of bytes is sensible
    const uint32_t numBytes = operand.location.length;
    if (numBytes % sizeof(int32_t) != 0)
    {
        return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
                    __func__, numBytes, sizeof(int32_t));
    }

    outValues.resize(numBytes / sizeof(int32_t));
    memcpy(outValues.data(), startAddress, numBytes);
    return true;
}

template<typename HalOperation, typename HalModel>
bool GetInputPaddingScheme(const HalOperation& operation,
                           uint32_t inputIndex,
                           PaddingScheme& outPaddingScheme,
                           const HalModel& model,
                           const ConversionData& data)
{
    int32_t paddingSchemeAsInt;
    if (!GetInputInt32(operation, inputIndex, paddingSchemeAsInt, model, data))
    {
        return Fail("%s: failed to get padding scheme input value", __func__);
    }

    outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
    return true;
}

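// Resolves an operation input to a LayerInputHandle: for model inputs and temporary variables this is the
// output slot of a previously converted layer, while constant operands are turned into a new Constant layer.
// Returns an invalid handle if the input cannot be converted.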
template<typename HalOperation, typename HalModel>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);

    switch (operand->lifetime)
    {
        case OperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
        case OperandLifeTime::MODEL_INPUT:
        {
            // The tensor is either an operand internal to the model, or a model input.
            // It can be associated with an ArmNN output slot for an existing layer.

            // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
            const uint32_t operandIndex = operation.inputs[inputIndex];
            return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            break;
        }
        case OperandLifeTime::CONSTANT_COPY:
        case OperandLifeTime::CONSTANT_REFERENCE:
        {
            // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
            ConstTensorPin tensorPin = ConvertOperandToConstTensorPin(*operand, model, data);
            if (tensorPin.IsValid())
            {
                if (!IsLayerSupported(__func__,
                                      armnn::IsConstantSupported,
                                      data.m_Compute,
                                      tensorPin.GetConstTensor().GetInfo()))
                {
                    return LayerInputHandle();
                }

                armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                return LayerInputHandle(true, &outputSlot, operandTensorInfo);
            }
            else
            {
                Fail("%s: invalid operand tensor", __func__);
                return LayerInputHandle();
            }
            break;
        }
        default:
        {
            // Unsupported lifetime for an input tensor
            Fail("%s: unsupported lifetime for input tensor: %s",
                 __func__, toString(operand->lifetime).c_str());
            return LayerInputHandle();
        }
    }
}

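// Converts a standalone AndroidNN activation operation into an ArmNN Activation layer described by
// activationDesc, checking backend support and wiring up the input and output.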
template<typename HalOperation, typename HalModel>
bool ConvertToActivation(const HalOperation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const HalModel& model,
                         ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }
    const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
    if (!IsLayerSupported(__func__,
                          armnn::IsActivationSupported,
                          data.m_Compute,
                          input.GetTensorInfo(),
                          outInfo,
                          activationDesc))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    BOOST_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

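// Associates the layer's output slot with the operation's output operand, records the slot in the
// ConversionData so later operations can connect to it, and sets the operand's TensorInfo on the slot.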
template<typename HalOperation, typename HalModel>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t operationOutputIndex,
                                  armnn::IConnectableLayer& layer,
                                  uint32_t layerOutputIndex,
                                  const HalModel& model,
                                  ConversionData& data)
{
    const Operand* outputOperand = GetOutputOperand(operation, operationOutputIndex, model);
    if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
    {
        return false;
    }

    armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);

    const uint32_t operandIndex = operation.outputs[operationOutputIndex];
    data.m_OutputSlotForOperand[operandIndex] = &outputSlot;

    outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));

    return true;
}

template<typename HalOperation, typename HalModel>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t outputIndex,
                                  armnn::IConnectableLayer& layer,
                                  const HalModel& model,
                                  ConversionData& data)
{
    return SetupAndTrackLayerOutputSlot(operation, outputIndex, layer, outputIndex, model, data);
}

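// Converts an AndroidNN 2D pooling operation (implicit- or explicit-padding form) into an ArmNN Pooling2d
// layer, or into a trivial Splitter when the pool size is 1x1, handling NHWC<->NCHW swizzling and any fused
// activation.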
template<typename HalOperation, typename HalModel>
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const armnn::TensorInfo swizzledInputInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;

    ActivationFn activation;

    if (operation.inputs.size() == 7)
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme(operation, 1, scheme, model, data)
            || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_StrideX, model, data)
            || !GetInputScalar(operation, 3, OperandType::INT32, desc.m_StrideY, model, data)
            || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PoolWidth, model, data)
            || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PoolHeight, model, data)
            || !GetInputActivationFunction(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        const unsigned int inputWidth = swizzledInputInfo.GetShape()[3];
        const unsigned int inputHeight = swizzledInputInfo.GetShape()[2];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }
    else
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar(operation, 1, OperandType::INT32, desc.m_PadLeft, model, data)
            || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_PadRight, model, data)
            || !GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadTop, model, data)
            || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadBottom, model, data)
            || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideX, model, data)
            || !GetInputScalar(operation, 6, OperandType::INT32, desc.m_StrideY, model, data)
            || !GetInputScalar(operation, 7, OperandType::INT32, desc.m_PoolWidth, model, data)
            || !GetInputScalar(operation, 8, OperandType::INT32, desc.m_PoolHeight, model, data)
            || !GetInputActivationFunction(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }
    }

    // ArmNN does not accept a pool size of 1, but the ArmNN driver is expected to cope.
    // This is mapped to a trivial splitter instead.
    armnn::IConnectableLayer* startLayer = nullptr;
    if (desc.m_PoolWidth != 1 || desc.m_PoolHeight != 1)
    {
        if (!IsLayerSupported(__func__,
                              armnn::IsPooling2dSupported,
                              data.m_Compute,
                              swizzledInputInfo,
                              swizzledOutputInfo,
                              desc))
        {
            return false;
        }

        startLayer = data.m_Network->AddPooling2dLayer(desc);
    }
    else
    {
        const unsigned int numDims = swizzledOutputInfo.GetNumDimensions();

        armnn::ViewsDescriptor viewsDesc(1, numDims);

        for (unsigned int i = 0; i < numDims; ++i)
        {
            viewsDesc.SetViewOriginCoord(0, i, 0);
            viewsDesc.SetViewSize(0, i, swizzledOutputInfo.GetShape()[i]);
        }

        if (!IsLayerSupported(__func__,
                              armnn::IsSplitterSupported,
                              data.m_Compute,
                              swizzledInputInfo,
                              viewsDesc))
        {
            return false;
        }

        startLayer = data.m_Network->AddSplitterLayer(viewsDesc);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(swizzledOutputInfo, activation, startLayer, data);

    if (endLayer != nullptr)
    {
        armnn::IConnectableLayer& outSwizzleLayer =
            SwizzleInDeswizzleOut(*data.m_Network, input, *startLayer, *endLayer);
        return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer, model, data);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", operationName);
    }
}

} // namespace armnn_driver