blob: 68ce09d8deb046d152b225467d45933ecdafb825 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
8#include <armnn/ArmNN.hpp>
9
10#include "armnn/src/armnnUtils/Permute.hpp"
11#include "Utils.hpp"
12
13#include <ActivationFunctor.h>
14#include <CpuExecutor.h>
15#include <OperationsUtils.h>
16
17#include <boost/assert.hpp>
18#include <boost/core/ignore_unused.hpp>
19#include <boost/test/tools/floating_point_comparison.hpp>
20
21#include <log/log.h>
22
23namespace armnn_driver
24{
25
26///
27/// Helper classes
28///
29
// Aggregates the mutable state needed while converting an AndroidNN model into an ArmNN network.
struct ConversionData
{
    ConversionData(armnn::Compute compute)
        : m_Compute(compute)
        , m_Network(nullptr, nullptr)
    {}

    // Backend the converted network targets; fixed for the whole conversion.
    const armnn::Compute m_Compute;
    // The ArmNN network under construction (starts out null).
    armnn::INetworkPtr m_Network;
    // Maps an AndroidNN operand index to the ArmNN output slot producing that operand's value.
    // An entry can be nullptr if the producing layer could not be converted.
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    // Memory pools backing CONSTANT_REFERENCE operand data; must outlive tensors pointing into them.
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
42
// Lightweight handle pairing an ArmNN output slot (the producer of an input tensor)
// with that tensor's info. A default-constructed/invalid handle signals an upstream
// conversion failure.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    // True if this handle refers to a usable output slot.
    bool IsValid() const;

    // Connects the wrapped output slot to the given layer input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
60
// Owns (or references) the data of a constant tensor extracted from the model,
// exposing it as an armnn::ConstTensor. Move-only.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes   Number of bytes for the tensor data.
    // @param mappings   Permutation applied to the data (data is copied and swizzled when non-identity).
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other)      = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
95
96} // namespace armnn_driver
97
98///
99/// Utility functions
100///
101
102namespace
103{
104
105using namespace armnn_driver;
106using namespace android::nn;
107
// Convenience function to log the reason for failing to convert a model.
// @param formatStr printf-style format string; must match the trailing arguments.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    // Logged at Android debug severity.
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
116
117// Convenience function to call an Is*Supported function and log caller name together with reason for lack of support.
118// Called as: IsLayerSupported(__func__, Is*Supported, a, b, c, d, e)
119template<typename IsLayerSupportedFunc, typename ... Args>
120bool IsLayerSupported(const char* funcName, IsLayerSupportedFunc f, Args&&... args)
121{
122 std::vector<char> unsupportedReason(1024+1);
123 bool isSupported = f(std::forward<Args>(args)..., unsupportedReason.data(), unsupportedReason.size()-1);
124 if(isSupported)
125 {
126 return true;
127 }
128 else
129 {
130 std::string sUnsupportedReason(unsupportedReason.data());
131 if (sUnsupportedReason.size() > 0)
132 {
133 ALOGD("%s: not supported by armnn: %s", funcName, sUnsupportedReason.c_str());
134 } else
135 {
136 ALOGD("%s: not supported by armnn", funcName);
137 }
138 return false;
139 }
140}
141
// Builds an armnn::TensorShape directly from an operand's dimension list.
armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}
146
147inline bool IsOperandTypeSupportedForTensors(OperandType type)
148{
149 return type == OperandType::TENSOR_FLOAT32 ||
150 type == OperandType::TENSOR_QUANT8_ASYMM ||
151 type == OperandType::TENSOR_INT32;
152}
153
// Connects input0 and input1 to the two input slots of 'startLayer'. When the inputs'
// ranks differ, a reshape layer is inserted in front of the lower-rank input so both
// inputs reach 'startLayer' with the same number of dimensions.
void BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1, armnn::IConnectableLayer* startLayer,
                     armnn::INetwork& network)
{
    BOOST_ASSERT(startLayer != nullptr);
    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();

    if (inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
    {
        // If the number of dimensions do not match then we need to add degenerate dimensions
        // to the "smaller" tensor using a reshape:
        //      Small  Big
        //        |     |
        //     Reshape  |
        //         \   /
        //          Add
        bool input0IsBigger = inputTensorInfo0.GetNumDimensions() > inputTensorInfo1.GetNumDimensions();

        LayerInputHandle& smallTensorHandle = input0IsBigger ? input1 : input0;
        const armnn::TensorInfo& smallTensorDims = smallTensorHandle.GetTensorInfo();

        LayerInputHandle& bigTensorHandle = input0IsBigger ? input0 : input1;
        const armnn::TensorInfo& bigTensorDims = bigTensorHandle.GetTensorInfo();

        // Right-align the small tensor's dimensions against the big tensor's shape,
        // filling the leading dimensions with 1s.
        const unsigned int bigTensorDimsNumber = bigTensorDims.GetNumDimensions();
        std::vector<unsigned int> reshapedDims(bigTensorDimsNumber, 1);
        unsigned int sizeDifference = bigTensorDimsNumber - smallTensorDims.GetNumDimensions();
        for (unsigned i = sizeDifference; i < bigTensorDimsNumber; ++i)
        {
            reshapedDims[i] = smallTensorDims.GetShape()[i-sizeDifference];
        }
        armnn::TensorInfo reshapedInfo = smallTensorDims;
        reshapedInfo.SetShape(armnn::TensorShape{ static_cast<unsigned int>(reshapedDims.size()),
                                                  reshapedDims.data() });

        armnn::ReshapeDescriptor reshapeDesc;
        reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
        armnn::IConnectableLayer* const reshapeLayer = network.AddReshapeLayer(reshapeDesc);
        smallTensorHandle.Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

        // Connect the outputs from new reshape and original input layer
        // (the reshaped tensor always feeds slot 0, the bigger tensor slot 1).
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        bigTensorHandle.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Same rank: connect both inputs directly.
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
}
205
// Converts an AndroidNN implicit padding scheme into explicit head/tail padding values
// for one spatial dimension, given that dimension's input size, kernel size and stride.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    // numeric_cast throws if the computed padding is negative and so cannot fit in uint32_t.
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}
215
// Builds an android::nn::Shape (type, dimensions, quantization params) from an Operand.
Shape GetOperandShape(const Operand& operand)
{
    Shape shape;
    shape.type = operand.type;
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    // The operand calls the quantization zero point "zeroPoint"; Shape calls it "offset".
    shape.offset = operand.zeroPoint;
    return shape;
}
225
// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want to ArmNN itself to accept these inconsistencies as it is up to the user
// (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& inputInfo)
{
    const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
    if (biasInfo.GetQuantizationScale() != expectedBiasScale)
    {
        // Within 1% relative tolerance: silently adopt the expected scale.
        // Outside the tolerance the scale is left untouched.
        boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
        if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
        {
            ALOGW("Bias quantization scale has been modified to match input*weights");
            biasInfo.SetQuantizationScale(expectedBiasScale);
        }
    }
}
244
// 4D Tensor Permutations
// NOTE(review): mapping direction follows armnn::PermutationVector semantics as used with
// armnnUtils::Permuted below — these translate between NHWC and ArmNN's internal ordering.
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
255
256template<typename OSlot>
257armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
258 const armnn::PermutationVector& mappings)
259{
260 // Add swizzle layer
261 armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);
262
263 BOOST_ASSERT(layer != nullptr);
264
265 // Connect input to swizzle layer
266 input.Connect(layer->GetInputSlot(0));
267
268 // Setup swizzled output
269 const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
270 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
271
272 return *layer;
273}
274
// Permutes 'input' with NHWCToArmNN and connects the permuted tensor to input slot
// 'index' of 'layer'.
void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
{
    // Add swizzle layer
    armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
    // Connect swizzled input to layer
    swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
}
282
// Appends an ArmNNToNHWC permute layer to output slot 'index' of 'layer' and returns it.
armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
{
    // Add deswizzle layer
    armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
    return deswizzleLayer;
}
289
// Swizzles 'input' into 'firstLayer' and appends a deswizzle after 'lastLayer',
// returning the deswizzle layer.
// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
                                                LayerInputHandle& input,
                                                armnn::IConnectableLayer& firstLayer,
                                                armnn::IConnectableLayer& lastLayer)
{
    SwizzleIn(network, input, firstLayer, 0);
    return DeswizzleOut(network, lastLayer, 0);
}
299
// Single-layer convenience overload: swizzle into and deswizzle out of the same layer.
// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
                                                armnn::IConnectableLayer& layer)
{
    return SwizzleInDeswizzleOut(network, input, layer, layer);
}
306
307bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
308 const armnn::TensorShape & outputShape,
309 uint32_t concatDim)
310{
311 // Validate the output shape is correct given the input shapes (which have just been validated)
312 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
313 if (outputShape.GetNumDimensions() != numDimensions)
314 {
315 return Fail("%s: Output shape has wrong number of dimensions", __func__);
316 }
317
318 unsigned int outputSizeAlongConcatenatedDimension = 0;
319 for (unsigned int i = 0; i < inputShapes.size(); i++)
320 {
321 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
322 }
323
324 for (unsigned int i = 0; i < numDimensions; ++i)
325 {
326 if (i == concatDim)
327 {
328 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
329 {
330 return Fail(
331 "%s: Invalid output shape for dimension %d (%d != %d)",
332 __func__,
333 i,
334 outputShape[i],
335 outputSizeAlongConcatenatedDimension);
336 }
337 }
338 else
339 {
340 if (outputShape[i] != inputShapes[0][i])
341 {
342 return Fail("%s: Invalid output shape", __func__);
343 }
344 }
345 }
346
347 return true;
348}
349
350bool RequiresReshape(armnn::TensorShape & inputShape)
351{
352 return inputShape.GetNumDimensions() < 3;
353}
354
355template<typename OSlot>
356armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, OSlot& inputLayer,
357 armnn::TensorInfo reshapeInfo)
358{
359 armnn::ReshapeDescriptor reshapeDescriptor;
360 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
361
362 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
363 BOOST_ASSERT(reshapeLayer != nullptr);
364
365 // Attach the input layer to the reshape layer
366 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
367 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
368
369 return *reshapeLayer;
370}
371
// Applies 'mapping' to every input in place: each handle is replaced by the output of a
// newly added permute layer and its recorded shape updated. No-op for the 4D identity
// mapping, so callers can pass IdentityPermutation4D unconditionally.
void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}
392
// For concatenation via Compute Library subtensors: rewrites 'concatDimension' (in/out)
// to one ArmNN can concatenate on, and outputs the (pre, post) permutation pair that
// makes the rewrite valid. Identity permutations are produced when no rewrite is needed.
void CreatePermutationParameters(const unsigned int numberOfDimensions,
                                 int32_t & concatDimension,
                                 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
{
    BOOST_ASSERT(numberOfDimensions >= 3);

    // ArmNN uses Compute Library subtensors to perform concatenation
    // This only works when concatenating along dimension 0 or 1 for a 4-D tensor,
    // or along dimension 0 for a 3-D tensor.
    if (numberOfDimensions == 4)
    {
        if (concatDimension == 3)
        {
            // Concat on channels (NHWC dim 3): permute channels into dim 1 and back.
            concatDimension = 1;
            permutationPair = std::make_pair(NHWCToArmNN, ArmNNToNHWC);
        }
        else if (concatDimension == 2)
        {
            // Concat on dim 2: swap dims 1 and 2 both ways.
            concatDimension = 1;
            permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
        }
        else
        {
            // Dims 0 and 1 are directly supported.
            permutationPair = std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
        }

    }
    else if (numberOfDimensions == 3)
    {
        if (concatDimension == 2)
        {
            concatDimension = 0;
            permutationPair = std::make_pair(RotateTensorRight, RotateTensorLeft);
        }
        else if (concatDimension == 1)
        {
            concatDimension = 0;
            permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        }
        else
        {
            // Dim 0 is directly supported.
            permutationPair = std::make_pair(IdentityPermutation3D, IdentityPermutation3D);
        }
    }
}
438
439} // anonymous namespace
440
441namespace armnn_driver
442{
443
/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @return The end layer of the sequence of layers built for the given AndroidNN
/// activation function or nullptr if an error occurred (e.g. unsupported activation).
/// Note that the end layer matches the input layer if no activation is required
/// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
454
455} // namespace armnn_driver
456
457///
458/// Utility templates
459///
460
461namespace armnn_driver
462{
463
464using namespace android::nn;
465
466template<typename HalOperation, typename HalModel>
saoste01b8471482018-10-10 09:44:51 +0100467const Operand* GetInputOperand(const HalOperation& operation, uint32_t inputIndex, const HalModel& model,
468 bool failOnIndexOutOfBounds = true)
arovir01b0717b52018-09-05 17:03:25 +0100469{
470 if (inputIndex >= operation.inputs.size())
471 {
saoste01b8471482018-10-10 09:44:51 +0100472 if (failOnIndexOutOfBounds)
473 {
474 Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
475 }
arovir01b0717b52018-09-05 17:03:25 +0100476 return nullptr;
477 }
478
479 BOOST_ASSERT(operation.inputs[inputIndex] < model.operands.size()); // Model should have been validated beforehand
480 return &model.operands[operation.inputs[inputIndex]];
481}
482
483template<typename HalOperation, typename HalModel>
484const Operand* GetOutputOperand(const HalOperation& operation, uint32_t outputIndex, const HalModel& model)
485{
486 if (outputIndex >= operation.outputs.size())
487 {
488 Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
489 return nullptr;
490 }
491
492 // Model should have been validated beforehand
493 BOOST_ASSERT(operation.outputs[outputIndex] < model.operands.size());
494
495 return &model.operands[operation.outputs[outputIndex]];
496}
497
// Wraps a constant operand's data in a ConstTensorPin.
// @param operand             Must be a supported tensor type with a CONSTANT_* lifetime.
// @param dimensionMappings   Permutation applied to the data (default: none).
// @param overrideTensorShape If non-null, replaces the shape derived from the operand.
// @param optional            If true, a missing value yields a valid "optional" pin
//                            rather than an error.
// @return An invalid pin (with the reason logged) on any failure.
template<typename HalModel>
ConstTensorPin ConvertOperandToConstTensorPin(const Operand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    if (operand.lifetime != OperandLifeTime::CONSTANT_COPY && operand.lifetime != OperandLifeTime::CONSTANT_REFERENCE)
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress(operand, model, data);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
538
// Convenience wrapper: looks up input 'inputIndex' of 'operation' and converts it with
// ConvertOperandToConstTensorPin. Returns an invalid pin (with the reason logged) if the
// input operand cannot be retrieved.
template<typename HalOperation, typename HalModel>
ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
                                                     uint32_t inputIndex,
                                                     const HalModel& model,
                                                     const ConversionData& data,
                                                     const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                                     const armnn::TensorShape* overrideTensorShape = nullptr,
                                                     bool optional = false)
{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
        return ConstTensorPin();
    }
    return ConvertOperandToConstTensorPin(*operand,
                                          model,
                                          data,
                                          dimensionMappings,
                                          overrideTensorShape,
                                          optional);
}
561
// Returns a read-only pointer to a constant operand's data, or nullptr (with the reason
// logged) for lifetimes whose value cannot be read at conversion time (e.g. model inputs).
template<typename HalModel>
const void* GetOperandValueReadOnlyAddress(const Operand& operand, const HalModel& model, const ConversionData& data)
{
    const void* valueStart = nullptr;

    switch (operand.lifetime)
    {
        case OperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case OperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}
592
593template<typename HalOperation, typename HalModel, typename OutputType>
594bool GetInputScalar(const HalOperation& operation,
595 uint32_t inputIndex,
596 OperandType type,
597 OutputType& outValue,
598 const HalModel& model,
599 const ConversionData& data)
600{
601 const Operand* operand = GetInputOperand(operation, inputIndex, model);
602 if (!operand)
603 {
604 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
605 }
606
607 if (operand->type != type)
608 {
609 return Fail("%s: unexpected operand type: %s (should be %s)",
610 __func__, toString(operand->type).c_str(), toString(type).c_str());
611 }
612
613 if (operand->location.length != sizeof(OutputType))
614 {
615 return Fail("%s: incorrect operand location length: %i (should be %i)",
616 __func__, operand->location.length, sizeof(OutputType));
617 }
618
619 const void* valueAddress = GetOperandValueReadOnlyAddress(*operand, model, data);
620 if (!valueAddress)
621 {
622 return Fail("%s: failed to get address for operand", __func__);
623 }
624
625 outValue = *(static_cast<const OutputType*>(valueAddress));
626 return true;
627}
628
// Reads an INT32 scalar input operand into outValue; returns false on failure.
template<typename HalOperation, typename HalModel>
bool GetInputInt32(const HalOperation& operation,
                   uint32_t inputIndex,
                   int32_t& outValue,
                   const HalModel& model,
                   const ConversionData& data)
{
    return GetInputScalar(operation, inputIndex, OperandType::INT32, outValue, model, data);
}
638
639
// Reads a FLOAT32 scalar input operand into outValue; returns false on failure.
template<typename HalOperation, typename HalModel>
bool GetInputFloat32(const HalOperation& operation,
                     uint32_t inputIndex,
                     float& outValue,
                     const HalModel& model,
                     const ConversionData& data)
{
    return GetInputScalar(operation, inputIndex, OperandType::FLOAT32, outValue, model, data);
}
649
650
// Shared implementation for reading an activation-function input: accepts either a
// plain INT32 scalar or a TENSOR_INT32, reads the value and casts it to ActivationFn.
template<typename HalOperation, typename HalModel>
bool GetInputActivationFunctionImpl(const HalOperation& operation,
                                    uint32_t inputIndex,
                                    OperandType type,
                                    ActivationFn& outActivationFunction,
                                    const HalModel& model,
                                    const ConversionData& data)
{
    if (type != OperandType::INT32 && type != OperandType::TENSOR_INT32)
    {
        return Fail("%s: unexpected operand type: %s (should be %s or %s)",
                    __func__,
                    toString(type).c_str(),
                    toString(OperandType::INT32).c_str(),
                    toString(OperandType::TENSOR_INT32).c_str());
    }

    int32_t activationFunctionAsInt;
    if (!GetInputScalar(operation, inputIndex, type, activationFunctionAsInt, model, data))
    {
        return Fail("%s: failed to get activation input value", __func__);
    }
    // The raw int is assumed to be a valid ActivationFn enumerator.
    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
    return true;
}
676
677
// Reads an activation function supplied as an INT32 scalar input.
template<typename HalOperation, typename HalModel>
bool GetInputActivationFunction(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& outActivationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    return GetInputActivationFunctionImpl(operation,
                                          inputIndex,
                                          OperandType::INT32,
                                          outActivationFunction,
                                          model,
                                          data);
}
692
// Reads an activation function supplied as a tensor input.
// NOTE(review): despite the name, this passes OperandType::INT32 (not TENSOR_INT32)
// to the impl, making it identical to GetInputActivationFunction — confirm whether
// TENSOR_INT32 was intended here.
template<typename HalOperation, typename HalModel>
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    return GetInputActivationFunctionImpl(operation,
                                          inputIndex,
                                          OperandType::INT32,
                                          outActivationFunction,
                                          model,
                                          data);
}
708
709
// Reads the activation-function input if the operation provides one; otherwise
// defaults to kActivationNone. Only fails when the input exists but is invalid.
template<typename HalOperation, typename HalModel>
bool GetOptionalInputActivation(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& activationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    if (operation.inputs.size() <= inputIndex)
    {
        // Input not present: no activation.
        activationFunction = ActivationFn::kActivationNone;
    }
    else
    {
        if (!GetInputActivationFunction(operation, inputIndex, activationFunction, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    return true;
}
730
731template<typename HalModel>
732bool GetTensorInt32Values(const Operand& operand,
733 std::vector<int32_t>& outValues,
734 const HalModel& model,
735 const ConversionData& data)
736{
737 if (operand.type != OperandType::TENSOR_INT32)
738 {
739 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
740 }
741
742 const void* startAddress = GetOperandValueReadOnlyAddress(operand, model, data);
743 if (!startAddress)
744 {
745 return Fail("%s: failed to get operand address", __func__, operand.type);
746 }
747
748 // Check number of bytes is sensible
749 const uint32_t numBytes = operand.location.length;
750 if (numBytes % sizeof(int32_t) != 0)
751 {
752 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
753 __func__, numBytes, sizeof(int32_t));
754 }
755
756 outValues.resize(numBytes / sizeof(int32_t));
757 memcpy(outValues.data(), startAddress, numBytes);
758 return true;
759}
760
// Reads an INT32 input operand and interprets it as an AndroidNN PaddingScheme.
template<typename HalOperation, typename HalModel>
bool GetInputPaddingScheme(const HalOperation& operation,
                           uint32_t inputIndex,
                           PaddingScheme& outPaddingScheme,
                           const HalModel& model,
                           const ConversionData& data)
{
    int32_t paddingSchemeAsInt;
    if (!GetInputInt32(operation, inputIndex, paddingSchemeAsInt, model, data))
    {
        return Fail("%s: failed to get padding scheme input value", __func__);
    }

    // The raw int is assumed to be a valid PaddingScheme enumerator.
    outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
    return true;
}
777
// Resolves input 'inputIndex' of 'operation' to a LayerInputHandle:
// - model inputs/outputs and temporaries resolve to the output slot already recorded in
//   data.m_OutputSlotForOperand;
// - constants get a new Constant layer added to the network (if the backend supports it).
// Returns an invalid handle (with the reason logged) on any failure.
template<typename HalOperation, typename HalModel>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);

    switch (operand->lifetime)
    {
        case OperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
        case OperandLifeTime::MODEL_INPUT:
        case OperandLifeTime::MODEL_OUTPUT:
        {
            // The tensor is either an operand internal to the model, or a model input.
            // It can be associated with an ArmNN output slot for an existing layer.

            // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
            const uint32_t operandIndex = operation.inputs[inputIndex];
            return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            break;
        }
        case OperandLifeTime::CONSTANT_COPY:
        case OperandLifeTime::CONSTANT_REFERENCE:
        {
            // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
            ConstTensorPin tensorPin = ConvertOperandToConstTensorPin(*operand, model, data);
            if (tensorPin.IsValid())
            {
                // The backend must support constants of this tensor info.
                if (!IsLayerSupported(__func__,
                                      armnn::IsConstantSupported,
                                      data.m_Compute,
                                      tensorPin.GetConstTensor().GetInfo()))
                {
                    return LayerInputHandle();
                }

                armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                return LayerInputHandle(true, &outputSlot, operandTensorInfo);
            }
            else
            {
                Fail("%s: invalid operand tensor", __func__);
                return LayerInputHandle();
            }
            break;
        }
        default:
        {
            // Unsupported lifetime for an input tensor
            Fail("%s: unsupported lifetime for input tensor: %s",
                 __func__, toString(operand->lifetime).c_str());
            return LayerInputHandle();
        }
    }
}
850
// Converts a one-input/one-output activation operation (ReLU, Tanh, etc., described by
// activationDesc) into an ArmNN Activation layer, wiring input 0 and tracking output 0.
// @return false (with the reason logged where applicable) on any failure.
template<typename HalOperation, typename HalModel>
bool ConvertToActivation(const HalOperation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const HalModel& model,
                         ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }
    const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
    // Backend capability check; IsLayerSupported logs the reason on failure.
    if (!IsLayerSupported(__func__,
                          armnn::IsActivationSupported,
                          data.m_Compute,
                          input.GetTensorInfo(),
                          outInfo,
                          activationDesc))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    BOOST_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}
886
887template<typename HalOperation, typename HalModel>
888bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
889 uint32_t operationOutputIndex,
890 armnn::IConnectableLayer& layer,
891 uint32_t layerOutputIndex,
892 const HalModel& model,
893 ConversionData& data)
894{
895 const Operand* outputOperand = GetOutputOperand(operation, operationOutputIndex, model);
896 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
897 {
898 return false;
899 }
900
901 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
902
903 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
904 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
905
906 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
907
908 return true;
909}
910
// Convenience overload for the common case where the operation output index and the
// layer output slot index are the same.
template<typename HalOperation, typename HalModel>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t outputIndex,
                                  armnn::IConnectableLayer& layer,
                                  const HalModel& model,
                                  ConversionData& data)
{
    return SetupAndTrackLayerOutputSlot(operation, outputIndex, layer, outputIndex, model, data);
}
920
// Converts an AndroidNN 2D pooling operation (average/max/L2, selected by poolType)
// into an ArmNN Pooling2d layer (NHWC), handling both the implicit-padding form
// (7 inputs) and the explicit-padding form (10 inputs), plus the fused activation.
// @return false (with the reason logged where applicable) on any failure.
template<typename HalOperation, typename HalModel>
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    if (operation.inputs.size() == 7)
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme(operation, 1, scheme, model, data)
            || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_StrideX, model, data)
            || !GetInputScalar(operation, 3, OperandType::INT32, desc.m_StrideY, model, data)
            || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PoolWidth, model, data)
            || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PoolHeight, model, data)
            || !GetInputActivationFunction(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // NHWC layout: shape is [N, H, W, C].
        const unsigned int inputWidth = inputInfo.GetShape()[2];
        const unsigned int inputHeight = inputInfo.GetShape()[1];

        // Resolve the implicit padding scheme to explicit values per spatial dimension.
        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }
    else
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar(operation, 1, OperandType::INT32, desc.m_PadLeft, model, data)
            || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_PadRight, model, data)
            || !GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadTop, model, data)
            || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadBottom, model, data)
            || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideX, model, data)
            || !GetInputScalar(operation, 6, OperandType::INT32, desc.m_StrideY, model, data)
            || !GetInputScalar(operation, 7, OperandType::INT32, desc.m_PoolWidth, model, data)
            || !GetInputScalar(operation, 8, OperandType::INT32, desc.m_PoolHeight, model, data)
            || !GetInputActivationFunction(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }
    }

    // Backend capability check; IsLayerSupported logs the reason on failure.
    if (!IsLayerSupported(__func__,
                          armnn::IsPooling2dSupported,
                          data.m_Compute,
                          inputInfo,
                          outputInfo,
                          desc))
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    // Append the fused activation (if any); endLayer == pooling2dLayer when none is needed.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
}
1013
saoste01b8471482018-10-10 09:44:51 +01001014} // namespace armnn_driver