blob: c86ad93c2897fe6cb0ed0e1f21c1dc0a8a04d334 [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
8#include <armnn/ArmNN.hpp>
9
10#include "armnn/src/armnnUtils/Permute.hpp"
11#include "Utils.hpp"
12
13#include <ActivationFunctor.h>
14#include <CpuExecutor.h>
15#include <OperationsUtils.h>
16
17#include <boost/assert.hpp>
18#include <boost/core/ignore_unused.hpp>
19#include <boost/test/tools/floating_point_comparison.hpp>
20
21#include <log/log.h>
22
23namespace armnn_driver
24{
25
26///
27/// Helper classes
28///
29
// State shared across the conversion of a single model: the target backend,
// the ArmNN network being built, and per-operand bookkeeping.
struct ConversionData
{
    ConversionData(armnn::Compute compute)
        : m_Compute(compute)
        , m_Network(nullptr, nullptr) // empty INetworkPtr; populated when conversion begins
    {}

    // Backend the model is being converted for (fixed for the lifetime of the conversion).
    const armnn::Compute m_Compute;
    // The ArmNN network under construction.
    armnn::INetworkPtr m_Network;
    // Indexed by model operand index: the output slot of the layer producing that operand
    // (nullptr if the producing layer could not be converted).
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    // Memory pools backing operand data referenced by the model (CONSTANT_REFERENCE lifetimes).
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};
42
// Lightweight handle pairing an ArmNN output slot (the producer of a tensor) with
// that tensor's info, used to wire an operand into a layer's input slot.
class LayerInputHandle
{
public:
    // Default-constructs an invalid handle.
    LayerInputHandle();
    // @param valid      Whether the handle refers to a usable output slot.
    // @param outputSlot The slot producing the tensor (may be nullptr).
    // @param tensorInfo Info describing the tensor carried by the slot.
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    bool IsValid() const;

    // Connects the wrapped output slot to the given input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
60
// Holds an armnn::ConstTensor for a constant model operand, optionally owning a
// swizzled copy of the data when a permutation was required.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    // @param mappings Permutation applied to the data (identity if no swizzling is needed).
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    // Move-only: the pin may own the swizzled data buffer.
    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other)      = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
95
96} // namespace armnn_driver
97
98///
99/// Utility functions
100///
101
102namespace
103{
104
105using namespace armnn_driver;
106using namespace android::nn;
107
// Logs (via ALOGD) the reason a model could not be converted.
// @return Always false, so callers can report the error and bail out in a single
//         statement, e.g. "return Fail("%s: bad input", __func__);".
template<class... FormatArgs>
static bool Fail(const char* formatStr, FormatArgs&&... formatArgs)
{
    ALOGD(formatStr, std::forward<FormatArgs>(formatArgs)...);
    return false;
}
116
117// Convenience function to call an Is*Supported function and log caller name together with reason for lack of support.
118// Called as: IsLayerSupported(__func__, Is*Supported, a, b, c, d, e)
119template<typename IsLayerSupportedFunc, typename ... Args>
120bool IsLayerSupported(const char* funcName, IsLayerSupportedFunc f, Args&&... args)
121{
122 std::vector<char> unsupportedReason(1024+1);
123 bool isSupported = f(std::forward<Args>(args)..., unsupportedReason.data(), unsupportedReason.size()-1);
124 if(isSupported)
125 {
126 return true;
127 }
128 else
129 {
130 std::string sUnsupportedReason(unsupportedReason.data());
131 if (sUnsupportedReason.size() > 0)
132 {
133 ALOGD("%s: not supported by armnn: %s", funcName, sUnsupportedReason.c_str());
134 } else
135 {
136 ALOGD("%s: not supported by armnn", funcName);
137 }
138 return false;
139 }
140}
141
// Builds an armnn::TensorShape directly from the operand's dimension list.
armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}
146
147inline bool IsOperandTypeSupportedForTensors(OperandType type)
148{
149 return type == OperandType::TENSOR_FLOAT32 ||
150 type == OperandType::TENSOR_QUANT8_ASYMM ||
151 type == OperandType::TENSOR_INT32;
152}
153
// Connects input0 and input1 to the two input slots of startLayer. When the inputs'
// ranks differ, a Reshape layer is inserted in front of the lower-rank input, padding
// its shape with leading 1s so both inputs end up with the same number of dimensions.
void BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1, armnn::IConnectableLayer* startLayer,
                     armnn::INetwork& network)
{
    BOOST_ASSERT(startLayer != nullptr);
    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();

    if (inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
    {
        // If the number of dimensions do not match then we need to add degenerate dimensions
        // to the "smaller" tensor using a reshape:
        //   Small  Big
        //     |     |
        //  Reshape  |
        //      \   /
        //       Add
        bool input0IsBigger = inputTensorInfo0.GetNumDimensions() > inputTensorInfo1.GetNumDimensions();

        LayerInputHandle& smallTensorHandle = input0IsBigger ? input1 : input0;
        const armnn::TensorInfo& smallTensorDims = smallTensorHandle.GetTensorInfo();

        LayerInputHandle& bigTensorHandle = input0IsBigger ? input0 : input1;
        const armnn::TensorInfo& bigTensorDims = bigTensorHandle.GetTensorInfo();

        const unsigned int bigTensorDimsNumber = bigTensorDims.GetNumDimensions();
        // Target shape: 1s in the leading positions, the small tensor's dimensions at the back.
        std::vector<unsigned int> reshapedDims(bigTensorDimsNumber, 1);
        unsigned int sizeDifference = bigTensorDimsNumber - smallTensorDims.GetNumDimensions();
        for (unsigned i = sizeDifference; i < bigTensorDimsNumber; ++i)
        {
            reshapedDims[i] = smallTensorDims.GetShape()[i-sizeDifference];
        }
        // Copy the small tensor's info (preserves data type / quantization) and override the shape.
        armnn::TensorInfo reshapedInfo = smallTensorDims;
        reshapedInfo.SetShape(armnn::TensorShape{ static_cast<unsigned int>(reshapedDims.size()),
                                                  reshapedDims.data() });

        armnn::ReshapeDescriptor reshapeDesc;
        reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
        armnn::IConnectableLayer* const reshapeLayer = network.AddReshapeLayer(reshapeDesc);
        smallTensorHandle.Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

        // Connect the outputs from new reshape and original input layer
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        bigTensorHandle.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Same rank: connect both inputs directly.
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
}
205
// Computes the head/tail padding for one spatial dimension using the AndroidNN
// implicit padding scheme (calculateExplicitPadding), converting the results to
// the unsigned values ArmNN descriptors expect.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    // numeric_cast throws if the computed padding were ever negative.
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}
215
216Shape GetOperandShape(const Operand& operand)
217{
218 Shape shape;
219 shape.type = operand.type;
220 shape.dimensions = operand.dimensions;
221 shape.scale = operand.scale;
222 shape.offset = operand.zeroPoint;
223 return shape;
224}
225
// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want to ArmNN itself to accept these inconsistencies as it is up to the user
// (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& inputInfo)
{
    const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
    if (biasInfo.GetQuantizationScale() != expectedBiasScale)
    {
        // Only overwrite the scale if it is within 1% of the expected value; a larger
        // mismatch is left untouched (and will be rejected later by ArmNN).
        boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
        if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
        {
            ALOGW("Bias quantization scale has been modified to match input*weights");
            biasInfo.SetQuantizationScale(expectedBiasScale);
        }
    }
}
244
// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
255
// Adds a Permute layer to the network, connects the given output slot (or
// LayerInputHandle) to it, and sets the permuted tensor info on its output.
// @return The newly added permute layer.
template<typename OSlot>
armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
                                          const armnn::PermutationVector& mappings)
{
    // Add swizzle layer
    armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);

    BOOST_ASSERT(layer != nullptr);

    // Connect input to swizzle layer
    input.Connect(layer->GetInputSlot(0));

    // Setup swizzled output
    const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
    layer->GetOutputSlot(0).SetTensorInfo(outInfo);

    return *layer;
}
274
// Inserts an NHWC -> ArmNN permute in front of the given input slot of 'layer'
// and connects 'input' through it.
void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
{
    // Add swizzle layer
    armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
    // Connect swizzled input to layer
    swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
}
282
// Appends an ArmNN -> NHWC permute after the given output slot of 'layer'.
// @return The newly added deswizzle (permute) layer.
armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
{
    // Add deswizzle layer
    armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
    return deswizzleLayer;
}
289
// Wraps a chain of layers with an NHWC swizzle on firstLayer's input 0 and a
// deswizzle after lastLayer's output 0.
// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
                                                LayerInputHandle& input,
                                                armnn::IConnectableLayer& firstLayer,
                                                armnn::IConnectableLayer& lastLayer)
{
    SwizzleIn(network, input, firstLayer, 0);
    return DeswizzleOut(network, lastLayer, 0);
}
299
// Single-layer convenience overload: swizzle into and deswizzle out of the same layer.
// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
                                                armnn::IConnectableLayer& layer)
{
    return SwizzleInDeswizzleOut(network, input, layer, layer);
}
306
307bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
308 const armnn::TensorShape & outputShape,
309 uint32_t concatDim)
310{
311 // Validate the output shape is correct given the input shapes (which have just been validated)
312 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
313 if (outputShape.GetNumDimensions() != numDimensions)
314 {
315 return Fail("%s: Output shape has wrong number of dimensions", __func__);
316 }
317
318 unsigned int outputSizeAlongConcatenatedDimension = 0;
319 for (unsigned int i = 0; i < inputShapes.size(); i++)
320 {
321 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
322 }
323
324 for (unsigned int i = 0; i < numDimensions; ++i)
325 {
326 if (i == concatDim)
327 {
328 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
329 {
330 return Fail(
331 "%s: Invalid output shape for dimension %d (%d != %d)",
332 __func__,
333 i,
334 outputShape[i],
335 outputSizeAlongConcatenatedDimension);
336 }
337 }
338 else
339 {
340 if (outputShape[i] != inputShapes[0][i])
341 {
342 return Fail("%s: Invalid output shape", __func__);
343 }
344 }
345 }
346
347 return true;
348}
349
// A tensor of rank < 3 must be reshaped before the 3D/4D concat permutations apply.
bool RequiresReshape(armnn::TensorShape & inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}
354
// Adds a Reshape layer producing 'reshapeInfo', connects the given output slot (or
// LayerInputHandle) to it, and returns the new layer.
template<typename OSlot>
armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, OSlot& inputLayer,
                                          armnn::TensorInfo reshapeInfo)
{
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
    BOOST_ASSERT(reshapeLayer != nullptr);

    // Attach the input layer to the reshape layer
    inputLayer.Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);

    return *reshapeLayer;
}
371
// Applies 'mapping' to every input in place: each handle is routed through a new
// permute layer and replaced by a handle to the permuted output, with 'inputShapes'
// updated to match. A no-op when the mapping is the 4D identity.
void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}
392
narpra01f176d5a2018-11-18 20:17:48 +0000393bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
394 int32_t & concatDimension,
395 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
arovir01b0717b52018-09-05 17:03:25 +0100396{
narpra01f176d5a2018-11-18 20:17:48 +0000397 bool needPermute = false;
arovir01b0717b52018-09-05 17:03:25 +0100398 BOOST_ASSERT(numberOfDimensions >= 3);
399
400 // ArmNN uses Compute Library subtensors to perform concatenation
narpra01f176d5a2018-11-18 20:17:48 +0000401 // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
402 // or along dimension 0 or 2 for a 3-D tensor.
403 if (numberOfDimensions == 4 && concatDimension == 2)
arovir01b0717b52018-09-05 17:03:25 +0100404 {
narpra01f176d5a2018-11-18 20:17:48 +0000405 concatDimension = 1;
406 permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
407 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100408 }
narpra01f176d5a2018-11-18 20:17:48 +0000409 else if (numberOfDimensions == 3 && concatDimension == 1)
arovir01b0717b52018-09-05 17:03:25 +0100410 {
narpra01f176d5a2018-11-18 20:17:48 +0000411 concatDimension = 0;
412 permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
413 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100414 }
narpra01f176d5a2018-11-18 20:17:48 +0000415 return needPermute;
arovir01b0717b52018-09-05 17:03:25 +0100416}
417
418} // anonymous namespace
419
420namespace armnn_driver
421{
422
/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @return The end layer of the sequence of layers built for the given AndroidNN
/// activation function or nullptr if an error occurred (e.g. unsupported activation).
/// Note that the end layer matches the input layer if no activation is required
/// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
433
434} // namespace armnn_driver
435
436///
437/// Utility templates
438///
439
440namespace armnn_driver
441{
442
443using namespace android::nn;
444
445template<typename HalOperation, typename HalModel>
saoste01b8471482018-10-10 09:44:51 +0100446const Operand* GetInputOperand(const HalOperation& operation, uint32_t inputIndex, const HalModel& model,
447 bool failOnIndexOutOfBounds = true)
arovir01b0717b52018-09-05 17:03:25 +0100448{
449 if (inputIndex >= operation.inputs.size())
450 {
saoste01b8471482018-10-10 09:44:51 +0100451 if (failOnIndexOutOfBounds)
452 {
453 Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
454 }
arovir01b0717b52018-09-05 17:03:25 +0100455 return nullptr;
456 }
457
458 BOOST_ASSERT(operation.inputs[inputIndex] < model.operands.size()); // Model should have been validated beforehand
459 return &model.operands[operation.inputs[inputIndex]];
460}
461
462template<typename HalOperation, typename HalModel>
463const Operand* GetOutputOperand(const HalOperation& operation, uint32_t outputIndex, const HalModel& model)
464{
465 if (outputIndex >= operation.outputs.size())
466 {
467 Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
468 return nullptr;
469 }
470
471 // Model should have been validated beforehand
472 BOOST_ASSERT(operation.outputs[outputIndex] < model.operands.size());
473
474 return &model.operands[operation.outputs[outputIndex]];
475}
476
// Converts a constant model operand into a ConstTensorPin.
// @param dimensionMappings   Permutation to apply to the data (identity by default).
// @param overrideTensorShape Replaces the operand's own shape when non-null.
// @param optional            When true, a missing value yields a valid-but-optional pin
//                            instead of an error.
// @return An invalid pin (IsValid() == false) on any failure.
template<typename HalModel>
ConstTensorPin ConvertOperandToConstTensorPin(const Operand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    // Only operands whose data is constant (embedded or pool-referenced) can be pinned.
    if (operand.lifetime != OperandLifeTime::CONSTANT_COPY && operand.lifetime != OperandLifeTime::CONSTANT_REFERENCE)
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress(operand, model, data);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
517
// Looks up the operation's inputIndex-th input operand and converts it into a
// ConstTensorPin (see ConvertOperandToConstTensorPin for the parameter semantics).
// @return An invalid pin (IsValid() == false) if the operand cannot be retrieved.
template<typename HalOperation, typename HalModel>
ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
                                                     uint32_t inputIndex,
                                                     const HalModel& model,
                                                     const ConversionData& data,
                                                     const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                                     const armnn::TensorShape* overrideTensorShape = nullptr,
                                                     bool optional = false)
{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
        return ConstTensorPin();
    }
    return ConvertOperandToConstTensorPin(*operand,
                                          model,
                                          data,
                                          dimensionMappings,
                                          overrideTensorShape,
                                          optional);
}
540
// Returns a read-only pointer to the operand's constant data: either inside
// model.operandValues (CONSTANT_COPY) or inside one of the run-time memory pools
// (CONSTANT_REFERENCE). Returns nullptr, with a logged failure, for any other
// lifetime (e.g. model inputs, whose values are not known at conversion time).
template<typename HalModel>
const void* GetOperandValueReadOnlyAddress(const Operand& operand, const HalModel& model, const ConversionData& data)
{
    const void* valueStart = nullptr;

    switch (operand.lifetime)
    {
        case OperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case OperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}
571
572template<typename HalOperation, typename HalModel, typename OutputType>
573bool GetInputScalar(const HalOperation& operation,
574 uint32_t inputIndex,
575 OperandType type,
576 OutputType& outValue,
577 const HalModel& model,
578 const ConversionData& data)
579{
580 const Operand* operand = GetInputOperand(operation, inputIndex, model);
581 if (!operand)
582 {
583 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
584 }
585
586 if (operand->type != type)
587 {
588 return Fail("%s: unexpected operand type: %s (should be %s)",
589 __func__, toString(operand->type).c_str(), toString(type).c_str());
590 }
591
592 if (operand->location.length != sizeof(OutputType))
593 {
594 return Fail("%s: incorrect operand location length: %i (should be %i)",
595 __func__, operand->location.length, sizeof(OutputType));
596 }
597
598 const void* valueAddress = GetOperandValueReadOnlyAddress(*operand, model, data);
599 if (!valueAddress)
600 {
601 return Fail("%s: failed to get address for operand", __func__);
602 }
603
604 outValue = *(static_cast<const OutputType*>(valueAddress));
605 return true;
606}
607
// Convenience wrapper: reads an INT32 scalar input into outValue.
template<typename HalOperation, typename HalModel>
bool GetInputInt32(const HalOperation& operation,
                   uint32_t inputIndex,
                   int32_t& outValue,
                   const HalModel& model,
                   const ConversionData& data)
{
    return GetInputScalar(operation, inputIndex, OperandType::INT32, outValue, model, data);
}
617
618
// Convenience wrapper: reads a FLOAT32 scalar input into outValue.
template<typename HalOperation, typename HalModel>
bool GetInputFloat32(const HalOperation& operation,
                     uint32_t inputIndex,
                     float& outValue,
                     const HalModel& model,
                     const ConversionData& data)
{
    return GetInputScalar(operation, inputIndex, OperandType::FLOAT32, outValue, model, data);
}
628
629
// Shared implementation for reading an activation-function input: accepts either an
// INT32 scalar or a TENSOR_INT32 operand type, reads the raw int32 value via
// GetInputScalar, and casts it to ActivationFn.
template<typename HalOperation, typename HalModel>
bool GetInputActivationFunctionImpl(const HalOperation& operation,
                                    uint32_t inputIndex,
                                    OperandType type,
                                    ActivationFn& outActivationFunction,
                                    const HalModel& model,
                                    const ConversionData& data)
{
    if (type != OperandType::INT32 && type != OperandType::TENSOR_INT32)
    {
        return Fail("%s: unexpected operand type: %s (should be %s or %s)",
                    __func__,
                    toString(type).c_str(),
                    toString(OperandType::INT32).c_str(),
                    toString(OperandType::TENSOR_INT32).c_str());
    }

    int32_t activationFunctionAsInt;
    if (!GetInputScalar(operation, inputIndex, type, activationFunctionAsInt, model, data))
    {
        return Fail("%s: failed to get activation input value", __func__);
    }
    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
    return true;
}
655
656
// Reads an activation-function input stored as an INT32 scalar operand.
template<typename HalOperation, typename HalModel>
bool GetInputActivationFunction(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& outActivationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    return GetInputActivationFunctionImpl(operation,
                                          inputIndex,
                                          OperandType::INT32,
                                          outActivationFunction,
                                          model,
                                          data);
}
671
// Reads an activation-function input supplied as a 1-D tensor of size 1.
// NOTE(review): this forwards OperandType::INT32 (not TENSOR_INT32) to the impl,
// identically to GetInputActivationFunction — confirm this is intentional.
template<typename HalOperation, typename HalModel>
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    return GetInputActivationFunctionImpl(operation,
                                          inputIndex,
                                          OperandType::INT32,
                                          outActivationFunction,
                                          model,
                                          data);
}
687
688
689template<typename HalOperation, typename HalModel>
690bool GetOptionalInputActivation(const HalOperation& operation,
691 uint32_t inputIndex,
692 ActivationFn& activationFunction,
693 const HalModel& model,
694 const ConversionData& data)
695{
696 if (operation.inputs.size() <= inputIndex)
697 {
698 activationFunction = ActivationFn::kActivationNone;
699 }
700 else
701 {
702 if (!GetInputActivationFunction(operation, inputIndex, activationFunction, model, data))
703 {
704 return Fail("%s: Operation has invalid inputs", __func__);
705 }
706 }
707 return true;
708}
709
710template<typename HalModel>
711bool GetTensorInt32Values(const Operand& operand,
712 std::vector<int32_t>& outValues,
713 const HalModel& model,
714 const ConversionData& data)
715{
716 if (operand.type != OperandType::TENSOR_INT32)
717 {
718 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
719 }
720
721 const void* startAddress = GetOperandValueReadOnlyAddress(operand, model, data);
722 if (!startAddress)
723 {
724 return Fail("%s: failed to get operand address", __func__, operand.type);
725 }
726
727 // Check number of bytes is sensible
728 const uint32_t numBytes = operand.location.length;
729 if (numBytes % sizeof(int32_t) != 0)
730 {
731 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
732 __func__, numBytes, sizeof(int32_t));
733 }
734
735 outValues.resize(numBytes / sizeof(int32_t));
736 memcpy(outValues.data(), startAddress, numBytes);
737 return true;
738}
739
// Reads an INT32 input and interprets it as an AndroidNN PaddingScheme.
template<typename HalOperation, typename HalModel>
bool GetInputPaddingScheme(const HalOperation& operation,
                           uint32_t inputIndex,
                           PaddingScheme& outPaddingScheme,
                           const HalModel& model,
                           const ConversionData& data)
{
    int32_t paddingSchemeAsInt;
    if (!GetInputInt32(operation, inputIndex, paddingSchemeAsInt, model, data))
    {
        return Fail("%s: failed to get padding scheme input value", __func__);
    }

    outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
    return true;
}
756
// Resolves the operation's inputIndex-th input to a LayerInputHandle:
//  - TEMPORARY_VARIABLE / MODEL_INPUT / MODEL_OUTPUT operands map to the output slot
//    of a previously-converted layer (recorded in data.m_OutputSlotForOperand);
//  - CONSTANT_COPY / CONSTANT_REFERENCE operands become a new Constant layer.
// @return An invalid handle (IsValid() == false) on any failure.
template<typename HalOperation, typename HalModel>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);

    switch (operand->lifetime)
    {
        case OperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
        case OperandLifeTime::MODEL_INPUT:
        case OperandLifeTime::MODEL_OUTPUT:
        {
            // The tensor is either an operand internal to the model, or a model input.
            // It can be associated with an ArmNN output slot for an existing layer.

            // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
            const uint32_t operandIndex = operation.inputs[inputIndex];
            return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            break;
        }
        case OperandLifeTime::CONSTANT_COPY:
        case OperandLifeTime::CONSTANT_REFERENCE:
        {
            // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
            ConstTensorPin tensorPin = ConvertOperandToConstTensorPin(*operand, model, data);
            if (tensorPin.IsValid())
            {
                if (!IsLayerSupported(__func__,
                                      armnn::IsConstantSupported,
                                      data.m_Compute,
                                      tensorPin.GetConstTensor().GetInfo()))
                {
                    return LayerInputHandle();
                }

                armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                return LayerInputHandle(true, &outputSlot, operandTensorInfo);
            }
            else
            {
                Fail("%s: invalid operand tensor", __func__);
                return LayerInputHandle();
            }
            break;
        }
        default:
        {
            // Unsupported lifetime for an input tensor
            Fail("%s: unsupported lifetime for input tensor: %s",
                 __func__, toString(operand->lifetime).c_str());
            return LayerInputHandle();
        }
    }
}
829
// Converts a single-input activation operation (e.g. RELU, TANH) into an ArmNN
// Activation layer described by activationDesc, after checking backend support.
// @return false (with a logged reason) if the inputs are invalid or unsupported.
template<typename HalOperation, typename HalModel>
bool ConvertToActivation(const HalOperation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const HalModel& model,
                         ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }
    const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
    if (!IsLayerSupported(__func__,
                          armnn::IsActivationSupported,
                          data.m_Compute,
                          input.GetTensorInfo(),
                          outInfo,
                          activationDesc))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    BOOST_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    // Record the layer's output slot against the operation's output operand.
    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}
865
866template<typename HalOperation, typename HalModel>
867bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
868 uint32_t operationOutputIndex,
869 armnn::IConnectableLayer& layer,
870 uint32_t layerOutputIndex,
871 const HalModel& model,
872 ConversionData& data)
873{
874 const Operand* outputOperand = GetOutputOperand(operation, operationOutputIndex, model);
875 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
876 {
877 return false;
878 }
879
880 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
881
882 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
883 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
884
885 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
886
887 return true;
888}
889
// Convenience overload: the operation output index and the layer output slot index
// are the same.
template<typename HalOperation, typename HalModel>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t outputIndex,
                                  armnn::IConnectableLayer& layer,
                                  const HalModel& model,
                                  ConversionData& data)
{
    return SetupAndTrackLayerOutputSlot(operation, outputIndex, layer, outputIndex, model, data);
}
899
// Converts an NNAPI 2D pooling operation (max/average/L2, selected by poolType) into
// an ArmNN Pooling2d layer in NHWC layout, handling both the implicit-padding form
// (7 inputs) and the explicit-padding form (10 inputs), plus the fused activation.
// @return false (with a logged reason) on invalid inputs or unsupported configuration.
template<typename HalOperation, typename HalModel>
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    if (operation.inputs.size() == 7)
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme(operation, 1, scheme, model, data)
            || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_StrideX, model, data)
            || !GetInputScalar(operation, 3, OperandType::INT32, desc.m_StrideY, model, data)
            || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PoolWidth, model, data)
            || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PoolHeight, model, data)
            || !GetInputActivationFunction(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // NHWC layout: shape is [batch, height, width, channels].
        const unsigned int inputWidth  = inputInfo.GetShape()[2];
        const unsigned int inputHeight = inputInfo.GetShape()[1];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }
    else
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar(operation, 1, OperandType::INT32, desc.m_PadLeft, model, data)
            || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_PadRight, model, data)
            || !GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadTop, model, data)
            || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadBottom, model, data)
            || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideX, model, data)
            || !GetInputScalar(operation, 6, OperandType::INT32, desc.m_StrideY, model, data)
            || !GetInputScalar(operation, 7, OperandType::INT32, desc.m_PoolWidth, model, data)
            || !GetInputScalar(operation, 8, OperandType::INT32, desc.m_PoolHeight, model, data)
            || !GetInputActivationFunction(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }
    }

    if (!IsLayerSupported(__func__,
                          armnn::IsPooling2dSupported,
                          data.m_Compute,
                          inputInfo,
                          outputInfo,
                          desc))
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the pooling layer.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    // Track the end of the (pooling [+ activation]) chain as the operation's output.
    return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
}
992
saoste01b8471482018-10-10 09:44:51 +0100993} // namespace armnn_driver