//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#pragma once

#include <armnn/ArmNN.hpp>

#include "armnn/src/armnnUtils/Permute.hpp"
#include "Utils.hpp"

#include <ActivationFunctor.h>
#include <CpuExecutor.h>
#include <OperationsUtils.h>

#include <boost/assert.hpp>
#include <boost/core/ignore_unused.hpp>
#include <boost/test/tools/floating_point_comparison.hpp>

#include <log/log.h>

namespace armnn_driver
{

///
/// Helper classes
///

struct ConversionData
{
    ConversionData(armnn::Compute compute)
        : m_Compute(compute)
        , m_Network(nullptr, nullptr)
    {}

    const armnn::Compute m_Compute;
    armnn::INetworkPtr m_Network;
    std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
    std::vector<android::nn::RunTimePoolInfo> m_MemPools;
};

class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    bool IsValid() const;

    void Connect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};

class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors).
    // The optional flag can be set to indicate that the tensor values were missing, but the pin is otherwise valid.
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of the tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // Optional flag to indicate that an invalid tensor pin is not an error, but that the optional values were not given.
    bool m_Optional;
};

} // namespace armnn_driver

///
/// Utility functions
///

namespace
{

using namespace armnn_driver;
using namespace android::nn;

// Convenience function to log the reason for failing to convert a model.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return).
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}

// Convenience function to call an Is*Supported function and log the caller name together with the reason for
// lack of support. Called as: IsLayerSupported(__func__, Is*Supported, a, b, c, d, e)
template<typename IsLayerSupportedFunc, typename ... Args>
bool IsLayerSupported(const char* funcName, IsLayerSupportedFunc f, Args&&... args)
{
    std::vector<char> unsupportedReason(1024 + 1);
    bool isSupported = f(std::forward<Args>(args)..., unsupportedReason.data(), unsupportedReason.size() - 1);
    if (isSupported)
    {
        return true;
    }
    else
    {
        std::string sUnsupportedReason(unsupportedReason.data());
        if (sUnsupportedReason.size() > 0)
        {
            ALOGD("%s: not supported by armnn: %s", funcName, sUnsupportedReason.c_str());
        }
        else
        {
            ALOGD("%s: not supported by armnn", funcName);
        }
        return false;
    }
}
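
// Usage sketch (illustrative, not part of the driver): converters typically wrap a backend
// Is*Supported query in this helper so the failure reason is logged with the caller's name.
// The specific query and arguments below are placeholders.
//
//     if (!IsLayerSupported(__func__,
//                           armnn::IsReshapeSupported,
//                           data.m_Compute,
//                           inputInfo))
//     {
//         return false;
//     }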

armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}

inline bool IsOperandTypeSupportedForTensors(OperandType type)
{
    return type == OperandType::TENSOR_FLOAT32 ||
           type == OperandType::TENSOR_QUANT8_ASYMM ||
           type == OperandType::TENSOR_INT32;
}

void BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1, armnn::IConnectableLayer* startLayer,
                     armnn::INetwork& network)
{
    BOOST_ASSERT(startLayer != nullptr);
    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();

    if (inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
    {
        // If the numbers of dimensions do not match then we need to add degenerate dimensions
        // to the "smaller" tensor using a reshape:
        //
        //     Small  Big
        //       |     |
        //    Reshape  |
        //        \   /
        //         Add
        bool input0IsBigger = inputTensorInfo0.GetNumDimensions() > inputTensorInfo1.GetNumDimensions();

        LayerInputHandle& smallTensorHandle = input0IsBigger ? input1 : input0;
        const armnn::TensorInfo& smallTensorDims = smallTensorHandle.GetTensorInfo();

        LayerInputHandle& bigTensorHandle = input0IsBigger ? input0 : input1;
        const armnn::TensorInfo& bigTensorDims = bigTensorHandle.GetTensorInfo();

        const unsigned int bigTensorDimsNumber = bigTensorDims.GetNumDimensions();
        std::vector<unsigned int> reshapedDims(bigTensorDimsNumber, 1);
        unsigned int sizeDifference = bigTensorDimsNumber - smallTensorDims.GetNumDimensions();
        for (unsigned int i = sizeDifference; i < bigTensorDimsNumber; ++i)
        {
            reshapedDims[i] = smallTensorDims.GetShape()[i - sizeDifference];
        }
        armnn::TensorInfo reshapedInfo = smallTensorDims;
        reshapedInfo.SetShape(armnn::TensorShape{ static_cast<unsigned int>(reshapedDims.size()),
                                                  reshapedDims.data() });

        armnn::ReshapeDescriptor reshapeDesc;
        reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
        armnn::IConnectableLayer* const reshapeLayer = network.AddReshapeLayer(reshapeDesc);
        smallTensorHandle.Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

        // Connect the outputs from the new reshape layer and the original input layer to the start layer
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        bigTensorHandle.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
}
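
// Worked example (illustrative): adding a bias of shape [4] to an input of shape [1, 2, 2, 4]
// reshapes the bias to [1, 1, 1, 4] (degenerate leading dimensions) before both tensors are
// connected to startLayer. The handle and layer names below are hypothetical.
//
//     // input0: [1, 2, 2, 4], input1: [4] -> input1 reshaped to [1, 1, 1, 4]
//     BroadcastTensor(input0, input1, additionLayer, *data.m_Network);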

void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}
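
// Worked example (illustrative, assuming the usual SAME-padding rule implemented by
// calculateExplicitPadding): input = 7, kernel = 3, stride = 2 gives an output extent of
// ceil(7 / 2) = 4, so (4 - 1) * 2 + 3 - 7 = 2 padding elements are needed in total,
// split as outPadHead = 1 and outPadTail = 1. With kPaddingValid both would be 0.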

Shape GetOperandShape(const Operand& operand)
{
    Shape shape;
    shape.type = operand.type;
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However, for some of the AndroidNN tests the values don't exactly match, so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the
// user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& inputInfo)
{
    const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
    if (biasInfo.GetQuantizationScale() != expectedBiasScale)
    {
        boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
        if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
        {
            ALOGW("Bias quantization scale has been modified to match input*weights");
            biasInfo.SetQuantizationScale(expectedBiasScale);
        }
    }
}
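
// Worked example (illustrative): with an input scale of 0.5f and a weight scale of 0.25f the
// expected bias scale is 0.125f. A bias carrying a scale of 0.1251f is within the 1% tolerance
// and is rewritten to 0.125f; a bias scale of 0.2f is left untouched and will be rejected later
// by ArmNN itself.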

// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
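
// Semantics note (illustrative): for these PermutationVectors, mappings[i] is the destination
// position of source dimension i. For example, NHWCToArmNN ({ 0, 2, 3, 1 }) applied to an NHWC
// shape [N, H, W, C] sends C to position 1, H to position 2 and W to position 3, producing the
// ArmNN (NCHW) shape [N, C, H, W]; ArmNNToNHWC is the inverse mapping.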

template<typename OSlot>
armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
                                          const armnn::PermutationVector& mappings)
{
    // Add swizzle layer
    armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);

    BOOST_ASSERT(layer != nullptr);

    // Connect input to swizzle layer
    input.Connect(layer->GetInputSlot(0));

    // Setup swizzled output
    const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
    layer->GetOutputSlot(0).SetTensorInfo(outInfo);

    return *layer;
}

void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
{
    // Add swizzle layer
    armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
    // Connect swizzled input to layer
    swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
}

armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
{
    // Add deswizzle layer
    armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
    return deswizzleLayer;
}

// Only suitable for input/output slot index 0; for other slots, use SwizzleIn and DeswizzleOut directly
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
                                                LayerInputHandle& input,
                                                armnn::IConnectableLayer& firstLayer,
                                                armnn::IConnectableLayer& lastLayer)
{
    SwizzleIn(network, input, firstLayer, 0);
    return DeswizzleOut(network, lastLayer, 0);
}

// Only suitable for input/output slot index 0; for other slots, use SwizzleIn and DeswizzleOut directly
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
                                                armnn::IConnectableLayer& layer)
{
    return SwizzleInDeswizzleOut(network, input, layer, layer);
}

bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape>& inputShapes,
                               const armnn::TensorShape& outputShape,
                               uint32_t concatDim)
{
    // Validate the output shape is correct given the input shapes (which have just been validated)
    unsigned int numDimensions = inputShapes[0].GetNumDimensions();
    if (outputShape.GetNumDimensions() != numDimensions)
    {
        return Fail("%s: Output shape has wrong number of dimensions", __func__);
    }

    unsigned int outputSizeAlongConcatenatedDimension = 0;
    for (unsigned int i = 0; i < inputShapes.size(); i++)
    {
        outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
    }

    for (unsigned int i = 0; i < numDimensions; ++i)
    {
        if (i == concatDim)
        {
            if (outputShape[i] != outputSizeAlongConcatenatedDimension)
            {
                return Fail(
                    "%s: Invalid output shape for dimension %d (%d != %d)",
                    __func__,
                    i,
                    outputShape[i],
                    outputSizeAlongConcatenatedDimension);
            }
        }
        else
        {
            if (outputShape[i] != inputShapes[0][i])
            {
                return Fail("%s: Invalid output shape", __func__);
            }
        }
    }

    return true;
}
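
// Worked example (illustrative): concatenating inputs of shape [1, 2, 4] and [1, 3, 4] along
// concatDim = 1 is only valid for an output shape of [1, 5, 4]; the concatenated dimension must
// equal the sum of the inputs' extents and every other dimension must match input 0.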

bool RequiresReshape(armnn::TensorShape& inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}

template<typename OSlot>
armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, OSlot& inputLayer,
                                          armnn::TensorInfo reshapeInfo)
{
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
    BOOST_ASSERT(reshapeLayer != nullptr);

    // Attach the input layer to the reshape layer
    inputLayer.Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);

    return *reshapeLayer;
}

void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i = 0; i < nInputs; ++i)
        {
            // Add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // Replace the inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}

bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
                                       int32_t& concatDimension,
                                       std::pair<armnn::PermutationVector, armnn::PermutationVector>& permutationPair)
{
    bool needPermute = false;
    BOOST_ASSERT(numberOfDimensions >= 3);

    // ArmNN uses Compute Library subtensors to perform concatenation.
    // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
    // or along dimension 0 or 2 for a 3-D tensor.
    if (numberOfDimensions == 4 && concatDimension == 2)
    {
        concatDimension = 1;
        permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
        needPermute = true;
    }
    else if (numberOfDimensions == 3 && concatDimension == 1)
    {
        concatDimension = 0;
        permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        needPermute = true;
    }
    return needPermute;
}
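
// Worked example (illustrative): for a 4-D concatenation along dimension 2 this returns true,
// rewrites concatDimension to 1 and selects a (SwapDim1And2, SwapDim1And2) pair, i.e. the inputs
// are permuted before concatenation and the result is permuted back afterwards.
//
//     int32_t concatDim = 2;
//     std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
//         std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
//     bool needPermute = CreateConcatPermutationParameters(4, concatDim, permutationPair);
//     // needPermute == true, concatDim == 1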

} // anonymous namespace

namespace armnn_driver
{

/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed-in AndroidNN activation function requires it.
/// @return The end layer of the sequence of layers built for the given AndroidNN
/// activation function or nullptr if an error occurred (e.g. unsupported activation).
/// Note that the end layer matches the input layer if no activation is required
/// (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);

} // namespace armnn_driver

///
/// Utility templates
///

namespace armnn_driver
{

using namespace android::nn;

template<typename HalOperation, typename HalModel>
const Operand* GetInputOperand(const HalOperation& operation, uint32_t inputIndex, const HalModel& model,
                               bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    BOOST_ASSERT(operation.inputs[inputIndex] < model.operands.size()); // Model should have been validated beforehand
    return &model.operands[operation.inputs[inputIndex]];
}

template<typename HalOperation, typename HalModel>
const Operand* GetOutputOperand(const HalOperation& operation, uint32_t outputIndex, const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.outputs[outputIndex] < model.operands.size());

    return &model.operands[operation.outputs[outputIndex]];
}

template<typename HalModel>
ConstTensorPin ConvertOperandToConstTensorPin(const Operand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    if (!optional &&
        operand.lifetime != OperandLifeTime::CONSTANT_COPY &&
        operand.lifetime != OperandLifeTime::CONSTANT_REFERENCE &&
        operand.lifetime != OperandLifeTime::NO_VALUE)
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // An optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // Mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
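
// Usage sketch (illustrative): pinning a constant weights operand before adding the
// corresponding ArmNN layer. The weightsOperand name below is a hypothetical placeholder.
//
//     ConstTensorPin weightsPin = ConvertOperandToConstTensorPin(*weightsOperand, model, data);
//     if (!weightsPin.IsValid())
//     {
//         return Fail("%s: Operation has invalid weights", __func__);
//     }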

template<typename HalOperation, typename HalModel>
ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
                                                     uint32_t inputIndex,
                                                     const HalModel& model,
                                                     const ConversionData& data,
                                                     const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                                     const armnn::TensorShape* overrideTensorShape = nullptr,
                                                     bool optional = false)
{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
        return ConstTensorPin();
    }
    return ConvertOperandToConstTensorPin(*operand,
                                          model,
                                          data,
                                          dimensionMappings,
                                          overrideTensorShape,
                                          optional);
}

template<typename HalModel>
const void* GetOperandValueReadOnlyAddress(const Operand& operand, const HalModel& model, const ConversionData& data,
                                           bool optional = false)
{
    const void* valueStart = nullptr;

    switch (operand.lifetime)
    {
        case OperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case OperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case OperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error, so it should not register as a failure
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            // A non-optional tensor with no value falls through to the default (error) case
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}

template<typename HalOperation, typename HalModel, typename OutputType>
bool GetInputScalar(const HalOperation& operation,
                    uint32_t inputIndex,
                    OperandType type,
                    OutputType& outValue,
                    const HalModel& model,
                    const ConversionData& data)
{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    if (operand->type != type)
    {
        return Fail("%s: unexpected operand type: %s (should be %s)",
                    __func__, toString(operand->type).c_str(), toString(type).c_str());
    }

    if (operand->location.length != sizeof(OutputType))
    {
        return Fail("%s: incorrect operand location length: %i (should be %i)",
                    __func__, operand->location.length, sizeof(OutputType));
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress(*operand, model, data);
    if (!valueAddress)
    {
        return Fail("%s: failed to get address for operand", __func__);
    }

    outValue = *(static_cast<const OutputType*>(valueAddress));
    return true;
}
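
// Usage sketch (illustrative): reading an INT32 operand (here a hypothetical stride at input
// index 5) into an unsigned descriptor field; the length check requires the operand payload to
// match sizeof(OutputType).
//
//     uint32_t strideX = 0;
//     if (!GetInputScalar(operation, 5, OperandType::INT32, strideX, model, data))
//     {
//         return Fail("%s: Operation has invalid inputs", __func__);
//     }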

template<typename HalOperation, typename HalModel>
bool GetInputInt32(const HalOperation& operation,
                   uint32_t inputIndex,
                   int32_t& outValue,
                   const HalModel& model,
                   const ConversionData& data)
{
    return GetInputScalar(operation, inputIndex, OperandType::INT32, outValue, model, data);
}

template<typename HalOperation, typename HalModel>
bool GetInputFloat32(const HalOperation& operation,
                     uint32_t inputIndex,
                     float& outValue,
                     const HalModel& model,
                     const ConversionData& data)
{
    return GetInputScalar(operation, inputIndex, OperandType::FLOAT32, outValue, model, data);
}

template<typename HalOperation, typename HalModel>
bool GetInputActivationFunctionImpl(const HalOperation& operation,
                                    uint32_t inputIndex,
                                    OperandType type,
                                    ActivationFn& outActivationFunction,
                                    const HalModel& model,
                                    const ConversionData& data)
{
    if (type != OperandType::INT32 && type != OperandType::TENSOR_INT32)
    {
        return Fail("%s: unexpected operand type: %s (should be %s or %s)",
                    __func__,
                    toString(type).c_str(),
                    toString(OperandType::INT32).c_str(),
                    toString(OperandType::TENSOR_INT32).c_str());
    }

    int32_t activationFunctionAsInt;
    if (!GetInputScalar(operation, inputIndex, type, activationFunctionAsInt, model, data))
    {
        return Fail("%s: failed to get activation input value", __func__);
    }
    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
    return true;
}

template<typename HalOperation, typename HalModel>
bool GetInputActivationFunction(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& outActivationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    return GetInputActivationFunctionImpl(operation,
                                          inputIndex,
                                          OperandType::INT32,
                                          outActivationFunction,
                                          model,
                                          data);
}

template<typename HalOperation, typename HalModel>
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    return GetInputActivationFunctionImpl(operation,
                                          inputIndex,
                                          OperandType::INT32,
                                          outActivationFunction,
                                          model,
                                          data);
}

template<typename HalOperation, typename HalModel>
bool GetOptionalInputActivation(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& activationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    if (operation.inputs.size() <= inputIndex)
    {
        activationFunction = ActivationFn::kActivationNone;
    }
    else
    {
        if (!GetInputActivationFunction(operation, inputIndex, activationFunction, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    return true;
}

template<typename HalModel>
bool GetTensorInt32Values(const Operand& operand,
                          std::vector<int32_t>& outValues,
                          const HalModel& model,
                          const ConversionData& data)
{
    if (operand.type != OperandType::TENSOR_INT32)
    {
        return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
    }

    const void* startAddress = GetOperandValueReadOnlyAddress(operand, model, data);
    if (!startAddress)
    {
        return Fail("%s: failed to get operand address", __func__, operand.type);
    }

    // Check that the number of bytes is sensible
    const uint32_t numBytes = operand.location.length;
    if (numBytes % sizeof(int32_t) != 0)
    {
        return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
                    __func__, numBytes, sizeof(int32_t));
    }

    outValues.resize(numBytes / sizeof(int32_t));
    memcpy(outValues.data(), startAddress, numBytes);
    return true;
}

template<typename HalOperation, typename HalModel>
bool GetInputPaddingScheme(const HalOperation& operation,
                           uint32_t inputIndex,
                           PaddingScheme& outPaddingScheme,
                           const HalModel& model,
                           const ConversionData& data)
{
    int32_t paddingSchemeAsInt;
    if (!GetInputInt32(operation, inputIndex, paddingSchemeAsInt, model, data))
    {
        return Fail("%s: failed to get padding scheme input value", __func__);
    }

    outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
    return true;
}

template<typename HalOperation, typename HalModel>
LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
                                           uint32_t inputIndex,
                                           const HalModel& model,
                                           ConversionData& data)
{
    const Operand* operand = GetInputOperand(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);

    switch (operand->lifetime)
    {
        case OperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
        case OperandLifeTime::MODEL_INPUT:
        case OperandLifeTime::MODEL_OUTPUT:
        {
            // The tensor is either an operand internal to the model, or a model input/output.
            // It can be associated with an ArmNN output slot for an existing layer.

            // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
            const uint32_t operandIndex = operation.inputs[inputIndex];
            return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            break;
        }
        case OperandLifeTime::CONSTANT_COPY:
        case OperandLifeTime::CONSTANT_REFERENCE:
        {
            // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
            ConstTensorPin tensorPin = ConvertOperandToConstTensorPin(*operand, model, data);
            if (tensorPin.IsValid())
            {
                if (!IsLayerSupported(__func__,
                                      armnn::IsConstantSupported,
                                      data.m_Compute,
                                      tensorPin.GetConstTensor().GetInfo()))
                {
                    return LayerInputHandle();
                }

                armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                return LayerInputHandle(true, &outputSlot, operandTensorInfo);
            }
            else
            {
                Fail("%s: invalid operand tensor", __func__);
                return LayerInputHandle();
            }
            break;
        }
        default:
        {
            // Unsupported lifetime for an input tensor
            Fail("%s: unsupported lifetime for input tensor: %s",
                 __func__, toString(operand->lifetime).c_str());
            return LayerInputHandle();
        }
    }
}
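
// Usage sketch (illustrative): operation converters usually resolve their tensor inputs through
// this helper first.
//
//     LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
//     if (!input.IsValid())
//     {
//         return Fail("%s: Operation has invalid inputs", __func__);
//     }
//
// Constant operands become ArmNN Constant layers; other operands are looked up in
// data.m_OutputSlotForOperand, which is filled in as earlier layers are converted.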

template<typename HalOperation, typename HalModel>
bool ConvertToActivation(const HalOperation& operation,
                         const char* operationName,
                         const armnn::ActivationDescriptor& activationDesc,
                         const HalModel& model,
                         ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0, model);
    if (!outputOperand)
    {
        return false;
    }
    const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
    if (!IsLayerSupported(__func__,
                          armnn::IsActivationSupported,
                          data.m_Compute,
                          input.GetTensorInfo(),
                          outInfo,
                          activationDesc))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
    BOOST_ASSERT(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer, model, data);
}

template<typename HalOperation, typename HalModel>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t operationOutputIndex,
                                  armnn::IConnectableLayer& layer,
                                  uint32_t layerOutputIndex,
                                  const HalModel& model,
                                  ConversionData& data)
{
    const Operand* outputOperand = GetOutputOperand(operation, operationOutputIndex, model);
    if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
    {
        return false;
    }

    armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);

    const uint32_t operandIndex = operation.outputs[operationOutputIndex];
    data.m_OutputSlotForOperand[operandIndex] = &outputSlot;

    outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));

    return true;
}

template<typename HalOperation, typename HalModel>
bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                  uint32_t outputIndex,
                                  armnn::IConnectableLayer& layer,
                                  const HalModel& model,
                                  ConversionData& data)
{
    return SetupAndTrackLayerOutputSlot(operation, outputIndex, layer, outputIndex, model, data);
}
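
// Usage sketch (illustrative): after building the ArmNN layer(s) for an operation, a converter
// registers the final layer's output slot so that later operations can connect to it; this also
// sets the slot's TensorInfo from the corresponding output operand.
//
//     return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);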

template<typename HalOperation, typename HalModel>
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const Operand* output = GetOutputOperand(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo  = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    if (operation.inputs.size() == 7)
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme(operation, 1, scheme, model, data)
            || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_StrideX, model, data)
            || !GetInputScalar(operation, 3, OperandType::INT32, desc.m_StrideY, model, data)
            || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PoolWidth, model, data)
            || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PoolHeight, model, data)
            || !GetInputActivationFunction(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        const unsigned int inputWidth  = inputInfo.GetShape()[2];
        const unsigned int inputHeight = inputInfo.GetShape()[1];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }
    else
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar(operation, 1, OperandType::INT32, desc.m_PadLeft, model, data)
            || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_PadRight, model, data)
            || !GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadTop, model, data)
            || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadBottom, model, data)
            || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideX, model, data)
            || !GetInputScalar(operation, 6, OperandType::INT32, desc.m_StrideY, model, data)
            || !GetInputScalar(operation, 7, OperandType::INT32, desc.m_PoolWidth, model, data)
            || !GetInputScalar(operation, 8, OperandType::INT32, desc.m_PoolHeight, model, data)
            || !GetInputActivationFunction(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }
    }

    if (!IsLayerSupported(__func__,
                          armnn::IsPooling2dSupported,
                          data.m_Compute,
                          inputInfo,
                          outputInfo,
                          desc))
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer, model, data);
}
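
// Caller sketch (illustrative): a HAL policy would typically forward a pooling operation to this
// template with the appropriate algorithm; max and L2 pooling reuse the same helper with a
// different armnn::PoolingAlgorithm value.
//
//     return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Average, model, data);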

} // namespace armnn_driver