blob: 3ad15d308bef81e12a59a41975f5d1e709ca777d [file] [log] [blame]
arovir01b0717b52018-09-05 17:03:25 +01001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// SPDX-License-Identifier: MIT
4//
5
6#pragma once
7
8#include <armnn/ArmNN.hpp>
9
Mike Kellyb5fdf382019-06-11 16:35:25 +010010#include "armnn/src/armnnUtils/DataLayoutIndexed.hpp"
arovir01b0717b52018-09-05 17:03:25 +010011#include "armnn/src/armnnUtils/Permute.hpp"
12#include "Utils.hpp"
13
14#include <ActivationFunctor.h>
15#include <CpuExecutor.h>
16#include <OperationsUtils.h>
17
18#include <boost/assert.hpp>
19#include <boost/core/ignore_unused.hpp>
Aron Virginas-Tar0e7ab542019-04-10 15:02:31 +010020#include <boost/numeric/conversion/cast.hpp>
arovir01b0717b52018-09-05 17:03:25 +010021#include <boost/test/tools/floating_point_comparison.hpp>
22
23#include <log/log.h>
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010024#include <vector>
arovir01b0717b52018-09-05 17:03:25 +010025
26namespace armnn_driver
27{
28
29///
30/// Helper classes
31///
32
33struct ConversionData
34{
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010035 ConversionData(const std::vector<armnn::BackendId>& backends)
36 : m_Backends(backends)
37 , m_Network(nullptr, nullptr)
arovir01b0717b52018-09-05 17:03:25 +010038 {}
39
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +010040 const std::vector<armnn::BackendId> m_Backends;
arovir01b0717b52018-09-05 17:03:25 +010041 armnn::INetworkPtr m_Network;
42 std::vector<armnn::IOutputSlot*> m_OutputSlotForOperand;
43 std::vector<android::nn::RunTimePoolInfo> m_MemPools;
44};
45
// Wraps an armnn::IOutputSlot (the producer of a tensor) together with that tensor's info
// and a validity flag, so an operation input can later be connected to the layers consuming it.
// Declarations only; definitions live elsewhere in the driver.
class LayerInputHandle
{
public:
    LayerInputHandle();
    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo);

    // True when this handle refers to a usable output slot.
    bool IsValid() const;

    // Connects the wrapped output slot to the given input slot.
    void Connect(armnn::IInputSlot& inputSlot);

    const armnn::TensorInfo& GetTensorInfo() const;

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};
63
// Holds an armnn::ConstTensor for a constant model operand, optionally owning a swizzled
// (permuted) copy of the data when the original layout had to be rearranged.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors).
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid.
    ConstTensorPin(bool optional = false);

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes   Number of bytes for the tensor data.
    // @param mappings   Permutation to apply to the data; when non-identity the pin stores its own
    //                   swizzled copy (see m_SwizzledTensorData).
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings);

    // Move-only: the pin may own the swizzled data buffer.
    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    bool IsValid() const;
    bool IsOptional() const;

    const armnn::ConstTensor& GetConstTensor() const;
    const armnn::ConstTensor* GetConstTensorPtr() const;

private:
    armnn::ConstTensor m_ConstTensor;

    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;

    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};
98
99} // namespace armnn_driver
100
101///
102/// Utility functions
103///
104
105namespace
106{
107
108using namespace armnn_driver;
109using namespace android::nn;
110
// Convenience function to log the reason for failing to convert a model.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
// NOTE: formatStr is passed straight to ALOGD as a printf-style format string, so it must be a
// trusted literal and the variadic arguments must match its conversion specifiers.
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
119
120// Convenience function to call an Is*Supported function and log caller name together with reason for lack of support.
121// Called as: IsLayerSupported(__func__, Is*Supported, a, b, c, d, e)
122template<typename IsLayerSupportedFunc, typename ... Args>
123bool IsLayerSupported(const char* funcName, IsLayerSupportedFunc f, Args&&... args)
124{
125 std::vector<char> unsupportedReason(1024+1);
126 bool isSupported = f(std::forward<Args>(args)..., unsupportedReason.data(), unsupportedReason.size()-1);
127 if(isSupported)
128 {
129 return true;
130 }
131 else
132 {
133 std::string sUnsupportedReason(unsupportedReason.data());
134 if (sUnsupportedReason.size() > 0)
135 {
136 ALOGD("%s: not supported by armnn: %s", funcName, sUnsupportedReason.c_str());
137 } else
138 {
139 ALOGD("%s: not supported by armnn", funcName);
140 }
141 return false;
142 }
143}
144
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100145template<typename IsLayerSupportedFunc, typename ... Args>
146bool IsLayerSupportedForAnyBackend(const char* funcName,
147 IsLayerSupportedFunc f,
148 const std::vector<armnn::BackendId>& backends,
149 Args&&... args)
150{
151 for (auto&& backend : backends)
152 {
153 if (IsLayerSupported(funcName, f, backend, std::forward<Args>(args)...))
154 {
155 return true;
156 }
157 }
158
159 ALOGD("%s: not supported by any specified backend", funcName);
160 return false;
161}
162
// Builds an armnn::TensorShape from a HAL operand: operand.dimensions supplies both the
// rank and the per-dimension sizes.
template<typename Operand>
armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}
168
Matthew Bentham912b3622019-05-03 15:49:14 +0100169inline bool IsOperandTypeSupportedForTensors(V1_0::OperandType type)
arovir01b0717b52018-09-05 17:03:25 +0100170{
Matthew Bentham912b3622019-05-03 15:49:14 +0100171 return type == V1_0::OperandType::TENSOR_FLOAT32 ||
172 type == V1_0::OperandType::TENSOR_QUANT8_ASYMM ||
173 type == V1_0::OperandType::TENSOR_INT32;
arovir01b0717b52018-09-05 17:03:25 +0100174}
175
Mike Kellyb5fdf382019-06-11 16:35:25 +0100176#ifdef ARMNN_ANDROID_NN_V1_2
177
178inline bool IsOperandTypeSupportedForTensors(V1_2::OperandType type)
179{
180 return type == V1_2::OperandType::BOOL ||
181 type == V1_2::OperandType::TENSOR_FLOAT16 ||
182 type == V1_2::OperandType::TENSOR_FLOAT32 ||
183 type == V1_2::OperandType::TENSOR_QUANT8_ASYMM ||
184 type == V1_2::OperandType::TENSOR_QUANT16_SYMM ||
185 type == V1_2::OperandType::TENSOR_INT32;
186}
187
188#endif
189
// V1_0 has no BOOL operand type, so a V1_0 operand is never a bool.
inline bool IsBool(V1_0::Operand)
{
    return false;
}
194
195#ifdef ARMNN_ANDROID_NN_V1_2
196
// Returns true when a V1_2 operand holds a BOOL value.
inline bool IsBool(V1_2::Operand operand)
{
    return operand.type == V1_2::OperandType::BOOL;
}
201
202#endif
203
// Wires two inputs of possibly different rank into a two-input layer (e.g. an addition),
// inserting a reshape that left-pads the lower-rank tensor's shape with 1s so both inputs
// end up with the same number of dimensions.
// NOTE(review): in the reshape branch the (reshaped) smaller tensor is always connected to
// input slot 0 and the bigger tensor to slot 1, regardless of which operand was input0 --
// confirm callers only rely on this where operand order does not matter.
void BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1, armnn::IConnectableLayer* startLayer,
                     armnn::INetwork& network)
{
    BOOST_ASSERT(startLayer != nullptr);
    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();

    if (inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
    {
        // If the number of dimensions do not match then we need to add degenerate dimensions
        // to the "smaller" tensor using a reshape:
        //   Small  Big
        //     |     |
        //  Reshape  |
        //      \   /
        //       Add
        bool input0IsBigger = inputTensorInfo0.GetNumDimensions() > inputTensorInfo1.GetNumDimensions();

        LayerInputHandle& smallTensorHandle = input0IsBigger ? input1 : input0;
        const armnn::TensorInfo& smallTensorDims = smallTensorHandle.GetTensorInfo();

        LayerInputHandle& bigTensorHandle = input0IsBigger ? input0 : input1;
        const armnn::TensorInfo& bigTensorDims = bigTensorHandle.GetTensorInfo();

        // Build the padded shape: leading dimensions are 1, trailing dimensions copy the small tensor.
        const unsigned int bigTensorDimsNumber = bigTensorDims.GetNumDimensions();
        std::vector<unsigned int> reshapedDims(bigTensorDimsNumber, 1);
        unsigned int sizeDifference = bigTensorDimsNumber - smallTensorDims.GetNumDimensions();
        for (unsigned i = sizeDifference; i < bigTensorDimsNumber; ++i)
        {
            reshapedDims[i] = smallTensorDims.GetShape()[i-sizeDifference];
        }
        // Keep the small tensor's data type / quantization info, only the shape changes.
        armnn::TensorInfo reshapedInfo = smallTensorDims;
        reshapedInfo.SetShape(armnn::TensorShape{ static_cast<unsigned int>(reshapedDims.size()),
                                                  reshapedDims.data() });

        armnn::ReshapeDescriptor reshapeDesc;
        reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
        armnn::IConnectableLayer* const reshapeLayer = network.AddReshapeLayer(reshapeDesc);
        smallTensorHandle.Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

        // Connect the outputs from new reshape and original input layer
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        bigTensorHandle.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        // Same rank: connect both inputs directly, preserving operand order.
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
}
255
// Computes explicit head/tail padding for one dimension from an implicit AndroidNN padding
// scheme, delegating to android::nn::calculateExplicitPadding.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    // boost::numeric_cast range-checks the int32_t -> uint32_t conversion and throws on a negative value.
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}
265
// Builds an android::nn::Shape (type, dimensions and quantization parameters) from a V1_0 operand.
Shape GetOperandShape(const V1_0::Operand& operand)
{
    Shape shape;
    shape.type = OperandType(operand.type);
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    // AndroidNN calls the quantization zero point "offset" on Shape.
    shape.offset = operand.zeroPoint;
    return shape;
}
275
// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the user
// (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& inputInfo)
{
    const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
    if (biasInfo.GetQuantizationScale() != expectedBiasScale)
    {
        // Only rewrite the scale when it is within 1% of the expected value; larger
        // mismatches are left untouched (and will be rejected downstream).
        boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
        if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
        {
            ALOGW("Bias quantization scale has been modified to match input*weights");
            biasInfo.SetQuantizationScale(expectedBiasScale);
        }
    }
}
294
// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
// NHWCToArmNN and ArmNNToNHWC are inverse permutations of each other;
// they are used as a pair by SwizzleIn/DeswizzleOut below.
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
// Self-inverse swap of the two middle dimensions (used for 4D concat permutation).
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
// RotateTensorLeft and RotateTensorRight are inverse rotations (used for 3D concat permutation).
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
305
// Adds a permute layer applying `mappings` to `input`, connects the input to it, and tags the
// permuted TensorInfo on its output slot.
// @param input Any object exposing Connect(IInputSlot&) and GetTensorInfo() (e.g. LayerInputHandle
//              or IOutputSlot).
// @return The newly added permute layer.
template<typename OSlot>
armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
                                          const armnn::PermutationVector& mappings)
{
    // Add swizzle layer
    armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);

    BOOST_ASSERT(layer != nullptr);

    // Connect input to swizzle layer
    input.Connect(layer->GetInputSlot(0));

    // Setup swizzled output
    const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
    layer->GetOutputSlot(0).SetTensorInfo(outInfo);

    return *layer;
}
324
// Inserts an NHWC -> ArmNN permute in front of `layer`'s input slot `index` and feeds it from `input`.
void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
{
    // Add swizzle layer
    armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
    // Connect swizzled input to layer
    swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
}
332
// Appends an ArmNN -> NHWC permute to `layer`'s output slot `index` and returns the new permute layer.
armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
{
    // Add deswizzle layer
    armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
    return deswizzleLayer;
}
339
// Swizzles `input` into firstLayer's slot 0 and deswizzles lastLayer's output 0, returning the
// deswizzle layer. Only suitable for input/output slot index 0; for other slots, use SwizzleIn
// and DeswizzleOut directly.
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
                                                LayerInputHandle& input,
                                                armnn::IConnectableLayer& firstLayer,
                                                armnn::IConnectableLayer& lastLayer)
{
    SwizzleIn(network, input, firstLayer, 0);
    return DeswizzleOut(network, lastLayer, 0);
}
349
// Single-layer convenience overload: swizzle into and deswizzle out of the same layer.
// Only suitable for input/output slot index 0; for other slots, use SwizzleIn and DeswizzleOut directly.
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
                                                armnn::IConnectableLayer& layer)
{
    return SwizzleInDeswizzleOut(network, input, layer, layer);
}
356
357bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
358 const armnn::TensorShape & outputShape,
359 uint32_t concatDim)
360{
361 // Validate the output shape is correct given the input shapes (which have just been validated)
362 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
363 if (outputShape.GetNumDimensions() != numDimensions)
364 {
365 return Fail("%s: Output shape has wrong number of dimensions", __func__);
366 }
367
368 unsigned int outputSizeAlongConcatenatedDimension = 0;
369 for (unsigned int i = 0; i < inputShapes.size(); i++)
370 {
371 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
372 }
373
374 for (unsigned int i = 0; i < numDimensions; ++i)
375 {
376 if (i == concatDim)
377 {
378 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
379 {
380 return Fail(
381 "%s: Invalid output shape for dimension %d (%d != %d)",
382 __func__,
383 i,
384 outputShape[i],
385 outputSizeAlongConcatenatedDimension);
386 }
387 }
388 else
389 {
390 if (outputShape[i] != inputShapes[0][i])
391 {
392 return Fail("%s: Invalid output shape", __func__);
393 }
394 }
395 }
396
397 return true;
398}
399
400bool RequiresReshape(armnn::TensorShape & inputShape)
401{
402 return inputShape.GetNumDimensions() < 3;
403}
404
405template<typename OSlot>
406armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, OSlot& inputLayer,
407 armnn::TensorInfo reshapeInfo)
408{
409 armnn::ReshapeDescriptor reshapeDescriptor;
410 reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();
411
412 armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
413 BOOST_ASSERT(reshapeLayer != nullptr);
414
415 // Attach the input layer to the reshape layer
416 inputLayer.Connect(reshapeLayer->GetInputSlot(0));
417 reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);
418
419 return *reshapeLayer;
420}
421
// Applies `mapping` to every input by inserting a permute layer in front of each, replacing
// the handles and shapes in-place with their swizzled versions. No-op when the mapping is the
// 4D identity permutation.
void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i=0; i<nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}
442
narpra01f176d5a2018-11-18 20:17:48 +0000443bool CreateConcatPermutationParameters(const unsigned int numberOfDimensions,
444 int32_t & concatDimension,
445 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
arovir01b0717b52018-09-05 17:03:25 +0100446{
narpra01f176d5a2018-11-18 20:17:48 +0000447 bool needPermute = false;
arovir01b0717b52018-09-05 17:03:25 +0100448 BOOST_ASSERT(numberOfDimensions >= 3);
449
450 // ArmNN uses Compute Library subtensors to perform concatenation
narpra01f176d5a2018-11-18 20:17:48 +0000451 // This only works when concatenating along dimension 0, 1 or 3 for a 4-D tensor,
452 // or along dimension 0 or 2 for a 3-D tensor.
453 if (numberOfDimensions == 4 && concatDimension == 2)
arovir01b0717b52018-09-05 17:03:25 +0100454 {
narpra01f176d5a2018-11-18 20:17:48 +0000455 concatDimension = 1;
456 permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
457 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100458 }
narpra01f176d5a2018-11-18 20:17:48 +0000459 else if (numberOfDimensions == 3 && concatDimension == 1)
arovir01b0717b52018-09-05 17:03:25 +0100460 {
narpra01f176d5a2018-11-18 20:17:48 +0000461 concatDimension = 0;
462 permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
463 needPermute = true;
arovir01b0717b52018-09-05 17:03:25 +0100464 }
narpra01f176d5a2018-11-18 20:17:48 +0000465 return needPermute;
arovir01b0717b52018-09-05 17:03:25 +0100466}
467
468} // anonymous namespace
469
470namespace armnn_driver
471{
472
/// Creates an ArmNN activation layer and connects it to the given layer, if the
/// passed in AndroidNN activation function requires so.
/// @return The end layer of the sequence of layers built for the given AndroidNN
///         activation function or nullptr if an error occurred (e.g. unsupported activation).
///         Note that the end layer matches the input layer if no activation is required
///         (the sequence of layers has length 1).
armnn::IConnectableLayer* ProcessActivation(const armnn::TensorInfo& tensorInfo,
                                            ActivationFn activation,
                                            armnn::IConnectableLayer* prevLayer,
                                            ConversionData& data);
483
484} // namespace armnn_driver
485
486///
487/// Utility templates
488///
489
490namespace armnn_driver
491{
492
493using namespace android::nn;
494
// Returns a pointer to the operand backing the operation's inputIndex-th input, or nullptr when
// the index is out of bounds. When failOnIndexOutOfBounds is true, an out-of-bounds index is also
// logged as a conversion failure.
template<typename HalOperand, typename HalOperation, typename HalModel>
const HalOperand* GetInputOperand(const HalOperation& operation, uint32_t inputIndex, const HalModel& model,
                                  bool failOnIndexOutOfBounds = true)
{
    if (inputIndex >= operation.inputs.size())
    {
        if (failOnIndexOutOfBounds)
        {
            // %u/%zu match the unsigned argument types; the previous %i with a size_t
            // argument is undefined behaviour for printf-style formatting.
            Fail("%s: invalid input index: %u out of %zu", __func__, inputIndex, operation.inputs.size());
        }
        return nullptr;
    }

    BOOST_ASSERT(operation.inputs[inputIndex] < model.operands.size()); // Model should have been validated beforehand
    return &model.operands[operation.inputs[inputIndex]];
}
511
// Returns a pointer to the operand backing the operation's outputIndex-th output, or nullptr
// (with a log message) when the index is out of bounds.
template<typename HalOperand, typename HalOperation, typename HalModel>
const HalOperand* GetOutputOperand(const HalOperation& operation, uint32_t outputIndex, const HalModel& model)
{
    if (outputIndex >= operation.outputs.size())
    {
        // %u/%zu match the unsigned argument types; the previous %i with a size_t
        // argument is undefined behaviour for printf-style formatting.
        Fail("%s: invalid output index: %u out of %zu", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    // Model should have been validated beforehand
    BOOST_ASSERT(operation.outputs[outputIndex] < model.operands.size());

    return &model.operands[operation.outputs[outputIndex]];
}
526
// Converts a constant HAL operand into a ConstTensorPin referencing (or, when permuted, owning)
// its data.
// @param dimensionMappings   Optional permutation to apply to the tensor data.
// @param overrideTensorShape When non-null, replaces the shape derived from the operand.
// @param optional            When true, a missing value yields an invalid-but-optional pin rather
//                            than an error.
// @return A valid pin on success; an invalid pin (with a log message, unless optional) on failure.
template<typename HalOperand, typename HalModel>
ConstTensorPin ConvertOperandToConstTensorPin(const HalOperand& operand,
                                              const HalModel& model,
                                              const ConversionData& data,
                                              const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                              const armnn::TensorShape* overrideTensorShape = nullptr,
                                              bool optional = false)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    // Only constant lifetimes (or NO_VALUE for optionals) can be pinned as constant tensors.
    if (!optional &&
        operand.lifetime !=V1_0::OperandLifeTime::CONSTANT_COPY &&
        operand.lifetime !=V1_0::OperandLifeTime::CONSTANT_REFERENCE &&
        operand.lifetime !=V1_0::OperandLifeTime::NO_VALUE)
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress(operand, model, data, optional);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}
570
// Convenience wrapper: looks up the operation's inputIndex-th operand and converts it to a
// ConstTensorPin (see ConvertOperandToConstTensorPin for the parameter semantics).
// Returns an invalid pin (with a log message) when the operand cannot be retrieved.
template<typename HalOperand, typename HalOperation, typename HalModel>
ConstTensorPin ConvertOperationInputToConstTensorPin(const HalOperation& operation,
                                                     uint32_t inputIndex,
                                                     const HalModel& model,
                                                     const ConversionData& data,
                                                     const armnn::PermutationVector& dimensionMappings = g_DontPermute,
                                                     const armnn::TensorShape* overrideTensorShape = nullptr,
                                                     bool optional = false)
{
    const HalOperand* operand = GetInputOperand<HalOperand>(operation, inputIndex, model);
    if (!operand)
    {
        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
        return ConstTensorPin();
    }
    return ConvertOperandToConstTensorPin(*operand,
                                          model,
                                          data,
                                          dimensionMappings,
                                          overrideTensorShape,
                                          optional);
}
593
// Returns a read-only pointer to the constant data backing an operand, or nullptr when it
// cannot be obtained. CONSTANT_COPY data lives in model.operandValues; CONSTANT_REFERENCE
// data lives in one of the mapped memory pools. A NO_VALUE operand yields nullptr silently
// when optional is true, and is treated (and logged) as an error otherwise.
template<typename HalOperand, typename HalModel>
const void* GetOperandValueReadOnlyAddress(const HalOperand& operand,
                                           const HalModel& model,
                                           const ConversionData& data,
                                           bool optional = false)
{
    const void* valueStart = nullptr;

    switch (operand.lifetime)
    {
        case V1_0::OperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &model.operandValues[operand.location.offset];
            break;
        }
        case V1_0::OperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, data.m_MemPools);
            break;
        }
        case V1_0::OperandLifeTime::NO_VALUE:
        {
            // An optional input tensor with no values is not an error so should not register as a fail
            if (optional)
            {
                valueStart = nullptr;
                break;
            }
            // A mandatory operand with no value falls through to the error handling below.
            [[fallthrough]];
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}
637
Mike Kellyb5fdf382019-06-11 16:35:25 +0100638template<typename HalOperand, typename HalOperandType, typename HalOperation, typename HalModel, typename OutputType>
arovir01b0717b52018-09-05 17:03:25 +0100639bool GetInputScalar(const HalOperation& operation,
640 uint32_t inputIndex,
Mike Kellyb5fdf382019-06-11 16:35:25 +0100641 HalOperandType type,
arovir01b0717b52018-09-05 17:03:25 +0100642 OutputType& outValue,
643 const HalModel& model,
644 const ConversionData& data)
645{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100646 const HalOperand* operand = GetInputOperand<HalOperand>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +0100647 if (!operand)
648 {
649 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
650 }
651
652 if (operand->type != type)
653 {
654 return Fail("%s: unexpected operand type: %s (should be %s)",
655 __func__, toString(operand->type).c_str(), toString(type).c_str());
656 }
657
658 if (operand->location.length != sizeof(OutputType))
659 {
660 return Fail("%s: incorrect operand location length: %i (should be %i)",
661 __func__, operand->location.length, sizeof(OutputType));
662 }
663
664 const void* valueAddress = GetOperandValueReadOnlyAddress(*operand, model, data);
665 if (!valueAddress)
666 {
667 return Fail("%s: failed to get address for operand", __func__);
668 }
669
670 outValue = *(static_cast<const OutputType*>(valueAddress));
671 return true;
672}
673
// Reads an INT32 scalar input of the operation into outValue (see GetInputScalar).
template<typename HalOperand, typename HalOperandType, typename HalOperation, typename HalModel>
bool GetInputInt32(const HalOperation& operation,
                   uint32_t inputIndex,
                   int32_t& outValue,
                   const HalModel& model,
                   const ConversionData& data)
{
    return GetInputScalar<HalOperand, HalOperandType>(operation, inputIndex, HalOperandType::INT32, outValue, model,
                                                      data);
}
684
// Reads a FLOAT32 scalar input of the operation into outValue (see GetInputScalar).
template<typename HalOperand, typename HalOperandType, typename HalOperation, typename HalModel>
bool GetInputFloat32(const HalOperation& operation,
                     uint32_t inputIndex,
                     float& outValue,
                     const HalModel& model,
                     const ConversionData& data)
{
    return GetInputScalar<HalOperand, HalOperandType>(operation, inputIndex, HalOperandType::FLOAT32, outValue, model,
                                                      data);
}
695
// Shared implementation for reading an activation-function input: accepts either an INT32
// scalar or a TENSOR_INT32 operand type, reads the raw int32 value, and casts it to
// ActivationFn. The raw value is not range-checked here.
template<typename HalOperand, typename HalOperandType, typename HalOperation, typename HalModel>
bool GetInputActivationFunctionImpl(const HalOperation& operation,
                                    uint32_t inputIndex,
                                    HalOperandType type,
                                    ActivationFn& outActivationFunction,
                                    const HalModel& model,
                                    const ConversionData& data)
{
    if (type != HalOperandType::INT32 && type != HalOperandType::TENSOR_INT32)
    {
        return Fail("%s: unexpected operand type: %s (should be %s or %s)",
                    __func__,
                    toString(type).c_str(),
                    toString(OperandType::INT32).c_str(),
                    toString(OperandType::TENSOR_INT32).c_str());
    }

    int32_t activationFunctionAsInt;
    if (!GetInputScalar<HalOperand, HalOperandType>(operation, inputIndex, type, activationFunctionAsInt, model, data))
    {
        return Fail("%s: failed to get activation input value", __func__);
    }
    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
    return true;
}
721
// Reads an activation-function input stored as an INT32 scalar operand.
template<typename HalOperand, typename HalOperandType, typename HalOperation, typename HalModel>
bool GetInputActivationFunction(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& outActivationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    return GetInputActivationFunctionImpl<HalOperand, HalOperandType>(operation,
                                                                      inputIndex,
                                                                      HalOperandType::INT32,
                                                                      outActivationFunction,
                                                                      model,
                                                                      data);
}
736
// Reads an activation-function input that is expected to be a 1-D tensor of size 1.
// NOTE(review): despite the comment, this passes HalOperandType::INT32 (not TENSOR_INT32)
// to the impl, so GetInputScalar will reject an operand whose type is TENSOR_INT32 --
// confirm whether TENSOR_INT32 was intended here.
template<typename HalOperand, typename HalOperandType, typename HalOperation, typename HalModel>
bool GetInputActivationFunctionFromTensor(const HalOperation& operation,
                                          uint32_t inputIndex,
                                          ActivationFn& outActivationFunction,
                                          const HalModel& model,
                                          const ConversionData& data)
{
    // This only accepts a 1-D tensor of size 1
    return GetInputActivationFunctionImpl<HalOperand, HalOperandType>(operation,
                                                                      inputIndex,
                                                                      HalOperandType::INT32,
                                                                      outActivationFunction,
                                                                      model,
                                                                      data);
}
752
753
// Reads an optional activation-function input: when the operation has no input at inputIndex,
// the activation defaults to kActivationNone; otherwise the input must parse successfully.
template<typename HalOperand, typename HalOperandType, typename HalOperation, typename HalModel>
bool GetOptionalInputActivation(const HalOperation& operation,
                                uint32_t inputIndex,
                                ActivationFn& activationFunction,
                                const HalModel& model,
                                const ConversionData& data)
{
    if (operation.inputs.size() <= inputIndex)
    {
        // Absent optional input: no activation.
        activationFunction = ActivationFn::kActivationNone;
    }
    else
    {
        if (!GetInputActivationFunction<HalOperand, HalOperandType>(operation, inputIndex, activationFunction, model,
                                                                    data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    return true;
}
775
Mike Kellyb5fdf382019-06-11 16:35:25 +0100776template<typename HalOperand, typename HalOperandType, typename HalModel>
777bool GetTensorInt32Values(const HalOperand& operand,
arovir01b0717b52018-09-05 17:03:25 +0100778 std::vector<int32_t>& outValues,
779 const HalModel& model,
780 const ConversionData& data)
781{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100782 if (operand.type != HalOperandType::TENSOR_INT32)
arovir01b0717b52018-09-05 17:03:25 +0100783 {
784 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
785 }
786
787 const void* startAddress = GetOperandValueReadOnlyAddress(operand, model, data);
788 if (!startAddress)
789 {
790 return Fail("%s: failed to get operand address", __func__, operand.type);
791 }
792
793 // Check number of bytes is sensible
794 const uint32_t numBytes = operand.location.length;
795 if (numBytes % sizeof(int32_t) != 0)
796 {
797 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
798 __func__, numBytes, sizeof(int32_t));
799 }
800
801 outValues.resize(numBytes / sizeof(int32_t));
802 memcpy(outValues.data(), startAddress, numBytes);
803 return true;
804}
805
// Reads an INT32 input of the operation and casts it to an AndroidNN PaddingScheme.
// The raw value is not validated against the PaddingScheme enumerators here.
template<typename HalOperand, typename HalOperandType, typename HalOperation, typename HalModel>
bool GetInputPaddingScheme(const HalOperation& operation,
                           uint32_t inputIndex,
                           PaddingScheme& outPaddingScheme,
                           const HalModel& model,
                           const ConversionData& data)
{
    int32_t paddingSchemeAsInt;
    if (!GetInputInt32<HalOperand, HalOperandType>(operation, inputIndex, paddingSchemeAsInt, model, data))
    {
        return Fail("%s: failed to get padding scheme input value", __func__);
    }

    outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
    return true;
}
822
Mike Kellyb5fdf382019-06-11 16:35:25 +0100823template<typename HalOperand, typename HalOperation, typename HalModel>
arovir01b0717b52018-09-05 17:03:25 +0100824LayerInputHandle ConvertToLayerInputHandle(const HalOperation& operation,
825 uint32_t inputIndex,
826 const HalModel& model,
827 ConversionData& data)
828{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100829 const HalOperand* operand = GetInputOperand<HalOperand>(operation, inputIndex, model);
arovir01b0717b52018-09-05 17:03:25 +0100830 if (!operand)
831 {
832 Fail("%s: failed to get input operand %i", __func__, inputIndex);
833 return LayerInputHandle();
834 }
835
836 if (!IsOperandTypeSupportedForTensors(operand->type))
837 {
838 Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
839 return LayerInputHandle();
840 }
841
842 armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);
843
844 switch (operand->lifetime)
845 {
Matthew Bentham912b3622019-05-03 15:49:14 +0100846 case V1_0::OperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
847 case V1_0::OperandLifeTime::MODEL_INPUT:
848 case V1_0::OperandLifeTime::MODEL_OUTPUT:
arovir01b0717b52018-09-05 17:03:25 +0100849 {
850 // The tensor is either an operand internal to the model, or a model input.
851 // It can be associated with an ArmNN output slot for an existing layer.
852
853 // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
854 const uint32_t operandIndex = operation.inputs[inputIndex];
855 return LayerInputHandle(true, data.m_OutputSlotForOperand[operandIndex], operandTensorInfo);
856 break;
857 }
Matthew Bentham912b3622019-05-03 15:49:14 +0100858 case V1_0::OperandLifeTime::CONSTANT_COPY:
859 case V1_0::OperandLifeTime::CONSTANT_REFERENCE:
arovir01b0717b52018-09-05 17:03:25 +0100860 {
861 // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
862 ConstTensorPin tensorPin = ConvertOperandToConstTensorPin(*operand, model, data);
863 if (tensorPin.IsValid())
864 {
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100865 if (!IsLayerSupportedForAnyBackend(__func__,
866 armnn::IsConstantSupported,
867 data.m_Backends,
868 tensorPin.GetConstTensor().GetInfo()))
arovir01b0717b52018-09-05 17:03:25 +0100869 {
870 return LayerInputHandle();
871 }
872
873 armnn::IConnectableLayer* constantLayer = data.m_Network->AddConstantLayer(tensorPin.GetConstTensor());
874 armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
875 outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());
876
877 return LayerInputHandle(true, &outputSlot, operandTensorInfo);
878 }
879 else
880 {
881 Fail("%s: invalid operand tensor", __func__);
882 return LayerInputHandle();
883 }
884 break;
885 }
886 default:
887 {
888 // Unsupported lifetime for an input tensor
889 Fail("%s: unsupported lifetime for input tensor: %s",
890 __func__, toString(operand->lifetime).c_str());
891 return LayerInputHandle();
892 }
893 }
894}
895
Mike Kellyb5fdf382019-06-11 16:35:25 +0100896template<typename HalOperand, typename HalOperation, typename HalModel>
897bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
898 uint32_t operationOutputIndex,
899 armnn::IConnectableLayer& layer,
900 uint32_t layerOutputIndex,
901 const HalModel& model,
902 ConversionData& data)
903{
904 const HalOperand* outputOperand = GetOutputOperand<HalOperand>(operation, operationOutputIndex, model);
905 if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
906 {
907 return false;
908 }
909
910 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);
911
912 const uint32_t operandIndex = operation.outputs[operationOutputIndex];
913 data.m_OutputSlotForOperand[operandIndex] = &outputSlot;
914
915 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
916
917 return true;
918}
919
920template<typename HalOperand, typename HalOperation, typename HalModel>
921armnn::DataLayout OptionalDataLayout(const HalOperation& operation,
922 uint32_t inputIndex,
923 const HalModel& model,
924 ConversionData& data)
925{
926 const HalOperand* operand = GetInputOperand<HalOperand>(operation, inputIndex, model);
927 if (!operand)
928 {
929 return armnn::DataLayout::NHWC;
930 }
931
932 if (!IsBool(*operand))
933 {
934 return armnn::DataLayout::NHWC;
935 }
936
937 const void* valueAddress = GetOperandValueReadOnlyAddress(*operand, model, data);
938 if (!valueAddress)
939 {
940 return armnn::DataLayout::NHWC;
941 }
942
943 if (*(static_cast<const bool*>(valueAddress)))
944 {
945 return armnn::DataLayout::NCHW;
946 }
947 else
948 {
949 return armnn::DataLayout::NHWC;
950 }
951}
952
953template<typename HalOperand, typename HalOperation, typename HalModel>
954bool SetupAndTrackLayerOutputSlot(const HalOperation& operation,
955 uint32_t outputIndex,
956 armnn::IConnectableLayer& layer,
957 const HalModel& model,
958 ConversionData& data)
959{
960 return SetupAndTrackLayerOutputSlot<HalOperand>(operation, outputIndex, layer, outputIndex, model, data);
961}
962
963template<typename HalOperand, typename HalOperation, typename HalModel>
arovir01b0717b52018-09-05 17:03:25 +0100964bool ConvertToActivation(const HalOperation& operation,
965 const char* operationName,
966 const armnn::ActivationDescriptor& activationDesc,
967 const HalModel& model,
968 ConversionData& data)
969{
Mike Kellyb5fdf382019-06-11 16:35:25 +0100970 LayerInputHandle input = ConvertToLayerInputHandle<HalOperand>(operation, 0, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100971 if (!input.IsValid())
972 {
973 return Fail("%s: Input 0 is invalid", operationName);
974 }
975
Mike Kellyb5fdf382019-06-11 16:35:25 +0100976 const HalOperand* outputOperand = GetOutputOperand<HalOperand>(operation, 0, model);
arovir01b0717b52018-09-05 17:03:25 +0100977 if (!outputOperand)
978 {
979 return false;
980 }
981 const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
Nattapat Chaimanowongd5fd9762019-04-04 13:33:10 +0100982 if (!IsLayerSupportedForAnyBackend(__func__,
983 armnn::IsActivationSupported,
984 data.m_Backends,
985 input.GetTensorInfo(),
986 outInfo,
987 activationDesc))
arovir01b0717b52018-09-05 17:03:25 +0100988 {
989 return false;
990 }
991
992 armnn::IConnectableLayer* layer = data.m_Network->AddActivationLayer(activationDesc);
993 BOOST_ASSERT(layer != nullptr);
994 input.Connect(layer->GetInputSlot(0));
995
Mike Kellyb5fdf382019-06-11 16:35:25 +0100996 return SetupAndTrackLayerOutputSlot<HalOperand>(operation, 0, *layer, model, data);
arovir01b0717b52018-09-05 17:03:25 +0100997}
998
/// Converts an NN API 2D pooling operation into an ArmNN Pooling2d layer.
///
/// Handles both operand layouts defined by the NN API:
///  - 7 inputs:  input + (padding scheme, strideX, strideY, poolW, poolH, activation)
///  - otherwise: input + explicit (padL, padR, padT, padB, strideX, strideY,
///               poolW, poolH, activation)
///
/// @param operation     The HAL pooling operation to convert.
/// @param operationName Caller-supplied name used in failure messages.
/// @param poolType      Max / Average / L2 pooling algorithm.
/// @param model         The HAL model the operation belongs to.
/// @param data          Conversion state; receives the new layer and slot tracking.
/// @return true on success; false (usually logged via Fail) otherwise.
template<typename HalOperand, typename HalOperandType, typename HalOperation, typename HalModel>
bool ConvertPooling2d(const HalOperation& operation,
                      const char* operationName,
                      armnn::PoolingAlgorithm poolType,
                      const HalModel& model,
                      ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<HalOperand>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const HalOperand* output = GetOutputOperand<HalOperand>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    // NOTE(review): rounding mode and layout are fixed here; the NN API pooling
    // operations in this path carry no operands to override them.
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    ActivationFn activation;

    if (operation.inputs.size() == 7)
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;
        if (!GetInputPaddingScheme<HalOperand, HalOperandType>(operation, 1, scheme, model, data)
            || !GetInputScalar<HalOperand, HalOperandType>(operation, 2, HalOperandType::INT32, desc.m_StrideX, model,
                                                           data)
            || !GetInputScalar<HalOperand, HalOperandType>(operation, 3, HalOperandType::INT32, desc.m_StrideY, model,
                                                           data)
            || !GetInputScalar<HalOperand, HalOperandType>(operation, 4, HalOperandType::INT32, desc.m_PoolWidth, model,
                                                           data)
            || !GetInputScalar<HalOperand, HalOperandType>(operation, 5, HalOperandType::INT32, desc.m_PoolHeight,
                                                           model, data)
            || !GetInputActivationFunction<HalOperand, HalOperandType>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // desc.m_DataLayout is NHWC (set above), so shape index 2 is width and
        // index 1 is height.
        const unsigned int inputWidth = inputInfo.GetShape()[2];
        const unsigned int inputHeight = inputInfo.GetShape()[1];

        // Derive explicit padding from the implicit SAME/VALID scheme.
        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }
    else
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (!GetInputScalar<HalOperand, HalOperandType>(operation, 1, HalOperandType::INT32, desc.m_PadLeft, model,
                                                        data)
            || !GetInputScalar<HalOperand, HalOperandType>(operation, 2, HalOperandType::INT32, desc.m_PadRight, model,
                                                           data)
            || !GetInputScalar<HalOperand, HalOperandType>(operation, 3, HalOperandType::INT32, desc.m_PadTop, model,
                                                           data)
            || !GetInputScalar<HalOperand, HalOperandType>(operation, 4, HalOperandType::INT32, desc.m_PadBottom, model,
                                                           data)
            || !GetInputScalar<HalOperand, HalOperandType>(operation, 5, HalOperandType::INT32, desc.m_StrideX, model,
                                                           data)
            || !GetInputScalar<HalOperand, HalOperandType>(operation, 6, HalOperandType::INT32, desc.m_StrideY, model,
                                                           data)
            || !GetInputScalar<HalOperand, HalOperandType>(operation, 7, HalOperandType::INT32, desc.m_PoolWidth, model,
                                                           data)
            || !GetInputScalar<HalOperand, HalOperandType>(operation, 8, HalOperandType::INT32, desc.m_PoolHeight,
                                                           model, data)
            || !GetInputActivationFunction<HalOperand, HalOperandType>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }
    }

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsPooling2dSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc))
    {
        return false;
    }

    armnn::IConnectableLayer* pooling2dLayer = data.m_Network->AddPooling2dLayer(desc);
    if (!pooling2dLayer)
    {
        return Fail("%s: AddPooling2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the pooling layer.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, pooling2dLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(pooling2dLayer->GetInputSlot(0));

    // Track endLayer's slot: the operation's output comes after the activation.
    return SetupAndTrackLayerOutputSlot<HalOperand>(operation, 0, *endLayer, model, data);
}
1103
/// Converts an NN API CONV_2D operation into an ArmNN Convolution2d layer.
///
/// Supports both operand layouts:
///  - >= 10 inputs: explicit padding (padL, padR, padT, padB, strideX, strideY,
///                  activation) with an optional data-layout flag at index 10.
///  - >= 7 inputs:  implicit padding (scheme, strideX, strideY, activation)
///                  with an optional data-layout flag at index 7.
/// Weights and bias must be constant operands (ArmNN does not support
/// non-fixed weights or bias).
///
/// @return true on success; false (usually logged via Fail) otherwise.
template<typename HalOperand, typename HalOperandType, typename HalOperation, typename HalModel>
bool ConvertConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<HalOperand>(operation, 0, model, data);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalOperand>(operation, 0, model);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalOperand>(operation, 1, model, data);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalOperand>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Keep the bias quantization scale consistent with input/weight scales.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    armnn::Convolution2dDescriptor desc;
    // Default layout; may be overridden below by the optional data-layout operand.
    desc.m_DataLayout = armnn::DataLayout::NHWC;
    ActivationFn activation;

    if (operation.inputs.size() >= 10)
    {
        // Explicit-padding form.
        if (!GetInputScalar<HalOperand, HalOperandType>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model,
                                                        data)
            || !GetInputScalar<HalOperand, HalOperandType>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model,
                                                           data)
            || !GetInputScalar<HalOperand, HalOperandType>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model,
                                                           data)
            || !GetInputScalar<HalOperand, HalOperandType>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model,
                                                           data)
            || !GetInputScalar<HalOperand, HalOperandType>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model,
                                                           data)
            || !GetInputScalar<HalOperand, HalOperandType>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model,
                                                           data)
            || !GetInputActivationFunction<HalOperand, HalOperandType>(operation, 9, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
        desc.m_DataLayout = OptionalDataLayout<HalOperand>(operation, 10, model, data);
    }
    else if (operation.inputs.size() >= 7)
    {
        // Implicit-padding form: explicit padding is derived from the scheme.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalOperand, HalOperandType>(operation, 3, paddingScheme, model, data)
            || !GetInputScalar<HalOperand, HalOperandType>(operation, 4, HalOperandType::INT32, desc.m_StrideX,
                                                           model, data)
            || !GetInputScalar<HalOperand, HalOperandType>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model,
                                                           data)
            || !GetInputActivationFunction<HalOperand, HalOperandType>(operation, 6, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // NOTE(review): these fixed indices assume the NHWC-style ordering of the
        // weight and input shapes used in this branch — confirm if the optional
        // layout operand (read below) can select NCHW here.
        const uint32_t kernelX = weights.GetShape()[2];
        const uint32_t kernelY = weights.GetShape()[1];
        const uint32_t inputX = inputInfo.GetShape()[2];
        const uint32_t inputY = inputInfo.GetShape()[1];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);

        desc.m_DataLayout = OptionalDataLayout<HalOperand>(operation, 7, model, data);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsConvolution2dSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc,
                                       weights.GetInfo(),
                                       biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));

    if (!startLayer)
    {
        return Fail("%s: AddConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);

    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalOperand>(operation, 0, *endLayer, model, data);
}
1221
/// Converts an NN API DEPTHWISE_CONV_2D operation into an ArmNN
/// DepthwiseConvolution2d layer.
///
/// Supports both operand layouts:
///  - >= 11 inputs: explicit padding, with an optional data-layout flag at
///                  index 11 (checked via the size >= 12 look-ahead below).
///  - >= 8 inputs:  implicit padding scheme, with an optional layout flag at
///                  index 8 (size >= 9 look-ahead).
/// Weights and bias must be constant operands (ArmNN does not support
/// non-fixed weights or bias).
///
/// @return true on success; false (usually logged via Fail) otherwise.
template<typename HalOperand, typename HalOperandType, typename HalOperation, typename HalModel>
bool ConvertDepthwiseConv2d(const HalOperation& operation, const HalModel& model, ConversionData& data)
{
    LayerInputHandle input = ConvertToLayerInputHandle<HalOperand>(operation, 0, model, data);

    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const HalOperand* output = GetOutputOperand<HalOperand>(operation, 0, model);

    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias

    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    const HalOperand* weightsOperand = GetInputOperand<HalOperand>(operation, 1, model);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }
    armnn::DepthwiseConvolution2dDescriptor desc;
    desc.m_DataLayout = armnn::DataLayout::NHWC;

    // Look ahead to find the optional DataLayout, if present.
    // The layout is needed NOW (before the main operand parsing) because it
    // determines the channel index used to reinterpret the weight shape below.
    if (operation.inputs.size() >= 12)
    {
        desc.m_DataLayout = OptionalDataLayout<HalOperand>(operation, 11, model, data);
    }
    else if (operation.inputs.size() >= 9)
    {
        desc.m_DataLayout = OptionalDataLayout<HalOperand>(operation, 8, model, data);
    }

    armnnUtils::DataLayoutIndexed dataLayoutIndexed(desc.m_DataLayout);
    unsigned int channelsIndex = dataLayoutIndexed.GetChannelsIndex();
    unsigned int widthIndex = dataLayoutIndexed.GetWidthIndex();
    unsigned int heightIndex = dataLayoutIndexed.GetHeightIndex();

    // Reinterpret weight data as [ H, W, I, M ]
    // (M, the channel multiplier, is recovered as (I*M) / I from the last
    // HAL weight dimension and the input's channel count).
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1],
                                      weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[channelsIndex],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[channelsIndex] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };

    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin<HalOperand>(operation, 1, model, data,
                                                                                        HWIMToMIHW, &weightsShape);

    // Bias is a 1D tensor
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin<HalOperand>(operation, 2, model, data);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Keep the bias quantization scale consistent with input/weight scales.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), inputInfo);

    ActivationFn activation;

    if (operation.inputs.size() >= 11)
    {
        // Explicit-padding form.
        if (!GetInputScalar<HalOperand, HalOperandType>(operation, 3, HalOperandType::INT32, desc.m_PadLeft, model,
                                                        data)
            || !GetInputScalar<HalOperand, HalOperandType>(operation, 4, HalOperandType::INT32, desc.m_PadRight, model,
                                                           data)
            || !GetInputScalar<HalOperand, HalOperandType>(operation, 5, HalOperandType::INT32, desc.m_PadTop, model,
                                                           data)
            || !GetInputScalar<HalOperand, HalOperandType>(operation, 6, HalOperandType::INT32, desc.m_PadBottom, model,
                                                           data)
            || !GetInputScalar<HalOperand, HalOperandType>(operation, 7, HalOperandType::INT32, desc.m_StrideX, model,
                                                           data)
            || !GetInputScalar<HalOperand, HalOperandType>(operation, 8, HalOperandType::INT32, desc.m_StrideY, model,
                                                           data)
            || !GetInputActivationFunction<HalOperand, HalOperandType>(operation, 10, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() >= 8)
    {
        // Implicit-padding form: explicit padding is derived from the scheme.
        android::nn::PaddingScheme paddingScheme;
        if (!GetInputPaddingScheme<HalOperand, HalOperandType>(operation, 3, paddingScheme, model, data)
            || !GetInputScalar<HalOperand, HalOperandType>(operation, 4, HalOperandType::INT32, desc.m_StrideX, model,
                                                           data)
            || !GetInputScalar<HalOperand, HalOperandType>(operation, 5, HalOperandType::INT32, desc.m_StrideY, model,
                                                           data)
            || !GetInputActivationFunction<HalOperand, HalOperandType>(operation, 7, activation, model, data))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // The pinned weights are [ M, I, H, W ] after the swizzle above, so
        // index 3 is the kernel width and index 2 the kernel height.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = inputInfo.GetShape()[widthIndex];
        const uint32_t inputY = inputInfo.GetShape()[heightIndex];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    armnn::Optional<armnn::TensorInfo> biases(bias.GetInfo());

    if (!IsLayerSupportedForAnyBackend(__func__,
                                       armnn::IsDepthwiseConvolutionSupported,
                                       data.m_Backends,
                                       inputInfo,
                                       outputInfo,
                                       desc,
                                       weights.GetInfo(),
                                       biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer =
        data.m_Network->AddDepthwiseConvolution2dLayer(desc, weights, armnn::Optional<armnn::ConstTensor>(bias));
    if (!startLayer)
    {
        return Fail("%s: AddDepthwiseConvolution2dLayer failed", __func__);
    }

    // Append the fused activation (if any) after the convolution.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activation, startLayer, data);
    if (!endLayer)
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }

    input.Connect(startLayer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot<HalOperand>(operation, 0, *endLayer, model, data);
}
1372
saoste01b8471482018-10-10 09:44:51 +01001373} // namespace armnn_driver