blob: bd2443e214035c3b599a30415b8c595349cd6a6b [file] [log] [blame]
telsoa015307bc12018-03-09 13:51:08 +00001//
2// Copyright © 2017 Arm Ltd. All rights reserved.
3// See LICENSE file in the project root for full license information.
4//
5
6#define LOG_TAG "ArmnnDriver"
7
8#include "ModelToINetworkConverter.hpp"
9#include "OperationsUtils.h"
10
11#include <armnn/LayerSupport.hpp>
12#include <Permute.hpp>
13
14#include <log/log.h>
15#include <cassert>
16
17#include <boost/format.hpp>
18#include <boost/core/ignore_unused.hpp>
19#include <boost/test/tools/floating_point_comparison.hpp>
20#include <boost/cast.hpp>
21
surmeh0149b9e102018-05-17 14:11:25 +010022namespace armnn_driver
23{
24class LayerInputHandle
25{
26public:
27 LayerInputHandle()
28 : m_OutputSlot(nullptr)
29 , m_Valid(false)
30 {}
31
32 LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo)
33 : m_OutputSlot(outputSlot)
34 , m_Valid(valid)
35 , m_TensorInfo(tensorInfo)
36 {}
37
38 bool IsValid() const { return m_Valid; }
39 void Connect(armnn::IInputSlot& inputSlot)
40 {
41 assert(IsValid());
42
43 if (m_OutputSlot)
44 {
45 m_OutputSlot->Connect(inputSlot);
46 }
47 }
48 const armnn::TensorInfo& GetTensorInfo() const { return m_TensorInfo; }
49
50private:
51 armnn::IOutputSlot* m_OutputSlot;
52 bool m_Valid;
53 armnn::TensorInfo m_TensorInfo;
54};
55} // armnn_driver
56
telsoa015307bc12018-03-09 13:51:08 +000057namespace
58{
59using namespace armnn_driver;
60using namespace android::nn;
61
62// Convenience function to log the reason for failing to convert a model.
63// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    // Log at debug level; the printf-style arguments are forwarded unchanged.
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}
70
71// Convenience function to call an Is*Supported function and log caller name together with reason for lack of support.
72// Called as: IsLayerSupported(__func__, Is*Supported, a, b, c, d, e)
73template<typename IsLayerSupportedFunc, typename ... Args>
74bool IsLayerSupported(const char* funcName, IsLayerSupportedFunc f, Args&&... args)
75{
76 std::vector<char> unsupportedReason(1024+1);
77 bool isSupported = f(std::forward<Args>(args)..., unsupportedReason.data(), unsupportedReason.size()-1);
78 if(isSupported)
79 {
80 return true;
81 }
82 else
83 {
84 std::string sUnsupportedReason(unsupportedReason.data());
85 if (sUnsupportedReason.size() > 0)
86 {
87 ALOGD("%s: not supported by armnn: %s", funcName, sUnsupportedReason.c_str());
88 } else
89 {
90 ALOGD("%s: not supported by armnn", funcName);
91 }
92 return false;
93 }
94}
95
// Builds an armnn::TensorShape from the operand's dimension list
// (rank = operand.dimensions.size()).
armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}
100
101inline bool IsOperandTypeSupportedForTensors(OperandType type)
102{
103 return type == OperandType::TENSOR_FLOAT32 ||
104 type == OperandType::TENSOR_QUANT8_ASYMM ||
105 type == OperandType::TENSOR_INT32;
106}
107
// Computes the head/tail padding for one spatial dimension from the input size,
// kernel size, stride and Android NN padding scheme, delegating the arithmetic
// to the NN runtime's calculateExplicitPadding().
// boost::numeric_cast will throw if a negative padding value comes back,
// rather than silently wrapping on the signed->unsigned conversion.
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}
117
118bool ValidateBroadcast(const Model& model, const Operation& operation, uint32_t numInputs)
119{
120 assert(operation.inputs.size() > 0); // This should have been validated by the caller
121 // validateModel() has been called already so we know the operation.inputs indexes are valid within model.operands.
122 const Operand& firstInput = model.operands[operation.inputs[0]];
123
124 // We don't support broadcasting yet - we require all input operands to have the same shape
125 for (uint32_t i = 1; i < numInputs; ++i)
126 {
127 const Operand& otherInput = model.operands[operation.inputs[i]];
128
129 if (firstInput.dimensions.size() != otherInput.dimensions.size())
130 {
131 return Fail("%s: Broadcasting not supported (Input 0 dims: %i Input %i dims: %i)",
132 __func__, firstInput.dimensions.size(), i, otherInput.dimensions.size());
133 }
134
135 for (unsigned int d = 0; d < firstInput.dimensions.size(); ++d)
136 {
137 if (firstInput.dimensions[d] != otherInput.dimensions[d])
138 {
139 return Fail("%s: Broadcasting not supported (Dimension %i size mismatch. "
140 "Input 0: %i Input %i: %i)",
141 __func__, d, firstInput.dimensions[d], i, otherInput.dimensions[d]);
142 }
143 }
144 }
145
146 return true;
147}
148
149Shape GetOperandShape(const Operand& operand)
150{
151 Shape shape;
152 shape.type = operand.type;
153 shape.dimensions = operand.dimensions;
154 shape.scale = operand.scale;
155 shape.offset = operand.zeroPoint;
156 return shape;
157}
158
// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to
// the user (us, in this case) to ensure they match.
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
    const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& inputInfo)
{
    const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
    if (biasInfo.GetQuantizationScale() != expectedBiasScale)
    {
        // Scales within 1% (relative) of the expected value are snapped to it;
        // larger mismatches are deliberately left untouched.
        boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
        if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
        {
            ALOGW("Bias quantization scale has been modified to match input*weights");
            biasInfo.SetQuantizationScale(expectedBiasScale);
        }
    }
}
177
// Permutation vectors used to move tensors between the Android NN layout (NHWC)
// and the layout ArmNN layers expect. IdentityPermutation means "no swizzle".
const armnn::PermutationVector IdentityPermutation({ 0U, 1U, 2U, 3U });
// NHWCToArmNN swizzles inputs/weights into ArmNN's layout; ArmNNToNHWC is its
// inverse, applied to deswizzle results back for Android NN.
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
// Swaps dimensions 1 and 2 (self-inverse); used by concat along dimension 2.
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });
telsoa015307bc12018-03-09 13:51:08 +0000182
183template <typename OSlot>
184armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
185 const armnn::PermutationVector& mappings)
186{
187 // Add swizzle layer
188 armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);
189
190 assert(layer != nullptr);
191
192 // Connect intput to swizzle layer
193 input.Connect(layer->GetInputSlot(0));
194
195 // Setup swizzled output
196 const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
197 layer->GetOutputSlot(0).SetTensorInfo(outInfo);
198
199 return *layer;
200}
201
// Wraps the [firstLayer..lastLayer] chain with an NHWC->ArmNN permute on the
// input and an ArmNN->NHWC permute on the output. Returns the deswizzle layer,
// whose output slot represents the overall (NHWC) result of the chain.
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
    armnn::IConnectableLayer& firstLayer,
    armnn::IConnectableLayer& lastLayer)
{
    // Add swizzle layer in front of the chain
    armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);

    // Connect swizzled input to the first layer of the chain
    swizzleLayer.GetOutputSlot(0).Connect(firstLayer.GetInputSlot(0));

    // Add deswizzle layer after the last layer of the chain
    armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, lastLayer.GetOutputSlot(0), ArmNNToNHWC);

    return deswizzleLayer;
}
217
// Single-layer convenience overload: swizzle into and deswizzle out of `layer`.
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
    armnn::IConnectableLayer& layer)
{
    return SwizzleInDeswizzleOut(network, input, layer, layer);
}
surmeh0149b9e102018-05-17 14:11:25 +0100223
224bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
225 const armnn::TensorShape & outputShape,
226 uint32_t concatDim)
227{
228 // Validate the output shape is correct given the input shapes (which have just been validated)
229 unsigned int numDimensions = inputShapes[0].GetNumDimensions();
230 if (outputShape.GetNumDimensions() != numDimensions)
231 {
232 return Fail("%s: Output shape has wrong number of dimensions", __func__);
233 }
234
235 unsigned int outputSizeAlongConcatenatedDimension = 0;
236 for (unsigned int i = 0; i < inputShapes.size(); i++)
237 {
238 outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
239 }
240
241 for (unsigned int i = 0; i < numDimensions; ++i)
242 {
243 if (i == concatDim)
244 {
245 if (outputShape[i] != outputSizeAlongConcatenatedDimension)
246 {
247 return Fail(
248 "%s: Invalid output shape for dimension %d (%d != %d)",
249 __func__,
250 i,
251 outputShape[i],
252 outputSizeAlongConcatenatedDimension);
253 }
254 }
255 else
256 {
257 if (outputShape[i] != inputShapes[0][i])
258 {
259 return Fail("%s: Invalid output shape", __func__);
260 }
261 }
262 }
263
264 return true;
265}
266
267void SwizzleInputs(armnn::INetwork& network,
268 std::vector<LayerInputHandle>& inputs,
269 std::vector<armnn::TensorShape>& inputShapes,
270 const armnn::PermutationVector& mapping)
271{
272 if (!mapping.IsEqual(IdentityPermutation))
273 {
274 size_t nInputs = inputs.size();
275 for (size_t i=0; i<nInputs; ++i)
276 {
277 // add swizzle layer
278 armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
279 auto& outputSlot = swizzleLayer.GetOutputSlot(0);
280 auto& outputInfo = outputSlot.GetTensorInfo();
281 // replace inputs with the swizzled ones
282 inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
283 inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
284 }
285 }
286}
287
telsoa015307bc12018-03-09 13:51:08 +0000288} // namespace
289
290namespace armnn_driver
291{
292
// Holds an armnn::ConstTensor for a constant operand (weights/bias), optionally
// swizzling the data into ArmNN's layout. When swizzled, the pin owns a copy of
// the data; otherwise the ConstTensor points straight into a model memory pool,
// so the pin must not outlive the pools. Move-only, since m_ConstTensor may
// reference m_SwizzledTensorData.
class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    ConstTensorPin() {}

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    // the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    // @param mappings Permutation to apply to the data; an empty vector means "no swizzle".
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings)
    {
        // numBytes is only read by the assert below; ignore_unused silences the
        // unused-parameter warning in NDEBUG builds.
        boost::ignore_unused(numBytes);
        assert(tensorInfo.GetNumBytes() == numBytes);

        const bool needsSwizzling = (mappings.GetSize() > 0);
        if (needsSwizzling)
        {
            // Copy-and-permute into owned storage, then describe the tensor with
            // the correspondingly permuted TensorInfo.
            m_SwizzledTensorData.resize(tensorInfo.GetNumBytes());
            SwizzleAndroidNn4dTensorToArmNn(tensorInfo, valueStart, m_SwizzledTensorData.data(), mappings);

            m_ConstTensor = armnn::ConstTensor(armnnUtils::Permuted(tensorInfo, mappings), m_SwizzledTensorData.data());
        }
        else
        {
            m_ConstTensor = armnn::ConstTensor(tensorInfo, valueStart);
        }
    }

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    // A default-constructed (invalid) pin has no memory area.
    bool IsValid() const { return m_ConstTensor.GetMemoryArea() != nullptr; }
    const armnn::ConstTensor& GetConstTensor() const { return m_ConstTensor; }

private:
    armnn::ConstTensor m_ConstTensor;
    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;
};
336
// Builds the converter and immediately runs the conversion of `model`.
// Any armnn::Exception escaping Convert() is downgraded to an
// UnsupportedFeature result (and trips an assert in debug builds), so
// construction itself does not propagate armnn exceptions; callers inspect
// m_ConversionResult instead.
ModelToINetworkConverter::ModelToINetworkConverter(armnn::Compute compute, const Model& model,
    const std::set<unsigned int>& forcedUnsupportedOperations)
    : m_Compute(compute)
    , m_Model(model)
    , m_ForcedUnsupportedOperations(forcedUnsupportedOperations)
    , m_Network(nullptr, nullptr)
    , m_ConversionResult(ConversionResult::Success)
{
    try
    {
        Convert();
    }
    catch (armnn::Exception& e)
    {
        m_ConversionResult = ConversionResult::UnsupportedFeature;
        ALOGE("%s: Unexpected exception: %s", __func__, e.what());
        assert(false);
    }
}
356
357void ModelToINetworkConverter::Convert()
358{
359 ALOGV("ModelToINetworkConverter::Convert(): %s", GetModelSummary(m_Model).c_str());
360
361 // map the memory pool into shared pointers
362 m_MemPools.clear();
363 if (!setRunTimePoolInfosFromHidlMemories(&m_MemPools, m_Model.pools))
364 {
365 Fail("%s: Setting of run time pool infos from Hidl Memories has failed.", __func__);
366 m_ConversionResult = ConversionResult::ErrorMappingPools;
367 return;
368 }
369
370 uint32_t totalPoolSize = 0;
371 for (auto&& pool : m_Model.pools)
372 {
373 totalPoolSize += pool.size();
374 }
375
376 // Create armnn::INetwork
377 m_Network = armnn::INetwork::Create();
378
379 // add operations to it
380 // track which layer outputs each operand
381 m_OutputSlotForOperand = std::vector<armnn::IOutputSlot*>(m_Model.operands.size(), nullptr);
382
383 try
384 {
385 for (uint32_t i = 0; i < m_Model.inputIndexes.size(); i++)
386 {
387 // inputs in android nn are represented by operands
388 uint32_t inputIndex = m_Model.inputIndexes[i];
389 const Operand& operand = m_Model.operands[inputIndex];
390 const armnn::TensorInfo& tensor = GetTensorInfoForOperand(operand);
391 armnn::IConnectableLayer* layer = m_Network->AddInputLayer(i);
392
393 armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
394 outputSlot.SetTensorInfo(GetTensorInfoForOperand(operand));
395
396 // store for later layers
397 m_OutputSlotForOperand[inputIndex] = &outputSlot;
398 }
399 }
400 catch (UnsupportedOperand& e)
401 {
402 Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
403 m_ConversionResult = ConversionResult::UnsupportedFeature;
404 }
405 catch (const armnn::InvalidArgumentException& e)
406 {
407 Fail("%s: Failed to convert input operand to TensorShape: %s", __func__, e.what());
408 m_ConversionResult = ConversionResult::UnsupportedFeature;
409 }
410
411 for (uint32_t operationIdx = 0; operationIdx < m_Model.operations.size(); operationIdx++)
412 {
413 const auto& operation = m_Model.operations[operationIdx];
414
415 bool ok = true;
416 if (m_ForcedUnsupportedOperations.find(operationIdx) != m_ForcedUnsupportedOperations.end())
417 {
418 Fail("%s: Operation at index %i has been forced to be unsupported.", __func__, operationIdx);
419 ok = false;
420 }
421
422 if (ok)
423 {
424 try
425 {
426 ok = ConvertOperation(operation);
427 }
428 catch (UnsupportedOperand& e)
429 {
430 Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
431 ok = false;
432 }
433 catch (const armnn::InvalidArgumentException& e)
434 {
435 Fail("%s: Failed to convert operation in %s", __func__, e.what());
436 ok = false;
437 }
438 }
439
440 // Store whether this operation was successfully converted.
441 m_OperationSupported.emplace(operationIdx, ok);
442
443 // Any single operation failing will fail the entire conversion.
444 // We still need to continue and check the other ones.
445 if (!ok)
446 {
447 m_ConversionResult = ConversionResult::UnsupportedFeature;
448 }
449 }
450 try
451 {
452 if (m_ConversionResult == ConversionResult::Success)
453 {
454 for (uint32_t i = 0; i < m_Model.outputIndexes.size(); i++)
455 {
456 // outputs in android nn are represented by operands
457 uint32_t outputIndex = m_Model.outputIndexes[i];
458 const Operand& operand = m_Model.operands[outputIndex];
459 const armnn::TensorInfo& tensor = GetTensorInfoForOperand(operand);
460 armnn::IConnectableLayer* layer = m_Network->AddOutputLayer(i);
461
462 assert(m_OutputSlotForOperand[outputIndex]);
463 m_OutputSlotForOperand[outputIndex]->Connect(layer->GetInputSlot(0));
464 }
465 }
466 }
467 catch (const armnn::InvalidArgumentException& e)
468 {
469 Fail("%s: Failed to convert output operand to TensorShape: %s", __func__, e.what());
470 m_ConversionResult = ConversionResult::UnsupportedFeature;
471 }
472}
473
// Dispatches a single Android NN operation to the matching Convert* handler.
// Returns true if the operation was successfully converted to ArmNN layers;
// unsupported operation types fail (and are logged) via the default case.
bool ModelToINetworkConverter::ConvertOperation(const Operation& operation)
{
    switch (operation.type)
    {
        case OperationType::ADD: return ConvertAdd(operation);
        case OperationType::AVERAGE_POOL_2D: return ConvertAveragePool2d(operation);
        case OperationType::CONCATENATION: return ConvertConcatenation(operation);
        case OperationType::CONV_2D: return ConvertConv2d(operation);
        case OperationType::DEPTHWISE_CONV_2D: return ConvertDepthwiseConv2d(operation);
        case OperationType::FLOOR: return ConvertFloor(operation);
        case OperationType::FULLY_CONNECTED: return ConvertFullyConnected(operation);
        case OperationType::LOCAL_RESPONSE_NORMALIZATION: return ConvertLocalResponseNormalization(operation);
        case OperationType::LOGISTIC: return ConvertLogistic(operation);
        case OperationType::L2_NORMALIZATION: return ConvertL2Normalization(operation);
        case OperationType::L2_POOL_2D: return ConvertL2Pool2d(operation);
        case OperationType::MAX_POOL_2D: return ConvertMaxPool2d(operation);
        case OperationType::MUL: return ConvertMul(operation);
        case OperationType::RELU: return ConvertReLu(operation);
        case OperationType::RELU1: return ConvertReLu1(operation);
        case OperationType::RELU6: return ConvertReLu6(operation);
        case OperationType::SOFTMAX: return ConvertSoftmax(operation);
        case OperationType::TANH: return ConvertTanH(operation);
        case OperationType::RESHAPE: return ConvertReshape(operation);
        case OperationType::RESIZE_BILINEAR: return ConvertResizeBilinear(operation);
        default: return Fail("%s: Operation type %s not supported in ArmnnDriver",
                             __func__, toString(operation.type).c_str());
    }
}
502
telsoa015307bc12018-03-09 13:51:08 +0000503
// Converts an ADD operation: inputs 0 and 1 are the addends, input 2 is the
// fused activation function, output 0 receives the sum. When the two inputs
// have different ranks, the lower-rank input is reshaped so its dimensions are
// right-aligned against the higher-rank one (leading dimensions padded with 1)
// before the addition layer is connected.
bool ModelToINetworkConverter::ConvertAdd(const Operation& operation)
{
    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0);
    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    ActivationFn activationFunction;
    if (!GetInputActivationFunction(operation, 2, activationFunction))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);

    // Check backend support before touching the network.
    if (!IsLayerSupported(__func__,
                          armnn::IsAdditionSupported,
                          m_Compute,
                          input0.GetTensorInfo(),
                          input1.GetTensorInfo(),
                          outInfo))
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = m_Network->AddAdditionLayer();
    // endLayer is the addition layer itself, or the fused activation appended to it.
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer);

    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();

    if (endLayer != nullptr)
    {
        // If the number of dimensions do not match then we need to add degenerate dimensions
        // to the "smaller" tensor using a reshape:
        //      Small  Big
        //        |     |
        //     Reshape  |
        //         \   /
        //          Add
        if (inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
        {
            bool input0IsBigger = inputTensorInfo0.GetNumDimensions() > inputTensorInfo1.GetNumDimensions();

            LayerInputHandle& smallTensorHandle = input0IsBigger ? input1 : input0;
            const armnn::TensorInfo& smallTensorDims = smallTensorHandle.GetTensorInfo();

            LayerInputHandle& bigTensorHandle =  input0IsBigger ? input0 : input1;
            const armnn::TensorInfo& bigTensorDims = bigTensorHandle.GetTensorInfo();

            // Build the reshaped dims: all 1s, with the small tensor's dims copied
            // into the trailing positions.
            std::vector<unsigned int> reshapedDims(bigTensorDims.GetNumDimensions(), 1);
            unsigned int sizeDifference = bigTensorDims.GetNumDimensions() - smallTensorDims.GetNumDimensions();
            for (unsigned i = sizeDifference; i < bigTensorDims.GetNumDimensions(); ++i)
            {
                reshapedDims[i] = smallTensorDims.GetShape()[i-sizeDifference];
            }
            armnn::TensorInfo reshapedInfo = smallTensorDims;
            reshapedInfo.SetShape(armnn::TensorShape{ static_cast<unsigned int>(reshapedDims.size()),
                                                      reshapedDims.data() });

            armnn::ReshapeDescriptor reshapeDesc;
            reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
            armnn::IConnectableLayer* const reshapeLayer = m_Network->AddReshapeLayer(reshapeDesc);
            smallTensorHandle.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

            // Connect the outputs from new reshape and original input layer
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
            bigTensorHandle.Connect(startLayer->GetInputSlot(1));
        }
        else
        {
            input0.Connect(startLayer->GetInputSlot(0));
            input1.Connect(startLayer->GetInputSlot(1));
        }

        return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
596
// AVERAGE_POOL_2D is handled by the shared pooling converter.
bool ModelToINetworkConverter::ConvertAveragePool2d(const Operation& operation)
{
    return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Average);
}
601
// Converts a CONCATENATION operation. Inputs 0..N-1 are the tensors to join;
// input N is the (possibly negative, tensorflow-style) concatenation axis.
// ArmNN's subtensor-based concat only supports the leading axes, so for 4-D
// tensors an axis of 3 or 2 is handled by permuting every input (and, where
// needed, un-permuting the merged output) so the real concat runs on axis 1.
bool ModelToINetworkConverter::ConvertConcatenation(const Operation& operation)
{
    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar(operation, numInputTensors, OperandType::INT32, concatDim))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand(operation, 0);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    // https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    // ArmNN uses Compute Library subtensors to perform concatenation
    // This only works when concatenating along dimension 0 or 1 for a 4-D tensor,
    // or along dimension 0 for a 3-D tensor.
    const armnn::PermutationVector* permuteVectorIn = &IdentityPermutation;
    const armnn::PermutationVector* permuteVectorOut = &IdentityPermutation;

    // NOTE(review): this assert is trivially true (the pointer was assigned just above).
    assert(permuteVectorOut != nullptr);

    if (outputShape.GetNumDimensions() == 4) {
        if (concatDim == 3) {
            // Axis 3: swizzle NHWC->ArmNN so the concat axis becomes 1, and
            // deswizzle the merged output back afterwards.
            concatDim = 1;
            permuteVectorIn = &NHWCToArmNN;
            permuteVectorOut = &ArmNNToNHWC;
            outputShape = armnnUtils::Permuted(outputShape, *permuteVectorIn);
            outputInfo.SetShape(outputShape);
        } else if (concatDim == 2) {
            // Axis 2: swap dims 1 and 2 (the permutation is self-inverse).
            concatDim = 1;
            permuteVectorIn = &SwapDim1And2;
            permuteVectorOut = &SwapDim1And2;
            outputShape = armnnUtils::Permuted(outputShape, *permuteVectorIn);
            outputInfo.SetShape(outputShape);
        }
    }
    else if (!(outputShape.GetNumDimensions() == 3 && concatDim == 0))
    {
        // Operation unsupported
        return false;
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const Operand* const operand = GetInputOperand(operation, i);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        inputShapes.emplace_back(GetTensorShapeForOperand(*operand));
        inputHandles.emplace_back(ConvertToLayerInputHandle(operation, i));


        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    assert(inputShapes.size() == inputHandles.size());

    // this is no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    SwizzleInputs(*m_Network, inputHandles, inputShapes, *permuteVectorIn);

    // Create an armnn merger layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor mergerDescriptor;
    try
    {
        // The merger descriptor is always created across the only supported concat
        // dimension, which is 0 or 1
        mergerDescriptor =
            armnn::CreateMergerDescriptorForConcatenation(
                inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (const armnn::Exception& error)
    {
        return Fail("%s: Error preparing merger descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0 or 1
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
        [](const LayerInputHandle& h) -> const armnn::TensorInfo*{ return &h.GetTensorInfo(); });
    if (!IsLayerSupported(__func__,
                          armnn::IsMergerSupported,
                          m_Compute,
                          inputTensorInfos,
                          mergerDescriptor))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = m_Network->AddMergerLayer(mergerDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    if (permuteVectorOut != &IdentityPermutation)
    {
        // Add permutation layer and connect the output to it, the permutation becomes the output layer
        armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*m_Network,
                                                                   layer->GetOutputSlot(0),
                                                                   *permuteVectorOut);
        layer = &deswizzleLayer;
    }

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer);
}
760
// Converts a CONV_2D operation. Supports the explicit-padding form (10 inputs:
// pad left/right/top/bottom, strides, activation) and the implicit-padding form
// (7 inputs: padding scheme, strides, activation). Android NN tensors are NHWC;
// input/output are swizzled to ArmNN's layout around the convolution (via
// SwizzleInDeswizzleOut) and the constant weights are swizzled with NHWCToArmNN
// when pinned.
bool ModelToINetworkConverter::ConvertConv2d(const Operation& operation)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Shapes as seen by the ArmNN layers, after the NHWC->ArmNN permutation.
    const armnn::TensorInfo swizzledInputInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1, NHWCToArmNN);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Snap the bias scale to input_scale * weights_scale if it is only slightly off.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), swizzledInputInfo);

    armnn::Convolution2dDescriptor desc;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        // Explicit padding: all four pad values are given directly.
        if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop) ||
            !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom) ||
            !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX) ||
            !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY) ||
            !GetInputActivationFunction(operation, 9, activation))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        // Implicit padding: derive the pad values from the padding scheme,
        // kernel size, stride and (swizzled) input size.
        android::nn::PaddingScheme paddingScheme;

        if (!GetInputPaddingScheme(operation, 3, paddingScheme) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY) ||
            !GetInputActivationFunction(operation, 6, activation))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Indices 3/2 are width/height in the swizzled (ArmNN-layout) shapes.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = swizzledInputInfo.GetShape()[3];
        const uint32_t inputY = swizzledInputInfo.GetShape()[2];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;

    if (!IsLayerSupported(__func__,
                          armnn::IsConvolution2dSupported,
                          m_Compute,
                          swizzledInputInfo,
                          swizzledOutputInfo,
                          desc,
                          weights.GetInfo(),
                          bias.GetInfo()))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer = m_Network->AddConvolution2dLayer(desc, weights, bias);
    // endLayer is the convolution itself, or the fused activation appended to it.
    armnn::IConnectableLayer* endLayer = ProcessActivation(swizzledOutputInfo, activation, startLayer);

    if (endLayer != nullptr)
    {
        armnn::IConnectableLayer& outSwizzleLayer = SwizzleInDeswizzleOut(*m_Network, input, *startLayer, *endLayer);
        return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
862
// Converts an Android NN DEPTHWISE_CONV_2D operation into an ArmNN
// DepthwiseConvolution2d layer, wrapped in NHWC<->ArmNN swizzle/deswizzle
// layers, with constant weights/bias and an optional fused activation.
// Returns false (after logging via Fail) on invalid or unsupported inputs.
bool ModelToINetworkConverter::ConvertDepthwiseConv2d(const Operation& operation)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // AndroidNN tensors are NHWC; ArmNN works on the permuted (NCHW) layout.
    const armnn::TensorInfo swizzledInputInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);

    // ArmNN does not currently support non-fixed weights or bias

    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    // but in ArmNN it needs to be [ M, I, H, W ]
    const Operand* weightsOperand = GetInputOperand(operation, 1);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }

    // Reinterpret weight data as [ H, W, I, M ]
    // (I is taken from the input's channel dimension; M = (I*M) / I)
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1], weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };
    ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1, HWIMToMIHW, &weightsShape);

    // Bias is a 1D tensor
    ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    // Align the bias quantization scale with weights * input, as ArmNN requires.
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), swizzledInputInfo);

    armnn::DepthwiseConvolution2dDescriptor desc;
    ActivationFn activation;

    // 11 inputs: explicit padding (left/right/top/bottom) form.
    if (operation.inputs.size() == 11)
    {
        if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop) ||
            !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom) ||
            !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX) ||
            !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY) ||
            !GetInputActivationFunction(operation, 10, activation))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    // 8 inputs: implicit padding (padding-scheme) form; padding is derived below.
    else if (operation.inputs.size() == 8)
    {
        android::nn::PaddingScheme paddingScheme;

        if (!GetInputPaddingScheme(operation, 3, paddingScheme) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY) ||
            !GetInputActivationFunction(operation, 7, activation))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        // Kernel/input extents read from the swizzled [ M, I, H, W ] / NCHW shapes.
        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = swizzledInputInfo.GetShape()[3];
        const uint32_t inputY = swizzledInputInfo.GetShape()[2];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;

    if (!IsLayerSupported(__func__,
                          armnn::IsDepthwiseConvolutionSupported,
                          m_Compute,
                          swizzledInputInfo,
                          desc,
                          weights.GetInfo()))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer = m_Network->AddDepthwiseConvolution2dLayer(desc, weights, bias);
    // Appends the fused activation (if any); returns nullptr on failure.
    armnn::IConnectableLayer* endLayer = ProcessActivation(swizzledOutputInfo, activation, startLayer);

    if (endLayer != nullptr)
    {
        armnn::IConnectableLayer& outSwizzleLayer = SwizzleInDeswizzleOut(*m_Network, input, *startLayer, *endLayer);
        return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
981
982bool ModelToINetworkConverter::ConvertFloor(const Operation& operation)
983{
984 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
985 if (!input.IsValid())
986 {
987 return Fail("%s: Operation has invalid inputs", __func__);
988 }
989
990 const Operand* const outputOperand = GetOutputOperand(operation, 0);
991 if (!outputOperand)
992 {
993 return Fail("%s: Operation has invalid outputs", __func__);
994 }
995
996 if (!IsLayerSupported(__func__,
997 armnn::IsFloorSupported,
998 m_Compute,
999 input.GetTensorInfo(),
1000 GetTensorInfoForOperand(*outputOperand)))
1001 {
1002 return false;
1003 }
1004
1005 armnn::IConnectableLayer* layer = m_Network->AddFloorLayer();
1006 assert(layer != nullptr);
1007 input.Connect(layer->GetInputSlot(0));
1008
1009 return SetupAndTrackLayerOutputSlot(operation, 0, *layer);
1010}
1011
// Converts an Android NN FULLY_CONNECTED operation into an ArmNN
// FullyConnected layer with constant weights/bias and an optional fused
// activation. Inputs of rank > 2 are flattened to 2D via an inserted
// Reshape layer. Returns false (after logging) on invalid/unsupported inputs.
bool ModelToINetworkConverter::ConvertFullyConnected(const Operation& operation)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    armnn::TensorInfo reshapedInfo = inputInfo;

    // Collapse all trailing dimensions into one, giving a 2D shape
    // [ dim0, product-of-the-rest ].
    // NOTE(review): this assumes dimension 0 is the batch size. The NNAPI
    // spec instead derives batch_size as num_elements / weights-input_size,
    // which can differ for some shapes — confirm against the spec.
    if (inputInfo.GetNumDimensions() > 2U)
    {
        unsigned int dim1 = inputInfo.GetShape()[1];
        for (unsigned int i = 2U; i < inputInfo.GetNumDimensions(); ++i)
        {
            dim1 *= inputInfo.GetShape()[i];
        }
        reshapedInfo.SetShape(armnn::TensorShape({inputInfo.GetShape()[0], dim1}));
    }

    // ArmNN does not currently support non-fixed weights or bias
    ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1); // 2D
    ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2);    // 1D

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    // Input 3 is the fused activation function.
    ActivationFn activationFunction;
    if (!GetInputActivationFunction(operation, 3, activationFunction))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    // AndroidNN weights are [ num_units, input_size ]; ArmNN multiplies the
    // transposed matrix.
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    if (!IsLayerSupported(__func__,
                          armnn::IsFullyConnectedSupported,
                          m_Compute,
                          reshapedInfo,
                          desc))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer = m_Network->AddFullyConnectedLayer(desc, weights, bias);
    // Appends the fused activation (if any); returns nullptr on failure.
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            // Insert a Reshape layer so the FullyConnected layer sees the
            // flattened 2D input computed above.
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}
1102
1103bool ModelToINetworkConverter::ConvertLocalResponseNormalization(const Operation& operation)
1104{
1105 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
1106 if (!input.IsValid())
1107 {
1108 return Fail("%s: Operation has invalid inputs", __func__);
1109 }
1110
1111 const Operand* output = GetOutputOperand(operation, 0);
1112 if (!output)
1113 {
1114 return Fail("%s: Could not read output 0", __func__);
1115 }
1116
1117 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1118 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1119
1120 const armnn::TensorInfo swizzledInputInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
1121 const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);
1122
1123 armnn::NormalizationDescriptor descriptor;
1124
1125 descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
1126 descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;
1127
1128 if (!input.IsValid() ||
1129 !GetInputScalar(operation, 1, OperandType::INT32, descriptor.m_NormSize) ||
1130 !GetInputFloat32(operation, 2, descriptor.m_K) ||
1131 !GetInputFloat32(operation, 3, descriptor.m_Alpha) ||
1132 !GetInputFloat32(operation, 4, descriptor.m_Beta))
1133 {
1134 return Fail("%s: Operation has invalid inputs", __func__);
1135 }
1136
1137 // ArmNN expects normSize to be the full size of the normalization
1138 // window rather than the radius as in AndroidNN.
1139 descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
1140
1141 if (!IsLayerSupported(__func__,
1142 armnn::IsNormalizationSupported,
1143 m_Compute,
1144 swizzledInputInfo,
1145 swizzledOutputInfo,
1146 descriptor))
1147 {
1148 return false;
1149 }
1150
1151
1152 armnn::IConnectableLayer* layer = m_Network->AddNormalizationLayer(descriptor);
1153 assert(layer != nullptr);
1154 layer->GetOutputSlot(0).SetTensorInfo(swizzledOutputInfo);
1155
1156 armnn::IConnectableLayer& outSwizzleLayer = SwizzleInDeswizzleOut(*m_Network, input, *layer);
1157
1158 return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer);
1159}
1160
1161bool ModelToINetworkConverter::ConvertLogistic(const Operation& operation)
1162{
1163 armnn::ActivationDescriptor desc;
surmeh0149b9e102018-05-17 14:11:25 +01001164 desc.m_Function = armnn::ActivationFunction::Sigmoid;
telsoa015307bc12018-03-09 13:51:08 +00001165
1166 return ConvertToActivation(operation, __func__, desc);
1167}
1168
1169bool ModelToINetworkConverter::ConvertL2Normalization(const Operation& operation)
1170{
1171 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
1172 if (!input.IsValid())
1173 {
1174 return Fail("%s: Operation has invalid inputs", __func__);
1175 }
1176
1177 const Operand* output = GetOutputOperand(operation, 0);
1178 if (!output)
1179 {
1180 return Fail("%s: Could not read output 0", __func__);
1181 }
1182
1183 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1184 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1185
1186 const armnn::TensorInfo swizzledInputInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
1187 const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);
1188
1189 if (!IsLayerSupported(__func__,
1190 armnn::IsL2NormalizationSupported,
1191 m_Compute,
1192 swizzledInputInfo))
1193 {
1194 return false;
1195 }
1196
1197 armnn::IConnectableLayer* layer = m_Network->AddL2NormalizationLayer();
1198 assert(layer != nullptr);
1199 layer->GetOutputSlot(0).SetTensorInfo(swizzledOutputInfo);
1200
1201 armnn::IConnectableLayer& outSwizzleLayer = SwizzleInDeswizzleOut(*m_Network, input, *layer);
1202
1203 return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer);
1204}
1205
1206bool ModelToINetworkConverter::ConvertL2Pool2d(const Operation& operation)
1207{
1208 return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::L2);
1209}
1210
1211bool ModelToINetworkConverter::ConvertMaxPool2d(const Operation& operation)
1212{
1213 return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Max);
1214}
1215
1216bool ModelToINetworkConverter::ConvertMul(const Operation& operation)
1217{
1218 LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0);
1219 LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1);
1220
1221 if (!input0.IsValid() || !input1.IsValid())
1222 {
1223 return Fail("%s: Operation has invalid inputs", __func__);
1224 }
1225
1226 ActivationFn activationFunction;
1227 if (!GetInputActivationFunction(operation, 2, activationFunction))
1228 {
1229 return Fail("%s: Operation has invalid inputs", __func__);
1230 }
1231
1232 if (!ValidateBroadcast(m_Model, operation, 2u))
1233 {
1234 return Fail("%s is invalid due to broadcasting", __func__);
1235 }
1236
1237 if (!IsLayerSupported(__func__,
1238 armnn::IsMultiplicationSupported,
1239 m_Compute,
1240 input0.GetTensorInfo(),
1241 input1.GetTensorInfo()))
1242 {
1243 return false;
1244 }
1245
1246 const Operand* outputOperand = GetOutputOperand(operation, 0);
1247
1248 if (outputOperand == nullptr)
1249 {
1250 return false;
1251 }
1252
1253 const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);
1254
1255 armnn::IConnectableLayer* const startLayer = m_Network->AddMultiplicationLayer();
1256 armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer);
1257
1258 if (endLayer != nullptr)
1259 {
1260 input0.Connect(startLayer->GetInputSlot(0));
1261 input1.Connect(startLayer->GetInputSlot(1));
1262
1263 return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer);
1264 }
1265 else
1266 {
1267 return Fail("%s: ProcessActivation failed", __func__);
1268 }
1269}
1270
1271bool ModelToINetworkConverter::ConvertReLu(const Operation& operation)
1272{
1273 armnn::ActivationDescriptor desc;
1274 desc.m_Function = armnn::ActivationFunction::ReLu;
1275
1276 return ConvertToActivation(operation, __func__, desc);
1277}
1278
1279bool ModelToINetworkConverter::ConvertReLu1(const Operation& operation)
1280{
1281 armnn::ActivationDescriptor desc;
1282 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1283 desc.m_A = 1.0f;
1284 desc.m_B = -1.0f;
1285
1286 return ConvertToActivation(operation, __func__, desc);
1287}
1288
1289bool ModelToINetworkConverter::ConvertReLu6(const Operation& operation)
1290{
1291 armnn::ActivationDescriptor desc;
1292 desc.m_Function = armnn::ActivationFunction::BoundedReLu;
1293 desc.m_A = 6.0f;
1294
1295 return ConvertToActivation(operation, __func__, desc);
1296}
1297
1298bool ModelToINetworkConverter::ConvertSoftmax(const Operation& operation)
1299{
1300 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
1301 if (!input.IsValid())
1302 {
1303 return Fail("%s: Operation has invalid inputs", __func__);
1304 }
1305
1306 armnn::SoftmaxDescriptor desc;
1307 if (!GetInputFloat32(operation, 1, desc.m_Beta))
1308 {
1309 return Fail("%s: Operation has invalid inputs", __func__);
1310 }
1311
1312 if (!IsLayerSupported(__func__,
1313 armnn::IsSoftmaxSupported,
1314 m_Compute,
1315 input.GetTensorInfo(),
1316 desc))
1317 {
1318 return false;
1319 }
1320
1321 armnn::IConnectableLayer* layer = m_Network->AddSoftmaxLayer(desc);
1322 assert(layer != nullptr);
1323 input.Connect(layer->GetInputSlot(0));
1324
1325 return SetupAndTrackLayerOutputSlot(operation, 0, *layer);
1326}
1327
1328bool ModelToINetworkConverter::ConvertTanH(const Operation& operation)
1329{
1330 armnn::ActivationDescriptor desc;
1331 desc.m_Function = armnn::ActivationFunction::TanH;
1332 desc.m_A = 1.0f; // android nn does not support tanH parameters
1333 desc.m_B = 1.0f; // set to 1.0f for unity scaling
1334
1335 return ConvertToActivation(operation, __func__, desc);
1336}
1337
1338bool ModelToINetworkConverter::ConvertReshape(const Operation& operation)
1339{
1340 const Operand* inputOperand = GetInputOperand(operation, 0);
1341 const Operand* requestedShapeOperand = GetInputOperand(operation, 1);
1342 const Operand* outputOperand = GetOutputOperand(operation, 0);
1343
1344 if (inputOperand == nullptr
1345 || requestedShapeOperand == nullptr
1346 || outputOperand == nullptr)
1347 {
1348 return Fail("%s: Operation has invalid inputs", __func__);
1349 }
1350
1351
1352 if (requestedShapeOperand->dimensions.size() != 1)
1353 {
1354 return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
1355 __func__, requestedShapeOperand->dimensions.size());
1356 }
1357
1358 std::vector<int32_t> targetDimensions;
1359 if (!GetTensorInt32Values(*requestedShapeOperand, targetDimensions))
1360 {
1361 return Fail("%s: Could not read values of input 1", __func__);
1362 }
1363
1364 const Shape inputOperandShape = GetOperandShape(*inputOperand);
1365
1366 Shape requestedShape;
1367 // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
1368 // function that resolves these values into a fully specified tensor shape.
1369 if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
1370 {
1371 return Fail("%s: Failed to resolve the requested shape", __func__);
1372 }
1373
1374 const Shape outputOperandShape = GetOperandShape(*outputOperand);
1375 if (!SameShape(requestedShape, outputOperandShape))
1376 {
1377 return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
1378 }
1379
1380 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
1381 if (!input.IsValid())
1382 {
1383 return Fail("%s: Could not read input 0", __func__);
1384 }
1385
1386 if (!IsLayerSupported(__func__,
1387 armnn::IsReshapeSupported,
1388 m_Compute,
1389 input.GetTensorInfo()))
1390 {
1391 return false;
1392 }
1393
1394
1395 armnn::ReshapeDescriptor reshapeDescriptor;
1396 reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
1397 requestedShape.dimensions.data());
1398
1399 armnn::IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDescriptor);
1400 assert(layer != nullptr);
1401 input.Connect(layer->GetInputSlot(0));
1402
1403 return SetupAndTrackLayerOutputSlot(operation, 0, *layer);
1404}
1405
1406bool ModelToINetworkConverter::ConvertResizeBilinear(const Operation& operation)
1407{
1408 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
1409 if (!input.IsValid())
1410 {
1411 return Fail("%s: Could not read input 0", __func__);
1412 }
1413
1414 const Operand* output = GetOutputOperand(operation, 0);
1415 if (!output)
1416 {
1417 return Fail("%s: Could not read output 0", __func__);
1418 }
1419
1420 const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
1421 const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);
1422
1423 const armnn::TensorInfo swizzledInputInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
1424 const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);
1425
1426 if (!IsLayerSupported(__func__,
1427 armnn::IsResizeBilinearSupported,
1428 m_Compute,
1429 swizzledInputInfo))
1430 {
1431 return false;
1432 }
1433
1434 armnn::ResizeBilinearDescriptor desc;
1435
1436 if ( !GetInputScalar(operation, 1, OperandType::INT32, desc.m_TargetHeight)
1437 || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_TargetWidth))
1438 {
1439 return Fail("%s: Operation has invalid inputs", __func__);
1440 }
1441
1442 armnn::IConnectableLayer* layer = m_Network->AddResizeBilinearLayer(desc);
1443 assert(layer != nullptr);
1444 layer->GetOutputSlot(0).SetTensorInfo(swizzledOutputInfo);
1445
1446 armnn::IConnectableLayer& outSwizzleLayer = SwizzleInDeswizzleOut(*m_Network, input, *layer);
1447
1448 return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer);
1449
1450}
1451
1452bool ModelToINetworkConverter::ConvertToActivation(const Operation& operation,
1453 const char* operationName,
1454 const armnn::ActivationDescriptor& activationDesc)
1455{
1456 LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
1457 if (!input.IsValid())
1458 {
1459 return Fail("%s: Input 0 is invalid", operationName);
1460 }
1461
1462 if (!IsLayerSupported(__func__,
1463 armnn::IsActivationSupported,
1464 m_Compute,
1465 input.GetTensorInfo(),
1466 activationDesc))
1467 {
1468 return false;
1469 }
1470
1471 armnn::IConnectableLayer* layer = m_Network->AddActivationLayer(activationDesc);
1472 assert(layer != nullptr);
1473 input.Connect(layer->GetInputSlot(0));
1474
1475 return SetupAndTrackLayerOutputSlot(operation, 0, *layer);
1476}
1477
// Shared helper converting the AndroidNN pooling operations
// (AVERAGE/L2/MAX_POOL_2D) into an ArmNN Pooling2d layer of the given
// poolType, wrapped in NHWC<->ArmNN swizzle/deswizzle layers.
// A 1x1 pool is mapped to a trivial Splitter layer instead, since ArmNN
// does not accept a pool size of 1. Returns false (after logging) on
// invalid or unsupported inputs.
bool ModelToINetworkConverter::ConvertPooling2d(const Operation& operation,
    const char* operationName,
    armnn::PoolingAlgorithm poolType)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const Operand* output = GetOutputOperand(operation, 0);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // AndroidNN tensors are NHWC; ArmNN works on the permuted (NCHW) layout.
    const armnn::TensorInfo swizzledInputInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;

    ActivationFn activation;

    if (operation.inputs.size() == 7)
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;

        if (   !GetInputPaddingScheme(operation, 1, scheme)
            || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_StrideX)
            || !GetInputScalar(operation, 3, OperandType::INT32, desc.m_StrideY)
            || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PoolWidth)
            || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PoolHeight)
            || !GetInputActivationFunction(operation, 6, activation))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        // Derive explicit padding from the implicit padding scheme.
        const unsigned int inputWidth = swizzledInputInfo.GetShape()[3];
        const unsigned int inputHeight = swizzledInputInfo.GetShape()[2];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }
    else
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (   !GetInputScalar(operation, 1, OperandType::INT32, desc.m_PadLeft)
            || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_PadRight)
            || !GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadTop)
            || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadBottom)
            || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideX)
            || !GetInputScalar(operation, 6, OperandType::INT32, desc.m_StrideY)
            || !GetInputScalar(operation, 7, OperandType::INT32, desc.m_PoolWidth)
            || !GetInputScalar(operation, 8, OperandType::INT32, desc.m_PoolHeight)
            || !GetInputActivationFunction(operation, 9, activation))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }
    }

    // ArmNN does not accept a pool size of 1, but the ArmNN driver is expected to cope.
    // This is mapped to a trivial splitter instead.
    armnn::IConnectableLayer* startLayer = nullptr;
    if (desc.m_PoolWidth != 1 || desc.m_PoolHeight != 1)
    {
        if (!IsLayerSupported(__func__,
                              armnn::IsPooling2dSupported,
                              m_Compute,
                              swizzledInputInfo,
                              swizzledOutputInfo,
                              desc))
        {
            return false;
        }

        startLayer = m_Network->AddPooling2dLayer(desc);
    }
    else
    {
        // Build a single-view splitter spanning the whole output tensor
        // (an identity operation).
        const unsigned int numDims = swizzledOutputInfo.GetNumDimensions();

        armnn::ViewsDescriptor viewsDesc(1, numDims);

        for (unsigned int i = 0; i < numDims; ++i)
        {
            viewsDesc.SetViewOriginCoord(0, i, 0);
            viewsDesc.SetViewSize(0, i, swizzledOutputInfo.GetShape()[i]);
        }

        if (!IsLayerSupported(__func__,
                              armnn::IsSplitterSupported,
                              m_Compute,
                              swizzledInputInfo,
                              viewsDesc))
        {
            return false;
        }

        startLayer = m_Network->AddSplitterLayer(viewsDesc);
    }

    // Appends the fused activation (if any); returns nullptr on failure.
    armnn::IConnectableLayer* endLayer = ProcessActivation(swizzledOutputInfo, activation, startLayer);

    if (endLayer != nullptr)
    {
        armnn::IConnectableLayer& outSwizzleLayer = SwizzleInDeswizzleOut(*m_Network, input, *startLayer, *endLayer);
        return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", operationName);
    }
}
1597
1598const void* ModelToINetworkConverter::GetOperandValueReadOnlyAddress(const Operand& operand) const
1599{
1600 const void* valueStart = nullptr;
1601
1602 switch (operand.lifetime)
1603 {
1604 case OperandLifeTime::CONSTANT_COPY:
1605 {
1606 // Constant found in model.operandValues
1607 valueStart = &m_Model.operandValues[operand.location.offset];
1608 break;
1609 }
1610 case OperandLifeTime::CONSTANT_REFERENCE:
1611 {
1612 // Constant specified via a Memory object
1613 valueStart = GetMemoryFromPool(operand.location, m_MemPools);
1614 break;
1615 }
1616 default:
1617 {
1618 // Unsupported/invalid (e.g. can't get value of an input to the model)
1619 Fail("%s: unsupported/invalid operand lifetime: %s",
1620 __func__, toString(operand.lifetime).c_str());
1621 valueStart = nullptr;
1622 }
1623 }
1624
1625 return valueStart;
1626}
1627
1628const Operand* ModelToINetworkConverter::GetInputOperand(const Operation& operation, uint32_t inputIndex) const
1629{
1630 if (inputIndex >= operation.inputs.size())
1631 {
1632 Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
1633 return nullptr;
1634 }
1635
1636 assert(operation.inputs[inputIndex] < m_Model.operands.size()); // Model should have been validated beforehand
1637 return &m_Model.operands[operation.inputs[inputIndex]];
1638}
1639
1640const Operand* ModelToINetworkConverter::GetOutputOperand(const Operation& operation, uint32_t outputIndex) const
1641{
1642 if (outputIndex >= operation.outputs.size())
1643 {
1644 Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
1645 return nullptr;
1646 }
1647
1648 assert(operation.outputs[outputIndex] < m_Model.operands.size()); // Model should have been validated beforehand
1649 return &m_Model.operands[operation.outputs[outputIndex]];
1650}
1651
1652template<typename T>
1653bool ModelToINetworkConverter::GetInputScalar(const Operation& operation, uint32_t inputIndex,
1654 OperandType type, T& outValue) const
1655{
1656 const Operand* operand = GetInputOperand(operation, inputIndex);
1657 if (!operand)
1658 {
1659 return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
1660 }
1661
1662 if (operand->type != type)
1663 {
1664 return Fail("%s: unexpected operand type: %s (should be %s)",
1665 __func__, toString(operand->type).c_str(), toString(type).c_str());
1666 }
1667
1668 if (operand->location.length != sizeof(T))
1669 {
1670 return Fail("%s: incorrect operand location length: %i (should be %i)",
1671 __func__, operand->location.length, sizeof(T));
1672 }
1673
1674 const void* valueAddress = GetOperandValueReadOnlyAddress(*operand);
1675 if (!valueAddress)
1676 {
1677 return Fail("%s: failed to get address for operand", __func__);
1678 }
1679
1680 outValue = *(static_cast<const T*>(valueAddress));
1681 return true;
1682}
1683
1684bool ModelToINetworkConverter::GetInputInt32(const Operation& operation, uint32_t inputIndex, int32_t& outValue) const
1685{
1686 return GetInputScalar(operation, inputIndex, OperandType::INT32, outValue);
1687}
1688
1689bool ModelToINetworkConverter::GetInputFloat32(const Operation& operation, uint32_t inputIndex, float& outValue) const
1690{
1691 return GetInputScalar(operation, inputIndex, OperandType::FLOAT32, outValue);
1692}
1693
1694bool ModelToINetworkConverter::GetInputActivationFunction(const Operation& operation,
1695 uint32_t inputIndex,
1696 ActivationFn& outActivationFunction) const
1697{
1698 int32_t activationFunctionAsInt;
1699 if (!GetInputInt32(operation, inputIndex, activationFunctionAsInt))
1700 {
1701 return Fail("%s: failed to get activation input value", __func__);
1702 }
1703
1704 outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
1705 return true;
1706}
1707
1708bool ModelToINetworkConverter::GetInputPaddingScheme(const Operation& operation,
1709 uint32_t inputIndex,
1710 android::nn::PaddingScheme& outPaddingScheme) const
1711{
1712 int32_t paddingSchemeAsInt;
1713 if (!GetInputInt32(operation, inputIndex, paddingSchemeAsInt))
1714 {
1715 return Fail("%s: failed to get padding scheme input value", __func__);
1716 }
1717
1718 outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
1719 return true;
1720}
1721
// Resolves an operation input to a LayerInputHandle, i.e. an ArmNN output
// slot that can subsequently be connected to a layer's input slot:
// - TEMPORARY_VARIABLE / MODEL_INPUT: reuses the slot previously registered
//   for the operand (which may be null if the producing layer failed to
//   convert — the handle is still marked valid in that case).
// - CONSTANT_COPY / CONSTANT_REFERENCE: materializes a new Constant layer
//   holding the operand's value.
// Returns a default (invalid) handle after logging on failure.
LayerInputHandle ModelToINetworkConverter::ConvertToLayerInputHandle(
    const Operation& operation,
    uint32_t inputIndex)
{
    const Operand* operand = GetInputOperand(operation, inputIndex);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);

    switch (operand->lifetime)
    {
        case OperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
        case OperandLifeTime::MODEL_INPUT:
        {
            // The tensor is either an operand internal to the model, or a model input.
            // It can be associated with an ArmNN output slot for an existing layer.

            // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
            const uint32_t operandIndex = operation.inputs[inputIndex];
            return LayerInputHandle(true, m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            break;
        }
        case OperandLifeTime::CONSTANT_COPY:
        case OperandLifeTime::CONSTANT_REFERENCE:
        {
            // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
            ConstTensorPin tensorPin = ConvertOperandToConstTensorPin(*operand);
            if (tensorPin.IsValid())
            {
                if (!IsLayerSupported(__func__,
                                      armnn::IsConstantSupported,
                                      m_Compute,
                                      tensorPin.GetConstTensor().GetInfo()))
                {
                    return LayerInputHandle();
                }

                armnn::IConnectableLayer* constantLayer = m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                return LayerInputHandle(true, &outputSlot, operandTensorInfo);
            }
            else
            {
                Fail("%s: invalid operand tensor", __func__);
                return LayerInputHandle();
            }
            break;
        }
        default:
        {
            // Unsupported lifetime for an input tensor
            Fail("%s: unsupported lifetime for input tensor: %s",
                __func__, toString(operand->lifetime).c_str());
            return LayerInputHandle();
        }
    }
}
1791
1792ConstTensorPin ModelToINetworkConverter::ConvertOperationInputToConstTensorPin(const Operation& operation,
1793 uint32_t inputIndex, const armnn::PermutationVector& dimensionMappings,
1794 const armnn::TensorShape* overrideTensorShape)
1795{
1796 const Operand* operand = GetInputOperand(operation, inputIndex);
1797 if (!operand)
1798 {
1799 Fail("%s: failed to get input operand", __func__);
1800 return ConstTensorPin();
1801 }
1802
1803 return ConvertOperandToConstTensorPin(*operand, dimensionMappings, overrideTensorShape);
1804}
1805
1806ConstTensorPin ModelToINetworkConverter::ConvertOperandToConstTensorPin(const Operand& operand,
1807 const armnn::PermutationVector& dimensionMappings, const armnn::TensorShape* overrideTensorShape)
1808{
1809 if (!IsOperandTypeSupportedForTensors(operand.type))
1810 {
1811 Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
1812 return ConstTensorPin();
1813 }
1814
1815 if (operand.lifetime != OperandLifeTime::CONSTANT_COPY && operand.lifetime != OperandLifeTime::CONSTANT_REFERENCE)
1816 {
1817 Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
1818 return ConstTensorPin();
1819 }
1820
1821 const void* const valueStart = GetOperandValueReadOnlyAddress(operand);
1822 if (!valueStart)
1823 {
1824 Fail("%s: failed to get operand address", __func__);
1825 return ConstTensorPin();
1826 }
1827
1828 armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
1829 if (overrideTensorShape != nullptr)
1830 {
1831 tensorInfo.SetShape(*overrideTensorShape);
1832 }
1833 return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
1834}
1835
1836bool ModelToINetworkConverter::GetTensorInt32Values(const Operand& operand, std::vector<int32_t>& outValues) const
1837{
1838 if (operand.type != OperandType::TENSOR_INT32)
1839 {
1840 return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
1841 }
1842
1843 const void* startAddress = GetOperandValueReadOnlyAddress(operand);
1844 if (!startAddress)
1845 {
1846 return Fail("%s: failed to get operand address", __func__, operand.type);
1847 }
1848
1849 // Check number of bytes is sensible
1850 const uint32_t numBytes = operand.location.length;
1851 if (numBytes % sizeof(int32_t) != 0)
1852 {
1853 return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
1854 __func__, numBytes, sizeof(int32_t));
1855 }
1856
1857 outValues.resize(numBytes / sizeof(int32_t));
1858 memcpy(outValues.data(), startAddress, numBytes);
1859 return true;
1860}
1861
1862// Creates an ArmNN activation layer and connects it to the given layer, if the
1863// passed in AndroidNN activation function requires so.
1864// @return The end layer of the sequence of layers built for the given AndroidNN
1865// activation function or nullptr if an error occurred (e.g. unsupported activation).
1866// Note that the end layer matches the input layer if no activation is required
1867// (the sequence of layers has length 1).
1868armnn::IConnectableLayer* ModelToINetworkConverter::ProcessActivation(const armnn::TensorInfo& tensorInfo,
1869 ActivationFn activation, armnn::IConnectableLayer* prevLayer)
1870{
1871 assert(prevLayer->GetNumOutputSlots() == 1);
1872
1873 prevLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
1874
1875 armnn::IConnectableLayer* activationLayer = prevLayer;
1876
1877 if (activation != ActivationFn::kActivationNone)
1878 {
1879 armnn::ActivationDescriptor activationDesc;
1880 switch (activation)
1881 {
1882 case ActivationFn::kActivationRelu:
1883 {
1884 activationDesc.m_Function = armnn::ActivationFunction::ReLu;
1885 break;
1886 }
1887 case ActivationFn::kActivationRelu1:
1888 {
1889 activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
1890 activationDesc.m_A = 1.0f;
1891 activationDesc.m_B = -1.0f;
1892 break;
1893 }
1894 case ActivationFn::kActivationRelu6:
1895 {
1896 activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
1897 activationDesc.m_A = 6.0f;
1898 break;
1899 }
1900 case ActivationFn::kActivationSigmoid:
1901 {
1902 activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
1903 break;
1904 }
1905 case ActivationFn::kActivationTanh:
1906 {
1907 activationDesc.m_Function = armnn::ActivationFunction::TanH;
1908 activationDesc.m_A = 1.0f;
1909 activationDesc.m_B = 1.0f;
1910 break;
1911 }
1912 default:
1913 {
1914 Fail("%s: Invalid activation enum value %i", __func__, activation);
1915 return nullptr;
1916 }
1917 }
1918
1919 if (!IsLayerSupported(__func__, armnn::IsActivationSupported, m_Compute,
1920 prevLayer->GetOutputSlot(0).GetTensorInfo(), activationDesc))
1921 {
1922 return nullptr;
1923 }
1924
1925 activationLayer = m_Network->AddActivationLayer(activationDesc);
1926
1927 prevLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
1928 activationLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
1929 }
1930
1931 return activationLayer;
1932}
1933
1934bool ModelToINetworkConverter::SetupAndTrackLayerOutputSlot(const Operation& operation, uint32_t outputIndex,
1935 armnn::IConnectableLayer& layer)
1936{
1937 const Operand* outputOperand = GetOutputOperand(operation, outputIndex);
1938
1939 if ((outputOperand == nullptr) || (outputIndex >= layer.GetNumOutputSlots()))
1940 {
1941 return false;
1942 }
1943
1944 armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(outputIndex);
1945
1946 const uint32_t operandIndex = operation.outputs[outputIndex];
1947 m_OutputSlotForOperand[operandIndex] = &outputSlot;
1948
1949 outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));
1950
1951 return true;
1952}
1953
1954bool ModelToINetworkConverter::IsOperationSupported(uint32_t operationIndex) const
1955{
1956 std::map<uint32_t, bool>::const_iterator it = m_OperationSupported.find(operationIndex);
1957 assert(it != m_OperationSupported.end());
1958 return it->second;
1959}
1960
1961
surmeh0149b9e102018-05-17 14:11:25 +01001962} // armnn_driver