//
// Copyright © 2017 Arm Ltd. All rights reserved.
// See LICENSE file in the project root for full license information.
//

#define LOG_TAG "ArmnnDriver"

#include "ModelToINetworkConverter.hpp"
#include <OperationsUtils.h>

#include <armnn/LayerSupport.hpp>
#include <Permute.hpp>

#include <log/log.h>
#include <cassert>

#include <boost/format.hpp>
#include <boost/core/ignore_unused.hpp>
#include <boost/test/tools/floating_point_comparison.hpp>
#include <boost/cast.hpp>
#include <boost/optional.hpp>

using namespace android::hardware;

namespace armnn_driver
{

class LayerInputHandle
{
public:
    LayerInputHandle()
        : m_OutputSlot(nullptr)
        , m_Valid(false)
    {}

    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo)
        : m_OutputSlot(outputSlot)
        , m_Valid(valid)
        , m_TensorInfo(tensorInfo)
    {}

    bool IsValid() const { return m_Valid; }
    void Connect(armnn::IInputSlot& inputSlot)
    {
        assert(IsValid());

        if (m_OutputSlot)
        {
            m_OutputSlot->Connect(inputSlot);
        }
    }
    const armnn::TensorInfo& GetTensorInfo() const { return m_TensorInfo; }

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};

} // namespace armnn_driver

namespace
{

using namespace armnn_driver;
using namespace android::nn;

// Convenience function to log the reason for failing to convert a model.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}

// Convenience function to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: IsLayerSupported(__func__, Is*Supported, a, b, c, d, e)
template<typename IsLayerSupportedFunc, typename ... Args>
bool IsLayerSupported(const char* funcName, IsLayerSupportedFunc f, Args&&... args)
{
    std::vector<char> unsupportedReason(1024 + 1);
    bool isSupported = f(std::forward<Args>(args)..., unsupportedReason.data(), unsupportedReason.size() - 1);
    if (isSupported)
    {
        return true;
    }
    else
    {
        std::string sUnsupportedReason(unsupportedReason.data());
        if (sUnsupportedReason.size() > 0)
        {
            ALOGD("%s: not supported by armnn: %s", funcName, sUnsupportedReason.c_str());
        }
        else
        {
            ALOGD("%s: not supported by armnn", funcName);
        }
        return false;
    }
}

armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}

inline bool IsOperandTypeSupportedForTensors(OperandType type)
{
    return type == OperandType::TENSOR_FLOAT32 ||
           type == OperandType::TENSOR_QUANT8_ASYMM ||
           type == OperandType::TENSOR_INT32;
}

void BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1, armnn::IConnectableLayer* startLayer,
                     armnn::INetwork& network)
{
    BOOST_ASSERT(startLayer != nullptr);
    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();

    if (inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
    {
        // If the number of dimensions does not match then we need to add degenerate dimensions
        // to the "smaller" tensor using a reshape:
        //      Small  Big
        //        |     |
        //     Reshape  |
        //         \   /
        //          Add
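        //
        // Example (illustrative): broadcasting a [ 3, 4 ] tensor against a [ 2, 3, 4 ] tensor
        // reshapes the smaller one to [ 1, 3, 4 ], right-aligning its dimensions as computed below.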
        bool input0IsBigger = inputTensorInfo0.GetNumDimensions() > inputTensorInfo1.GetNumDimensions();

        LayerInputHandle& smallTensorHandle = input0IsBigger ? input1 : input0;
        const armnn::TensorInfo& smallTensorDims = smallTensorHandle.GetTensorInfo();

        LayerInputHandle& bigTensorHandle = input0IsBigger ? input0 : input1;
        const armnn::TensorInfo& bigTensorDims = bigTensorHandle.GetTensorInfo();

        const unsigned int bigTensorDimsNumber = bigTensorDims.GetNumDimensions();
        std::vector<unsigned int> reshapedDims(bigTensorDimsNumber, 1);
        unsigned int sizeDifference = bigTensorDimsNumber - smallTensorDims.GetNumDimensions();
        for (unsigned int i = sizeDifference; i < bigTensorDimsNumber; ++i)
        {
            reshapedDims[i] = smallTensorDims.GetShape()[i - sizeDifference];
        }
        armnn::TensorInfo reshapedInfo = smallTensorDims;
        reshapedInfo.SetShape(armnn::TensorShape{ static_cast<unsigned int>(reshapedDims.size()),
                                                  reshapedDims.data() });

        armnn::ReshapeDescriptor reshapeDesc;
        reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
        armnn::IConnectableLayer* const reshapeLayer = network.AddReshapeLayer(reshapeDesc);
        smallTensorHandle.Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

        // Connect the output of the new reshape and the original "bigger" input to the start layer
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        bigTensorHandle.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
}

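// Converts an implicit AndroidNN padding scheme (SAME/VALID) into explicit head and tail padding amounts.
// Example (illustrative): for SAME padding with input=7, kernel=3 and stride=2, the output size is
// ceil(7/2) = 4, so the total padding is (4-1)*2 + 3 - 7 = 2, split as padHead=1 and padTail=1.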
void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}

Shape GetOperandShape(const Operand& operand)
{
    Shape shape;
    shape.type = operand.type;
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match, so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the user
// (us, in this case) to ensure they match.
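// Example (illustrative): with inputScale = 0.5f and weightScale = 0.25f, the expected bias scale is
// 0.125f; a bias scale of 0.1255f is within the 1% tolerance used below and is snapped to 0.125f.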
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& inputInfo)
{
    const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
    if (biasInfo.GetQuantizationScale() != expectedBiasScale)
    {
        boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
        if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
        {
            ALOGW("Bias quantization scale has been modified to match input*weights");
            biasInfo.SetQuantizationScale(expectedBiasScale);
        }
    }
}

// 4D Tensor Permutations
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });
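// Note: each element of a permutation vector gives the destination dimension of the corresponding
// source dimension, so NHWCToArmNN sends H (dim 1) to dim 2, W (dim 2) to dim 3 and C (dim 3) to dim 1.
// Example (illustrative): an NHWC shape { 1, 224, 224, 3 } becomes the NCHW shape { 1, 3, 224, 224 }.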

template<typename OSlot>
armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
                                          const armnn::PermutationVector& mappings)
{
    // Add swizzle layer
    armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);

    assert(layer != nullptr);

    // Connect input to swizzle layer
    input.Connect(layer->GetInputSlot(0));

    // Setup swizzled output
    const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
    layer->GetOutputSlot(0).SetTensorInfo(outInfo);

    return *layer;
}

void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
{
    // Add swizzle layer
    armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
    // Connect swizzled input to layer
    swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
}

armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
{
    // Add deswizzle layer
    armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
    return deswizzleLayer;
}

// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
                                                LayerInputHandle& input,
                                                armnn::IConnectableLayer& firstLayer,
                                                armnn::IConnectableLayer& lastLayer)
{
    SwizzleIn(network, input, firstLayer, 0);
    return DeswizzleOut(network, lastLayer, 0);
}

// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
                                                armnn::IConnectableLayer& layer)
{
    return SwizzleInDeswizzleOut(network, input, layer, layer);
}

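// Checks that the output shape of a concatenation matches the input shapes: the output size along the
// concatenated dimension must equal the sum of the input sizes along it, and all other dimensions must
// agree. Example (illustrative): inputs { 1, 2 } and { 1, 3 } concatenated along dimension 1 must
// produce the output shape { 1, 5 }.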
bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape>& inputShapes,
                               const armnn::TensorShape& outputShape,
                               uint32_t concatDim)
{
    // Validate the output shape is correct given the input shapes (which have just been validated)
    unsigned int numDimensions = inputShapes[0].GetNumDimensions();
    if (outputShape.GetNumDimensions() != numDimensions)
    {
        return Fail("%s: Output shape has wrong number of dimensions", __func__);
    }

    unsigned int outputSizeAlongConcatenatedDimension = 0;
    for (unsigned int i = 0; i < inputShapes.size(); i++)
    {
        outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
    }

    for (unsigned int i = 0; i < numDimensions; ++i)
    {
        if (i == concatDim)
        {
            if (outputShape[i] != outputSizeAlongConcatenatedDimension)
            {
                return Fail(
                    "%s: Invalid output shape for dimension %d (%d != %d)",
                    __func__,
                    i,
                    outputShape[i],
                    outputSizeAlongConcatenatedDimension);
            }
        }
        else
        {
            if (outputShape[i] != inputShapes[0][i])
            {
                return Fail("%s: Invalid output shape", __func__);
            }
        }
    }

    return true;
}

bool RequiresReshape(armnn::TensorShape& inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}

template<typename OSlot>
armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, OSlot& inputLayer,
                                          armnn::TensorInfo reshapeInfo)
{
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
    assert(reshapeLayer != nullptr);

    // Attach the input layer to the reshape layer
    inputLayer.Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);

    return *reshapeLayer;
}

void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i = 0; i < nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}

void CreatePermutationParameters(const unsigned int numberOfDimensions,
                                 int32_t& concatDimension,
                                 std::pair<armnn::PermutationVector, armnn::PermutationVector>& permutationPair)
{
    assert(numberOfDimensions >= 3);

    // ArmNN uses Compute Library subtensors to perform concatenation.
    // This only works when concatenating along dimension 0 or 1 for a 4-D tensor,
    // or along dimension 0 for a 3-D tensor.
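    //
    // When the requested axis is not directly supported, the inputs are permuted so the concatenation
    // happens along a supported axis, and the result is permuted back by the second half of the pair.
    // Example (illustrative): concatenating 4-D NHWC tensors along the channel axis (3) becomes a
    // concatenation along axis 1 after applying NHWCToArmNN, undone afterwards by ArmNNToNHWC.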
    if (numberOfDimensions == 4)
    {
        if (concatDimension == 3)
        {
            concatDimension = 1;
            permutationPair = std::make_pair(NHWCToArmNN, ArmNNToNHWC);
        }
        else if (concatDimension == 2)
        {
            concatDimension = 1;
            permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
        }
        else
        {
            permutationPair = std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
        }
    }
    else if (numberOfDimensions == 3)
    {
        if (concatDimension == 2)
        {
            concatDimension = 0;
            permutationPair = std::make_pair(RotateTensorRight, RotateTensorLeft);
        }
        else if (concatDimension == 1)
        {
            concatDimension = 0;
            permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        }
        else
        {
            permutationPair = std::make_pair(IdentityPermutation3D, IdentityPermutation3D);
        }
    }
}

} // anonymous namespace

namespace armnn_driver
{

class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false) : m_Optional(optional) {}

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes   Number of bytes for the tensor data.
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings)
    {
        boost::ignore_unused(numBytes);
        assert(tensorInfo.GetNumBytes() == numBytes);

        const bool needsSwizzling = (mappings.GetSize() > 0);
        if (needsSwizzling)
        {
            m_SwizzledTensorData.resize(tensorInfo.GetNumBytes());
            SwizzleAndroidNn4dTensorToArmNn(tensorInfo, valueStart, m_SwizzledTensorData.data(), mappings);

            m_ConstTensor = armnn::ConstTensor(armnnUtils::Permuted(tensorInfo, mappings), m_SwizzledTensorData.data());
        }
        else
        {
            m_ConstTensor = armnn::ConstTensor(tensorInfo, valueStart);
        }
    }

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    bool IsValid() const { return m_ConstTensor.GetMemoryArea() != nullptr; }
    bool IsOptional() const { return m_Optional; }
    const armnn::ConstTensor& GetConstTensor() const { return m_ConstTensor; }
    const armnn::ConstTensor* GetConstTensorPtr() const
    {
        if (IsValid() && m_ConstTensor.GetNumElements() > 0)
        {
            return &m_ConstTensor;
        }
        // tensor is either invalid, or has no elements (indicating an optional tensor that was not provided)
        return nullptr;
    }

private:
    armnn::ConstTensor m_ConstTensor;
    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;
    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};

template<typename HalVersion>
ModelToINetworkConverter<HalVersion>::ModelToINetworkConverter(armnn::Compute compute,
    const HalModel& model,
    const std::set<unsigned int>& forcedUnsupportedOperations)
    : m_Compute(compute)
    , m_Model(model)
    , m_ForcedUnsupportedOperations(forcedUnsupportedOperations)
    , m_Network(nullptr, nullptr)
    , m_ConversionResult(ConversionResult::Success)
{
    try
    {
        Convert();
    }
    catch (armnn::Exception& e)
    {
        m_ConversionResult = ConversionResult::UnsupportedFeature;
        ALOGE("%s: Unexpected exception: %s", __func__, e.what());
        assert(false);
    }
}

template<typename HalVersion>
void ModelToINetworkConverter<HalVersion>::Convert()
{
    using HalModel = typename HalVersion::Model;
    ALOGV("ModelToINetworkConverter::Convert(): %s", GetModelSummary<HalModel>(m_Model).c_str());

    // map the memory pool into shared pointers
    m_MemPools.clear();
    if (!setRunTimePoolInfosFromHidlMemories(&m_MemPools, m_Model.pools))
    {
        Fail("%s: Setting of run time pool infos from Hidl Memories has failed.", __func__);
        m_ConversionResult = ConversionResult::ErrorMappingPools;
        return;
    }

    uint32_t totalPoolSize = 0;
    for (auto&& pool : m_Model.pools)
    {
        totalPoolSize += pool.size();
    }

    // Create armnn::INetwork
    m_Network = armnn::INetwork::Create();

    // add operations to it
    // track which layer outputs each operand
    m_OutputSlotForOperand = std::vector<armnn::IOutputSlot*>(m_Model.operands.size(), nullptr);

    try
    {
        for (uint32_t i = 0; i < m_Model.inputIndexes.size(); i++)
        {
            // inputs in android nn are represented by operands
            uint32_t inputIndex = m_Model.inputIndexes[i];
            const Operand& operand = m_Model.operands[inputIndex];
            const armnn::TensorInfo& tensor = GetTensorInfoForOperand(operand);
            armnn::IConnectableLayer* layer = m_Network->AddInputLayer(i);

            armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
            outputSlot.SetTensorInfo(GetTensorInfoForOperand(operand));

            // store for later layers
            m_OutputSlotForOperand[inputIndex] = &outputSlot;
        }
    }
    catch (UnsupportedOperand& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        m_ConversionResult = ConversionResult::UnsupportedFeature;
    }
    catch (const armnn::InvalidArgumentException& e)
    {
        Fail("%s: Failed to convert input operand to TensorShape: %s", __func__, e.what());
        m_ConversionResult = ConversionResult::UnsupportedFeature;
    }

    for (uint32_t operationIdx = 0; operationIdx < m_Model.operations.size(); operationIdx++)
    {
        const auto& operation = m_Model.operations[operationIdx];

        bool ok = true;
        if (m_ForcedUnsupportedOperations.find(operationIdx) != m_ForcedUnsupportedOperations.end())
        {
            Fail("%s: Operation at index %i has been forced to be unsupported.", __func__, operationIdx);
            ok = false;
        }

        if (ok)
        {
            try
            {
                ok = ConvertOperation(operation);
            }
            catch (UnsupportedOperand& e)
            {
                Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
                ok = false;
            }
            catch (const armnn::InvalidArgumentException& e)
            {
                Fail("%s: Failed to convert operation in %s", __func__, e.what());
                ok = false;
            }
        }

        // Store whether this operation was successfully converted.
        m_OperationSupported.emplace(operationIdx, ok);

        // Any single operation failing will fail the entire conversion.
        // We still need to continue and check the other ones.
        if (!ok)
        {
            m_ConversionResult = ConversionResult::UnsupportedFeature;
        }
    }
    try
    {
        if (m_ConversionResult == ConversionResult::Success)
        {
            for (uint32_t i = 0; i < m_Model.outputIndexes.size(); i++)
            {
                // outputs in android nn are represented by operands
                uint32_t outputIndex = m_Model.outputIndexes[i];
                const Operand& operand = m_Model.operands[outputIndex];
                const armnn::TensorInfo& tensor = GetTensorInfoForOperand(operand);
                armnn::IConnectableLayer* layer = m_Network->AddOutputLayer(i);

                assert(m_OutputSlotForOperand[outputIndex]);
                m_OutputSlotForOperand[outputIndex]->Connect(layer->GetInputSlot(0));
            }
        }
    }
    catch (const armnn::InvalidArgumentException& e)
    {
        Fail("%s: Failed to convert output operand to TensorShape: %s", __func__, e.what());
        m_ConversionResult = ConversionResult::UnsupportedFeature;
    }
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertOperation(const neuralnetworks::V1_0::Operation& operation)
{
    switch (operation.type)
    {
        case neuralnetworks::V1_0::OperationType::ADD:
            return ConvertAdd(operation);
        case neuralnetworks::V1_0::OperationType::AVERAGE_POOL_2D:
            return ConvertAveragePool2d(operation);
        case neuralnetworks::V1_0::OperationType::CONCATENATION:
            return ConvertConcatenation(operation);
        case neuralnetworks::V1_0::OperationType::CONV_2D:
            return ConvertConv2d(operation);
        case neuralnetworks::V1_0::OperationType::DEPTHWISE_CONV_2D:
            return ConvertDepthwiseConv2d(operation);
        case neuralnetworks::V1_0::OperationType::FLOOR:
            return ConvertFloor(operation);
        case neuralnetworks::V1_0::OperationType::FULLY_CONNECTED:
            return ConvertFullyConnected(operation);
        case neuralnetworks::V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
            return ConvertLocalResponseNormalization(operation);
        case neuralnetworks::V1_0::OperationType::LOGISTIC:
            return ConvertLogistic(operation);
        case neuralnetworks::V1_0::OperationType::LSTM:
            return ConvertLstm(operation);
        case neuralnetworks::V1_0::OperationType::L2_NORMALIZATION:
            return ConvertL2Normalization(operation);
        case neuralnetworks::V1_0::OperationType::L2_POOL_2D:
            return ConvertL2Pool2d(operation);
        case neuralnetworks::V1_0::OperationType::MAX_POOL_2D:
            return ConvertMaxPool2d(operation);
        case neuralnetworks::V1_0::OperationType::MUL:
            return ConvertMul(operation);
        case neuralnetworks::V1_0::OperationType::RELU:
            return ConvertReLu(operation);
        case neuralnetworks::V1_0::OperationType::RELU1:
            return ConvertReLu1(operation);
        case neuralnetworks::V1_0::OperationType::RELU6:
            return ConvertReLu6(operation);
        case neuralnetworks::V1_0::OperationType::SOFTMAX:
            return ConvertSoftmax(operation);
        case neuralnetworks::V1_0::OperationType::TANH:
            return ConvertTanH(operation);
        case neuralnetworks::V1_0::OperationType::RESHAPE:
            return ConvertReshape(operation);
        case neuralnetworks::V1_0::OperationType::RESIZE_BILINEAR:
            return ConvertResizeBilinear(operation);
        default:
            return Fail("%s: Operation type %s not supported in ArmnnDriver",
                        __func__, toString(operation.type).c_str());
    }
}

#if defined(ARMNN_ANDROID_NN_V1_1)
template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertOperation(const neuralnetworks::V1_1::Operation& operation)
{
    if (compliantWithV1_0(operation))
    {
        neuralnetworks::V1_0::Operation v1Operation = convertToV1_0(operation);
        return ConvertOperation(v1Operation);
    }
    else
    {
        switch (operation.type)
        {
            case neuralnetworks::V1_1::OperationType::DIV:
                return ConvertDiv(operation);
            default:
                return Fail("%s: Operation type %s not supported in ArmnnDriver",
                            __func__, toString(operation.type).c_str());
        }
    }
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertDiv(const neuralnetworks::V1_1::Operation& operation)
{
    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0);
    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2
    // and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation(operation, 2, activationFunction))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);

    if (!IsLayerSupported(__func__,
                          armnn::IsDivisionSupported,
                          m_Compute,
                          input0.GetTensorInfo(),
                          input1.GetTensorInfo(),
                          outInfo))
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = m_Network->AddDivisionLayer();
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer);

    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();

    if (endLayer)
    {
        BroadcastTensor(input0, input1, startLayer, *m_Network);
        return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer);
    }

    return Fail("%s: ProcessActivation failed", __func__);
}
#endif

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertAdd(const neuralnetworks::V1_0::Operation& operation)
{
    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0);
    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2
    // and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation(operation, 2, activationFunction))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);

    if (!IsLayerSupported(__func__,
                          armnn::IsAdditionSupported,
                          m_Compute,
                          input0.GetTensorInfo(),
                          input1.GetTensorInfo(),
                          outInfo))
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = m_Network->AddAdditionLayer();
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer);

    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();

    if (endLayer != nullptr)
    {
        BroadcastTensor(input0, input1, startLayer, *m_Network);
        return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertAveragePool2d(const neuralnetworks::V1_0::Operation& operation)
{
    return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Average);
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertConcatenation(const neuralnetworks::V1_0::Operation& operation)
{
    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar(operation, numInputTensors, OperandType::INT32, concatDim))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand(operation, 0);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    // https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
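    // Example (illustrative): for rank-4 inputs, concatDim = -1 resolves to axis 3 (the channel axis).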
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const Operand* const operand = GetInputOperand(operation, i);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle(operation, i);

        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                *m_Network,
                operandInputHandle,
                reshapeInfo
            );

            // Point to the reshape operation rather than the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    assert(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0], outputShape[1]});
        }
    }

    // Get the pair of permutations required for the concatenation
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
        std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    CreatePermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
    outputInfo.SetShape(outputShape);

    // this is a no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    SwizzleInputs(*m_Network, inputHandles, inputShapes, permutationPair.first);

    // Create an armnn merger layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor mergerDescriptor;
    try
    {
        // The merger descriptor is always created across the only supported concat
        // dimension, which is 0 or 1
        mergerDescriptor =
            armnn::CreateMergerDescriptorForConcatenation(
                inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (const armnn::Exception& error)
    {
        return Fail("%s: Error preparing merger descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0 or 1
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
        [](const LayerInputHandle& h) -> const armnn::TensorInfo* { return &h.GetTensorInfo(); });
    if (!IsLayerSupported(__func__,
                          armnn::IsMergerSupported,
                          m_Compute,
                          inputTensorInfos,
                          mergerDescriptor))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = m_Network->AddMergerLayer(mergerDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    // Add a permutation layer and connect the output to it, the permutation becomes the output layer
    armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*m_Network,
                                                               layer->GetOutputSlot(0),
                                                               permutationPair.second);
    layer = &deswizzleLayer;

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2],
                                                          afterConcatInfo.GetShape()[3] }));
        }

        layer = &AddReshapeLayer(
            *m_Network,
            layer->GetOutputSlot(0),
            afterConcatInfo
        );
    }

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer);
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertConv2d(const neuralnetworks::V1_0::Operation& operation)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const armnn::TensorInfo swizzledInputInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1, NHWCToArmNN);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), swizzledInputInfo);

    armnn::Convolution2dDescriptor desc;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop) ||
            !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom) ||
            !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX) ||
            !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY) ||
            !GetInputActivationFunction(operation, 9, activation))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        android::nn::PaddingScheme paddingScheme;

        if (!GetInputPaddingScheme(operation, 3, paddingScheme) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY) ||
            !GetInputActivationFunction(operation, 6, activation))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = swizzledInputInfo.GetShape()[3];
        const uint32_t inputY = swizzledInputInfo.GetShape()[2];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    auto biases = boost::make_optional(bias.GetInfo());

    if (!IsLayerSupported(__func__,
                          armnn::IsConvolution2dSupported,
                          m_Compute,
                          swizzledInputInfo,
                          swizzledOutputInfo,
                          desc,
                          weights.GetInfo(),
                          biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer = m_Network->AddConvolution2dLayer(desc, weights, bias);
    armnn::IConnectableLayer* endLayer = ProcessActivation(swizzledOutputInfo, activation, startLayer);

    if (endLayer != nullptr)
    {
        armnn::IConnectableLayer& outSwizzleLayer = SwizzleInDeswizzleOut(*m_Network, input, *startLayer, *endLayer);
        return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertDepthwiseConv2d(const neuralnetworks::V1_0::Operation& operation)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const armnn::TensorInfo swizzledInputInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);

    // ArmNN does not currently support non-fixed weights or bias

    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    // but in ArmNN it needs to be [ M, I, H, W ]
    const Operand* weightsOperand = GetInputOperand(operation, 1);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }

    // Reinterpret weight data as [ H, W, I, M ]
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1], weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });
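    // Example (illustrative): for an input of shape [ N, H, W, 8 ] and AndroidNN weights of shape
    // [ 1, 3, 3, 16 ], the depth multiplier M is 16 / 8 = 2, so the weights are reinterpreted as
    // [ 3, 3, 8, 2 ] and then swizzled below to [ 2, 8, 3, 3 ].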

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };
    ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1, HWIMToMIHW, &weightsShape);

    // Bias is a 1D tensor
    ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), swizzledInputInfo);

    armnn::DepthwiseConvolution2dDescriptor desc;
    ActivationFn activation;

    if (operation.inputs.size() == 11)
    {
        if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop) ||
            !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom) ||
            !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX) ||
            !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY) ||
            !GetInputActivationFunction(operation, 10, activation))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 8)
    {
        android::nn::PaddingScheme paddingScheme;

        if (!GetInputPaddingScheme(operation, 3, paddingScheme) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY) ||
            !GetInputActivationFunction(operation, 7, activation))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = swizzledInputInfo.GetShape()[3];
        const uint32_t inputY = swizzledInputInfo.GetShape()[2];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    auto biases = boost::make_optional(bias.GetInfo());

    if (!IsLayerSupported(__func__,
                          armnn::IsDepthwiseConvolutionSupported,
                          m_Compute,
                          swizzledInputInfo,
                          swizzledOutputInfo,
                          desc,
                          weights.GetInfo(),
                          biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer = m_Network->AddDepthwiseConvolution2dLayer(desc, weights, bias);
    armnn::IConnectableLayer* endLayer = ProcessActivation(swizzledOutputInfo, activation, startLayer);

    if (endLayer != nullptr)
    {
        armnn::IConnectableLayer& outSwizzleLayer = SwizzleInDeswizzleOut(*m_Network, input, *startLayer, *endLayer);
        return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertFloor(const neuralnetworks::V1_0::Operation& operation)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand(operation, 0);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

    if (!IsLayerSupported(__func__,
                          armnn::IsFloorSupported,
                          m_Compute,
                          input.GetTensorInfo(),
                          GetTensorInfoForOperand(*outputOperand)))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = m_Network->AddFloorLayer();
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer);
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertFullyConnected(const neuralnetworks::V1_0::Operation& operation)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1); // 2D
    ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2);    // 1D

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();

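    // If the input has more than two dimensions it is flattened to a 2-D tensor compatible with the
    // weights. Example (illustrative): an input of shape [ 1, 2, 2, 8 ] with weights of shape [ N, 32 ]
    // gives dim0 = 1 and dim1 = 2 * 2 * 8 = 32, so divisor = 32 / 32 = 1 and the input is reshaped to [ 1, 32 ].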
    armnn::TensorInfo reshapedInfo = inputInfo;
    if (inputInfo.GetNumDimensions() > 2U)
    {
        unsigned int dim0 = inputInfo.GetShape()[0];
        unsigned int dim1 = inputInfo.GetShape()[1];

        for (unsigned int i = 2U; i < inputInfo.GetNumDimensions(); ++i)
        {
            dim1 *= inputInfo.GetShape()[i];
        }

        unsigned int divisor = weights.GetInfo().GetShape()[1] / dim1;
        if (dim0 % divisor != 0)
        {
            return Fail("%s: Failed to deduce tensor shape", __func__);
        }

        reshapedInfo.SetShape(armnn::TensorShape({dim0 / divisor, dim1 * divisor}));
    }

    // ensure the bias quantization scale is within 1% of inputScale * weightScale (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction(operation, 3, activationFunction))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    if (!IsLayerSupported(__func__,
                          armnn::IsFullyConnectedSupported,
                          m_Compute,
                          inputInfo,
                          outputInfo,
                          weights.GetInfo(),
                          bias.GetInfo(),
                          desc))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer = m_Network->AddFullyConnectedLayer(desc, weights, bias);
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertLocalResponseNormalization(
    const neuralnetworks::V1_0::Operation& operation)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const armnn::TensorInfo swizzledInputInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);

    armnn::NormalizationDescriptor descriptor;

    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
    descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;

    if (!input.IsValid() ||
        !GetInputScalar(operation, 1, OperandType::INT32, descriptor.m_NormSize) ||
        !GetInputFloat32(operation, 2, descriptor.m_K) ||
        !GetInputFloat32(operation, 3, descriptor.m_Alpha) ||
        !GetInputFloat32(operation, 4, descriptor.m_Beta))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // ArmNN expects normSize to be the full size of the normalization
    // window rather than the radius as in AndroidNN.
    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);
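    // Example (illustrative): an AndroidNN radius of 2 becomes an ArmNN window size of 1 + 2*2 = 5.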
1407
1408 if (!IsLayerSupported(__func__,
1409 armnn::IsNormalizationSupported,
1410 m_Compute,
1411 swizzledInputInfo,
1412 swizzledOutputInfo,
1413 descriptor))
1414 {
1415 return false;
1416 }
1417
1418
1419 armnn::IConnectableLayer* layer = m_Network->AddNormalizationLayer(descriptor);
1420 assert(layer != nullptr);
1421 layer->GetOutputSlot(0).SetTensorInfo(swizzledOutputInfo);
1422
1423 armnn::IConnectableLayer& outSwizzleLayer = SwizzleInDeswizzleOut(*m_Network, input, *layer);
1424
1425 return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer);
1426}
1427
kevmay01bc5f7842018-08-30 12:34:39 +01001428template<typename HalVersion>
1429bool ModelToINetworkConverter<HalVersion>::ConvertLogistic(const neuralnetworks::V1_0::Operation& operation)
telsoa015307bc12018-03-09 13:51:08 +00001430{
1431 armnn::ActivationDescriptor desc;
surmeh0149b9e102018-05-17 14:11:25 +01001432 desc.m_Function = armnn::ActivationFunction::Sigmoid;
telsoa015307bc12018-03-09 13:51:08 +00001433
1434 return ConvertToActivation(operation, __func__, desc);
1435}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertL2Normalization(const neuralnetworks::V1_0::Operation& operation)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const armnn::TensorInfo swizzledInputInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);

    if (!IsLayerSupported(__func__,
                          armnn::IsL2NormalizationSupported,
                          m_Compute,
                          swizzledInputInfo,
                          swizzledOutputInfo))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = m_Network->AddL2NormalizationLayer();
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(swizzledOutputInfo);

    armnn::IConnectableLayer& outSwizzleLayer = SwizzleInDeswizzleOut(*m_Network, input, *layer);

    return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer);
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertL2Pool2d(const neuralnetworks::V1_0::Operation& operation)
{
    return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::L2);
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertMaxPool2d(const neuralnetworks::V1_0::Operation& operation)
{
    return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Max);
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertMul(const neuralnetworks::V1_0::Operation& operation)
{
    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0);
    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input at index 2, and it is optional.
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation(operation, 2, activationFunction))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0);

    if (outputOperand == nullptr)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);

    if (!IsLayerSupported(__func__,
                          armnn::IsMultiplicationSupported,
                          m_Compute,
                          input0.GetTensorInfo(),
                          input1.GetTensorInfo(),
                          outInfo))
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = m_Network->AddMultiplicationLayer();
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer);

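    // Note: BroadcastTensor (below) connects both inputs to the multiplication layer and, when
    // the ranks differ, prepends a reshape so the smaller input broadcasts correctly. For example
    // (illustrative shapes): multiplying [1, 2, 2, 4] by [4] reshapes the latter to [1, 1, 1, 4].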
    if (endLayer != nullptr)
    {
        BroadcastTensor(input0, input1, startLayer, *m_Network);
        return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertReLu(const neuralnetworks::V1_0::Operation& operation)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::ReLu;

    return ConvertToActivation(operation, __func__, desc);
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertReLu1(const neuralnetworks::V1_0::Operation& operation)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
    desc.m_A = 1.0f;
    desc.m_B = -1.0f;
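    // BoundedReLu clamps its input to the range [m_B, m_A], so RELU1 computes
    // f(x) = min(1.0, max(-1.0, x)). ConvertReLu6 below sets only m_A, keeping
    // the default lower bound of 0.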

    return ConvertToActivation(operation, __func__, desc);
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertReLu6(const neuralnetworks::V1_0::Operation& operation)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
    desc.m_A = 6.0f;

    return ConvertToActivation(operation, __func__, desc);
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertSoftmax(const neuralnetworks::V1_0::Operation& operation)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);

    armnn::SoftmaxDescriptor desc;
    if (!GetInputFloat32(operation, 1, desc.m_Beta))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }
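    // For reference: beta scales the logits before normalization, i.e.
    // softmax(x_i) = exp(beta * x_i) / sum_j exp(beta * x_j), so beta = 1.0f gives the standard softmax.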

    if (!IsLayerSupported(__func__,
                          armnn::IsSoftmaxSupported,
                          m_Compute,
                          input.GetTensorInfo(),
                          outInfo,
                          desc))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = m_Network->AddSoftmaxLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer);
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertTanH(const neuralnetworks::V1_0::Operation& operation)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::TanH;
    desc.m_A = 1.0f; // Android NN does not support TanH parameters
    desc.m_B = 1.0f; // set to 1.0f for unity scaling

    return ConvertToActivation(operation, __func__, desc);
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertReshape(const neuralnetworks::V1_0::Operation& operation)
{
    const Operand* inputOperand = GetInputOperand(operation, 0);
    const Operand* requestedShapeOperand = GetInputOperand(operation, 1);
    const Operand* outputOperand = GetOutputOperand(operation, 0);

    if (inputOperand == nullptr
        || requestedShapeOperand == nullptr
        || outputOperand == nullptr)
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (requestedShapeOperand->dimensions.size() != 1)
    {
        return Fail("%s: Input 1 expected to be one-dimensional (found %zu dimensions)",
                    __func__, requestedShapeOperand->dimensions.size());
    }

    std::vector<int32_t> targetDimensions;
    if (!GetTensorInt32Values(*requestedShapeOperand, targetDimensions))
    {
        return Fail("%s: Could not read values of input 1", __func__);
    }

    const Shape inputOperandShape = GetOperandShape(*inputOperand);

    Shape requestedShape;
    // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN-provided
    // utility function that resolves these values into a fully specified tensor shape.
    if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
    {
        return Fail("%s: Failed to resolve the requested shape", __func__);
    }
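    // Worked example (illustrative): an input of shape [2, 3, 4] with a requested shape of
    // [4, -1] resolves the -1 to 24 / 4 = 6, yielding a final shape of [4, 6].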

    const Shape outputOperandShape = GetOperandShape(*outputOperand);
    if (!SameShape(requestedShape, outputOperandShape))
    {
        return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
    }

    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    if (!IsLayerSupported(__func__,
                          armnn::IsReshapeSupported,
                          m_Compute,
                          input.GetTensorInfo()))
    {
        return false;
    }

    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
                                                         requestedShape.dimensions.data());

    armnn::IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDescriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer);
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertResizeBilinear(const neuralnetworks::V1_0::Operation& operation)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const armnn::TensorInfo swizzledInputInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);

    if (!IsLayerSupported(__func__,
                          armnn::IsResizeBilinearSupported,
                          m_Compute,
                          swizzledInputInfo))
    {
        return false;
    }

    armnn::ResizeBilinearDescriptor desc;

    if (   !GetInputScalar(operation, 1, OperandType::INT32, desc.m_TargetHeight)
        || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_TargetWidth))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::IConnectableLayer* layer = m_Network->AddResizeBilinearLayer(desc);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(swizzledOutputInfo);

    armnn::IConnectableLayer& outSwizzleLayer = SwizzleInDeswizzleOut(*m_Network, input, *layer);

    return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer);
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertLstm(const neuralnetworks::V1_0::Operation& operation)
{
    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0: input", __func__);
    }
    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    LayerInputHandle outputStateIn = ConvertToLayerInputHandle(operation, 18);
    if (!outputStateIn.IsValid())
    {
        return Fail("%s: Could not read input 18: outputStateIn", __func__);
    }
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    LayerInputHandle cellStateIn = ConvertToLayerInputHandle(operation, 19);
    if (!cellStateIn.IsValid())
    {
        return Fail("%s: Could not read input 19: cellStateIn", __func__);
    }

    // Get the mandatory input tensors:
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToForgetWeightsPin = ConvertOperationInputToConstTensorPin(operation, 2);
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
    const ConstTensorPin inputToCellWeightsPin = ConvertOperationInputToConstTensorPin(operation, 3);
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToOutputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 4);
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToForgetWeightsPin = ConvertOperationInputToConstTensorPin(operation, 6);
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToCellWeightsPin = ConvertOperationInputToConstTensorPin(operation, 7);
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToOutputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 8);
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin forgetGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 13);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellBiasPin = ConvertOperationInputToConstTensorPin(operation, 14);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin outputGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 15);

    if (!inputToForgetWeightsPin.IsValid() ||
        !inputToCellWeightsPin.IsValid() ||
        !inputToOutputWeightsPin.IsValid() ||
        !recurrentToForgetWeightsPin.IsValid() ||
        !recurrentToCellWeightsPin.IsValid() ||
        !recurrentToOutputWeightsPin.IsValid() ||
        !forgetGateBiasPin.IsValid() ||
        !cellBiasPin.IsValid() ||
        !outputGateBiasPin.IsValid())
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the optional input tensors:
    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    const ConstTensorPin inputToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 1);
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    const ConstTensorPin recurrentToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 5);
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 9);
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToForgetWeightsPin = ConvertOperationInputToConstTensorPin(operation, 10);
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToOutputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 11);
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin inputGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 12);
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    const ConstTensorPin projectionWeightsPin = ConvertOperationInputToConstTensorPin(operation, 16);
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    const ConstTensorPin projectionBiasPin = ConvertOperationInputToConstTensorPin(operation, 17);

    if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
        (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
        (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
        (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
        (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
        (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
        (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
        (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the mandatory input scalars (actually 1-D tensors of size 1):
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    ActivationFn activation;
    float cellClip;
    float projClip;
    if (!GetInputActivationFunctionFromTensor(operation, 20, activation) ||
        !GetInputScalar(operation, 21, OperandType::FLOAT32, cellClip) ||
        !GetInputScalar(operation, 22, OperandType::FLOAT32, projClip))
    {
        return Fail("%s: Operation has invalid scalar inputs", __func__);
    }

    // Outputs:
    // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
    //     with CIFG, or [batch_size, num_units * 3] without CIFG.
    const Operand* scratchBuffer = GetOutputOperand(operation, 0);
    if (!scratchBuffer)
    {
        return Fail("%s: Could not read output 0: scratchBuffer", __func__);
    }
    // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    const Operand* outputStateOut = GetOutputOperand(operation, 1);
    if (!outputStateOut)
    {
        return Fail("%s: Could not read output 1: outputStateOut", __func__);
    }
    // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    const Operand* cellStateOut = GetOutputOperand(operation, 2);
    if (!cellStateOut)
    {
        return Fail("%s: Could not read output 2: cellStateOut", __func__);
    }
    // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //     effectively the same as the current “output state (out)” value.
    const Operand* output = GetOutputOperand(operation, 3);
    if (!output)
    {
        return Fail("%s: Could not read output 3: output", __func__);
    }

    // Set the params structure for the AddLstmLayer call
    armnn::LstmInputParams params;
    params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
    params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
    params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
    params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
    params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
    params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
    params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
    params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
    params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
    params.m_CellBias = cellBiasPin.GetConstTensorPtr();
    params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
    params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
    params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();

    // Set the layer descriptor
    armnn::LstmDescriptor desc;
    desc.m_ActivationFunc = activation;
    desc.m_ClippingThresCell = cellClip;
    desc.m_ClippingThresProj = projClip;
    desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
                          params.m_RecurrentToInputWeights == nullptr ||
                          params.m_InputGateBias == nullptr);
    desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
                              params.m_CellToOutputWeights != nullptr);
    desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);

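    // Background (informal): CIFG ("coupled input and forget gate") drops the separate input
    // gate and derives it as one minus the forget gate, which is why the three input-gate
    // tensors must be absent together for CIFG to apply. Peephole connections let the gates
    // read the cell state directly, and projection applies an extra linear mapping (optionally
    // with bias) to the output state.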
    // Validate the optional input groups
    if (desc.m_CifgEnabled &&
        (params.m_InputToInputWeights != nullptr ||
         params.m_RecurrentToInputWeights != nullptr ||
         params.m_InputGateBias != nullptr))
    {
        return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
                    " and input gate bias must be provided", __func__);
    }

    if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
    {
        return Fail("%s: projection bias should not be provided without projection weights", __func__);
    }

    if (desc.m_PeepholeEnabled &&
        (params.m_CellToForgetWeights == nullptr ||
         params.m_CellToOutputWeights == nullptr ||
         (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
    {
        return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
                    " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
    }

    // Check if the layer is supported
    // Inputs
    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
    const armnn::TensorInfo& cellStateInInfo = cellStateIn.GetTensorInfo();

    // Outputs
    const armnn::TensorInfo& scratchBufferInfo = GetTensorInfoForOperand(*scratchBuffer);
    const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
    const armnn::TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Basic parameters
    const armnn::TensorInfo& inputToForgetWeights = params.m_InputToForgetWeights->GetInfo();
    const armnn::TensorInfo& inputToCellWeights = params.m_InputToCellWeights->GetInfo();
    const armnn::TensorInfo& inputToOutputWeights = params.m_InputToOutputWeights->GetInfo();
    const armnn::TensorInfo& recurrentToForgetWeights = params.m_RecurrentToForgetWeights->GetInfo();
    const armnn::TensorInfo& recurrentToCellWeights = params.m_RecurrentToCellWeights->GetInfo();
    const armnn::TensorInfo& recurrentToOutputWeights = params.m_RecurrentToOutputWeights->GetInfo();
    const armnn::TensorInfo& forgetGateBias = params.m_ForgetGateBias->GetInfo();
    const armnn::TensorInfo& cellBias = params.m_CellBias->GetInfo();
    const armnn::TensorInfo& outputGateBias = params.m_OutputGateBias->GetInfo();

    // Optional parameters
    const armnn::TensorInfo* inputToInputWeights = nullptr;
    const armnn::TensorInfo* recurrentToInputWeights = nullptr;
    const armnn::TensorInfo* cellToInputWeights = nullptr;
    const armnn::TensorInfo* inputGateBias = nullptr;
    const armnn::TensorInfo* projectionWeights = nullptr;
    const armnn::TensorInfo* projectionBias = nullptr;
    const armnn::TensorInfo* cellToForgetWeights = nullptr;
    const armnn::TensorInfo* cellToOutputWeights = nullptr;

    if (!desc.m_CifgEnabled)
    {
        inputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
        recurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
        if (params.m_CellToInputWeights != nullptr)
        {
            cellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
        }
        inputGateBias = &(params.m_InputGateBias->GetInfo());
    }

    if (desc.m_ProjectionEnabled)
    {
        projectionWeights = &(params.m_ProjectionWeights->GetInfo());
        if (params.m_ProjectionBias != nullptr)
        {
            projectionBias = &(params.m_ProjectionBias->GetInfo());
        }
    }

    if (desc.m_PeepholeEnabled)
    {
        cellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
        cellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
    }

    if (!IsLayerSupported(__func__,
                          armnn::IsLstmSupported,
                          m_Compute,
                          inputInfo,
                          outputStateInInfo,
                          cellStateInInfo,
                          scratchBufferInfo,
                          outputStateOutInfo,
                          cellStateOutInfo,
                          outputInfo,
                          desc,
                          inputToForgetWeights,
                          inputToCellWeights,
                          inputToOutputWeights,
                          recurrentToForgetWeights,
                          recurrentToCellWeights,
                          recurrentToOutputWeights,
                          forgetGateBias,
                          cellBias,
                          outputGateBias,
                          inputToInputWeights,
                          recurrentToInputWeights,
                          cellToInputWeights,
                          inputGateBias,
                          projectionWeights,
                          projectionBias,
                          cellToForgetWeights,
                          cellToOutputWeights))
    {
        return false;
    }

    // Add the layer
    armnn::IConnectableLayer* layer = m_Network->AddLstmLayer(desc, params, "Lstm");

    input.Connect(layer->GetInputSlot(0));
    outputStateIn.Connect(layer->GetInputSlot(1));
    cellStateIn.Connect(layer->GetInputSlot(2));

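    // Output slot mapping, matching the operation's output list documented above:
    // slot 0 -> scratch buffer, slot 1 -> output state (out), slot 2 -> cell state (out), slot 3 -> output.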
    return (SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0) &&
            SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1) &&
            SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2) &&
            SetupAndTrackLayerOutputSlot(operation, 3, *layer, 3));
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertToActivation(const neuralnetworks::V1_0::Operation& operation,
                                                               const char* operationName,
                                                               const armnn::ActivationDescriptor& activationDesc)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0);
    if (!outputOperand)
    {
        return false;
    }
    const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
    if (!IsLayerSupported(__func__,
                          armnn::IsActivationSupported,
                          m_Compute,
                          input.GetTensorInfo(),
                          outInfo,
                          activationDesc))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = m_Network->AddActivationLayer(activationDesc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer);
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertPooling2d(const neuralnetworks::V1_0::Operation& operation,
                                                            const char* operationName,
                                                            armnn::PoolingAlgorithm poolType)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const Operand* output = GetOutputOperand(operation, 0);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const armnn::TensorInfo swizzledInputInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;

    ActivationFn activation;

    if (operation.inputs.size() == 7)
    {
        // One input followed by 6 parameters (implicit padding scheme, stride x, stride y,
        // pool width, pool height, activation type)
        android::nn::PaddingScheme scheme;

        if (   !GetInputPaddingScheme(operation, 1, scheme)
            || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_StrideX)
            || !GetInputScalar(operation, 3, OperandType::INT32, desc.m_StrideY)
            || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PoolWidth)
            || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PoolHeight)
            || !GetInputActivationFunction(operation, 6, activation))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        const unsigned int inputWidth = swizzledInputInfo.GetShape()[3];
        const unsigned int inputHeight = swizzledInputInfo.GetShape()[2];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
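        // Worked example (illustrative): with SAME padding, inputWidth = 5, poolWidth = 3 and
        // strideX = 2, the output width is ceil(5 / 2) = 3, so the total horizontal padding is
        // (3 - 1) * 2 + 3 - 5 = 2, split as padLeft = 1 and padRight = 1.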
    }
    else
    {
        // One input followed by 9 parameters (explicit padding left/right/top/bottom, stride x,
        // stride y, pool width, pool height, activation type)
        if (   !GetInputScalar(operation, 1, OperandType::INT32, desc.m_PadLeft)
            || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_PadRight)
            || !GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadTop)
            || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadBottom)
            || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideX)
            || !GetInputScalar(operation, 6, OperandType::INT32, desc.m_StrideY)
            || !GetInputScalar(operation, 7, OperandType::INT32, desc.m_PoolWidth)
            || !GetInputScalar(operation, 8, OperandType::INT32, desc.m_PoolHeight)
            || !GetInputActivationFunction(operation, 9, activation))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }
    }

    // ArmNN does not accept a pool size of 1, but the ArmNN driver is expected to cope.
    // That case is mapped to a trivial splitter instead.
    armnn::IConnectableLayer* startLayer = nullptr;
    if (desc.m_PoolWidth != 1 || desc.m_PoolHeight != 1)
    {
        if (!IsLayerSupported(__func__,
                              armnn::IsPooling2dSupported,
                              m_Compute,
                              swizzledInputInfo,
                              swizzledOutputInfo,
                              desc))
        {
            return false;
        }

        startLayer = m_Network->AddPooling2dLayer(desc);
    }
    else
    {
        const unsigned int numDims = swizzledOutputInfo.GetNumDimensions();

        armnn::ViewsDescriptor viewsDesc(1, numDims);

        for (unsigned int i = 0; i < numDims; ++i)
        {
            viewsDesc.SetViewOriginCoord(0, i, 0);
            viewsDesc.SetViewSize(0, i, swizzledOutputInfo.GetShape()[i]);
        }

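        // A single view that starts at the origin and spans the full output shape copies the
        // input unchanged, so the splitter acts as an identity layer here.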
        if (!IsLayerSupported(__func__,
                              armnn::IsSplitterSupported,
                              m_Compute,
                              swizzledInputInfo,
                              viewsDesc))
        {
            return false;
        }

        startLayer = m_Network->AddSplitterLayer(viewsDesc);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(swizzledOutputInfo, activation, startLayer);

    if (endLayer != nullptr)
    {
        armnn::IConnectableLayer& outSwizzleLayer = SwizzleInDeswizzleOut(*m_Network, input, *startLayer, *endLayer);
        return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", operationName);
    }
}

template<typename HalVersion>
const void* ModelToINetworkConverter<HalVersion>::GetOperandValueReadOnlyAddress(const Operand& operand) const
{
    const void* valueStart = nullptr;

    switch (operand.lifetime)
    {
        case OperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &m_Model.operandValues[operand.location.offset];
            break;
        }
        case OperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, m_MemPools);
            break;
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}

template<typename HalVersion>
template<typename HalOperation>
const Operand* ModelToINetworkConverter<HalVersion>::GetInputOperand(const HalOperation& operation,
                                                                     uint32_t inputIndex) const
{
    if (inputIndex >= operation.inputs.size())
    {
        Fail("%s: invalid input index: %u out of %zu", __func__, inputIndex, operation.inputs.size());
        return nullptr;
    }

    assert(operation.inputs[inputIndex] < m_Model.operands.size()); // Model should have been validated beforehand
    return &m_Model.operands[operation.inputs[inputIndex]];
}

template<typename HalVersion>
template<typename HalOperation>
const Operand* ModelToINetworkConverter<HalVersion>::GetOutputOperand(const HalOperation& operation,
                                                                      uint32_t outputIndex) const
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %u out of %zu", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    assert(operation.outputs[outputIndex] < m_Model.operands.size()); // Model should have been validated beforehand
    return &m_Model.operands[operation.outputs[outputIndex]];
}

template<typename HalVersion>
template<typename HalOperation, typename T>
bool ModelToINetworkConverter<HalVersion>::GetInputScalar(const HalOperation& operation,
                                                          uint32_t inputIndex,
                                                          OperandType type,
                                                          T& outValue) const
{
    const Operand* operand = GetInputOperand(operation, inputIndex);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %u", __func__, inputIndex);
    }

    if (operand->type != type)
    {
        return Fail("%s: unexpected operand type: %s (should be %s)",
                    __func__, toString(operand->type).c_str(), toString(type).c_str());
    }

    if (operand->location.length != sizeof(T))
    {
        return Fail("%s: incorrect operand location length: %u (should be %zu)",
                    __func__, operand->location.length, sizeof(T));
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress(*operand);
    if (!valueAddress)
    {
        return Fail("%s: failed to get address for operand", __func__);
    }

    outValue = *(static_cast<const T*>(valueAddress));
    return true;
}

template<typename HalVersion>
template<typename HalOperation>
bool ModelToINetworkConverter<HalVersion>::GetInputInt32(const HalOperation& operation,
                                                         uint32_t inputIndex,
                                                         int32_t& outValue) const
{
    return GetInputScalar(operation, inputIndex, OperandType::INT32, outValue);
}

template<typename HalVersion>
template<typename HalOperation>
bool ModelToINetworkConverter<HalVersion>::GetInputFloat32(const HalOperation& operation,
                                                           uint32_t inputIndex,
                                                           float& outValue) const
{
    return GetInputScalar(operation, inputIndex, OperandType::FLOAT32, outValue);
}

template<typename HalVersion>
template<typename HalOperation>
bool ModelToINetworkConverter<HalVersion>::GetInputActivationFunctionImpl(const HalOperation& operation,
                                                                          uint32_t inputIndex,
                                                                          OperandType type,
                                                                          ActivationFn& outActivationFunction) const
{
    if (type != OperandType::INT32 && type != OperandType::TENSOR_INT32)
    {
        return Fail("%s: unexpected operand type: %s (should be %s or %s)",
                    __func__,
                    toString(type).c_str(),
                    toString(OperandType::INT32).c_str(),
                    toString(OperandType::TENSOR_INT32).c_str());
    }

    int32_t activationFunctionAsInt;
    if (!GetInputScalar(operation, inputIndex, type, activationFunctionAsInt))
    {
        return Fail("%s: failed to get activation input value", __func__);
    }
    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
    return true;
}

template<typename HalVersion>
template<typename HalOperation>
bool ModelToINetworkConverter<HalVersion>::GetInputActivationFunction(const HalOperation& operation,
                                                                      uint32_t inputIndex,
                                                                      ActivationFn& outActivationFunction) const
{
    return GetInputActivationFunctionImpl(operation, inputIndex, OperandType::INT32, outActivationFunction);
}

template<typename HalVersion>
template<typename HalOperation>
bool ModelToINetworkConverter<HalVersion>::GetInputActivationFunctionFromTensor(
    const HalOperation& operation,
    uint32_t inputIndex,
    ActivationFn& outActivationFunction) const
{
    // This only accepts a 1-D tensor of size 1
    return GetInputActivationFunctionImpl(operation, inputIndex, OperandType::INT32, outActivationFunction);
}

template<typename HalVersion>
template<typename HalOperation>
bool ModelToINetworkConverter<HalVersion>::GetOptionalInputActivation(const HalOperation& operation,
                                                                      uint32_t inputIndex,
                                                                      ActivationFn& activationFunction) const
{
    if (operation.inputs.size() <= inputIndex)
    {
        activationFunction = ActivationFn::kActivationNone;
    }
    else
    {
        if (!GetInputActivationFunction(operation, inputIndex, activationFunction))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    return true;
}

template<typename HalVersion>
template<typename HalOperation>
bool ModelToINetworkConverter<HalVersion>::GetInputPaddingScheme(const HalOperation& operation,
                                                                 uint32_t inputIndex,
                                                                 PaddingScheme& outPaddingScheme) const
{
    int32_t paddingSchemeAsInt;
    if (!GetInputInt32(operation, inputIndex, paddingSchemeAsInt))
    {
        return Fail("%s: failed to get padding scheme input value", __func__);
    }

    outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
    return true;
}

template<typename HalVersion>
template<typename HalOperation>
LayerInputHandle ModelToINetworkConverter<HalVersion>::ConvertToLayerInputHandle(const HalOperation& operation,
                                                                                 uint32_t inputIndex)
{
    const Operand* operand = GetInputOperand(operation, inputIndex);
    if (!operand)
    {
        Fail("%s: failed to get input operand %u", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);

    switch (operand->lifetime)
    {
        case OperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
        case OperandLifeTime::MODEL_INPUT:
        {
            // The tensor is either an operand internal to the model, or a model input.
            // It can be associated with an ArmNN output slot for an existing layer.

            // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
            const uint32_t operandIndex = operation.inputs[inputIndex];
            return LayerInputHandle(true, m_OutputSlotForOperand[operandIndex], operandTensorInfo);
        }
        case OperandLifeTime::CONSTANT_COPY:
        case OperandLifeTime::CONSTANT_REFERENCE:
        {
            // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
            ConstTensorPin tensorPin = ConvertOperandToConstTensorPin(*operand);
            if (tensorPin.IsValid())
            {
                if (!IsLayerSupported(__func__,
                                      armnn::IsConstantSupported,
                                      m_Compute,
                                      tensorPin.GetConstTensor().GetInfo()))
                {
                    return LayerInputHandle();
                }

                armnn::IConnectableLayer* constantLayer = m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                return LayerInputHandle(true, &outputSlot, operandTensorInfo);
            }
            else
            {
                Fail("%s: invalid operand tensor", __func__);
                return LayerInputHandle();
            }
        }
        default:
        {
            // Unsupported lifetime for an input tensor
            Fail("%s: unsupported lifetime for input tensor: %s",
                 __func__, toString(operand->lifetime).c_str());
            return LayerInputHandle();
        }
    }
}

template<typename HalVersion>
template<typename HalOperation>
ConstTensorPin ModelToINetworkConverter<HalVersion>::ConvertOperationInputToConstTensorPin(
    const HalOperation& operation,
    uint32_t inputIndex,
    const armnn::PermutationVector& dimensionMappings,
    const armnn::TensorShape* overrideTensorShape,
    bool optional)
{
    const Operand* operand = GetInputOperand(operation, inputIndex);
    if (!operand)
    {
        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
        return ConstTensorPin();
    }
    return ConvertOperandToConstTensorPin(*operand, dimensionMappings, overrideTensorShape, optional);
}

template<typename HalVersion>
ConstTensorPin ModelToINetworkConverter<HalVersion>::ConvertOperandToConstTensorPin(const Operand& operand,
    const armnn::PermutationVector& dimensionMappings, const armnn::TensorShape* overrideTensorShape, bool optional)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    if (operand.lifetime != OperandLifeTime::CONSTANT_COPY && operand.lifetime != OperandLifeTime::CONSTANT_REFERENCE)
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress(operand);
    if (!valueStart)
    {
        if (optional)
        {
            // An optional tensor with no values is not an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // A mandatory tensor with no values is an error
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::GetTensorInt32Values(const Operand& operand,
                                                                std::vector<int32_t>& outValues) const
{
    if (operand.type != OperandType::TENSOR_INT32)
    {
        return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
    }

    const void* startAddress = GetOperandValueReadOnlyAddress(operand);
    if (!startAddress)
    {
        return Fail("%s: failed to get operand address", __func__);
    }

    // Check that the number of bytes is sensible
    const uint32_t numBytes = operand.location.length;
    if (numBytes % sizeof(int32_t) != 0)
    {
        return Fail("%s: invalid number of bytes: %u, expected to be a multiple of %zu",
                    __func__, numBytes, sizeof(int32_t));
    }

    outValues.resize(numBytes / sizeof(int32_t));
    memcpy(outValues.data(), startAddress, numBytes);
    return true;
}

// Creates an ArmNN activation layer and connects it to the given layer, if the
// passed-in AndroidNN activation function requires one.
// @return The end layer of the sequence of layers built for the given AndroidNN
// activation function, or nullptr if an error occurred (e.g. unsupported activation).
// Note that the end layer matches the input layer if no activation is required
// (the sequence of layers has length 1).
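// For example (illustrative): when a MUL operation carries a fused RELU, the caller passes the
// Multiplication layer in as prevLayer and receives the appended Activation layer back; with
// ActivationFn::kActivationNone, prevLayer itself is returned unchanged.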
template<typename HalVersion>
armnn::IConnectableLayer* ModelToINetworkConverter<HalVersion>::ProcessActivation(const armnn::TensorInfo& tensorInfo,
    ActivationFn activation, armnn::IConnectableLayer* prevLayer)
{
    assert(prevLayer->GetNumOutputSlots() == 1);

    prevLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    armnn::IConnectableLayer* activationLayer = prevLayer;

    if (activation != ActivationFn::kActivationNone)
    {
        armnn::ActivationDescriptor activationDesc;
        switch (activation)
        {
            case ActivationFn::kActivationRelu:
            {
                activationDesc.m_Function = armnn::ActivationFunction::ReLu;
                break;
            }
            case ActivationFn::kActivationRelu1:
            {
                activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
                activationDesc.m_A = 1.0f;
                activationDesc.m_B = -1.0f;
                break;
            }
            case ActivationFn::kActivationRelu6:
            {
                activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
                activationDesc.m_A = 6.0f;
                break;
            }
            case ActivationFn::kActivationSigmoid:
            {
                activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
                break;
            }
            case ActivationFn::kActivationTanh:
            {
                activationDesc.m_Function = armnn::ActivationFunction::TanH;
                activationDesc.m_A = 1.0f;
                activationDesc.m_B = 1.0f;
                break;
            }
            default:
            {
                Fail("%s: Invalid activation enum value %i", __func__, activation);
                return nullptr;
            }
        }

        if (!IsLayerSupported(__func__, armnn::IsActivationSupported, m_Compute,
                              prevLayer->GetOutputSlot(0).GetTensorInfo(), tensorInfo, activationDesc))
        {
            return nullptr;
        }

        activationLayer = m_Network->AddActivationLayer(activationDesc);

        prevLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
        activationLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    }

    return activationLayer;
}

template<typename HalVersion>
template<typename HalOperation>
bool ModelToINetworkConverter<HalVersion>::SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                                                        uint32_t operationOutputIndex,
                                                                        armnn::IConnectableLayer& layer,
                                                                        uint32_t layerOutputIndex)
{
    const Operand* outputOperand = GetOutputOperand(operation, operationOutputIndex);

    if ((outputOperand == nullptr) || (layerOutputIndex >= layer.GetNumOutputSlots()))
    {
        return false;
    }

    armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);

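    // Record the slot against the operand index so later operations that consume this operand
    // can find and connect to it via ConvertToLayerInputHandle.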
    const uint32_t operandIndex = operation.outputs[operationOutputIndex];
    m_OutputSlotForOperand[operandIndex] = &outputSlot;

    outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));

    return true;
}

template<typename HalVersion>
template<typename HalOperation>
bool ModelToINetworkConverter<HalVersion>::SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                                                        uint32_t outputIndex,
                                                                        armnn::IConnectableLayer& layer)
{
    return SetupAndTrackLayerOutputSlot(operation, outputIndex, layer, outputIndex);
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::IsOperationSupported(uint32_t operationIndex) const
{
    std::map<uint32_t, bool>::const_iterator it = m_OperationSupported.find(operationIndex);
    assert(it != m_OperationSupported.end());
    return it->second;
}

template class ModelToINetworkConverter<HalVersion_1_0>;

#if defined(ARMNN_ANDROID_NN_V1_1)
template class ModelToINetworkConverter<HalVersion_1_1>;
#endif

} // namespace armnn_driver