//
// Copyright © 2017 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//

#define LOG_TAG "ArmnnDriver"

#include "ModelToINetworkConverter.hpp"
#include <OperationsUtils.h>

#include <armnn/LayerSupport.hpp>
#include <Permute.hpp>

#include <log/log.h>
#include <cassert>

#include <boost/format.hpp>
#include <boost/core/ignore_unused.hpp>
#include <boost/test/tools/floating_point_comparison.hpp>
#include <boost/cast.hpp>
#include <boost/optional.hpp>

using namespace android::hardware;

namespace armnn_driver
{

class LayerInputHandle
{
public:
    LayerInputHandle()
        : m_OutputSlot(nullptr)
        , m_Valid(false)
    {}

    LayerInputHandle(bool valid, armnn::IOutputSlot* outputSlot, armnn::TensorInfo tensorInfo)
        : m_OutputSlot(outputSlot)
        , m_Valid(valid)
        , m_TensorInfo(tensorInfo)
    {}

    bool IsValid() const { return m_Valid; }
    void Connect(armnn::IInputSlot& inputSlot)
    {
        assert(IsValid());

        if (m_OutputSlot)
        {
            m_OutputSlot->Connect(inputSlot);
        }
    }
    const armnn::TensorInfo& GetTensorInfo() const { return m_TensorInfo; }

private:
    armnn::IOutputSlot* m_OutputSlot;
    bool m_Valid;
    armnn::TensorInfo m_TensorInfo;
};

} // namespace armnn_driver

namespace
{

using namespace armnn_driver;
using namespace android::nn;

// Convenience function to log the reason for failing to convert a model.
// @return Always returns false (so that it can be used by callers as a quick way to signal an error and return)
template<class... Args>
static bool Fail(const char* formatStr, Args&&... args)
{
    ALOGD(formatStr, std::forward<Args>(args)...);
    return false;
}

// Convenience function to call an Is*Supported function and log caller name together with reason for lack of support.
// Called as: IsLayerSupported(__func__, Is*Supported, a, b, c, d, e)
template<typename IsLayerSupportedFunc, typename ... Args>
bool IsLayerSupported(const char* funcName, IsLayerSupportedFunc f, Args&&... args)
{
    std::vector<char> unsupportedReason(1024 + 1);
    bool isSupported = f(std::forward<Args>(args)..., unsupportedReason.data(), unsupportedReason.size() - 1);
    if (isSupported)
    {
        return true;
    }
    else
    {
        std::string sUnsupportedReason(unsupportedReason.data());
        if (sUnsupportedReason.size() > 0)
        {
            ALOGD("%s: not supported by armnn: %s", funcName, sUnsupportedReason.c_str());
        }
        else
        {
            ALOGD("%s: not supported by armnn", funcName);
        }
        return false;
    }
}

armnn::TensorShape GetTensorShapeForOperand(const Operand& operand)
{
    return armnn::TensorShape(operand.dimensions.size(), operand.dimensions.data());
}

inline bool IsOperandTypeSupportedForTensors(OperandType type)
{
    return type == OperandType::TENSOR_FLOAT32 ||
           type == OperandType::TENSOR_QUANT8_ASYMM ||
           type == OperandType::TENSOR_INT32;
}

void BroadcastTensor(LayerInputHandle& input0, LayerInputHandle& input1, armnn::IConnectableLayer* startLayer,
                     armnn::INetwork& network)
{
    BOOST_ASSERT(startLayer != nullptr);
    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();

    if (inputTensorInfo0.GetNumDimensions() != inputTensorInfo1.GetNumDimensions())
    {
        // If the number of dimensions does not match then we need to add degenerate dimensions
        // to the "smaller" tensor using a reshape:
        //      Small  Big
        //        |     |
        //     Reshape  |
        //         \   /
        //          Add
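        //
        // For example (illustrative shapes only): broadcasting a [4] tensor against a [1, 2, 3, 4]
        // tensor reshapes the smaller input to [1, 1, 1, 4] before it is connected to the binary layer.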
        bool input0IsBigger = inputTensorInfo0.GetNumDimensions() > inputTensorInfo1.GetNumDimensions();

        LayerInputHandle& smallTensorHandle = input0IsBigger ? input1 : input0;
        const armnn::TensorInfo& smallTensorDims = smallTensorHandle.GetTensorInfo();

        LayerInputHandle& bigTensorHandle = input0IsBigger ? input0 : input1;
        const armnn::TensorInfo& bigTensorDims = bigTensorHandle.GetTensorInfo();

        const unsigned int bigTensorDimsNumber = bigTensorDims.GetNumDimensions();
        std::vector<unsigned int> reshapedDims(bigTensorDimsNumber, 1);
        unsigned int sizeDifference = bigTensorDimsNumber - smallTensorDims.GetNumDimensions();
        for (unsigned i = sizeDifference; i < bigTensorDimsNumber; ++i)
        {
            reshapedDims[i] = smallTensorDims.GetShape()[i - sizeDifference];
        }
        armnn::TensorInfo reshapedInfo = smallTensorDims;
        reshapedInfo.SetShape(armnn::TensorShape{ static_cast<unsigned int>(reshapedDims.size()),
                                                  reshapedDims.data() });

        armnn::ReshapeDescriptor reshapeDesc;
        reshapeDesc.m_TargetShape = reshapedInfo.GetShape();
        armnn::IConnectableLayer* const reshapeLayer = network.AddReshapeLayer(reshapeDesc);
        smallTensorHandle.Connect(reshapeLayer->GetInputSlot(0));
        reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);

        // Connect the outputs from new reshape and original input layer
        reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        bigTensorHandle.Connect(startLayer->GetInputSlot(1));
    }
    else
    {
        input0.Connect(startLayer->GetInputSlot(0));
        input1.Connect(startLayer->GetInputSlot(1));
    }
}

void CalcPadding(uint32_t input, uint32_t kernel, uint32_t stride, uint32_t& outPadHead, uint32_t& outPadTail,
                 android::nn::PaddingScheme scheme)
{
    int32_t padHead;
    int32_t padTail;
    calculateExplicitPadding(input, stride, kernel, scheme, &padHead, &padTail);
    outPadHead = boost::numeric_cast<uint32_t>(padHead);
    outPadTail = boost::numeric_cast<uint32_t>(padTail);
}

Shape GetOperandShape(const Operand& operand)
{
    Shape shape;
    shape.type = operand.type;
    shape.dimensions = operand.dimensions;
    shape.scale = operand.scale;
    shape.offset = operand.zeroPoint;
    return shape;
}

// ArmNN requires the bias scale to be equal to the product of the weight and input scales, which is also
// what AndroidNN requires. However for some of the AndroidNN tests the values don't exactly match, so
// we accept some tolerance. We don't want ArmNN itself to accept these inconsistencies as it is up to the user
// (us, in this case) to ensure they match.
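// For example (illustrative values only): with an input scale of 0.1f and a weight scale of 0.1f the
// expected bias scale is 0.01f; a bias scale of 0.01005f falls within the 1% tolerance and is snapped to
// 0.01f, whereas a bias scale of 0.02f is left unchanged (and will be rejected later by ArmNN).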
void SanitizeBiasQuantizationScale(armnn::TensorInfo& biasInfo,
                                   const armnn::TensorInfo& weightInfo, const armnn::TensorInfo& inputInfo)
{
    const float expectedBiasScale = weightInfo.GetQuantizationScale() * inputInfo.GetQuantizationScale();
    if (biasInfo.GetQuantizationScale() != expectedBiasScale)
    {
        boost::math::fpc::close_at_tolerance<float> comparer(boost::math::fpc::percent_tolerance(1.0f));
        if (comparer(biasInfo.GetQuantizationScale(), expectedBiasScale))
        {
            ALOGW("Bias quantization scale has been modified to match input*weights");
            biasInfo.SetQuantizationScale(expectedBiasScale);
        }
    }
}

// 4D Tensor Permutations
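// (used to translate between the NHWC data layout used by Android NN and the NCHW layout used by ArmNN,
// and to move the concatenation axis onto a dimension that ArmNN can concatenate along)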
const armnn::PermutationVector IdentityPermutation4D({ 0U, 1U, 2U, 3U });
const armnn::PermutationVector NHWCToArmNN({ 0U, 2U, 3U, 1U });
const armnn::PermutationVector ArmNNToNHWC({ 0U, 3U, 1U, 2U });
const armnn::PermutationVector SwapDim1And2({ 0U, 2U, 1U, 3U });

// 3D Permutation Vectors
const armnn::PermutationVector IdentityPermutation3D({ 0U, 1U, 2U });
const armnn::PermutationVector RotateTensorLeft({ 2U, 0U, 1U });
const armnn::PermutationVector RotateTensorRight({ 1U, 2U, 0U });

template<typename OSlot>
armnn::IConnectableLayer& AddPermuteLayer(armnn::INetwork& network, OSlot& input,
                                          const armnn::PermutationVector& mappings)
{
    // Add swizzle layer
    armnn::IConnectableLayer* const layer = network.AddPermuteLayer(mappings);

    assert(layer != nullptr);

    // Connect input to swizzle layer
    input.Connect(layer->GetInputSlot(0));

    // Setup swizzled output
    const armnn::TensorInfo outInfo = armnnUtils::Permuted(input.GetTensorInfo(), mappings);
    layer->GetOutputSlot(0).SetTensorInfo(outInfo);

    return *layer;
}

void SwizzleIn(armnn::INetwork& network, LayerInputHandle& input, armnn::IConnectableLayer& layer, unsigned int index)
{
    // Add swizzle layer
    armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, input, NHWCToArmNN);
    // Connect swizzled input to layer
    swizzleLayer.GetOutputSlot(0).Connect(layer.GetInputSlot(index));
}

armnn::IConnectableLayer& DeswizzleOut(armnn::INetwork& network, armnn::IConnectableLayer& layer, unsigned int index)
{
    // Add deswizzle layer
    armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(network, layer.GetOutputSlot(index), ArmNNToNHWC);
    return deswizzleLayer;
}

// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network,
                                                LayerInputHandle& input,
                                                armnn::IConnectableLayer& firstLayer,
                                                armnn::IConnectableLayer& lastLayer)
{
    SwizzleIn(network, input, firstLayer, 0);
    return DeswizzleOut(network, lastLayer, 0);
}

// only suitable for input/output slot index 0, for other slots, use SwizzleIn and DeswizzleOut directly
armnn::IConnectableLayer& SwizzleInDeswizzleOut(armnn::INetwork& network, LayerInputHandle& input,
                                                armnn::IConnectableLayer& layer)
{
    return SwizzleInDeswizzleOut(network, input, layer, layer);
}

bool ValidateConcatOutputShape(const std::vector<armnn::TensorShape> & inputShapes,
                               const armnn::TensorShape & outputShape,
                               uint32_t concatDim)
{
    // Validate the output shape is correct given the input shapes (which have just been validated)
    unsigned int numDimensions = inputShapes[0].GetNumDimensions();
    if (outputShape.GetNumDimensions() != numDimensions)
    {
        return Fail("%s: Output shape has wrong number of dimensions", __func__);
    }

    unsigned int outputSizeAlongConcatenatedDimension = 0;
    for (unsigned int i = 0; i < inputShapes.size(); i++)
    {
        outputSizeAlongConcatenatedDimension += inputShapes[i][concatDim];
    }

    for (unsigned int i = 0; i < numDimensions; ++i)
    {
        if (i == concatDim)
        {
            if (outputShape[i] != outputSizeAlongConcatenatedDimension)
            {
                return Fail(
                    "%s: Invalid output shape for dimension %d (%d != %d)",
                    __func__,
                    i,
                    outputShape[i],
                    outputSizeAlongConcatenatedDimension);
            }
        }
        else
        {
            if (outputShape[i] != inputShapes[0][i])
            {
                return Fail("%s: Invalid output shape", __func__);
            }
        }
    }

    return true;
}

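// Tensors with fewer than three dimensions are expanded to 3D by the concatenation converter below,
// so that the permutation logic only ever has to deal with 3D or 4D shapes.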
bool RequiresReshape(armnn::TensorShape & inputShape)
{
    return inputShape.GetNumDimensions() < 3;
}

template<typename OSlot>
armnn::IConnectableLayer& AddReshapeLayer(armnn::INetwork& network, OSlot& inputLayer,
                                          armnn::TensorInfo reshapeInfo)
{
    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = reshapeInfo.GetShape();

    armnn::IConnectableLayer* reshapeLayer = network.AddReshapeLayer(reshapeDescriptor);
    assert(reshapeLayer != nullptr);

    // Attach the input layer to the reshape layer
    inputLayer.Connect(reshapeLayer->GetInputSlot(0));
    reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapeInfo);

    return *reshapeLayer;
}

void SwizzleInputs(armnn::INetwork& network,
                   std::vector<LayerInputHandle>& inputs,
                   std::vector<armnn::TensorShape>& inputShapes,
                   const armnn::PermutationVector& mapping)
{
    if (!mapping.IsEqual(IdentityPermutation4D))
    {
        size_t nInputs = inputs.size();
        for (size_t i = 0; i < nInputs; ++i)
        {
            // add swizzle layer
            armnn::IConnectableLayer& swizzleLayer = AddPermuteLayer(network, inputs[i], mapping);
            auto& outputSlot = swizzleLayer.GetOutputSlot(0);
            auto& outputInfo = outputSlot.GetTensorInfo();
            // replace inputs with the swizzled ones
            inputs[i] = LayerInputHandle(true, &outputSlot, outputInfo);
            inputShapes[i] = inputs[i].GetTensorInfo().GetShape();
        }
    }
}

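// Given the rank of the tensors being concatenated and the requested concatenation dimension, works out
// an equivalent concatenation dimension that ArmNN supports, plus the permutations to apply before the
// merger layer and after it (to restore the original layout).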
void CreatePermutationParameters(const unsigned int numberOfDimensions,
                                 int32_t & concatDimension,
                                 std::pair<armnn::PermutationVector, armnn::PermutationVector> & permutationPair)
{
    assert(numberOfDimensions >= 3);

    // ArmNN uses Compute Library subtensors to perform concatenation
    // This only works when concatenating along dimension 0 or 1 for a 4-D tensor,
    // or along dimension 0 for a 3-D tensor.
    if (numberOfDimensions == 4)
    {
        if (concatDimension == 3)
        {
            concatDimension = 1;
            permutationPair = std::make_pair(NHWCToArmNN, ArmNNToNHWC);
        }
        else if (concatDimension == 2)
        {
            concatDimension = 1;
            permutationPair = std::make_pair(SwapDim1And2, SwapDim1And2);
        }
        else
        {
            permutationPair = std::make_pair(IdentityPermutation4D, IdentityPermutation4D);
        }
    }
    else if (numberOfDimensions == 3)
    {
        if (concatDimension == 2)
        {
            concatDimension = 0;
            permutationPair = std::make_pair(RotateTensorRight, RotateTensorLeft);
        }
        else if (concatDimension == 1)
        {
            concatDimension = 0;
            permutationPair = std::make_pair(RotateTensorLeft, RotateTensorRight);
        }
        else
        {
            permutationPair = std::make_pair(IdentityPermutation3D, IdentityPermutation3D);
        }
    }
}

} // anonymous namespace

namespace armnn_driver
{

class ConstTensorPin
{
public:
    // Creates an invalid tensor pin (can be used to signal errors)
    // The optional flag can be set to indicate the tensor values were missing, but it was otherwise valid
    ConstTensorPin(bool optional = false) : m_Optional(optional) {}

    // @param tensorInfo TensorInfo associated with the tensor.
    // @param valueStart Start address of tensor data. Belongs to one of the memory pools associated with
    //                   the model being converted.
    // @param numBytes Number of bytes for the tensor data.
    ConstTensorPin(const armnn::TensorInfo& tensorInfo, const void* valueStart, uint32_t numBytes,
                   const armnn::PermutationVector& mappings)
    {
        boost::ignore_unused(numBytes);
        assert(tensorInfo.GetNumBytes() == numBytes);

        const bool needsSwizzling = (mappings.GetSize() > 0);
        if (needsSwizzling)
        {
            m_SwizzledTensorData.resize(tensorInfo.GetNumBytes());
            SwizzleAndroidNn4dTensorToArmNn(tensorInfo, valueStart, m_SwizzledTensorData.data(), mappings);

            m_ConstTensor = armnn::ConstTensor(armnnUtils::Permuted(tensorInfo, mappings), m_SwizzledTensorData.data());
        }
        else
        {
            m_ConstTensor = armnn::ConstTensor(tensorInfo, valueStart);
        }
    }

    ConstTensorPin(const ConstTensorPin& other) = delete;
    ConstTensorPin(ConstTensorPin&& other) = default;

    bool IsValid() const { return m_ConstTensor.GetMemoryArea() != nullptr; }
    bool IsOptional() const { return m_Optional; }
    const armnn::ConstTensor& GetConstTensor() const { return m_ConstTensor; }
    const armnn::ConstTensor* GetConstTensorPtr() const
    {
        if (IsValid() && m_ConstTensor.GetNumElements() > 0)
        {
            return &m_ConstTensor;
        }
        // tensor is either invalid, or has no elements (indicating an optional tensor that was not provided)
        return nullptr;
    }

private:
    armnn::ConstTensor m_ConstTensor;
    // Owned memory for swizzled tensor data, only required if the tensor needed
    // swizzling. Otherwise, @ref m_ConstTensor will reference memory from one of
    // the pools associated with the model being converted.
    std::vector<uint8_t> m_SwizzledTensorData;
    // optional flag to indicate that an invalid tensor pin is not an error, but the optional values were not given
    bool m_Optional;
};

template<typename HalVersion>
ModelToINetworkConverter<HalVersion>::ModelToINetworkConverter(armnn::Compute compute,
    const HalModel& model,
    const std::set<unsigned int>& forcedUnsupportedOperations)
    : m_Compute(compute)
    , m_Model(model)
    , m_ForcedUnsupportedOperations(forcedUnsupportedOperations)
    , m_Network(nullptr, nullptr)
    , m_ConversionResult(ConversionResult::Success)
{
    try
    {
        Convert();
    }
    catch (armnn::Exception& e)
    {
        m_ConversionResult = ConversionResult::UnsupportedFeature;
        ALOGE("%s: Unexpected exception: %s", __func__, e.what());
        assert(false);
    }
}

template<typename HalVersion>
void ModelToINetworkConverter<HalVersion>::Convert()
{
    using HalModel = typename HalVersion::Model;

    ALOGV("ModelToINetworkConverter::Convert(): %s", GetModelSummary<HalModel>(m_Model).c_str());

    // map the memory pool into shared pointers
    m_MemPools.clear();
    if (!setRunTimePoolInfosFromHidlMemories(&m_MemPools, m_Model.pools))
    {
        Fail("%s: Setting of run time pool infos from Hidl Memories has failed.", __func__);
        m_ConversionResult = ConversionResult::ErrorMappingPools;
        return;
    }

    uint32_t totalPoolSize = 0;
    for (auto&& pool : m_Model.pools)
    {
        totalPoolSize += pool.size();
    }

    // Create armnn::INetwork
    m_Network = armnn::INetwork::Create();

    // add operations to it
    // track which layer outputs each operand
    m_OutputSlotForOperand = std::vector<armnn::IOutputSlot*>(m_Model.operands.size(), nullptr);

    try
    {
        for (uint32_t i = 0; i < m_Model.inputIndexes.size(); i++)
        {
            // inputs in android nn are represented by operands
            uint32_t inputIndex = m_Model.inputIndexes[i];
            const Operand& operand = m_Model.operands[inputIndex];
            const armnn::TensorInfo& tensor = GetTensorInfoForOperand(operand);
            armnn::IConnectableLayer* layer = m_Network->AddInputLayer(i);

            armnn::IOutputSlot& outputSlot = layer->GetOutputSlot(0);
            outputSlot.SetTensorInfo(GetTensorInfoForOperand(operand));

            // store for later layers
            m_OutputSlotForOperand[inputIndex] = &outputSlot;
        }
    }
    catch (UnsupportedOperand& e)
    {
        Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
        m_ConversionResult = ConversionResult::UnsupportedFeature;
    }
    catch (const armnn::InvalidArgumentException& e)
    {
        Fail("%s: Failed to convert input operand to TensorShape: %s", __func__, e.what());
        m_ConversionResult = ConversionResult::UnsupportedFeature;
    }

    for (uint32_t operationIdx = 0; operationIdx < m_Model.operations.size(); operationIdx++)
    {
        const auto& operation = m_Model.operations[operationIdx];

        bool ok = true;
        if (m_ForcedUnsupportedOperations.find(operationIdx) != m_ForcedUnsupportedOperations.end())
        {
            Fail("%s: Operation at index %i has been forced to be unsupported.", __func__, operationIdx);
            ok = false;
        }

        if (ok)
        {
            try
            {
                ok = ConvertOperation(operation);
            }
            catch (UnsupportedOperand& e)
            {
                Fail("%s: Operand type %s not supported in ArmnnDriver", __func__, toString(e.m_type).c_str());
                ok = false;
            }
            catch (const armnn::InvalidArgumentException& e)
            {
                Fail("%s: Failed to convert operation in %s", __func__, e.what());
                ok = false;
            }
        }

        // Store whether this operation was successfully converted.
        m_OperationSupported.emplace(operationIdx, ok);

        // Any single operation failing will fail the entire conversion.
        // We still need to continue and check the other ones.
        if (!ok)
        {
            m_ConversionResult = ConversionResult::UnsupportedFeature;
        }
    }
    try
    {
        if (m_ConversionResult == ConversionResult::Success)
        {
            for (uint32_t i = 0; i < m_Model.outputIndexes.size(); i++)
            {
                // outputs in android nn are represented by operands
                uint32_t outputIndex = m_Model.outputIndexes[i];
                const Operand& operand = m_Model.operands[outputIndex];
                const armnn::TensorInfo& tensor = GetTensorInfoForOperand(operand);
                armnn::IConnectableLayer* layer = m_Network->AddOutputLayer(i);

                assert(m_OutputSlotForOperand[outputIndex]);
                m_OutputSlotForOperand[outputIndex]->Connect(layer->GetInputSlot(0));
            }
        }
    }
    catch (const armnn::InvalidArgumentException& e)
    {
        Fail("%s: Failed to convert output operand to TensorShape: %s", __func__, e.what());
        m_ConversionResult = ConversionResult::UnsupportedFeature;
    }
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertOperation(const neuralnetworks::V1_0::Operation& operation)
{
    switch (operation.type)
    {
        case neuralnetworks::V1_0::OperationType::ADD:
            return ConvertAdd(operation);
        case neuralnetworks::V1_0::OperationType::AVERAGE_POOL_2D:
            return ConvertAveragePool2d(operation);
        case neuralnetworks::V1_0::OperationType::CONCATENATION:
            return ConvertConcatenation(operation);
        case neuralnetworks::V1_0::OperationType::CONV_2D:
            return ConvertConv2d(operation);
        case neuralnetworks::V1_0::OperationType::DEPTHWISE_CONV_2D:
            return ConvertDepthwiseConv2d(operation);
        case neuralnetworks::V1_0::OperationType::FLOOR:
            return ConvertFloor(operation);
        case neuralnetworks::V1_0::OperationType::FULLY_CONNECTED:
            return ConvertFullyConnected(operation);
        case neuralnetworks::V1_0::OperationType::LOCAL_RESPONSE_NORMALIZATION:
            return ConvertLocalResponseNormalization(operation);
        case neuralnetworks::V1_0::OperationType::LOGISTIC:
            return ConvertLogistic(operation);
        case neuralnetworks::V1_0::OperationType::LSTM:
            return ConvertLstm(operation);
        case neuralnetworks::V1_0::OperationType::L2_NORMALIZATION:
            return ConvertL2Normalization(operation);
        case neuralnetworks::V1_0::OperationType::L2_POOL_2D:
            return ConvertL2Pool2d(operation);
        case neuralnetworks::V1_0::OperationType::MAX_POOL_2D:
            return ConvertMaxPool2d(operation);
        case neuralnetworks::V1_0::OperationType::MUL:
            return ConvertMul(operation);
        case neuralnetworks::V1_0::OperationType::RELU:
            return ConvertReLu(operation);
        case neuralnetworks::V1_0::OperationType::RELU1:
            return ConvertReLu1(operation);
        case neuralnetworks::V1_0::OperationType::RELU6:
            return ConvertReLu6(operation);
        case neuralnetworks::V1_0::OperationType::SOFTMAX:
            return ConvertSoftmax(operation);
        case neuralnetworks::V1_0::OperationType::TANH:
            return ConvertTanH(operation);
        case neuralnetworks::V1_0::OperationType::RESHAPE:
            return ConvertReshape(operation);
        case neuralnetworks::V1_0::OperationType::RESIZE_BILINEAR:
            return ConvertResizeBilinear(operation);
        default:
            return Fail("%s: Operation type %s not supported in ArmnnDriver",
                        __func__, toString(operation.type).c_str());
    }
}

#if defined(ARMNN_ANDROID_NN_V1_1) // Using ::android::hardware::neuralnetworks::V1_1.
template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertOperation(const neuralnetworks::V1_1::Operation& operation)
{
    if (compliantWithV1_0(operation))
    {
        neuralnetworks::V1_0::Operation v1Operation = convertToV1_0(operation);
        return ConvertOperation(v1Operation);
    }
    else
    {
        switch (operation.type)
        {
            case neuralnetworks::V1_1::OperationType::DIV:
                return ConvertDiv(operation);
            default:
                return Fail("%s: Operation type %s not supported in ArmnnDriver",
                            __func__, toString(operation.type).c_str());
        }
    }
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertDiv(const neuralnetworks::V1_1::Operation& operation)
{
    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0);
    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2
    // and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation(operation, 2, activationFunction))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);

    if (!IsLayerSupported(__func__,
                          armnn::IsDivisionSupported,
                          m_Compute,
                          input0.GetTensorInfo(),
                          input1.GetTensorInfo(),
                          outInfo))
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = m_Network->AddDivisionLayer();
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer);

    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();

    if (endLayer)
    {
        BroadcastTensor(input0, input1, startLayer, *m_Network);
        return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer);
    }

    return Fail("%s: ProcessActivation failed", __func__);
}
#endif

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertAdd(const neuralnetworks::V1_0::Operation& operation)
{
    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0);
    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always the input index 2
    // and it should be optional
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation(operation, 2, activationFunction))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0);
    if (!outputOperand)
    {
        return false;
    }

    const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);

    if (!IsLayerSupported(__func__,
                          armnn::IsAdditionSupported,
                          m_Compute,
                          input0.GetTensorInfo(),
                          input1.GetTensorInfo(),
                          outInfo))
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = m_Network->AddAdditionLayer();
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer);

    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();

    if (endLayer != nullptr)
    {
        BroadcastTensor(input0, input1, startLayer, *m_Network);
        return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertAveragePool2d(const neuralnetworks::V1_0::Operation& operation)
{
    return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Average);
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertConcatenation(const neuralnetworks::V1_0::Operation& operation)
{
    // The first N (0..N-1) inputs are tensors. The Nth input is the concatenation axis.
    if (operation.inputs.size() <= 1)
    {
        return Fail("%s: Operation has insufficient arguments", __func__);
    }

    // Get inputs and outputs
    const std::size_t numInputTensors = operation.inputs.size() - 1;

    int32_t concatDim;
    if (!GetInputScalar(operation, numInputTensors, OperandType::INT32, concatDim))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand(operation, 0);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    armnn::TensorInfo outputInfo = GetTensorInfoForOperand(*outputOperand);
    armnn::TensorShape outputShape = outputInfo.GetShape();

    //
    // handle negative concat dims along the lines of tensorflow as described here:
    //    https://www.tensorflow.org/api_docs/python/tf/concat
    // "negative axis refers to axis + rank(values)-th dimension"
    //
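    // (e.g. for a 4-D output a concat axis of -1 refers to dimension 3)
    //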
    if (concatDim < 0)
    {
        concatDim += outputShape.GetNumDimensions();
    }

    if (concatDim >= static_cast<int32_t>(outputShape.GetNumDimensions()) || concatDim < 0)
    {
        return Fail("%s: Operation has invalid concat axis: %d", __func__, concatDim);
    }

    std::vector<LayerInputHandle> inputHandles;
    std::vector<armnn::TensorShape> inputShapes;

    inputHandles.reserve(numInputTensors);
    inputShapes.reserve(numInputTensors);

    bool inputsHaveBeenReshaped = false;
    unsigned int tensorDimensionsAdded = 0;

    for (uint32_t i = 0; i < numInputTensors; ++i)
    {
        const Operand* const operand = GetInputOperand(operation, i);
        if (!operand)
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        armnn::TensorShape operandShape = GetTensorShapeForOperand(*operand);
        LayerInputHandle operandInputHandle = ConvertToLayerInputHandle(operation, i);

        if (operandShape.GetNumDimensions() == 0)
        {
            return Fail("%s: Operands with rank 0 are not supported", __func__);
        }

        if (RequiresReshape(operandShape))
        {
            inputsHaveBeenReshaped = true;

            armnn::TensorInfo reshapeInfo = operandInputHandle.GetTensorInfo();

            // Expand the tensor to three dimensions
            if (operandShape.GetNumDimensions() == 2)
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, operandShape[0], operandShape[1]}));
                tensorDimensionsAdded = 1;
            }
            else
            {
                reshapeInfo.SetShape(armnn::TensorShape({1, 1, operandShape[0]}));
                tensorDimensionsAdded = 2;
            }

            armnn::IConnectableLayer& newReshape = AddReshapeLayer(
                *m_Network,
                operandInputHandle,
                reshapeInfo
            );

            // Point to the reshape operation rather than the input operation
            operandShape = reshapeInfo.GetShape();
            operandInputHandle = LayerInputHandle(true, &newReshape.GetOutputSlot(0), reshapeInfo);
        }

        inputShapes.emplace_back(operandShape);
        inputHandles.emplace_back(operandInputHandle);

        if (!inputHandles.back().IsValid())
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }

    assert(inputShapes.size() == inputHandles.size());

    if (inputsHaveBeenReshaped)
    {
        // Adjust the concatenation dimension by the amount of dimensions added (if any)
        concatDim += tensorDimensionsAdded;

        // Add extra dimensions to the output shape to reflect the addition of the reshape layers
        if (tensorDimensionsAdded == 1)
        {
            outputShape = armnn::TensorShape({1, outputShape[0], outputShape[1]});
        }
        else if (tensorDimensionsAdded == 2)
        {
            outputShape = armnn::TensorShape({1, 1, outputShape[0], outputShape[1]});
        }
    }

    // Get the pair of permutations required for the concatenation
    std::pair<armnn::PermutationVector, armnn::PermutationVector> permutationPair =
        std::make_pair(IdentityPermutation4D, IdentityPermutation4D);

    CreatePermutationParameters(inputShapes[0].GetNumDimensions(), concatDim, permutationPair);

    outputShape = armnnUtils::Permuted(outputShape, permutationPair.first);
    outputInfo.SetShape(outputShape);

    // this is a no-op for identity swizzles, otherwise it replaces both
    // the handles and shapes with the swizzled layer output handles and shapes
    SwizzleInputs(*m_Network, inputHandles, inputShapes, permutationPair.first);

    // Create an armnn merger layer descriptor - this will also perform validation on the input shapes
    armnn::OriginsDescriptor mergerDescriptor;
    try
    {
        // The merger descriptor is always created across the only supported concat
        // dimension, which is 0 or 1
        mergerDescriptor =
            armnn::CreateMergerDescriptorForConcatenation(
                inputShapes.begin(), inputShapes.end(), concatDim);
    }
    catch (const armnn::Exception& error)
    {
        return Fail("%s: Error preparing merger descriptor. %s", __func__, error.what());
    }

    // Validate the output shape is correct given the input shapes based on the
    // only valid concat dimension which is 0 or 1
    if (!ValidateConcatOutputShape(inputShapes, outputShape, concatDim))
    {
        return Fail("%s: Error validating the output shape for concat", __func__);
    }

    std::vector<const armnn::TensorInfo*> inputTensorInfos;
    std::transform(inputHandles.begin(), inputHandles.end(), std::back_inserter(inputTensorInfos),
        [](const LayerInputHandle& h) -> const armnn::TensorInfo* { return &h.GetTensorInfo(); });
    if (!IsLayerSupported(__func__,
                          armnn::IsMergerSupported,
                          m_Compute,
                          inputTensorInfos,
                          mergerDescriptor))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = m_Network->AddMergerLayer(mergerDescriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(outputInfo);

    // Connect inputs to the layer
    const int numInputSlots = layer->GetNumInputSlots();
    assert(static_cast<std::size_t>(numInputSlots) == inputHandles.size());
    for (int i = 0; i < numInputSlots; ++i)
    {
        // connect the input directly to the merge (concat) layer
        inputHandles[static_cast<unsigned int>(i)].Connect(layer->GetInputSlot(i));
    }

    // Add permutation layer and connect the output to it, the permutation becomes the output layer
    armnn::IConnectableLayer& deswizzleLayer = AddPermuteLayer(*m_Network,
                                                               layer->GetOutputSlot(0),
                                                               permutationPair.second);
    layer = &deswizzleLayer;

    if (inputsHaveBeenReshaped)
    {
        armnn::TensorInfo afterConcatInfo = layer->GetOutputSlot(0).GetTensorInfo();

        // Undo the reshape knowing the amount of dimensions added
        if (tensorDimensionsAdded == 1)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[1],
                                                          afterConcatInfo.GetShape()[2] }));
        }
        else if (tensorDimensionsAdded == 2)
        {
            afterConcatInfo.SetShape(armnn::TensorShape({ afterConcatInfo.GetShape()[2],
                                                          afterConcatInfo.GetShape()[3] }));
        }

        layer = &AddReshapeLayer(
            *m_Network,
            layer->GetOutputSlot(0),
            afterConcatInfo
        );
    }

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer);
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertConv2d(const neuralnetworks::V1_0::Operation& operation)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const armnn::TensorInfo swizzledInputInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);

    // ArmNN does not currently support non-fixed weights or bias
    const ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1, NHWCToArmNN);
    const ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), swizzledInputInfo);

    armnn::Convolution2dDescriptor desc;
    ActivationFn activation;

    if (operation.inputs.size() == 10)
    {
        if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop) ||
            !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom) ||
            !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX) ||
            !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY) ||
            !GetInputActivationFunction(operation, 9, activation))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 7)
    {
        android::nn::PaddingScheme paddingScheme;

        if (!GetInputPaddingScheme(operation, 3, paddingScheme) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY) ||
            !GetInputActivationFunction(operation, 6, activation))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = swizzledInputInfo.GetShape()[3];
        const uint32_t inputY = swizzledInputInfo.GetShape()[2];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    auto biases = boost::make_optional(bias.GetInfo());

    if (!IsLayerSupported(__func__,
                          armnn::IsConvolution2dSupported,
                          m_Compute,
                          swizzledInputInfo,
                          swizzledOutputInfo,
                          desc,
                          weights.GetInfo(),
                          biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer = m_Network->AddConvolution2dLayer(desc, weights, bias);
    armnn::IConnectableLayer* endLayer = ProcessActivation(swizzledOutputInfo, activation, startLayer);

    if (endLayer != nullptr)
    {
        armnn::IConnectableLayer& outSwizzleLayer = SwizzleInDeswizzleOut(*m_Network, input, *startLayer, *endLayer);
        return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertDepthwiseConv2d(const neuralnetworks::V1_0::Operation& operation)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const armnn::TensorInfo swizzledInputInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);

    // ArmNN does not currently support non-fixed weights or bias

    // Find the shape of the weights tensor. In AndroidNN this will be [ 1, H, W, I * M ]
    // but in ArmNN it needs to be [ M, I, H, W ]
    const Operand* weightsOperand = GetInputOperand(operation, 1);

    if (weightsOperand == nullptr)
    {
        return Fail("%s: Operand is invalid", __func__);
    }

    // Reinterpret weight data as [ H, W, I, M ]
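    // (illustrative example: Android NN weights of shape [ 1, 3, 3, 6 ] with 3 input channels become
    //  [ 3, 3, 3, 2 ], i.e. a depth multiplier M of 2)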
    armnn::TensorShape weightsShape({ weightsOperand->dimensions[1], weightsOperand->dimensions[2],
                                      inputInfo.GetShape()[3],
                                      weightsOperand->dimensions[3] / inputInfo.GetShape()[3] });

    // Swizzle weight data [ H, W, I, M ] -> [ M, I, H, W ]
    const armnn::PermutationVector HWIMToMIHW = { 2U, 3U, 1U, 0U };
    ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1, HWIMToMIHW, &weightsShape);

    // Bias is a 1D tensor
    ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2);

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), swizzledInputInfo);

    armnn::DepthwiseConvolution2dDescriptor desc;
    ActivationFn activation;

    if (operation.inputs.size() == 11)
    {
        if (!GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadLeft) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadRight) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PadTop) ||
            !GetInputScalar(operation, 6, OperandType::INT32, desc.m_PadBottom) ||
            !GetInputScalar(operation, 7, OperandType::INT32, desc.m_StrideX) ||
            !GetInputScalar(operation, 8, OperandType::INT32, desc.m_StrideY) ||
            !GetInputActivationFunction(operation, 10, activation))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    else if (operation.inputs.size() == 8)
    {
        android::nn::PaddingScheme paddingScheme;

        if (!GetInputPaddingScheme(operation, 3, paddingScheme) ||
            !GetInputScalar(operation, 4, OperandType::INT32, desc.m_StrideX) ||
            !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideY) ||
            !GetInputActivationFunction(operation, 7, activation))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }

        const uint32_t kernelX = weights.GetShape()[3];
        const uint32_t kernelY = weights.GetShape()[2];
        const uint32_t inputX = swizzledInputInfo.GetShape()[3];
        const uint32_t inputY = swizzledInputInfo.GetShape()[2];

        CalcPadding(inputX, kernelX, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, paddingScheme);
        CalcPadding(inputY, kernelY, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, paddingScheme);
    }
    else
    {
        return Fail("%s: Unsupported number of operation inputs", __func__);
    }

    desc.m_BiasEnabled = true;
    auto biases = boost::make_optional(bias.GetInfo());

    if (!IsLayerSupported(__func__,
                          armnn::IsDepthwiseConvolutionSupported,
                          m_Compute,
                          swizzledInputInfo,
                          swizzledOutputInfo,
                          desc,
                          weights.GetInfo(),
                          biases))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer = m_Network->AddDepthwiseConvolution2dLayer(desc, weights, bias);
    armnn::IConnectableLayer* endLayer = ProcessActivation(swizzledOutputInfo, activation, startLayer);

    if (endLayer != nullptr)
    {
        armnn::IConnectableLayer& outSwizzleLayer = SwizzleInDeswizzleOut(*m_Network, input, *startLayer, *endLayer);
        return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertFloor(const neuralnetworks::V1_0::Operation& operation)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* const outputOperand = GetOutputOperand(operation, 0);
    if (!outputOperand)
    {
        return Fail("%s: Operation has invalid outputs", __func__);
    }

    if (!IsLayerSupported(__func__,
                          armnn::IsFloorSupported,
                          m_Compute,
                          input.GetTensorInfo(),
                          GetTensorInfoForOperand(*outputOperand)))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = m_Network->AddFloorLayer();
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer);
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertFullyConnected(const neuralnetworks::V1_0::Operation& operation)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // ArmNN does not currently support non-fixed weights or bias
    ConstTensorPin weightsPin = ConvertOperationInputToConstTensorPin(operation, 1); // 2D
    ConstTensorPin biasPin = ConvertOperationInputToConstTensorPin(operation, 2);    // 1D

    if (!weightsPin.IsValid() || !biasPin.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::ConstTensor weights = weightsPin.GetConstTensor();
    armnn::ConstTensor bias = biasPin.GetConstTensor();

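    // The Android NN input may have more than two dimensions; collapse it to a 2D shape whose second
    // dimension matches the input size expected by the weights, so it can feed the fully connected layer.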
    armnn::TensorInfo reshapedInfo = inputInfo;
    if (inputInfo.GetNumDimensions() > 2U)
    {
        unsigned int dim0 = inputInfo.GetShape()[0];
        unsigned int dim1 = inputInfo.GetShape()[1];

        for (unsigned int i = 2U; i < inputInfo.GetNumDimensions(); ++i)
        {
            dim1 *= inputInfo.GetShape()[i];
        }

        unsigned int divisor = weights.GetInfo().GetShape()[1] / dim1;
        if (dim0 % divisor != 0)
        {
            return Fail("%s: Failed to deduce tensor shape", __func__);
        }

        reshapedInfo.SetShape(armnn::TensorShape({dim0 / divisor, dim1 * divisor}));
    }

    // ensuring that the bias value is within 1% of the weights input (small float differences can exist)
    SanitizeBiasQuantizationScale(bias.GetInfo(), weights.GetInfo(), reshapedInfo);

    ActivationFn activationFunction;
    if (!GetInputActivationFunction(operation, 3, activationFunction))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::FullyConnectedDescriptor desc;
    desc.m_TransposeWeightMatrix = true;
    desc.m_BiasEnabled = true;

    if (!IsLayerSupported(__func__,
                          armnn::IsFullyConnectedSupported,
                          m_Compute,
                          inputInfo,
                          outputInfo,
                          weights.GetInfo(),
                          bias.GetInfo(),
                          desc))
    {
        return false;
    }

    armnn::IConnectableLayer* startLayer = m_Network->AddFullyConnectedLayer(desc, weights, bias);
    armnn::IConnectableLayer* endLayer = ProcessActivation(outputInfo, activationFunction, startLayer);

    if (endLayer != nullptr)
    {
        if (inputInfo.GetNumDimensions() > 2U)
        {
            armnn::ReshapeDescriptor reshapeDescriptor;
            reshapeDescriptor.m_TargetShape = reshapedInfo.GetShape();

            armnn::IConnectableLayer* reshapeLayer = m_Network->AddReshapeLayer(reshapeDescriptor);
            assert(reshapeLayer != nullptr);
            input.Connect(reshapeLayer->GetInputSlot(0));
            reshapeLayer->GetOutputSlot(0).SetTensorInfo(reshapedInfo);
            reshapeLayer->GetOutputSlot(0).Connect(startLayer->GetInputSlot(0));
        }
        else
        {
            input.Connect(startLayer->GetInputSlot(0));
        }

        return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertLocalResponseNormalization(
    const neuralnetworks::V1_0::Operation& operation)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const armnn::TensorInfo swizzledInputInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);

    armnn::NormalizationDescriptor descriptor;

    descriptor.m_NormChannelType = armnn::NormalizationAlgorithmChannel::Across;
    descriptor.m_NormMethodType = armnn::NormalizationAlgorithmMethod::LocalBrightness;

    if (!input.IsValid() ||
        !GetInputScalar(operation, 1, OperandType::INT32, descriptor.m_NormSize) ||
        !GetInputFloat32(operation, 2, descriptor.m_K) ||
        !GetInputFloat32(operation, 3, descriptor.m_Alpha) ||
        !GetInputFloat32(operation, 4, descriptor.m_Beta))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // ArmNN expects normSize to be the full size of the normalization
    // window rather than the radius as in AndroidNN.
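    // (e.g. a radius of 2 becomes a full window size of 5)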
    descriptor.m_NormSize = 1 + (2 * descriptor.m_NormSize);

    if (!IsLayerSupported(__func__,
                          armnn::IsNormalizationSupported,
                          m_Compute,
                          swizzledInputInfo,
                          swizzledOutputInfo,
                          descriptor))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = m_Network->AddNormalizationLayer(descriptor);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(swizzledOutputInfo);

    armnn::IConnectableLayer& outSwizzleLayer = SwizzleInDeswizzleOut(*m_Network, input, *layer);

    return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer);
}

kevmay01bc5f7842018-08-30 12:34:39 +01001429template<typename HalVersion>
1430bool ModelToINetworkConverter<HalVersion>::ConvertLogistic(const neuralnetworks::V1_0::Operation& operation)
telsoa015307bc12018-03-09 13:51:08 +00001431{
1432 armnn::ActivationDescriptor desc;
surmeh0149b9e102018-05-17 14:11:25 +01001433 desc.m_Function = armnn::ActivationFunction::Sigmoid;
telsoa015307bc12018-03-09 13:51:08 +00001434
1435 return ConvertToActivation(operation, __func__, desc);
1436}
1437
template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertL2Normalization(const neuralnetworks::V1_0::Operation& operation)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const armnn::TensorInfo swizzledInputInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);

    if (!IsLayerSupported(__func__,
                          armnn::IsL2NormalizationSupported,
                          m_Compute,
                          swizzledInputInfo,
                          swizzledOutputInfo))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = m_Network->AddL2NormalizationLayer();
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(swizzledOutputInfo);

    armnn::IConnectableLayer& outSwizzleLayer = SwizzleInDeswizzleOut(*m_Network, input, *layer);

    return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer);
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertL2Pool2d(const neuralnetworks::V1_0::Operation& operation)
{
    return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::L2);
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertMaxPool2d(const neuralnetworks::V1_0::Operation& operation)
{
    return ConvertPooling2d(operation, __func__, armnn::PoolingAlgorithm::Max);
}

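// Converts the Android NN MUL operation into an ArmNN multiplication layer. Input 2 is an optional
// fused activation function, appended via ProcessActivation. The two input tensors may differ in
// rank; the BroadcastTensor helper is used to make them broadcast-compatible before they are
// connected to the layer.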
template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertMul(const neuralnetworks::V1_0::Operation& operation)
{
    LayerInputHandle input0 = ConvertToLayerInputHandle(operation, 0);
    LayerInputHandle input1 = ConvertToLayerInputHandle(operation, 1);

    if (!input0.IsValid() || !input1.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    // The FuseActivation parameter is always input index 2, and it is optional.
    ActivationFn activationFunction;
    if (!GetOptionalInputActivation(operation, 2, activationFunction))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0);

    if (outputOperand == nullptr)
    {
        return false;
    }

    const armnn::TensorInfo& outInfo = GetTensorInfoForOperand(*outputOperand);

    if (!IsLayerSupported(__func__,
                          armnn::IsMultiplicationSupported,
                          m_Compute,
                          input0.GetTensorInfo(),
                          input1.GetTensorInfo(),
                          outInfo))
    {
        return false;
    }

    armnn::IConnectableLayer* const startLayer = m_Network->AddMultiplicationLayer();
    armnn::IConnectableLayer* const endLayer = ProcessActivation(outInfo, activationFunction, startLayer);

    const armnn::TensorInfo& inputTensorInfo0 = input0.GetTensorInfo();
    const armnn::TensorInfo& inputTensorInfo1 = input1.GetTensorInfo();

    if (endLayer != nullptr)
    {
        BroadcastTensor(input0, input1, startLayer, *m_Network);
        return SetupAndTrackLayerOutputSlot(operation, 0, *endLayer);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", __func__);
    }
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertReLu(const neuralnetworks::V1_0::Operation& operation)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::ReLu;

    return ConvertToActivation(operation, __func__, desc);
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertReLu1(const neuralnetworks::V1_0::Operation& operation)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
    desc.m_A = 1.0f;
    desc.m_B = -1.0f;

    return ConvertToActivation(operation, __func__, desc);
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertReLu6(const neuralnetworks::V1_0::Operation& operation)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::BoundedReLu;
    desc.m_A = 6.0f;

    return ConvertToActivation(operation, __func__, desc);
}

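// Converts the Android NN SOFTMAX operation into an ArmNN softmax layer. Input 1 supplies the beta
// scalar for the descriptor; the tensors are connected in their original layout, with no NHWC/NCHW
// swizzle around this layer.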
template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertSoftmax(const neuralnetworks::V1_0::Operation& operation)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
    if (!input.IsValid())
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0);
    if (!outputOperand)
    {
        return Fail("%s: Operation has no outputs", __func__);
    }

    const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);

    armnn::SoftmaxDescriptor desc;
    if (!GetInputFloat32(operation, 1, desc.m_Beta))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (!IsLayerSupported(__func__,
                          armnn::IsSoftmaxSupported,
                          m_Compute,
                          input.GetTensorInfo(),
                          outInfo,
                          desc))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = m_Network->AddSoftmaxLayer(desc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer);
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertTanH(const neuralnetworks::V1_0::Operation& operation)
{
    armnn::ActivationDescriptor desc;
    desc.m_Function = armnn::ActivationFunction::TanH;
    desc.m_A = 1.0f; // android nn does not support tanH parameters
    desc.m_B = 1.0f; // set to 1.0f for unity scaling

    return ConvertToActivation(operation, __func__, desc);
}

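// Converts the Android NN RESHAPE operation into an ArmNN reshape layer. Input 1 is a 1-D tensor
// holding the requested shape, which may contain special values such as -1; reshapePrepare()
// resolves it to a fully specified shape, which is then checked against the output operand's shape
// before the layer is added.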
template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertReshape(const neuralnetworks::V1_0::Operation& operation)
{
    const Operand* inputOperand = GetInputOperand(operation, 0);
    const Operand* requestedShapeOperand = GetInputOperand(operation, 1);
    const Operand* outputOperand = GetOutputOperand(operation, 0);

    if (inputOperand == nullptr
        || requestedShapeOperand == nullptr
        || outputOperand == nullptr)
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    if (requestedShapeOperand->dimensions.size() != 1)
    {
        return Fail("%s: Input 1 expected to be one-dimensional (found %i dimensions)",
                    __func__, requestedShapeOperand->dimensions.size());
    }

    std::vector<int32_t> targetDimensions;
    if (!GetTensorInt32Values(*requestedShapeOperand, targetDimensions))
    {
        return Fail("%s: Could not read values of input 1", __func__);
    }

    const Shape inputOperandShape = GetOperandShape(*inputOperand);

    Shape requestedShape;
    // targetDimensions may contain special values (e.g. -1). reshapePrepare() is an AndroidNN provided utility
    // function that resolves these values into a fully specified tensor shape.
    if (!reshapePrepare(inputOperandShape, targetDimensions.data(), targetDimensions.size(), &requestedShape))
    {
        return Fail("%s: Failed to resolve the requested shape", __func__);
    }

    const Shape outputOperandShape = GetOperandShape(*outputOperand);
    if (!SameShape(requestedShape, outputOperandShape))
    {
        return Fail("%s: Shape of output operand does not match resolved requested shape", __func__);
    }

    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    if (!IsLayerSupported(__func__,
                          armnn::IsReshapeSupported,
                          m_Compute,
                          input.GetTensorInfo()))
    {
        return false;
    }

    armnn::ReshapeDescriptor reshapeDescriptor;
    reshapeDescriptor.m_TargetShape = armnn::TensorShape(requestedShape.dimensions.size(),
                                                         requestedShape.dimensions.data());

    armnn::IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDescriptor);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer);
}

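// Converts the Android NN RESIZE_BILINEAR operation into an ArmNN resize-bilinear layer. Inputs 1
// and 2 supply the target output size, and the tensors are swizzled between NHWC and NCHW around
// the layer, as for the other image operations.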
template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertResizeBilinear(const neuralnetworks::V1_0::Operation& operation)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", __func__);
    }

    const Operand* output = GetOutputOperand(operation, 0);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const armnn::TensorInfo swizzledInputInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);

    if (!IsLayerSupported(__func__,
                          armnn::IsResizeBilinearSupported,
                          m_Compute,
                          swizzledInputInfo))
    {
        return false;
    }

    armnn::ResizeBilinearDescriptor desc;

    if (   !GetInputScalar(operation, 1, OperandType::INT32, desc.m_TargetHeight)
        || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_TargetWidth))
    {
        return Fail("%s: Operation has invalid inputs", __func__);
    }

    armnn::IConnectableLayer* layer = m_Network->AddResizeBilinearLayer(desc);
    assert(layer != nullptr);
    layer->GetOutputSlot(0).SetTensorInfo(swizzledOutputInfo);

    armnn::IConnectableLayer& outSwizzleLayer = SwizzleInDeswizzleOut(*m_Network, input, *layer);

    return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer);
}

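// Converts the Android NN LSTM operation into an ArmNN LSTM layer. The operation carries 23 inputs
// (data tensors, weights, biases and scalar parameters) and 4 outputs; the comments below list them
// in the order defined by the NNAPI. The optional weight groups select the CIFG, peephole and
// projection variants, and the corresponding flags in the LstmDescriptor are derived from which of
// those tensors were actually supplied.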
template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertLstm(const neuralnetworks::V1_0::Operation& operation)
{
    // Inputs:
    // 00: The input: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, input_size], where
    //     “batch_size” corresponds to the batching dimension, and “input_size” is the size of the input.
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0: input", __func__);
    }
    // 18: The output state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    LayerInputHandle outputStateIn = ConvertToLayerInputHandle(operation, 18);
    if (!outputStateIn.IsValid())
    {
        return Fail("%s: Could not read input 18: outputStateIn", __func__);
    }
    // 19: The cell state: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    LayerInputHandle cellStateIn = ConvertToLayerInputHandle(operation, 19);
    if (!cellStateIn.IsValid())
    {
        return Fail("%s: Could not read input 19: cellStateIn", __func__);
    }

    // Get the mandatory input tensors:
    // 02: The input-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToForgetWeightsPin = ConvertOperationInputToConstTensorPin(operation, 2);
    // 03: The input-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units, input_size].
    const ConstTensorPin inputToCellWeightsPin = ConvertOperationInputToConstTensorPin(operation, 3);
    // 04: The input-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size].
    const ConstTensorPin inputToOutputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 4);
    // 06: The recurrent-to-forget weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToForgetWeightsPin = ConvertOperationInputToConstTensorPin(operation, 6);
    // 07: The recurrent-to-cell weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToCellWeightsPin = ConvertOperationInputToConstTensorPin(operation, 7);
    // 08: The recurrent-to-output weights: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size].
    const ConstTensorPin recurrentToOutputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 8);
    // 13: The forget gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin forgetGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 13);
    // 14: The cell bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellBiasPin = ConvertOperationInputToConstTensorPin(operation, 14);
    // 15: The output gate bias: A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin outputGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 15);

    if (!inputToForgetWeightsPin.IsValid() ||
        !inputToCellWeightsPin.IsValid() ||
        !inputToOutputWeightsPin.IsValid() ||
        !recurrentToForgetWeightsPin.IsValid() ||
        !recurrentToCellWeightsPin.IsValid() ||
        !recurrentToOutputWeightsPin.IsValid() ||
        !forgetGateBiasPin.IsValid() ||
        !cellBiasPin.IsValid() ||
        !outputGateBiasPin.IsValid())
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the optional input tensors:
    // 01: The input-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, input_size], where “num_units” corresponds to the number of cell units.
    const ConstTensorPin inputToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 1);
    // 05: The recurrent-to-input weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [num_units, output_size], where “output_size” corresponds to either the number of cell units (i.e.,
    //     “num_units”), or the second dimension of the “projection_weights”, if defined.
    const ConstTensorPin recurrentToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 5);
    // 09: The cell-to-input weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToInputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 9);
    // 10: The cell-to-forget weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToForgetWeightsPin = ConvertOperationInputToConstTensorPin(operation, 10);
    // 11: The cell-to-output weights: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin cellToOutputWeightsPin = ConvertOperationInputToConstTensorPin(operation, 11);
    // 12: The input gate bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [num_units].
    const ConstTensorPin inputGateBiasPin = ConvertOperationInputToConstTensorPin(operation, 12);
    // 16: The projection weights: Optional. A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape
    //     [output_size, num_units].
    const ConstTensorPin projectionWeightsPin = ConvertOperationInputToConstTensorPin(operation, 16);
    // 17: The projection bias: Optional. A 1-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [output_size].
    const ConstTensorPin projectionBiasPin = ConvertOperationInputToConstTensorPin(operation, 17);

    if ((!inputToInputWeightsPin.IsValid() && !inputToInputWeightsPin.IsOptional()) ||
        (!recurrentToInputWeightsPin.IsValid() && !recurrentToInputWeightsPin.IsOptional()) ||
        (!cellToInputWeightsPin.IsValid() && !cellToInputWeightsPin.IsOptional()) ||
        (!cellToForgetWeightsPin.IsValid() && !cellToForgetWeightsPin.IsOptional()) ||
        (!cellToOutputWeightsPin.IsValid() && !cellToOutputWeightsPin.IsOptional()) ||
        (!inputGateBiasPin.IsValid() && !inputGateBiasPin.IsOptional()) ||
        (!projectionWeightsPin.IsValid() && !projectionWeightsPin.IsOptional()) ||
        (!projectionBiasPin.IsValid() && !projectionBiasPin.IsOptional()))
    {
        return Fail("%s: Operation has invalid tensor inputs", __func__);
    }

    // Get the mandatory input scalars (actually 1-D tensors of size 1):
    // 20: The activation function: A value indicating the activation function:
    //     0: None; 1: Relu; 3: Relu6; 4: Tanh; 6: Sigmoid.
    // 21: The clipping threshold: for the cell state, such that values are bound within [-cell_clip, cell_clip].
    //     If set to 0.0 then clipping is disabled.
    // 22: The clipping threshold: for the output from the projection layer, such that values are bound within
    //     [-proj_clip, proj_clip]. If set to 0.0 then clipping is disabled.
    ActivationFn activation;
    float cellClip;
    float projClip;
    if (!GetInputActivationFunctionFromTensor(operation, 20, activation) ||
        !GetInputScalar(operation, 21, OperandType::FLOAT32, cellClip) ||
        !GetInputScalar(operation, 22, OperandType::FLOAT32, projClip))
    {
        return Fail("%s: Operation has invalid scalar inputs", __func__);
    }

    // Outputs:
    // 00: The scratch buffer: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units * 4]
    //     with CIFG, or [batch_size, num_units * 3] without CIFG.
    const Operand* scratchBuffer = GetOutputOperand(operation, 0);
    if (!scratchBuffer)
    {
        return Fail("%s: Could not read output 0: scratchBuffer", __func__);
    }
    // 01: The output state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size].
    const Operand* outputStateOut = GetOutputOperand(operation, 1);
    if (!outputStateOut)
    {
        return Fail("%s: Could not read output 1: outputStateOut", __func__);
    }
    // 02: The cell state (out): A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, num_units].
    const Operand* cellStateOut = GetOutputOperand(operation, 2);
    if (!cellStateOut)
    {
        return Fail("%s: Could not read output 2: cellStateOut", __func__);
    }
    // 03: The output: A 2-D tensor of ANEURALNETWORKS_TENSOR_FLOAT32, of shape [batch_size, output_size]. This is
    //     effectively the same as the current “output state (out)” value.
    const Operand* output = GetOutputOperand(operation, 3);
    if (!output)
    {
        return Fail("%s: Could not read output 3: output", __func__);
    }

    // set the params structure for the AddLstmLayer call
    armnn::LstmInputParams params;
    params.m_InputToInputWeights = inputToInputWeightsPin.GetConstTensorPtr();
    params.m_InputToForgetWeights = inputToForgetWeightsPin.GetConstTensorPtr();
    params.m_InputToCellWeights = inputToCellWeightsPin.GetConstTensorPtr();
    params.m_InputToOutputWeights = inputToOutputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToInputWeights = recurrentToInputWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToForgetWeights = recurrentToForgetWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToCellWeights = recurrentToCellWeightsPin.GetConstTensorPtr();
    params.m_RecurrentToOutputWeights = recurrentToOutputWeightsPin.GetConstTensorPtr();
    params.m_CellToInputWeights = cellToInputWeightsPin.GetConstTensorPtr();
    params.m_CellToForgetWeights = cellToForgetWeightsPin.GetConstTensorPtr();
    params.m_CellToOutputWeights = cellToOutputWeightsPin.GetConstTensorPtr();
    params.m_InputGateBias = inputGateBiasPin.GetConstTensorPtr();
    params.m_ForgetGateBias = forgetGateBiasPin.GetConstTensorPtr();
    params.m_CellBias = cellBiasPin.GetConstTensorPtr();
    params.m_OutputGateBias = outputGateBiasPin.GetConstTensorPtr();
    params.m_ProjectionWeights = projectionWeightsPin.GetConstTensorPtr();
    params.m_ProjectionBias = projectionBiasPin.GetConstTensorPtr();

    // set the layer descriptor
    armnn::LstmDescriptor desc;
    desc.m_ActivationFunc = activation;
    desc.m_ClippingThresCell = cellClip;
    desc.m_ClippingThresProj = projClip;
    desc.m_CifgEnabled = (params.m_InputToInputWeights == nullptr ||
                          params.m_RecurrentToInputWeights == nullptr ||
                          params.m_InputGateBias == nullptr);
    desc.m_PeepholeEnabled = (params.m_CellToForgetWeights != nullptr ||
                              params.m_CellToOutputWeights != nullptr);
    desc.m_ProjectionEnabled = (params.m_ProjectionWeights != nullptr);

    // validate the optional input groups
    if (desc.m_CifgEnabled &&
        (params.m_InputToInputWeights != nullptr ||
         params.m_RecurrentToInputWeights != nullptr ||
         params.m_InputGateBias != nullptr))
    {
        return Fail("%s: All, or none, of input-to-input weights, recurrent-to-input weights,"
                    " and input gate bias must be provided", __func__);
    }

    if (!desc.m_ProjectionEnabled && params.m_ProjectionBias != nullptr)
    {
        return Fail("%s: projection bias should not be provided without projection weights", __func__);
    }

    if (desc.m_PeepholeEnabled &&
        (params.m_CellToForgetWeights == nullptr ||
         params.m_CellToOutputWeights == nullptr ||
         (!desc.m_CifgEnabled && params.m_CellToInputWeights == nullptr)))
    {
        return Fail("%s: All, or none, of cell-to-forget weights and cell-to-output weights must be provided"
                    " and, if CIFG is not enabled, cell-to-input weights must also be provided", __func__);
    }

    // Check if the layer is supported
    // Inputs
    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputStateInInfo = outputStateIn.GetTensorInfo();
    const armnn::TensorInfo& cellStateInInfo = cellStateIn.GetTensorInfo();

    // Outputs
    const armnn::TensorInfo& scratchBufferInfo = GetTensorInfoForOperand(*scratchBuffer);
    const armnn::TensorInfo& outputStateOutInfo = GetTensorInfoForOperand(*outputStateOut);
    const armnn::TensorInfo& cellStateOutInfo = GetTensorInfoForOperand(*cellStateOut);
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    // Basic parameters
    const armnn::TensorInfo& inputToForgetWeights = params.m_InputToForgetWeights->GetInfo();
    const armnn::TensorInfo& inputToCellWeights = params.m_InputToCellWeights->GetInfo();
    const armnn::TensorInfo& inputToOutputWeights = params.m_InputToOutputWeights->GetInfo();
    const armnn::TensorInfo& recurrentToForgetWeights = params.m_RecurrentToForgetWeights->GetInfo();
    const armnn::TensorInfo& recurrentToCellWeights = params.m_RecurrentToCellWeights->GetInfo();
    const armnn::TensorInfo& recurrentToOutputWeights = params.m_RecurrentToOutputWeights->GetInfo();
    const armnn::TensorInfo& forgetGateBias = params.m_ForgetGateBias->GetInfo();
    const armnn::TensorInfo& cellBias = params.m_CellBias->GetInfo();
    const armnn::TensorInfo& outputGateBias = params.m_OutputGateBias->GetInfo();

    // Optional parameters
    const armnn::TensorInfo* inputToInputWeights = nullptr;
    const armnn::TensorInfo* recurrentToInputWeights = nullptr;
    const armnn::TensorInfo* cellToInputWeights = nullptr;
    const armnn::TensorInfo* inputGateBias = nullptr;
    const armnn::TensorInfo* projectionWeights = nullptr;
    const armnn::TensorInfo* projectionBias = nullptr;
    const armnn::TensorInfo* cellToForgetWeights = nullptr;
    const armnn::TensorInfo* cellToOutputWeights = nullptr;

    if (!desc.m_CifgEnabled)
    {
        inputToInputWeights = &(params.m_InputToInputWeights->GetInfo());
        recurrentToInputWeights = &(params.m_RecurrentToInputWeights->GetInfo());
        if (params.m_CellToInputWeights != nullptr)
        {
            cellToInputWeights = &(params.m_CellToInputWeights->GetInfo());
        }
        inputGateBias = &(params.m_InputGateBias->GetInfo());
    }

    if (desc.m_ProjectionEnabled)
    {
        projectionWeights = &(params.m_ProjectionWeights->GetInfo());
        if (params.m_ProjectionBias != nullptr)
        {
            projectionBias = &(params.m_ProjectionBias->GetInfo());
        }
    }

    if (desc.m_PeepholeEnabled)
    {
        cellToForgetWeights = &(params.m_CellToForgetWeights->GetInfo());
        cellToOutputWeights = &(params.m_CellToOutputWeights->GetInfo());
    }

    if (!IsLayerSupported(__func__,
                          armnn::IsLstmSupported,
                          m_Compute,
                          inputInfo,
                          outputStateInInfo,
                          cellStateInInfo,
                          scratchBufferInfo,
                          outputStateOutInfo,
                          cellStateOutInfo,
                          outputInfo,
                          desc,
                          inputToForgetWeights,
                          inputToCellWeights,
                          inputToOutputWeights,
                          recurrentToForgetWeights,
                          recurrentToCellWeights,
                          recurrentToOutputWeights,
                          forgetGateBias,
                          cellBias,
                          outputGateBias,
                          inputToInputWeights,
                          recurrentToInputWeights,
                          cellToInputWeights,
                          inputGateBias,
                          projectionWeights,
                          projectionBias,
                          cellToForgetWeights,
                          cellToOutputWeights))
    {
        return false;
    }

    // Add the layer
    armnn::IConnectableLayer* layer = m_Network->AddLstmLayer(desc, params, "Lstm");

    input.Connect(layer->GetInputSlot(0));
    outputStateIn.Connect(layer->GetInputSlot(1));
    cellStateIn.Connect(layer->GetInputSlot(2));

    return (SetupAndTrackLayerOutputSlot(operation, 0, *layer, 0) &&
            SetupAndTrackLayerOutputSlot(operation, 1, *layer, 1) &&
            SetupAndTrackLayerOutputSlot(operation, 2, *layer, 2) &&
            SetupAndTrackLayerOutputSlot(operation, 3, *layer, 3));
}

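// Shared helper for the simple activation operations (logistic, the ReLU variants and tanh): reads
// input 0, queries the backend with IsActivationSupported and, if supported, adds a single ArmNN
// activation layer configured by the supplied descriptor.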
template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertToActivation(const neuralnetworks::V1_0::Operation& operation,
                                                               const char* operationName,
                                                               const armnn::ActivationDescriptor& activationDesc)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
    if (!input.IsValid())
    {
        return Fail("%s: Input 0 is invalid", operationName);
    }

    const Operand* outputOperand = GetOutputOperand(operation, 0);
    if (!outputOperand)
    {
        return false;
    }
    const armnn::TensorInfo outInfo = GetTensorInfoForOperand(*outputOperand);
    if (!IsLayerSupported(__func__,
                          armnn::IsActivationSupported,
                          m_Compute,
                          input.GetTensorInfo(),
                          outInfo,
                          activationDesc))
    {
        return false;
    }

    armnn::IConnectableLayer* layer = m_Network->AddActivationLayer(activationDesc);
    assert(layer != nullptr);
    input.Connect(layer->GetInputSlot(0));

    return SetupAndTrackLayerOutputSlot(operation, 0, *layer);
}

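// Shared helper for the pooling operations (average, L2 and max pooling). Two operand layouts are
// handled: the 7-operand form with an implicit padding scheme (resolved via CalcPadding) and the
// 10-operand form with explicit left/right/top/bottom padding. ArmNN does not accept a 1x1 pool, so
// that case is mapped to a trivial splitter layer instead. Tensors are swizzled NHWC <-> NCHW around
// the layer, and any fused activation is appended via ProcessActivation.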
template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::ConvertPooling2d(const neuralnetworks::V1_0::Operation& operation,
                                                            const char* operationName,
                                                            armnn::PoolingAlgorithm poolType)
{
    LayerInputHandle input = ConvertToLayerInputHandle(operation, 0);
    if (!input.IsValid())
    {
        return Fail("%s: Could not read input 0", operationName);
    }

    const Operand* output = GetOutputOperand(operation, 0);
    if (!output)
    {
        return Fail("%s: Could not read output 0", __func__);
    }

    const armnn::TensorInfo& inputInfo = input.GetTensorInfo();
    const armnn::TensorInfo& outputInfo = GetTensorInfoForOperand(*output);

    const armnn::TensorInfo swizzledInputInfo = armnnUtils::Permuted(inputInfo, NHWCToArmNN);
    const armnn::TensorInfo swizzledOutputInfo = armnnUtils::Permuted(outputInfo, NHWCToArmNN);

    armnn::Pooling2dDescriptor desc;
    desc.m_PoolType = poolType;
    desc.m_OutputShapeRounding = armnn::OutputShapeRounding::Floor;

    ActivationFn activation;

    if (operation.inputs.size() == 7)
    {
        // one input, 6 parameters (padding, stridex, stridey, width, height, activation type)
        android::nn::PaddingScheme scheme;

        if (   !GetInputPaddingScheme(operation, 1, scheme)
            || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_StrideX)
            || !GetInputScalar(operation, 3, OperandType::INT32, desc.m_StrideY)
            || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PoolWidth)
            || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_PoolHeight)
            || !GetInputActivationFunction(operation, 6, activation))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }

        const unsigned int inputWidth = swizzledInputInfo.GetShape()[3];
        const unsigned int inputHeight = swizzledInputInfo.GetShape()[2];

        CalcPadding(inputWidth, desc.m_PoolWidth, desc.m_StrideX, desc.m_PadLeft, desc.m_PadRight, scheme);
        CalcPadding(inputHeight, desc.m_PoolHeight, desc.m_StrideY, desc.m_PadTop, desc.m_PadBottom, scheme);
    }
    else
    {
        // one input, 9 parameters (padding l r t b, stridex, stridey, width, height, activation type)
        if (   !GetInputScalar(operation, 1, OperandType::INT32, desc.m_PadLeft)
            || !GetInputScalar(operation, 2, OperandType::INT32, desc.m_PadRight)
            || !GetInputScalar(operation, 3, OperandType::INT32, desc.m_PadTop)
            || !GetInputScalar(operation, 4, OperandType::INT32, desc.m_PadBottom)
            || !GetInputScalar(operation, 5, OperandType::INT32, desc.m_StrideX)
            || !GetInputScalar(operation, 6, OperandType::INT32, desc.m_StrideY)
            || !GetInputScalar(operation, 7, OperandType::INT32, desc.m_PoolWidth)
            || !GetInputScalar(operation, 8, OperandType::INT32, desc.m_PoolHeight)
            || !GetInputActivationFunction(operation, 9, activation))
        {
            return Fail("%s: Operation has invalid inputs", operationName);
        }
    }

    // ArmNN does not accept a pool size of 1, but the ArmNN driver is expected to cope.
    // This is mapped to a trivial splitter instead.
    armnn::IConnectableLayer* startLayer = nullptr;
    if (desc.m_PoolWidth != 1 || desc.m_PoolHeight != 1)
    {
        if (!IsLayerSupported(__func__,
                              armnn::IsPooling2dSupported,
                              m_Compute,
                              swizzledInputInfo,
                              swizzledOutputInfo,
                              desc))
        {
            return false;
        }

        startLayer = m_Network->AddPooling2dLayer(desc);
    }
    else
    {
        const unsigned int numDims = swizzledOutputInfo.GetNumDimensions();

        armnn::ViewsDescriptor viewsDesc(1, numDims);

        for (unsigned int i = 0; i < numDims; ++i)
        {
            viewsDesc.SetViewOriginCoord(0, i, 0);
            viewsDesc.SetViewSize(0, i, swizzledOutputInfo.GetShape()[i]);
        }

        if (!IsLayerSupported(__func__,
                              armnn::IsSplitterSupported,
                              m_Compute,
                              swizzledInputInfo,
                              viewsDesc))
        {
            return false;
        }

        startLayer = m_Network->AddSplitterLayer(viewsDesc);
    }

    armnn::IConnectableLayer* endLayer = ProcessActivation(swizzledOutputInfo, activation, startLayer);

    if (endLayer != nullptr)
    {
        armnn::IConnectableLayer& outSwizzleLayer = SwizzleInDeswizzleOut(*m_Network, input, *startLayer, *endLayer);
        return SetupAndTrackLayerOutputSlot(operation, 0, outSwizzleLayer);
    }
    else
    {
        return Fail("%s: ProcessActivation failed", operationName);
    }
}

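// Returns a read-only pointer to the data backing a constant operand: either an offset into
// model.operandValues (CONSTANT_COPY) or a location within one of the mapped memory pools
// (CONSTANT_REFERENCE). Any other lifetime (e.g. a model input) yields nullptr.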
template<typename HalVersion>
const void* ModelToINetworkConverter<HalVersion>::GetOperandValueReadOnlyAddress(const Operand& operand) const
{
    const void* valueStart = nullptr;

    switch (operand.lifetime)
    {
        case OperandLifeTime::CONSTANT_COPY:
        {
            // Constant found in model.operandValues
            valueStart = &m_Model.operandValues[operand.location.offset];
            break;
        }
        case OperandLifeTime::CONSTANT_REFERENCE:
        {
            // Constant specified via a Memory object
            valueStart = GetMemoryFromPool(operand.location, m_MemPools);
            break;
        }
        default:
        {
            // Unsupported/invalid (e.g. can't get value of an input to the model)
            Fail("%s: unsupported/invalid operand lifetime: %s",
                 __func__, toString(operand.lifetime).c_str());
            valueStart = nullptr;
        }
    }

    return valueStart;
}

template<typename HalVersion>
template<typename HalOperation>
const Operand* ModelToINetworkConverter<HalVersion>::GetInputOperand(const HalOperation& operation,
                                                                     uint32_t inputIndex) const
{
    if (inputIndex >= operation.inputs.size())
    {
        Fail("%s: invalid input index: %i out of %i", __func__, inputIndex, operation.inputs.size());
        return nullptr;
    }

    assert(operation.inputs[inputIndex] < m_Model.operands.size()); // Model should have been validated beforehand
    return &m_Model.operands[operation.inputs[inputIndex]];
}

template<typename HalVersion>
template<typename HalOperation>
const Operand* ModelToINetworkConverter<HalVersion>::GetOutputOperand(const HalOperation& operation,
                                                                      uint32_t outputIndex) const
{
    if (outputIndex >= operation.outputs.size())
    {
        Fail("%s: invalid output index: %i out of %i", __func__, outputIndex, operation.outputs.size());
        return nullptr;
    }

    assert(operation.outputs[outputIndex] < m_Model.operands.size()); // Model should have been validated beforehand
    return &m_Model.operands[operation.outputs[outputIndex]];
}

template<typename HalVersion>
template<typename HalOperation, typename T>
bool ModelToINetworkConverter<HalVersion>::GetInputScalar(const HalOperation& operation,
                                                          uint32_t inputIndex,
                                                          OperandType type,
                                                          T& outValue) const
{
    const Operand* operand = GetInputOperand(operation, inputIndex);
    if (!operand)
    {
        return Fail("%s: invalid input operand at index %i", __func__, inputIndex);
    }

    if (operand->type != type)
    {
        return Fail("%s: unexpected operand type: %s (should be %s)",
                    __func__, toString(operand->type).c_str(), toString(type).c_str());
    }

    if (operand->location.length != sizeof(T))
    {
        return Fail("%s: incorrect operand location length: %i (should be %i)",
                    __func__, operand->location.length, sizeof(T));
    }

    const void* valueAddress = GetOperandValueReadOnlyAddress(*operand);
    if (!valueAddress)
    {
        return Fail("%s: failed to get address for operand", __func__);
    }

    outValue = *(static_cast<const T*>(valueAddress));
    return true;
}

template<typename HalVersion>
template<typename HalOperation>
bool ModelToINetworkConverter<HalVersion>::GetInputInt32(const HalOperation& operation,
                                                         uint32_t inputIndex,
                                                         int32_t& outValue) const
{
    return GetInputScalar(operation, inputIndex, OperandType::INT32, outValue);
}

template<typename HalVersion>
template<typename HalOperation>
bool ModelToINetworkConverter<HalVersion>::GetInputFloat32(const HalOperation& operation,
                                                           uint32_t inputIndex,
                                                           float& outValue) const
{
    return GetInputScalar(operation, inputIndex, OperandType::FLOAT32, outValue);
}

template<typename HalVersion>
template<typename HalOperation>
bool ModelToINetworkConverter<HalVersion>::GetInputActivationFunctionImpl(const HalOperation& operation,
                                                                          uint32_t inputIndex,
                                                                          OperandType type,
                                                                          ActivationFn& outActivationFunction) const
{
    if (type != OperandType::INT32 && type != OperandType::TENSOR_INT32)
    {
        return Fail("%s: unexpected operand type: %s (should be %s or %s)",
                    __func__,
                    toString(type).c_str(),
                    toString(OperandType::INT32).c_str(),
                    toString(OperandType::TENSOR_INT32).c_str());
    }

    int32_t activationFunctionAsInt;
    if (!GetInputScalar(operation, inputIndex, type, activationFunctionAsInt))
    {
        return Fail("%s: failed to get activation input value", __func__);
    }
    outActivationFunction = static_cast<ActivationFn>(activationFunctionAsInt);
    return true;
}

template<typename HalVersion>
template<typename HalOperation>
bool ModelToINetworkConverter<HalVersion>::GetInputActivationFunction(const HalOperation& operation,
                                                                      uint32_t inputIndex,
                                                                      ActivationFn& outActivationFunction) const
{
    return GetInputActivationFunctionImpl(operation, inputIndex, OperandType::INT32, outActivationFunction);
}

template<typename HalVersion>
template<typename HalOperation>
bool ModelToINetworkConverter<HalVersion>::GetInputActivationFunctionFromTensor(
    const HalOperation& operation,
    uint32_t inputIndex,
    ActivationFn& outActivationFunction) const
{
    // This only accepts a 1-D tensor of size 1
    return GetInputActivationFunctionImpl(operation, inputIndex, OperandType::INT32, outActivationFunction);
}

template<typename HalVersion>
template<typename HalOperation>
bool ModelToINetworkConverter<HalVersion>::GetOptionalInputActivation(const HalOperation& operation,
                                                                      uint32_t inputIndex,
                                                                      ActivationFn& activationFunction) const
{
    if (operation.inputs.size() <= inputIndex)
    {
        activationFunction = ActivationFn::kActivationNone;
    }
    else
    {
        if (!GetInputActivationFunction(operation, inputIndex, activationFunction))
        {
            return Fail("%s: Operation has invalid inputs", __func__);
        }
    }
    return true;
}

template<typename HalVersion>
template<typename HalOperation>
bool ModelToINetworkConverter<HalVersion>::GetInputPaddingScheme(const HalOperation& operation,
                                                                 uint32_t inputIndex,
                                                                 PaddingScheme& outPaddingScheme) const
{
    int32_t paddingSchemeAsInt;
    if (!GetInputInt32(operation, inputIndex, paddingSchemeAsInt))
    {
        return Fail("%s: failed to get padding scheme input value", __func__);
    }

    outPaddingScheme = static_cast<android::nn::PaddingScheme>(paddingSchemeAsInt);
    return true;
}

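// Resolves an operation input operand into a LayerInputHandle that can be connected to a layer's
// input slot. Temporary variables and model inputs are looked up in m_OutputSlotForOperand (the
// output slot of the layer that produces them, if it was converted), while constant operands are
// materialised as ArmNN constant layers. Any other lifetime yields an invalid handle.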
template<typename HalVersion>
template<typename HalOperation>
LayerInputHandle ModelToINetworkConverter<HalVersion>::ConvertToLayerInputHandle(const HalOperation& operation,
                                                                                 uint32_t inputIndex)
{
    const Operand* operand = GetInputOperand(operation, inputIndex);
    if (!operand)
    {
        Fail("%s: failed to get input operand %i", __func__, inputIndex);
        return LayerInputHandle();
    }

    if (!IsOperandTypeSupportedForTensors(operand->type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand->type).c_str());
        return LayerInputHandle();
    }

    armnn::TensorInfo operandTensorInfo = GetTensorInfoForOperand(*operand);

    switch (operand->lifetime)
    {
        case OperandLifeTime::TEMPORARY_VARIABLE: // intentional fallthrough
        case OperandLifeTime::MODEL_INPUT:
        {
            // The tensor is either an operand internal to the model, or a model input.
            // It can be associated with an ArmNN output slot for an existing layer.

            // m_OutputSlotForOperand[...] can be nullptr if the previous layer could not be converted
            const uint32_t operandIndex = operation.inputs[inputIndex];
            return LayerInputHandle(true, m_OutputSlotForOperand[operandIndex], operandTensorInfo);
            break;
        }
        case OperandLifeTime::CONSTANT_COPY:
        case OperandLifeTime::CONSTANT_REFERENCE:
        {
            // The tensor has an already known constant value, and can be converted into an ArmNN Constant layer.
            ConstTensorPin tensorPin = ConvertOperandToConstTensorPin(*operand);
            if (tensorPin.IsValid())
            {
                if (!IsLayerSupported(__func__,
                                      armnn::IsConstantSupported,
                                      m_Compute,
                                      tensorPin.GetConstTensor().GetInfo()))
                {
                    return LayerInputHandle();
                }

                armnn::IConnectableLayer* constantLayer = m_Network->AddConstantLayer(tensorPin.GetConstTensor());
                armnn::IOutputSlot& outputSlot = constantLayer->GetOutputSlot(0);
                outputSlot.SetTensorInfo(tensorPin.GetConstTensor().GetInfo());

                return LayerInputHandle(true, &outputSlot, operandTensorInfo);
            }
            else
            {
                Fail("%s: invalid operand tensor", __func__);
                return LayerInputHandle();
            }
            break;
        }
        default:
        {
            // Unsupported lifetime for an input tensor
            Fail("%s: unsupported lifetime for input tensor: %s",
                 __func__, toString(operand->lifetime).c_str());
            return LayerInputHandle();
        }
    }
}

template<typename HalVersion>
template<typename HalOperation>
ConstTensorPin ModelToINetworkConverter<HalVersion>::ConvertOperationInputToConstTensorPin(
    const HalOperation& operation,
    uint32_t inputIndex,
    const armnn::PermutationVector& dimensionMappings,
    const armnn::TensorShape* overrideTensorShape,
    bool optional)
{
    const Operand* operand = GetInputOperand(operation, inputIndex);
    if (!operand)
    {
        Fail("%s: failed to get input operand: index=%u", __func__, inputIndex);
        return ConstTensorPin();
    }
    return ConvertOperandToConstTensorPin(*operand, dimensionMappings, overrideTensorShape, optional);
}

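// Wraps a constant operand (CONSTANT_COPY or CONSTANT_REFERENCE) in a ConstTensorPin. A missing
// optional operand is returned as an invalid pin marked as optional, so callers can distinguish
// "not supplied" from a genuine error. The tensor shape can be overridden and a permutation of the
// dimensions applied where the caller requires it.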
template<typename HalVersion>
ConstTensorPin ModelToINetworkConverter<HalVersion>::ConvertOperandToConstTensorPin(const Operand& operand,
    const armnn::PermutationVector& dimensionMappings, const armnn::TensorShape* overrideTensorShape, bool optional)
{
    if (!IsOperandTypeSupportedForTensors(operand.type))
    {
        Fail("%s: unsupported operand type for tensor %s", __func__, toString(operand.type).c_str());
        return ConstTensorPin();
    }

    if (operand.lifetime != OperandLifeTime::CONSTANT_COPY && operand.lifetime != OperandLifeTime::CONSTANT_REFERENCE)
    {
        Fail("%s: invalid operand lifetime: %s", __func__, toString(operand.lifetime).c_str());
        return ConstTensorPin();
    }

    const void* const valueStart = GetOperandValueReadOnlyAddress(operand);
    if (!valueStart)
    {
        if (optional)
        {
            // optional tensor with no values is not really an error; return it as invalid, but marked as optional
            return ConstTensorPin(true);
        }
        // mandatory tensor with no values
        Fail("%s: failed to get operand address", __func__);
        return ConstTensorPin();
    }

    armnn::TensorInfo tensorInfo = GetTensorInfoForOperand(operand);
    if (overrideTensorShape != nullptr)
    {
        tensorInfo.SetShape(*overrideTensorShape);
    }
    return ConstTensorPin(tensorInfo, valueStart, operand.location.length, dimensionMappings);
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::GetTensorInt32Values(const Operand& operand,
                                                                std::vector<int32_t>& outValues) const
{
    if (operand.type != OperandType::TENSOR_INT32)
    {
        return Fail("%s: invalid operand type: %s", __func__, toString(operand.type).c_str());
    }

    const void* startAddress = GetOperandValueReadOnlyAddress(operand);
    if (!startAddress)
    {
        return Fail("%s: failed to get operand address", __func__);
    }

    // Check number of bytes is sensible
    const uint32_t numBytes = operand.location.length;
    if (numBytes % sizeof(int32_t) != 0)
    {
        return Fail("%s: invalid number of bytes: %i, expected to be a multiple of %i",
                    __func__, numBytes, sizeof(int32_t));
    }

    outValues.resize(numBytes / sizeof(int32_t));
    memcpy(outValues.data(), startAddress, numBytes);
    return true;
}

// Creates an ArmNN activation layer and connects it to the given layer, if the
// passed-in AndroidNN activation function requires one.
// @return The end layer of the sequence of layers built for the given AndroidNN
// activation function or nullptr if an error occurred (e.g. unsupported activation).
// Note that the end layer matches the input layer if no activation is required
// (the sequence of layers has length 1).
template<typename HalVersion>
armnn::IConnectableLayer* ModelToINetworkConverter<HalVersion>::ProcessActivation(const armnn::TensorInfo& tensorInfo,
    ActivationFn activation, armnn::IConnectableLayer* prevLayer)
{
    assert(prevLayer->GetNumOutputSlots() == 1);

    prevLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);

    armnn::IConnectableLayer* activationLayer = prevLayer;

    if (activation != ActivationFn::kActivationNone)
    {
        armnn::ActivationDescriptor activationDesc;
        switch (activation)
        {
            case ActivationFn::kActivationRelu:
            {
                activationDesc.m_Function = armnn::ActivationFunction::ReLu;
                break;
            }
            case ActivationFn::kActivationRelu1:
            {
                activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
                activationDesc.m_A = 1.0f;
                activationDesc.m_B = -1.0f;
                break;
            }
            case ActivationFn::kActivationRelu6:
            {
                activationDesc.m_Function = armnn::ActivationFunction::BoundedReLu;
                activationDesc.m_A = 6.0f;
                break;
            }
            case ActivationFn::kActivationSigmoid:
            {
                activationDesc.m_Function = armnn::ActivationFunction::Sigmoid;
                break;
            }
            case ActivationFn::kActivationTanh:
            {
                activationDesc.m_Function = armnn::ActivationFunction::TanH;
                activationDesc.m_A = 1.0f;
                activationDesc.m_B = 1.0f;
                break;
            }
            default:
            {
                Fail("%s: Invalid activation enum value %i", __func__, activation);
                return nullptr;
            }
        }

        if (!IsLayerSupported(__func__, armnn::IsActivationSupported, m_Compute,
                              prevLayer->GetOutputSlot(0).GetTensorInfo(), tensorInfo, activationDesc))
        {
            return nullptr;
        }

        activationLayer = m_Network->AddActivationLayer(activationDesc);

        prevLayer->GetOutputSlot(0).Connect(activationLayer->GetInputSlot(0));
        activationLayer->GetOutputSlot(0).SetTensorInfo(tensorInfo);
    }

    return activationLayer;
}

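// Associates operation output 'operationOutputIndex' with the given layer output slot: the slot is
// recorded in m_OutputSlotForOperand so that later operations can connect to it, and its TensorInfo
// is set from the output operand. The two-parameter overload below forwards with the same index
// used for both the operation output and the layer output slot.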
template<typename HalVersion>
template<typename HalOperation>
bool ModelToINetworkConverter<HalVersion>::SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                                                        uint32_t operationOutputIndex,
                                                                        armnn::IConnectableLayer& layer,
                                                                        uint32_t layerOutputIndex)
{
    const Operand* outputOperand = GetOutputOperand(operation, operationOutputIndex);

    if ((outputOperand == nullptr) || (operationOutputIndex >= layer.GetNumOutputSlots()))
    {
        return false;
    }

    armnn::IOutputSlot& outputSlot = layer.GetOutputSlot(layerOutputIndex);

    const uint32_t operandIndex = operation.outputs[operationOutputIndex];
    m_OutputSlotForOperand[operandIndex] = &outputSlot;

    outputSlot.SetTensorInfo(GetTensorInfoForOperand(*outputOperand));

    return true;
}

template<typename HalVersion>
template<typename HalOperation>
bool ModelToINetworkConverter<HalVersion>::SetupAndTrackLayerOutputSlot(const HalOperation& operation,
                                                                        uint32_t outputIndex,
                                                                        armnn::IConnectableLayer& layer)
{
    return SetupAndTrackLayerOutputSlot(operation, outputIndex, layer, outputIndex);
}

template<typename HalVersion>
bool ModelToINetworkConverter<HalVersion>::IsOperationSupported(uint32_t operationIndex) const
{
    std::map<uint32_t, bool>::const_iterator it = m_OperationSupported.find(operationIndex);
    assert(it != m_OperationSupported.end());
    return it->second;
}

template class ModelToINetworkConverter<HalVersion_1_0>;

#if defined(ARMNN_ANDROID_NN_V1_1) // Using ::android::hardware::neuralnetworks::V1_1.
template class ModelToINetworkConverter<HalVersion_1_1>;
#endif

} // armnn_driver