Fix typo errors from ticket IVGCVSW-6420

  * Typo errors from ticket 'Constant flag in tensor info is not set correctly'
    that were not fixed earlier due to the code freeze deadline.

Signed-off-by: Cathal Corbett <cathal.corbett@arm.com>
Change-Id: Id80ba60647d1970115a8cf200f0d71e4fada9b30
diff --git a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_tensor.i b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_tensor.i
index d8ef37d..892b8e4 100644
--- a/python/pyarmnn/src/pyarmnn/swig/modules/armnn_tensor.i
+++ b/python/pyarmnn/src/pyarmnn/swig/modules/armnn_tensor.i
@@ -237,7 +237,7 @@
     %feature("docstring",
     "
     Sets the tensor info to be constant.
-    
+
     Args:
         IsConstant (bool): Sets tensor info to constant.
 
diff --git a/src/armnnSerializer/test/LstmSerializationTests.cpp b/src/armnnSerializer/test/LstmSerializationTests.cpp
index 3178bc9..d8f8967 100644
--- a/src/armnnSerializer/test/LstmSerializationTests.cpp
+++ b/src/armnnSerializer/test/LstmSerializationTests.cpp
@@ -1454,7 +1454,8 @@
     armnn::TensorInfo inputToInputWeightsInfo(inputToInputWeightsShape,
                                               armnn::DataType::QAsymmU8,
                                               weightsScale,
-                                              weightsOffset, true);
+                                              weightsOffset,
+                                              true);
     armnn::ConstTensor inputToInputWeights(inputToInputWeightsInfo, inputToInputWeightsData);
 
     armnn::TensorShape inputToForgetWeightsShape = {4, 2};
@@ -1462,7 +1463,8 @@
     armnn::TensorInfo inputToForgetWeightsInfo(inputToForgetWeightsShape,
                                                armnn::DataType::QAsymmU8,
                                                weightsScale,
-                                               weightsOffset, true);
+                                               weightsOffset,
+                                               true);
     armnn::ConstTensor inputToForgetWeights(inputToForgetWeightsInfo, inputToForgetWeightsData);
 
     armnn::TensorShape inputToCellWeightsShape = {4, 2};
@@ -1470,7 +1472,8 @@
     armnn::TensorInfo inputToCellWeightsInfo(inputToCellWeightsShape,
                                              armnn::DataType::QAsymmU8,
                                              weightsScale,
-                                             weightsOffset, true);
+                                             weightsOffset,
+                                             true);
     armnn::ConstTensor inputToCellWeights(inputToCellWeightsInfo, inputToCellWeightsData);
 
     armnn::TensorShape inputToOutputWeightsShape = {4, 2};
@@ -1478,7 +1481,8 @@
     armnn::TensorInfo inputToOutputWeightsInfo(inputToOutputWeightsShape,
                                                armnn::DataType::QAsymmU8,
                                                weightsScale,
-                                               weightsOffset, true);
+                                               weightsOffset,
+                                               true);
     armnn::ConstTensor inputToOutputWeights(inputToOutputWeightsInfo, inputToOutputWeightsData);
 
     // The shape of recurrent weight data is {outputSize, outputSize} = {4, 4}
@@ -1487,7 +1491,8 @@
     armnn::TensorInfo recurrentToInputWeightsInfo(recurrentToInputWeightsShape,
                                                   armnn::DataType::QAsymmU8,
                                                   weightsScale,
-                                                  weightsOffset, true);
+                                                  weightsOffset,
+                                                  true);
     armnn::ConstTensor recurrentToInputWeights(recurrentToInputWeightsInfo, recurrentToInputWeightsData);
 
     armnn::TensorShape recurrentToForgetWeightsShape = {4, 4};
@@ -1495,7 +1500,8 @@
     armnn::TensorInfo recurrentToForgetWeightsInfo(recurrentToForgetWeightsShape,
                                                    armnn::DataType::QAsymmU8,
                                                    weightsScale,
-                                                   weightsOffset, true);
+                                                   weightsOffset,
+                                                   true);
     armnn::ConstTensor recurrentToForgetWeights(recurrentToForgetWeightsInfo, recurrentToForgetWeightsData);
 
     armnn::TensorShape recurrentToCellWeightsShape = {4, 4};
@@ -1503,7 +1509,8 @@
     armnn::TensorInfo recurrentToCellWeightsInfo(recurrentToCellWeightsShape,
                                                  armnn::DataType::QAsymmU8,
                                                  weightsScale,
-                                                 weightsOffset, true);
+                                                 weightsOffset,
+                                                 true);
     armnn::ConstTensor recurrentToCellWeights(recurrentToCellWeightsInfo, recurrentToCellWeightsData);
 
     armnn::TensorShape recurrentToOutputWeightsShape = {4, 4};
@@ -1511,7 +1518,8 @@
     armnn::TensorInfo recurrentToOutputWeightsInfo(recurrentToOutputWeightsShape,
                                                    armnn::DataType::QAsymmU8,
                                                    weightsScale,
-                                                   weightsOffset, true);
+                                                   weightsOffset,
+                                                   true);
     armnn::ConstTensor recurrentToOutputWeights(recurrentToOutputWeightsInfo, recurrentToOutputWeightsData);
 
     // The shape of bias data is {outputSize} = {4}
@@ -1520,7 +1528,8 @@
     armnn::TensorInfo inputGateBiasInfo(inputGateBiasShape,
                                         armnn::DataType::Signed32,
                                         biasScale,
-                                        biasOffset, true);
+                                        biasOffset,
+                                        true);
     armnn::ConstTensor inputGateBias(inputGateBiasInfo, inputGateBiasData);
 
     armnn::TensorShape forgetGateBiasShape = {4};
@@ -1528,7 +1537,8 @@
     armnn::TensorInfo forgetGateBiasInfo(forgetGateBiasShape,
                                          armnn::DataType::Signed32,
                                          biasScale,
-                                         biasOffset, true);
+                                         biasOffset,
+                                         true);
     armnn::ConstTensor forgetGateBias(forgetGateBiasInfo, forgetGateBiasData);
 
     armnn::TensorShape cellBiasShape = {4};
@@ -1536,7 +1546,8 @@
     armnn::TensorInfo cellBiasInfo(cellBiasShape,
                                    armnn::DataType::Signed32,
                                    biasScale,
-                                   biasOffset, true);
+                                   biasOffset,
+                                   true);
     armnn::ConstTensor cellBias(cellBiasInfo, cellBiasData);
 
     armnn::TensorShape outputGateBiasShape = {4};
@@ -1544,7 +1555,8 @@
     armnn::TensorInfo outputGateBiasInfo(outputGateBiasShape,
                                          armnn::DataType::Signed32,
                                          biasScale,
-                                         biasOffset, true);
+                                         biasOffset,
+                                         true);
     armnn::ConstTensor outputGateBias(outputGateBiasInfo, outputGateBiasData);
 
     armnn::QuantizedLstmInputParams params;
@@ -1655,12 +1667,14 @@
     armnn::TensorInfo inputWeightsInfo({numUnits, inputSize},
                                        armnn::DataType::QSymmS8,
                                        weightsScale,
-                                       weightsOffset, true);
+                                       weightsOffset,
+                                       true);
 
     armnn::TensorInfo recurrentWeightsInfo({numUnits, outputSize},
                                            armnn::DataType::QSymmS8,
                                            weightsScale,
-                                           weightsOffset, true);
+                                           weightsOffset,
+                                           true);
 
     armnn::TensorInfo biasInfo({numUnits}, armnn::DataType::Signed32, biasScale, biasOffset, true);
 
@@ -1816,22 +1830,26 @@
     armnn::TensorInfo inputWeightsInfo({numUnits, inputSize},
                                        armnn::DataType::QSymmS8,
                                        weightsScale,
-                                       weightsOffset, true);
+                                       weightsOffset,
+                                       true);
 
     armnn::TensorInfo recurrentWeightsInfo({numUnits, outputSize},
                                            armnn::DataType::QSymmS8,
                                            weightsScale,
-                                           weightsOffset, true);
+                                           weightsOffset,
+                                           true);
 
     armnn::TensorInfo biasInfo({numUnits},
                                armnn::DataType::Signed32,
                                biasScale,
-                               biasOffset, true);
+                               biasOffset,
+                               true);
 
     armnn::TensorInfo layerNormWeightsInfo({numUnits},
                                            armnn::DataType::QSymmS16,
                                            layerNormScale,
-                                           layerNormOffset, true);
+                                           layerNormOffset,
+                                           true);
 
     // Mandatory params
     std::vector<int8_t> inputToForgetWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
@@ -2003,32 +2021,38 @@
     armnn::TensorInfo inputWeightsInfo({numUnits, inputSize},
                                        armnn::DataType::QSymmS8,
                                        weightsScale,
-                                       weightsOffset, true);
+                                       weightsOffset,
+                                       true);
 
     armnn::TensorInfo recurrentWeightsInfo({numUnits, outputSize},
                                            armnn::DataType::QSymmS8,
                                            weightsScale,
-                                           weightsOffset, true);
+                                           weightsOffset,
+                                           true);
 
     armnn::TensorInfo biasInfo({numUnits},
                                armnn::DataType::Signed32,
                                biasScale,
-                               biasOffset, true);
+                               biasOffset,
+                               true);
 
     armnn::TensorInfo peepholeWeightsInfo({numUnits},
                                           armnn::DataType::QSymmS16,
                                           weightsScale,
-                                          weightsOffset, true);
+                                          weightsOffset,
+                                          true);
 
     armnn::TensorInfo layerNormWeightsInfo({numUnits},
                                            armnn::DataType::QSymmS16,
                                            layerNormScale,
-                                           layerNormOffset, true);
+                                           layerNormOffset,
+                                           true);
 
     armnn::TensorInfo projectionWeightsInfo({outputSize, numUnits},
                                              armnn::DataType::QSymmS8,
                                              weightsScale,
-                                             weightsOffset, true);
+                                             weightsOffset,
+                                             true);
 
     // Mandatory params
     std::vector<int8_t> inputToForgetWeightsData = GenerateRandomData<int8_t>(inputWeightsInfo.GetNumElements());
diff --git a/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp
index e2147fc..7c87f35 100644
--- a/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/QLstmEndToEndTestImpl.cpp
@@ -80,22 +80,26 @@
     const armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
                                              armnn::DataType::QSymmS8,
                                              weightsScale,
-                                             weightsOffset, true);
+                                             weightsOffset,
+                                             true);
 
     const armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
                                                  armnn::DataType::QSymmS8,
                                                  weightsScale,
-                                                 weightsOffset, true);
+                                                 weightsOffset,
+                                                 true);
 
     const armnn::TensorInfo biasInfo({outputSize},
                                      armnn::DataType::Signed32,
                                      biasScale,
-                                     biasOffset, true);
+                                     biasOffset,
+                                     true);
 
     const armnn::TensorInfo layerNormWeightsInfo({numUnits},
                                                  armnn::DataType::QSymmS16,
                                                  layerNormScale,
-                                                 layerNormOffset, true);
+                                                 layerNormOffset,
+                                                 true);
 
     // Mandatory params
     const std::vector<int8_t> inputToForgetWeightsVector =
@@ -179,17 +183,20 @@
     const armnn::TensorInfo inputInfo({numBatches , inputSize},
                                       armnn::DataType::QAsymmS8,
                                       inputScale,
-                                      inputOffset, true);
+                                      inputOffset,
+                                      true);
 
     const armnn::TensorInfo cellStateInfo({numBatches , numUnits},
                                           armnn::DataType::QSymmS16,
                                           cellStateScale,
-                                          cellStateOffset, true);
+                                          cellStateOffset,
+                                          true);
 
     const armnn::TensorInfo outputStateInfo({numBatches , outputSize},
                                             armnn::DataType::QAsymmS8,
                                             outputScale,
-                                            outputOffset, true);
+                                            outputOffset,
+                                            true);
 
     // Input tensor data
     const std::vector<int8_t> inputVector         = {90, 102, 13, 26, 38, 102, 13, 26, 51, 64};
diff --git a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
index f178951..d481404 100644
--- a/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
+++ b/src/backends/backendsCommon/test/QuantizedLstmEndToEndTestImpl.cpp
@@ -46,12 +46,14 @@
     armnn::TensorInfo inputWeightsInfo({outputSize, inputSize},
                                        armnn::DataType::QAsymmU8,
                                        weightsScale,
-                                       weightsOffset, true);
+                                       weightsOffset,
+                                       true);
 
     armnn::TensorInfo recurrentWeightsInfo({outputSize, outputSize},
                                            armnn::DataType::QAsymmU8,
                                            weightsScale,
-                                           weightsOffset, true);
+                                           weightsOffset,
+                                           true);
 
     armnn::TensorInfo biasInfo({outputSize}, armnn::DataType::Signed32, biasScale, biasOffset, true);
 
diff --git a/src/backends/cl/test/ClFallbackTests.cpp b/src/backends/cl/test/ClFallbackTests.cpp
index 7cd05d1..cfe2b36 100644
--- a/src/backends/cl/test/ClFallbackTests.cpp
+++ b/src/backends/cl/test/ClFallbackTests.cpp
@@ -540,7 +540,6 @@
 
     std::vector<float> expectedOutput{ 11.0f, -1.0f };
 
-
     InputTensors inputTensors
     {
         { 0, armnn::ConstTensor(runtime->GetInputTensorInfo(netId, 0), inputData0.data()) },
diff --git a/src/backends/neon/test/NeonTensorHandleTests.cpp b/src/backends/neon/test/NeonTensorHandleTests.cpp
index 2e6854a..685a074 100644
--- a/src/backends/neon/test/NeonTensorHandleTests.cpp
+++ b/src/backends/neon/test/NeonTensorHandleTests.cpp
@@ -422,7 +422,7 @@
         TensorInfo inputTensorInfo = runtime->GetInputTensorInfo(networkIdentifier, it.first);
         inputTensorInfo.SetConstant(true);
         inputTensors.push_back({it.first,
-                              ConstTensor(inputTensorInfo, it.second.data())});
+                                ConstTensor(inputTensorInfo, it.second.data())});
     }
     OutputTensors outputTensors;
     outputTensors.reserve(expectedOutputData.size());