IVGCVSW-7785 BugFix: ExpandDims not working when batch!=1
* This commit partially fixes the ticket.
In ToTensorInfo() we assume batch is 1 when it is unknown.
We call OutputTensorInfoFromInputs() to amend this assumption.
However, this does not work for reshape layer.
Therefore, we have to calculate the output shape in the ParseExpandDims().
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: Iedc32a44b4ec0d8b7d2cc0b08f38f0776402f7bd
diff --git a/src/armnnTfLiteParser/TfLiteParser.cpp b/src/armnnTfLiteParser/TfLiteParser.cpp
index 8b2d7a2..6354a1e 100644
--- a/src/armnnTfLiteParser/TfLiteParser.cpp
+++ b/src/armnnTfLiteParser/TfLiteParser.cpp
@@ -1509,57 +1509,57 @@
armnn::TensorInfo inputTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 0);
armnn::TensorInfo outputTensorInfo = ToTensorInfo(outputs[0], true);
-
CheckMatchingQuantization(inputTensorInfo, outputTensorInfo, layerName, "Input 0", "Output 0");
+ armnn::TensorInfo axisTensorInfo = InputTensorInfo(subgraphIndex, operatorIndex, 1);
+
+ BufferRawPtr axisBufferPtr = GetBuffer(m_Model, inputs[1]->buffer);
+ if (axisBufferPtr == nullptr)
+ {
+ throw ParseException(fmt::format("{}: Operation has invalid inputs. Failed to read axis.",
+ CHECK_LOCATION().AsString()));
+ }
+
+ std::vector<int32_t> axisData(axisTensorInfo.GetNumElements());
+ ::memcpy(axisData.data(), axisBufferPtr->data.data(), axisTensorInfo.GetNumBytes());
+ int32_t axis = axisData[0];
+
+ auto inputRank = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions());
+ auto outputRank = inputRank + 1;
+ if((axis < -1 * outputRank) || (outputRank <= axis))
+ {
+ throw ParseException(fmt::format("{}: Axis {} is not within [-{}, {}) range.",
+ CHECK_LOCATION().AsString(), axis, outputRank, outputRank));
+ }
+
+ axis = axis < 0 ? (axis + outputRank) : axis;
+
+ std::vector<unsigned int> shape(static_cast<unsigned int>(outputRank));
+ unsigned int inputShapeIndex = 0;
+ for (unsigned int i = 0; i < static_cast<unsigned int>(outputRank); ++i)
+ {
+ if (i == static_cast<unsigned int>(axis))
+ {
+ shape[i] = 1;
+ }
+ else
+ {
+ shape[i] = inputTensorInfo.GetShape()[inputShapeIndex];
+ ++inputShapeIndex;
+ }
+ }
+
ReshapeDescriptor reshapeDesc;
-
- if (outputTensorInfo.GetShape().AreAllDimensionsSpecified())
- {
- reshapeDesc.m_TargetShape = outputTensorInfo.GetShape();
- }
- else
- {
- int32_t axis = inputs[1]->shape[0];
-
- int32_t inputDimSize = static_cast<int32_t>(inputTensorInfo.GetShape().GetNumDimensions());
-
- if (axis > inputDimSize || axis < 0 - (inputDimSize + 1))
- {
- throw ParseException("axis must be in range [0 - (inputDimSize + 1), inputDimSize] inclusive");
- }
-
- if(axis < 0)
- {
- axis = inputDimSize + axis + 1;
- }
-
- std::vector<unsigned int> shape(static_cast<unsigned int>(inputDimSize) + 1);
- unsigned int inputShapeIndex = 0;
- for (unsigned int i = 0; i < static_cast<unsigned int>(inputDimSize + 1); ++i)
- {
- if (i == static_cast<unsigned int>(axis))
- {
- shape[i] = 1;
- }
- else
- {
- shape[i] = inputTensorInfo.GetShape()[inputShapeIndex];
- ++inputShapeIndex;
- }
- }
-
- reshapeDesc.m_TargetShape = TensorShape(static_cast<unsigned int>(inputDimSize + 1), shape.data());
- }
+ reshapeDesc.m_TargetShape = TensorShape(static_cast<unsigned int>(outputRank), shape.data());
+ outputTensorInfo.SetShape(reshapeDesc.m_TargetShape);
IConnectableLayer* layer = m_Network->AddReshapeLayer(reshapeDesc, layerName.c_str());
ARMNN_ASSERT(layer != nullptr);
-
- reshapeDesc.m_TargetShape = OutputTensorInfoFromInputs(subgraphIndex, operatorIndex, layer, 0, {0}).GetShape();
- outputTensorInfo.SetShape(reshapeDesc.m_TargetShape);
-
layer->GetOutputSlot(0).SetTensorInfo(outputTensorInfo);
+ auto outputTensorIds = GetOutputTensorIds(m_Model, subgraphIndex, operatorIndex);
+ m_TensorInfos[outputTensorIds[0]] = outputTensorInfo;
+
auto inputTensorIndexes = AsUnsignedVector(GetInputTensorIds(m_Model, subgraphIndex, operatorIndex));
RegisterInputSlots(subgraphIndex, operatorIndex, layer, {inputTensorIndexes[0]});
diff --git a/src/armnnTfLiteParser/test/ExpandDims.cpp b/src/armnnTfLiteParser/test/ExpandDims.cpp
index a9f021f..b43c3c6 100644
--- a/src/armnnTfLiteParser/test/ExpandDims.cpp
+++ b/src/armnnTfLiteParser/test/ExpandDims.cpp
@@ -1,12 +1,10 @@
//
-// Copyright © 2021 Arm Ltd. All rights reserved.
+// Copyright © 2021, 2023 Arm Ltd. All rights reserved.
// SPDX-License-Identifier: MIT
//
#include "ParserFlatbuffersFixture.hpp"
-#include "../TfLiteParser.hpp"
#include <string>
-#include <iostream>
TEST_SUITE("TensorflowLiteParser_ExpandDims")
{
@@ -47,15 +45,12 @@
}
},
{
- "shape": [ 1 ],
- "type": "UINT8",
+ "shape": [],
+ "type": "INT32",
"buffer": 2,
"name": "expand_dims",
"quantization": {
- "min": [ 0.0 ],
- "max": [ 255.0 ],
- "scale": [ 1.0 ],
- "zero_point": [ 0 ],
+ "details_type": "NONE",
}
},
],
@@ -77,7 +72,7 @@
]
}
)";
- SetupSingleInputSingleOutput("inputTensor", "outputTensor");
+ Setup();
}
};