IVGCVSW-6494 Add CpuAcc Batch MatMul Workload Fp32
Signed-off-by: Teresa Charlin <teresa.charlinreyes@arm.com>
Change-Id: I2def6995f81d33e68f1ea45d8d19a1e6294049b1
diff --git a/src/backends/backendsCommon/WorkloadUtils.cpp b/src/backends/backendsCommon/WorkloadUtils.cpp
index b045530..3aea667 100644
--- a/src/backends/backendsCommon/WorkloadUtils.cpp
+++ b/src/backends/backendsCommon/WorkloadUtils.cpp
@@ -341,4 +341,24 @@
return keyIndices;
}
+armnn::PermutationVector GeneratePermutationVectorOnLastTwoDimensions(unsigned int rank)
+{
+ armnn::PermutationVector permutationVector{};
+ switch (rank)
+ {
+ case 2:
+ permutationVector = {1U, 0U};
+ break;
+ case 3:
+ permutationVector = {0U, 2U, 1U};
+ break;
+ case 4:
+ permutationVector = {0U, 1U, 3U, 2U};
+ break;
+ default:
+ throw Exception("Invalid number of dimensions.");
+ }
+ return permutationVector;
+}
+
} // namespace armnn
diff --git a/src/backends/backendsCommon/WorkloadUtils.hpp b/src/backends/backendsCommon/WorkloadUtils.hpp
index 0e54873..3d8d927 100644
--- a/src/backends/backendsCommon/WorkloadUtils.hpp
+++ b/src/backends/backendsCommon/WorkloadUtils.hpp
@@ -258,4 +258,10 @@
/// \return - A map with names and values for N, ND, K, W, C
std::map<std::string, unsigned int> CalculateGatherNdKeyIndices(TensorInfo inputInfo0, TensorInfo inputInfo1);
+/// Generates a permutation vector of size rank that permutes the two rightmost dimensions
+///
+/// \param rank - Tensor rank, i.e. number of dimensions in the tensors
+/// \return - A permutation vector that swaps the last two dimensions
+armnn::PermutationVector GeneratePermutationVectorOnLastTwoDimensions(unsigned int rank);
+
} //namespace armnn
diff --git a/src/backends/backendsCommon/test/layerTests/BatchMatMulTestImpl.cpp b/src/backends/backendsCommon/test/layerTests/BatchMatMulTestImpl.cpp
index 6fcc35a..74bd97f 100644
--- a/src/backends/backendsCommon/test/layerTests/BatchMatMulTestImpl.cpp
+++ b/src/backends/backendsCommon/test/layerTests/BatchMatMulTestImpl.cpp
@@ -71,20 +71,9 @@
{
auto descriptor = armnn::BatchMatMulDescriptor(); // Arbitrary layout with no transpose/adjointing
- float qScale = 0.0f;
+ float qScale = 1.0f;
int32_t qOffset = 0;
- switch(ArmnnType)
- {
- case armnn::DataType::QAsymmS8:
- case armnn::DataType::QAsymmU8:
- case armnn::DataType::QSymmS16:
- qScale = 1.0f;
- break;
- default:
- break;
- }
-
armnn::TensorInfo inputXInfo({2,2}, ArmnnType, qScale, qOffset);
armnn::TensorInfo inputYInfo({2,2}, ArmnnType, qScale, qOffset);
armnn::TensorInfo outputInfo({2,2}, ArmnnType, qScale, qOffset);
@@ -160,20 +149,9 @@
{
auto descriptor = armnn::BatchMatMulDescriptor(); // Arbitrary layout with no transpose/adjointing
- float qScale = 0.0f;
+ float qScale = 1.0f;
int32_t qOffset = 0;
- switch(ArmnnType)
- {
- case armnn::DataType::QAsymmS8:
- case armnn::DataType::QAsymmU8:
- case armnn::DataType::QSymmS16:
- qScale = 1.0f;
- break;
- default:
- break;
- }
-
armnn::TensorInfo inputXInfo({1,2,2}, ArmnnType, qScale, qOffset);
armnn::TensorInfo inputYInfo({1,2,2}, ArmnnType, qScale, qOffset);
armnn::TensorInfo outputInfo({1,2,2}, ArmnnType, qScale, qOffset);
@@ -249,20 +227,9 @@
{
auto descriptor = armnn::BatchMatMulDescriptor(); // Default arbitrary layout is treated the same as NCHW
- float qScale = 0.0f;
+ float qScale = 1.0f;
int32_t qOffset = 0;
- switch(ArmnnType)
- {
- case armnn::DataType::QAsymmS8:
- case armnn::DataType::QAsymmU8:
- case armnn::DataType::QSymmS16:
- qScale = 1.0f;
- break;
- default:
- break;
- }
-
armnn::TensorInfo inputXInfo({1,1,2,2}, ArmnnType, qScale, qOffset);
armnn::TensorInfo inputYInfo({1,1,2,2}, ArmnnType, qScale, qOffset);
armnn::TensorInfo outputInfo({1,1,2,2}, ArmnnType, qScale, qOffset);
@@ -343,20 +310,9 @@
armnn::DataLayout::NHWC,
armnn::DataLayout::NHWC);
- float qScale = 0.0f;
+ float qScale = 1.0f;
int32_t qOffset = 0;
- switch(ArmnnType)
- {
- case armnn::DataType::QAsymmS8:
- case armnn::DataType::QAsymmU8:
- case armnn::DataType::QSymmS16:
- qScale = 1.0f;
- break;
- default:
- break;
- }
-
armnn::TensorInfo inputXInfo({1,2,2,1}, ArmnnType, qScale, qOffset);
armnn::TensorInfo inputYInfo({1,2,2,1}, ArmnnType, qScale, qOffset);
armnn::TensorInfo outputInfo({1,2,2,1}, ArmnnType, qScale, qOffset);
@@ -432,20 +388,9 @@
{
auto descriptor = armnn::BatchMatMulDescriptor(); // Arbitrary layout with no transpose/adjointing
- float qScale = 0.0f;
+ float qScale = 1.0f;
int32_t qOffset = 0;
- switch(ArmnnType)
- {
- case armnn::DataType::QAsymmS8:
- case armnn::DataType::QAsymmU8:
- case armnn::DataType::QSymmS16:
- qScale = 1.0f;
- break;
- default:
- break;
- }
-
armnn::TensorInfo inputXInfo({2,2,2}, ArmnnType, qScale, qOffset);
armnn::TensorInfo inputYInfo({2,2,2}, ArmnnType, qScale, qOffset);
armnn::TensorInfo outputInfo({2,2,2}, ArmnnType, qScale, qOffset);
@@ -530,20 +475,9 @@
{
auto descriptor = armnn::BatchMatMulDescriptor(); // Arbitrary layout with no transpose/adjointing
- float qScale = 0.0f;
+ float qScale = 1.0f;
int32_t qOffset = 0;
- switch(ArmnnType)
- {
- case armnn::DataType::QAsymmS8:
- case armnn::DataType::QAsymmU8:
- case armnn::DataType::QSymmS16:
- qScale = 1.0f;
- break;
- default:
- break;
- }
-
armnn::TensorInfo inputXInfo({2,2,2}, ArmnnType, qScale, qOffset);
armnn::TensorInfo inputYInfo({1,2,2}, ArmnnType, qScale, qOffset);
armnn::TensorInfo outputInfo({2,2,2}, ArmnnType, qScale, qOffset);
@@ -625,20 +559,9 @@
{
auto descriptor = armnn::BatchMatMulDescriptor(); // Arbitrary layout with no transpose/adjointing
- float qScale = 0.0f;
+ float qScale = 1.0f;
int32_t qOffset = 0;
- switch(ArmnnType)
- {
- case armnn::DataType::QAsymmS8:
- case armnn::DataType::QAsymmU8:
- case armnn::DataType::QSymmS16:
- qScale = 1.0f;
- break;
- default:
- break;
- }
-
armnn::TensorInfo inputXInfo({2,2,2}, ArmnnType, qScale, qOffset);
armnn::TensorInfo inputYInfo({2,2}, ArmnnType, qScale, qOffset);
armnn::TensorInfo outputInfo({2,2,2}, ArmnnType, qScale, qOffset);
@@ -725,20 +648,9 @@
armnn::DataLayout::NDHWC,
armnn::DataLayout::NHWC);
- float qScale = 0.0f;
+ float qScale = 1.0f;
int32_t qOffset = 0;
- switch(ArmnnType)
- {
- case armnn::DataType::QAsymmS8:
- case armnn::DataType::QAsymmU8:
- case armnn::DataType::QSymmS16:
- qScale = 1.0f;
- break;
- default:
- break;
- }
-
armnn::TensorInfo inputXInfo({1,1,2,2,2}, ArmnnType, qScale, qOffset);
armnn::TensorInfo inputYInfo({1,2,2,2}, ArmnnType, qScale, qOffset);
armnn::TensorInfo outputInfo({1,1,2,2,2}, ArmnnType, qScale, qOffset);
@@ -823,20 +735,9 @@
{
auto descriptor = armnn::BatchMatMulDescriptor(); // Arbitrary layout with no transpose/adjointing
- float qScale = 0.0f;
+ float qScale = 1.0f;
int32_t qOffset = 0;
- switch(ArmnnType)
- {
- case armnn::DataType::QAsymmS8:
- case armnn::DataType::QAsymmU8:
- case armnn::DataType::QSymmS16:
- qScale = 1.0f;
- break;
- default:
- break;
- }
-
armnn::TensorInfo inputXInfo({1,1}, ArmnnType, qScale, qOffset);
armnn::TensorInfo inputYInfo({1,1}, ArmnnType, qScale, qOffset);
armnn::TensorInfo outputInfo({1,1}, ArmnnType, qScale, qOffset);
@@ -909,20 +810,9 @@
{
auto descriptor = armnn::BatchMatMulDescriptor(); // Arbitrary layout with no transpose/adjointing
- float qScale = 0.0f;
+ float qScale = 1.0f;
int32_t qOffset = 0;
- switch(ArmnnType)
- {
- case armnn::DataType::QAsymmS8:
- case armnn::DataType::QAsymmU8:
- case armnn::DataType::QSymmS16:
- qScale = 1.0f;
- break;
- default:
- break;
- }
-
armnn::TensorInfo inputXInfo({2,5,3}, ArmnnType, qScale, qOffset);
armnn::TensorInfo inputYInfo({2,3,4}, ArmnnType, qScale, qOffset);
armnn::TensorInfo outputInfo({2,5,4}, ArmnnType, qScale, qOffset);
@@ -1024,20 +914,9 @@
false,
false);
- float qScale = 0.0f;
+ float qScale = 1.0f;
int32_t qOffset = 0;
- switch(ArmnnType)
- {
- case armnn::DataType::QAsymmS8:
- case armnn::DataType::QAsymmU8:
- case armnn::DataType::QSymmS16:
- qScale = 1.0f;
- break;
- default:
- break;
- }
-
armnn::TensorInfo inputXInfo({2,3}, ArmnnType, qScale, qOffset);
armnn::TensorInfo inputYInfo({2,3}, ArmnnType, qScale, qOffset);
armnn::TensorInfo outputInfo({3,3}, ArmnnType, qScale, qOffset);
@@ -1117,20 +996,9 @@
true,
false);
- float qScale = 0.0f;
+ float qScale = 1.0f;
int32_t qOffset = 0;
- switch(ArmnnType)
- {
- case armnn::DataType::QAsymmS8:
- case armnn::DataType::QAsymmU8:
- case armnn::DataType::QSymmS16:
- qScale = 1.0f;
- break;
- default:
- break;
- }
-
armnn::TensorInfo inputXInfo({3,3}, ArmnnType, qScale, qOffset);
armnn::TensorInfo inputYInfo({3,3}, ArmnnType, qScale, qOffset);
armnn::TensorInfo outputInfo({3,3}, ArmnnType, qScale, qOffset);
@@ -1227,20 +1095,9 @@
armnn::DataLayout::NHWC,
armnn::DataLayout::NHWC);
- float qScale = 0.0f;
+ float qScale = 1.0f;
int32_t qOffset = 0;
- switch(ArmnnType)
- {
- case armnn::DataType::QAsymmS8:
- case armnn::DataType::QAsymmU8:
- case armnn::DataType::QSymmS16:
- qScale = 1.0f;
- break;
- default:
- break;
- }
-
armnn::TensorInfo inputXInfo({1,4,4,2}, ArmnnType, qScale, qOffset);
armnn::TensorInfo inputYInfo({2,2,4,1}, ArmnnType, qScale, qOffset);
armnn::TensorInfo outputInfo({2,4,2,2}, ArmnnType, qScale, qOffset);