IVGCVSW-2092 Port LSTMCell::Eval to ArmNN
* Ported Google's LSTM implementation to RefLstmFloat32Workload
* Fixed the scratch buffer handling throughout, as the documentation
  states an incorrect scratch buffer size
* Updated IsLstmSupported
* Added the unit tests
!android-nn-driver:127
Change-Id: I5577b7e39ca52df1a7f102a9b437df6aa99520b6
diff --git a/src/backends/reference/workloads/RefLstmFloat32Workload.hpp b/src/backends/reference/workloads/RefLstmFloat32Workload.hpp
index 1f634d3..a2dead8 100644
--- a/src/backends/reference/workloads/RefLstmFloat32Workload.hpp
+++ b/src/backends/reference/workloads/RefLstmFloat32Workload.hpp
@@ -5,6 +5,8 @@
#pragma once
+#include <armnn/TypesUtils.hpp>
+
#include <backendsCommon/Workload.hpp>
#include <backendsCommon/WorkloadData.hpp>
@@ -14,8 +16,28 @@
class RefLstmFloat32Workload : public Float32Workload<LstmQueueDescriptor>
{
public:
- using Float32Workload<LstmQueueDescriptor>::Float32Workload;
+ explicit RefLstmFloat32Workload(const LstmQueueDescriptor& descriptor, const WorkloadInfo& info);
+
virtual void Execute() const override;
+
+private:
+ std::unique_ptr<ScopedCpuTensorHandle> m_InputToInputWeightsTensor;
+ std::unique_ptr<ScopedCpuTensorHandle> m_InputToForgetWeightsTensor;
+ std::unique_ptr<ScopedCpuTensorHandle> m_InputToCellWeightsTensor;
+ std::unique_ptr<ScopedCpuTensorHandle> m_InputToOutputWeightsTensor;
+ std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToInputWeightsTensor;
+ std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToForgetWeightsTensor;
+ std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToCellWeightsTensor;
+ std::unique_ptr<ScopedCpuTensorHandle> m_RecurrentToOutputWeightsTensor;
+ std::unique_ptr<ScopedCpuTensorHandle> m_CellToInputWeightsTensor;
+ std::unique_ptr<ScopedCpuTensorHandle> m_CellToForgetWeightsTensor;
+ std::unique_ptr<ScopedCpuTensorHandle> m_CellToOutputWeightsTensor;
+ std::unique_ptr<ScopedCpuTensorHandle> m_InputGateBiasTensor;
+ std::unique_ptr<ScopedCpuTensorHandle> m_ForgetGateBiasTensor;
+ std::unique_ptr<ScopedCpuTensorHandle> m_CellBiasTensor;
+ std::unique_ptr<ScopedCpuTensorHandle> m_OutputGateBiasTensor;
+ std::unique_ptr<ScopedCpuTensorHandle> m_ProjectionWeightsTensor;
+ std::unique_ptr<ScopedCpuTensorHandle> m_ProjectionBiasTensor;
};
} //namespace armnn