IVGCVSW-7968 Update Doxygen documentation for 24.02

Signed-off-by: Nikhil Raj <nikhil.raj@arm.com>
Change-Id: I8c1e45815c6cf78f80d6f2c0959a5bbba6cd11de
diff --git a/24.02/classarmnn_1_1_neon_unidirectional_sequence_lstm_float_workload.html b/24.02/classarmnn_1_1_neon_unidirectional_sequence_lstm_float_workload.html
new file mode 100644
index 0000000..aae7852
--- /dev/null
+++ b/24.02/classarmnn_1_1_neon_unidirectional_sequence_lstm_float_workload.html
@@ -0,0 +1,733 @@
+<!-- HTML header for doxygen 1.8.17-->
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "https://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<meta name="generator" content="Doxygen 1.8.17"/>
+<meta name="viewport" content="width=device-width, initial-scale=1"/>
+<title>Arm NN: NeonUnidirectionalSequenceLstmFloatWorkload Class Reference</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="navtree.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="resize.js"></script>
+<script type="text/javascript" src="navtreedata.js"></script>
+<script type="text/javascript" src="navtree.js"></script>
+<link href="search/search.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="search/searchdata.js"></script>
+<script type="text/javascript" src="search/search.js"></script>
+<script type="text/x-mathjax-config">
+  MathJax.Hub.Config({
+    extensions: ["tex2jax.js"],
+    jax: ["input/TeX","output/HTML-CSS"],
+});
+</script>
+<script type="text/javascript" async="async" src="http://cdn.mathjax.org/mathjax/latest/MathJax.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+<link href="customdoxygen.css" rel="stylesheet" type="text/css"/>
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <img alt="ArmNN" src="Arm_NN_horizontal_blue.png" style="max-width: 15rem; margin-top: .5rem; margin-left 13px"/>
+  <td id="projectalign" style="padding-left: 0.9em;">
+   <div id="projectname">
+   &#160;<span id="projectnumber">24.02</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.17 -->
+<script type="text/javascript">
+/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
+var searchBox = new SearchBox("searchBox", "search",false,'Search');
+/* @license-end */
+</script>
+<script type="text/javascript" src="menudata.js"></script>
+<script type="text/javascript" src="menu.js"></script>
+<script type="text/javascript">
+/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
+$(function() {
+  initMenu('',true,false,'search.php','Search');
+  $(document).ready(function() { init_search(); });
+});
+/* @license-end */</script>
+<div id="main-nav"></div>
+</div><!-- top -->
+<div id="side-nav" class="ui-resizable side-nav-resizable">
+  <div id="nav-tree">
+    <div id="nav-tree-contents">
+      <div id="nav-sync" class="sync"></div>
+    </div>
+  </div>
+  <div id="splitbar" style="-moz-user-select:none;" 
+       class="ui-resizable-handle">
+  </div>
+</div>
+<script type="text/javascript">
+/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
+$(document).ready(function(){initNavTree('classarmnn_1_1_neon_unidirectional_sequence_lstm_float_workload.html',''); initResizable(); });
+/* @license-end */
+</script>
+<div id="doc-content">
+<!-- window showing the filter options -->
+<div id="MSearchSelectWindow"
+     onmouseover="return searchBox.OnSearchSelectShow()"
+     onmouseout="return searchBox.OnSearchSelectHide()"
+     onkeydown="return searchBox.OnSearchSelectKey(event)">
+</div>
+
+<!-- iframe showing the search results (closed by default) -->
+<div id="MSearchResultsWindow">
+<iframe src="javascript:void(0)" frameborder="0" 
+        name="MSearchResults" id="MSearchResults">
+</iframe>
+</div>
+
+<div class="header">
+  <div class="summary">
+<a href="#pub-methods">Public Member Functions</a> &#124;
+<a href="classarmnn_1_1_neon_unidirectional_sequence_lstm_float_workload-members.html">List of all members</a>  </div>
+  <div class="headertitle">
+<div class="title">NeonUnidirectionalSequenceLstmFloatWorkload Class Reference</div>  </div>
+</div><!--header-->
+<div class="contents">
+
+<p><code>#include &lt;<a class="el" href="_neon_unidirectional_sequence_lstm_float_workload_8hpp_source.html">NeonUnidirectionalSequenceLstmFloatWorkload.hpp</a>&gt;</code></p>
+<div class="dynheader">
+Inheritance diagram for NeonUnidirectionalSequenceLstmFloatWorkload:</div>
+<div class="dyncontent">
+<div class="center"><iframe scrolling="no" frameborder="0" src="classarmnn_1_1_neon_unidirectional_sequence_lstm_float_workload__inherit__graph.svg" width="266" height="291"><p><b>This browser is not able to show SVG: try Firefox, Chrome, Safari, or Opera instead.</b></p></iframe>
+</div>
+<center><span class="legend">[<a target="top" href="graph_legend.html">legend</a>]</span></center></div>
+<div class="dynheader">
+Collaboration diagram for NeonUnidirectionalSequenceLstmFloatWorkload:</div>
+<div class="dyncontent">
+<div class="center"><iframe scrolling="no" frameborder="0" src="classarmnn_1_1_neon_unidirectional_sequence_lstm_float_workload__coll__graph.svg" width="271" height="308"><p><b>This browser is not able to show SVG: try Firefox, Chrome, Safari, or Opera instead.</b></p></iframe>
+</div>
+<center><span class="legend">[<a target="top" href="graph_legend.html">legend</a>]</span></center></div>
+<table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2 class="groupheader"><a name="pub-methods"></a>
+Public Member Functions</h2></td></tr>
+<tr class="memitem:acceeccc54cc2871ec72da81e48e7ef1c"><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarmnn_1_1_neon_unidirectional_sequence_lstm_float_workload.html#acceeccc54cc2871ec72da81e48e7ef1c">NeonUnidirectionalSequenceLstmFloatWorkload</a> (const <a class="el" href="structarmnn_1_1_unidirectional_sequence_lstm_queue_descriptor.html">UnidirectionalSequenceLstmQueueDescriptor</a> &amp;descriptor, const <a class="el" href="structarmnn_1_1_workload_info.html">WorkloadInfo</a> &amp;info)</td></tr>
+<tr class="separator:acceeccc54cc2871ec72da81e48e7ef1c"><td class="memSeparator" colspan="2">&#160;</td></tr>
+<tr class="memitem:ae071e8822437c78baea75c3aef3a263a"><td class="memItemLeft" align="right" valign="top">virtual void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarmnn_1_1_neon_unidirectional_sequence_lstm_float_workload.html#ae071e8822437c78baea75c3aef3a263a">Execute</a> () const override</td></tr>
+<tr class="separator:ae071e8822437c78baea75c3aef3a263a"><td class="memSeparator" colspan="2">&#160;</td></tr>
+<tr class="inherit_header pub_methods_classarmnn_1_1_typed_workload"><td colspan="2" onclick="javascript:toggleInherit('pub_methods_classarmnn_1_1_typed_workload')"><img src="closed.png" alt="-"/>&#160;Public Member Functions inherited from <a class="el" href="classarmnn_1_1_typed_workload.html">TypedWorkload&lt; QueueDescriptor, DataTypes &gt;</a></td></tr>
+<tr class="memitem:aa617fec9998f9650150a758b68498865 inherit pub_methods_classarmnn_1_1_typed_workload"><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarmnn_1_1_typed_workload.html#aa617fec9998f9650150a758b68498865">TypedWorkload</a> (const <a class="el" href="structarmnn_1_1_queue_descriptor.html">QueueDescriptor</a> &amp;descriptor, const <a class="el" href="structarmnn_1_1_workload_info.html">WorkloadInfo</a> &amp;info)</td></tr>
+<tr class="separator:aa617fec9998f9650150a758b68498865 inherit pub_methods_classarmnn_1_1_typed_workload"><td class="memSeparator" colspan="2">&#160;</td></tr>
+<tr class="inherit_header pub_methods_classarmnn_1_1_base_workload"><td colspan="2" onclick="javascript:toggleInherit('pub_methods_classarmnn_1_1_base_workload')"><img src="closed.png" alt="-"/>&#160;Public Member Functions inherited from <a class="el" href="classarmnn_1_1_base_workload.html">BaseWorkload&lt; QueueDescriptor &gt;</a></td></tr>
+<tr class="memitem:af2ef420610280dc5a661cd3d4836d5a2 inherit pub_methods_classarmnn_1_1_base_workload"><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarmnn_1_1_base_workload.html#af2ef420610280dc5a661cd3d4836d5a2">BaseWorkload</a> (const <a class="el" href="structarmnn_1_1_queue_descriptor.html">QueueDescriptor</a> &amp;descriptor, const <a class="el" href="structarmnn_1_1_workload_info.html">WorkloadInfo</a> &amp;info)</td></tr>
+<tr class="separator:af2ef420610280dc5a661cd3d4836d5a2 inherit pub_methods_classarmnn_1_1_base_workload"><td class="memSeparator" colspan="2">&#160;</td></tr>
+<tr class="memitem:a163c04b26f9804eafc598a047128f887 inherit pub_methods_classarmnn_1_1_base_workload"><td class="memItemLeft" align="right" valign="top">virtual const std::string &amp;&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarmnn_1_1_base_workload.html#a163c04b26f9804eafc598a047128f887">GetName</a> () const override</td></tr>
+<tr class="separator:a163c04b26f9804eafc598a047128f887 inherit pub_methods_classarmnn_1_1_base_workload"><td class="memSeparator" colspan="2">&#160;</td></tr>
+<tr class="memitem:ae1c43d025fc90382d7aff7a500937e2c inherit pub_methods_classarmnn_1_1_base_workload"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarmnn_1_1_base_workload.html#ae1c43d025fc90382d7aff7a500937e2c">ExecuteAsync</a> (<a class="el" href="structarmnn_1_1experimental_1_1_execution_data.html">ExecutionData</a> &amp;executionData) override</td></tr>
+<tr class="separator:ae1c43d025fc90382d7aff7a500937e2c inherit pub_methods_classarmnn_1_1_base_workload"><td class="memSeparator" colspan="2">&#160;</td></tr>
+<tr class="memitem:a81627f96ba06d76e147f7d392a8117ed inherit pub_methods_classarmnn_1_1_base_workload"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarmnn_1_1_base_workload.html#a81627f96ba06d76e147f7d392a8117ed">PostAllocationConfigure</a> () override</td></tr>
+<tr class="separator:a81627f96ba06d76e147f7d392a8117ed inherit pub_methods_classarmnn_1_1_base_workload"><td class="memSeparator" colspan="2">&#160;</td></tr>
+<tr class="memitem:a965cf380c7adf547d0f14b3f6d1da249 inherit pub_methods_classarmnn_1_1_base_workload"><td class="memItemLeft" align="right" valign="top">const <a class="el" href="structarmnn_1_1_queue_descriptor.html">QueueDescriptor</a> &amp;&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarmnn_1_1_base_workload.html#a965cf380c7adf547d0f14b3f6d1da249">GetData</a> () const</td></tr>
+<tr class="separator:a965cf380c7adf547d0f14b3f6d1da249 inherit pub_methods_classarmnn_1_1_base_workload"><td class="memSeparator" colspan="2">&#160;</td></tr>
+<tr class="memitem:aaff95a48875d8fb4a616352906660ca9 inherit pub_methods_classarmnn_1_1_base_workload"><td class="memItemLeft" align="right" valign="top">arm::pipe::ProfilingGuid&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarmnn_1_1_base_workload.html#aaff95a48875d8fb4a616352906660ca9">GetGuid</a> () const final</td></tr>
+<tr class="separator:aaff95a48875d8fb4a616352906660ca9 inherit pub_methods_classarmnn_1_1_base_workload"><td class="memSeparator" colspan="2">&#160;</td></tr>
+<tr class="memitem:a0c326c344355d8423217e9431781f2ee inherit pub_methods_classarmnn_1_1_base_workload"><td class="memItemLeft" align="right" valign="top">virtual bool&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarmnn_1_1_base_workload.html#a0c326c344355d8423217e9431781f2ee">SupportsTensorHandleReplacement</a> () const override</td></tr>
+<tr class="separator:a0c326c344355d8423217e9431781f2ee inherit pub_methods_classarmnn_1_1_base_workload"><td class="memSeparator" colspan="2">&#160;</td></tr>
+<tr class="memitem:ab0a67f8179ddb997dda0070a6661f837 inherit pub_methods_classarmnn_1_1_base_workload"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarmnn_1_1_base_workload.html#ab0a67f8179ddb997dda0070a6661f837">ReplaceInputTensorHandle</a> (<a class="el" href="classarmnn_1_1_i_tensor_handle.html">ITensorHandle</a> *tensorHandle, unsigned int slot) override</td></tr>
+<tr class="separator:ab0a67f8179ddb997dda0070a6661f837 inherit pub_methods_classarmnn_1_1_base_workload"><td class="memSeparator" colspan="2">&#160;</td></tr>
+<tr class="memitem:acc08590544f05c641d21c724aedf26dd inherit pub_methods_classarmnn_1_1_base_workload"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarmnn_1_1_base_workload.html#acc08590544f05c641d21c724aedf26dd">ReplaceOutputTensorHandle</a> (<a class="el" href="classarmnn_1_1_i_tensor_handle.html">ITensorHandle</a> *tensorHandle, unsigned int slot) override</td></tr>
+<tr class="separator:acc08590544f05c641d21c724aedf26dd inherit pub_methods_classarmnn_1_1_base_workload"><td class="memSeparator" colspan="2">&#160;</td></tr>
+<tr class="inherit_header pub_methods_classarmnn_1_1_i_workload"><td colspan="2" onclick="javascript:toggleInherit('pub_methods_classarmnn_1_1_i_workload')"><img src="closed.png" alt="-"/>&#160;Public Member Functions inherited from <a class="el" href="classarmnn_1_1_i_workload.html">IWorkload</a></td></tr>
+<tr class="memitem:a69c83c02ae8de866bc7a46c49e69c1ba inherit pub_methods_classarmnn_1_1_i_workload"><td class="memItemLeft" align="right" valign="top">virtual&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarmnn_1_1_i_workload.html#a69c83c02ae8de866bc7a46c49e69c1ba">~IWorkload</a> ()</td></tr>
+<tr class="separator:a69c83c02ae8de866bc7a46c49e69c1ba inherit pub_methods_classarmnn_1_1_i_workload"><td class="memSeparator" colspan="2">&#160;</td></tr>
+<tr class="memitem:ab81312bd5e64cbae2803de9f243bdb32 inherit pub_methods_classarmnn_1_1_i_workload"><td class="memItemLeft" align="right" valign="top">virtual void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarmnn_1_1_i_workload.html#ab81312bd5e64cbae2803de9f243bdb32">RegisterDebugCallback</a> (const <a class="el" href="namespacearmnn.html#a15f3ad9b5e4e3d46b0a6dda246a7bc28">DebugCallbackFunction</a> &amp;)</td></tr>
+<tr class="separator:ab81312bd5e64cbae2803de9f243bdb32 inherit pub_methods_classarmnn_1_1_i_workload"><td class="memSeparator" colspan="2">&#160;</td></tr>
+<tr class="memitem:a2d2834d1029217934f504e3e59579081 inherit pub_methods_classarmnn_1_1_i_workload"><td class="memItemLeft" align="right" valign="top">virtual <a class="el" href="classarmnn_1_1_optional.html">armnn::Optional</a>&lt; <a class="el" href="structarmnn_1_1_memory_requirements.html">armnn::MemoryRequirements</a> &gt;&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarmnn_1_1_i_workload.html#a2d2834d1029217934f504e3e59579081">GetMemoryRequirements</a> ()</td></tr>
+<tr class="separator:a2d2834d1029217934f504e3e59579081 inherit pub_methods_classarmnn_1_1_i_workload"><td class="memSeparator" colspan="2">&#160;</td></tr>
+</table><table class="memberdecls">
+<tr class="heading"><td colspan="2"><h2 class="groupheader"><a name="inherited"></a>
+Additional Inherited Members</h2></td></tr>
+<tr class="inherit_header pro_attribs_classarmnn_1_1_base_workload"><td colspan="2" onclick="javascript:toggleInherit('pro_attribs_classarmnn_1_1_base_workload')"><img src="closed.png" alt="-"/>&#160;Protected Attributes inherited from <a class="el" href="classarmnn_1_1_base_workload.html">BaseWorkload&lt; QueueDescriptor &gt;</a></td></tr>
+<tr class="memitem:afb8d2c8817c75de9d01a4c0e0d5c160b inherit pro_attribs_classarmnn_1_1_base_workload"><td class="memItemLeft" align="right" valign="top"><a class="el" href="structarmnn_1_1_queue_descriptor.html">QueueDescriptor</a>&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a></td></tr>
+<tr class="separator:afb8d2c8817c75de9d01a4c0e0d5c160b inherit pro_attribs_classarmnn_1_1_base_workload"><td class="memSeparator" colspan="2">&#160;</td></tr>
+<tr class="memitem:a4c2b3ca86eec6c199364671af267cd2c inherit pro_attribs_classarmnn_1_1_base_workload"><td class="memItemLeft" align="right" valign="top">const arm::pipe::ProfilingGuid&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarmnn_1_1_base_workload.html#a4c2b3ca86eec6c199364671af267cd2c">m_Guid</a></td></tr>
+<tr class="separator:a4c2b3ca86eec6c199364671af267cd2c inherit pro_attribs_classarmnn_1_1_base_workload"><td class="memSeparator" colspan="2">&#160;</td></tr>
+<tr class="memitem:a77806f89d6edb879d3f6c6b6b18168a7 inherit pro_attribs_classarmnn_1_1_base_workload"><td class="memItemLeft" align="right" valign="top">const std::string&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarmnn_1_1_base_workload.html#a77806f89d6edb879d3f6c6b6b18168a7">m_Name</a></td></tr>
+<tr class="separator:a77806f89d6edb879d3f6c6b6b18168a7 inherit pro_attribs_classarmnn_1_1_base_workload"><td class="memSeparator" colspan="2">&#160;</td></tr>
+</table>
+<a name="details" id="details"></a><h2 class="groupheader">Detailed Description</h2>
+<div class="textblock">
+<p class="definition">Definition at line <a class="el" href="_neon_unidirectional_sequence_lstm_float_workload_8hpp_source.html#l00021">21</a> of file <a class="el" href="_neon_unidirectional_sequence_lstm_float_workload_8hpp_source.html">NeonUnidirectionalSequenceLstmFloatWorkload.hpp</a>.</p>
+</div><h2 class="groupheader">Constructor &amp; Destructor Documentation</h2>
+<a id="acceeccc54cc2871ec72da81e48e7ef1c"></a>
+<h2 class="memtitle"><span class="permalink"><a href="#acceeccc54cc2871ec72da81e48e7ef1c">&#9670;&nbsp;</a></span>NeonUnidirectionalSequenceLstmFloatWorkload()</h2>
+
+<div class="memitem">
+<div class="memproto">
+      <table class="memname">
+        <tr>
+          <td class="memname"><a class="el" href="classarmnn_1_1_neon_unidirectional_sequence_lstm_float_workload.html">NeonUnidirectionalSequenceLstmFloatWorkload</a> </td>
+          <td>(</td>
+          <td class="paramtype">const <a class="el" href="structarmnn_1_1_unidirectional_sequence_lstm_queue_descriptor.html">UnidirectionalSequenceLstmQueueDescriptor</a> &amp;&#160;</td>
+          <td class="paramname"><em>descriptor</em>, </td>
+        </tr>
+        <tr>
+          <td class="paramkey"></td>
+          <td></td>
+          <td class="paramtype">const <a class="el" href="structarmnn_1_1_workload_info.html">WorkloadInfo</a> &amp;&#160;</td>
+          <td class="paramname"><em>info</em>&#160;</td>
+        </tr>
+        <tr>
+          <td></td>
+          <td>)</td>
+          <td></td><td></td>
+        </tr>
+      </table>
+</div><div class="memdoc">
+
+<p class="definition">Definition at line <a class="el" href="_neon_unidirectional_sequence_lstm_float_workload_8cpp_source.html#l00032">32</a> of file <a class="el" href="_neon_unidirectional_sequence_lstm_float_workload_8cpp_source.html">NeonUnidirectionalSequenceLstmFloatWorkload.cpp</a>.</p>
+<div class="fragment"><div class="line"><a name="l00033"></a><span class="lineno">   33</span>&#160;    : FloatWorkload&lt;UnidirectionalSequenceLstmQueueDescriptor&gt;(descriptor, info)</div>
+<div class="line"><a name="l00034"></a><span class="lineno">   34</span>&#160;{</div>
+<div class="line"><a name="l00035"></a><span class="lineno">   35</span>&#160;    <span class="comment">// Report Profiling Details</span></div>
+<div class="line"><a name="l00036"></a><span class="lineno">   36</span>&#160;    <a class="code" href="_profiling_8hpp.html#a786492a3881a4c760ab1eec2149f4aba">ARMNN_REPORT_PROFILING_WORKLOAD_DESC</a>(<span class="stringliteral">&quot;NeonUnidirectionalSequenceLstmFloatWorkload_Construct&quot;</span>,</div>
+<div class="line"><a name="l00037"></a><span class="lineno">   37</span>&#160;                                         descriptor.m_Parameters,</div>
+<div class="line"><a name="l00038"></a><span class="lineno">   38</span>&#160;                                         info,</div>
+<div class="line"><a name="l00039"></a><span class="lineno">   39</span>&#160;                                         <a class="code" href="classarmnn_1_1_base_workload.html#aaff95a48875d8fb4a616352906660ca9">GetGuid</a>());</div>
+<div class="line"><a name="l00040"></a><span class="lineno">   40</span>&#160; </div>
+<div class="line"><a name="l00041"></a><span class="lineno">   41</span>&#160;    <span class="keyword">const</span> arm_compute::ITensor&amp; input = <span class="keyword">static_cast&lt;</span>IAclTensorHandle*<span class="keyword">&gt;</span>(<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_queue_descriptor.html#a4b50e46a6810018f3edecfb68b2a76b3">m_Inputs</a>[0])-&gt;GetTensor();</div>
+<div class="line"><a name="l00042"></a><span class="lineno">   42</span>&#160;    arm_compute::ITensor&amp; output = <span class="keyword">static_cast&lt;</span>IAclTensorHandle*<span class="keyword">&gt;</span>(<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_queue_descriptor.html#a6abd491bb99ffe88bd472c1ae5a1ed1a">m_Outputs</a>[2])-&gt;GetTensor();</div>
+<div class="line"><a name="l00043"></a><span class="lineno">   43</span>&#160; </div>
+<div class="line"><a name="l00044"></a><span class="lineno">   44</span>&#160;    TensorInfo inputInfo = <a class="code" href="namespacearmnn.html#a4dc0adc6737b5944e7671bee71788407acaf9b6b99962bf5c2264824231d7a40c">info</a>.m_InputTensorInfos[0];</div>
+<div class="line"><a name="l00045"></a><span class="lineno">   45</span>&#160;    TensorInfo outputInfo = <a class="code" href="namespacearmnn.html#a4dc0adc6737b5944e7671bee71788407acaf9b6b99962bf5c2264824231d7a40c">info</a>.m_OutputTensorInfos[0];</div>
+<div class="line"><a name="l00046"></a><span class="lineno">   46</span>&#160; </div>
+<div class="line"><a name="l00047"></a><span class="lineno">   47</span>&#160;    <a class="code" href="namespacearmnn.html#ad8ed01ff3ff33333d8e19db4d2818bb6">arm_compute::DataType</a> armComputeDataType = <span class="keyword">static_cast&lt;</span>IAclTensorHandle*<span class="keyword">&gt;</span>(<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_queue_descriptor.html#a4b50e46a6810018f3edecfb68b2a76b3">m_Inputs</a>[0])-&gt;GetDataType();</div>
+<div class="line"><a name="l00048"></a><span class="lineno">   48</span>&#160;    <a class="code" href="namespacearmnn.html#ad8ed01ff3ff33333d8e19db4d2818bb6">armnn::DataType</a> armnnDataType = GetArmNNDataType(armComputeDataType);</div>
+<div class="line"><a name="l00049"></a><span class="lineno">   49</span>&#160; </div>
+<div class="line"><a name="l00050"></a><span class="lineno">   50</span>&#160;    TensorShape inputLayerShape = <span class="keyword">static_cast&lt;</span>IAclTensorHandle*<span class="keyword">&gt;</span>(<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_queue_descriptor.html#a4b50e46a6810018f3edecfb68b2a76b3">m_Inputs</a>[0])-&gt;GetShape();</div>
+<div class="line"><a name="l00051"></a><span class="lineno">   51</span>&#160;    TensorShape cellStateLayerShape = <span class="keyword">static_cast&lt;</span>IAclTensorHandle*<span class="keyword">&gt;</span>(<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_queue_descriptor.html#a4b50e46a6810018f3edecfb68b2a76b3">m_Inputs</a>[2])-&gt;GetShape();</div>
+<div class="line"><a name="l00052"></a><span class="lineno">   52</span>&#160;    TensorShape outputLayerShape = <span class="keyword">static_cast&lt;</span>IAclTensorHandle*<span class="keyword">&gt;</span>(<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_queue_descriptor.html#a6abd491bb99ffe88bd472c1ae5a1ed1a">m_Outputs</a>[2])-&gt;GetShape();</div>
+<div class="line"><a name="l00053"></a><span class="lineno">   53</span>&#160; </div>
+<div class="line"><a name="l00054"></a><span class="lineno">   54</span>&#160;    <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> maxTime = <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_Parameters.m_TimeMajor ? inputLayerShape[0] : inputLayerShape[1];</div>
+<div class="line"><a name="l00055"></a><span class="lineno">   55</span>&#160;    <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> batchSize = <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_Parameters.m_TimeMajor ? inputLayerShape[1] : inputLayerShape[0];</div>
+<div class="line"><a name="l00056"></a><span class="lineno">   56</span>&#160;    <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> inputSize = inputLayerShape[2];</div>
+<div class="line"><a name="l00057"></a><span class="lineno">   57</span>&#160;    <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> outputSize = outputLayerShape[2];</div>
+<div class="line"><a name="l00058"></a><span class="lineno">   58</span>&#160;    <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> numUnits = cellStateLayerShape[1];</div>
+<div class="line"><a name="l00059"></a><span class="lineno">   59</span>&#160; </div>
+<div class="line"><a name="l00060"></a><span class="lineno">   60</span>&#160;    <span class="keyword">const</span> TensorShape timeMajorShapeInput({maxTime, batchSize, inputSize});</div>
+<div class="line"><a name="l00061"></a><span class="lineno">   61</span>&#160;    <span class="keyword">const</span> TensorShape timeMajorShapeOutput({maxTime, batchSize, outputSize});</div>
+<div class="line"><a name="l00062"></a><span class="lineno">   62</span>&#160; </div>
+<div class="line"><a name="l00063"></a><span class="lineno">   63</span>&#160;    <span class="comment">//</span></div>
+<div class="line"><a name="l00064"></a><span class="lineno">   64</span>&#160;    <span class="comment">// Permute: performed if Unidirectional Sequence Layer inputs/outputs are in batch major format.</span></div>
+<div class="line"><a name="l00065"></a><span class="lineno">   65</span>&#160;    <span class="comment">//</span></div>
+<div class="line"><a name="l00066"></a><span class="lineno">   66</span>&#160;    <span class="keywordflow">if</span> (!<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_Parameters.m_TimeMajor)</div>
+<div class="line"><a name="l00067"></a><span class="lineno">   67</span>&#160;    {</div>
+<div class="line"><a name="l00068"></a><span class="lineno">   68</span>&#160;        std::unique_ptr&lt;arm_compute::NEPermute&gt; layer(<span class="keyword">new</span> arm_compute::NEPermute());</div>
+<div class="line"><a name="l00069"></a><span class="lineno">   69</span>&#160; </div>
+<div class="line"><a name="l00070"></a><span class="lineno">   70</span>&#160;        TensorInfo permuteOutInfo = inputInfo;</div>
+<div class="line"><a name="l00071"></a><span class="lineno">   71</span>&#160;        permuteOutInfo.SetShape(timeMajorShapeInput);</div>
+<div class="line"><a name="l00072"></a><span class="lineno">   72</span>&#160;        BuildArmComputeTensor(m_PermuteFirstOut, permuteOutInfo);</div>
+<div class="line"><a name="l00073"></a><span class="lineno">   73</span>&#160;        armcomputetensorutils::InitialiseArmComputeTensorEmpty(m_PermuteFirstOut);</div>
+<div class="line"><a name="l00074"></a><span class="lineno">   74</span>&#160; </div>
+<div class="line"><a name="l00075"></a><span class="lineno">   75</span>&#160;        <span class="comment">// Permute to time major format.</span></div>
+<div class="line"><a name="l00076"></a><span class="lineno">   76</span>&#160;        layer-&gt;configure(&amp;input, &amp;m_PermuteFirstOut, arm_compute::PermutationVector(0U,2U,1U));</div>
+<div class="line"><a name="l00077"></a><span class="lineno">   77</span>&#160;        m_Permute1.reset(layer.release());</div>
+<div class="line"><a name="l00078"></a><span class="lineno">   78</span>&#160;    }</div>
+<div class="line"><a name="l00079"></a><span class="lineno">   79</span>&#160; </div>
+<div class="line"><a name="l00080"></a><span class="lineno">   80</span>&#160;    <span class="comment">//</span></div>
+<div class="line"><a name="l00081"></a><span class="lineno">   81</span>&#160;    <span class="comment">// Split and Concat Tensors</span></div>
+<div class="line"><a name="l00082"></a><span class="lineno">   82</span>&#160;    <span class="comment">//</span></div>
+<div class="line"><a name="l00083"></a><span class="lineno">   83</span>&#160;    <span class="keywordflow">for</span> (<span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> i = 0; i &lt; maxTime; ++i)</div>
+<div class="line"><a name="l00084"></a><span class="lineno">   84</span>&#160;    {</div>
+<div class="line"><a name="l00085"></a><span class="lineno">   85</span>&#160;        arm_compute::Tensor splitter_out;</div>
+<div class="line"><a name="l00086"></a><span class="lineno">   86</span>&#160;        arm_compute::Tensor concat_in;</div>
+<div class="line"><a name="l00087"></a><span class="lineno">   87</span>&#160; </div>
+<div class="line"><a name="l00088"></a><span class="lineno">   88</span>&#160;        <span class="keyword">auto</span> splitterTensorInfo = inputInfo;</div>
+<div class="line"><a name="l00089"></a><span class="lineno">   89</span>&#160;        <span class="keyword">auto</span> concatTensorInfo = outputInfo;</div>
+<div class="line"><a name="l00090"></a><span class="lineno">   90</span>&#160;        splitterTensorInfo.SetShape({batchSize, inputSize});</div>
+<div class="line"><a name="l00091"></a><span class="lineno">   91</span>&#160;        concatTensorInfo.SetShape({batchSize, outputSize});</div>
+<div class="line"><a name="l00092"></a><span class="lineno">   92</span>&#160;        BuildArmComputeTensor(splitter_out, splitterTensorInfo);</div>
+<div class="line"><a name="l00093"></a><span class="lineno">   93</span>&#160;        BuildArmComputeTensor(concat_in, concatTensorInfo);</div>
+<div class="line"><a name="l00094"></a><span class="lineno">   94</span>&#160; </div>
+<div class="line"><a name="l00095"></a><span class="lineno">   95</span>&#160;        armcomputetensorutils::InitialiseArmComputeTensorEmpty(splitter_out);</div>
+<div class="line"><a name="l00096"></a><span class="lineno">   96</span>&#160;        armcomputetensorutils::InitialiseArmComputeTensorEmpty(concat_in);</div>
+<div class="line"><a name="l00097"></a><span class="lineno">   97</span>&#160; </div>
+<div class="line"><a name="l00098"></a><span class="lineno">   98</span>&#160;        <span class="comment">// append to std::vector&lt;arm_compute::Tensor&gt;</span></div>
+<div class="line"><a name="l00099"></a><span class="lineno">   99</span>&#160;        m_SplitterOutputsTensors.push_back(std::move(splitter_out));</div>
+<div class="line"><a name="l00100"></a><span class="lineno">  100</span>&#160;        m_ConcatInputsTensors.push_back(std::move(concat_in));</div>
+<div class="line"><a name="l00101"></a><span class="lineno">  101</span>&#160;    }</div>
+<div class="line"><a name="l00102"></a><span class="lineno">  102</span>&#160; </div>
+<div class="line"><a name="l00103"></a><span class="lineno">  103</span>&#160;    <span class="keywordflow">for</span> (<span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> i = 0; i &lt; maxTime; ++i)</div>
+<div class="line"><a name="l00104"></a><span class="lineno">  104</span>&#160;    {</div>
+<div class="line"><a name="l00105"></a><span class="lineno">  105</span>&#160;        <span class="comment">// append to std::vector&lt;arm_compute::ITensor*&gt;</span></div>
+<div class="line"><a name="l00106"></a><span class="lineno">  106</span>&#160;        m_SplitterOutputs.push_back(&amp;m_SplitterOutputsTensors[i]);</div>
+<div class="line"><a name="l00107"></a><span class="lineno">  107</span>&#160;        m_ConcatInputs.push_back(&amp;m_ConcatInputsTensors[i]);</div>
+<div class="line"><a name="l00108"></a><span class="lineno">  108</span>&#160;    }</div>
+<div class="line"><a name="l00109"></a><span class="lineno">  109</span>&#160; </div>
+<div class="line"><a name="l00110"></a><span class="lineno">  110</span>&#160;    <span class="comment">//</span></div>
+<div class="line"><a name="l00111"></a><span class="lineno">  111</span>&#160;    <span class="comment">// Split</span></div>
+<div class="line"><a name="l00112"></a><span class="lineno">  112</span>&#160;    <span class="comment">//</span></div>
+<div class="line"><a name="l00113"></a><span class="lineno">  113</span>&#160;    <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> numberDimensions = 3;</div>
+<div class="line"><a name="l00114"></a><span class="lineno">  114</span>&#160;    <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> dimension = 0; <span class="comment">// splitting on 0-dimension (i.e. maxTime dimension)</span></div>
+<div class="line"><a name="l00115"></a><span class="lineno">  115</span>&#160; </div>
+<div class="line"><a name="l00116"></a><span class="lineno">  116</span>&#160;    <span class="keywordflow">if</span> (maxTime != 1) <span class="comment">// ACL split does not work with only one element to split.</span></div>
+<div class="line"><a name="l00117"></a><span class="lineno">  117</span>&#160;    {</div>
+<div class="line"><a name="l00118"></a><span class="lineno">  118</span>&#160;        ViewsDescriptor splitterDesc(maxTime, numberDimensions);</div>
+<div class="line"><a name="l00119"></a><span class="lineno">  119</span>&#160;        <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> splitterDimSizes[3] = {1, batchSize, inputSize};</div>
+<div class="line"><a name="l00120"></a><span class="lineno">  120</span>&#160;        <span class="keywordflow">for</span> (<span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> outputIdx = 0u; outputIdx &lt; maxTime; ++outputIdx)</div>
+<div class="line"><a name="l00121"></a><span class="lineno">  121</span>&#160;        {</div>
+<div class="line"><a name="l00122"></a><span class="lineno">  122</span>&#160;            splitterDesc.SetViewOriginCoord(outputIdx, dimension, splitterDimSizes[dimension] * outputIdx);</div>
+<div class="line"><a name="l00123"></a><span class="lineno">  123</span>&#160;            <span class="keywordflow">for</span> (<span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> dimIdx = 0u; dimIdx &lt; numberDimensions; ++dimIdx)</div>
+<div class="line"><a name="l00124"></a><span class="lineno">  124</span>&#160;            {</div>
+<div class="line"><a name="l00125"></a><span class="lineno">  125</span>&#160;                splitterDesc.SetViewSize(outputIdx, dimIdx, splitterDimSizes[dimIdx]);</div>
+<div class="line"><a name="l00126"></a><span class="lineno">  126</span>&#160;            }</div>
+<div class="line"><a name="l00127"></a><span class="lineno">  127</span>&#160;        }</div>
+<div class="line"><a name="l00128"></a><span class="lineno">  128</span>&#160; </div>
+<div class="line"><a name="l00129"></a><span class="lineno">  129</span>&#160;        std::set&lt;unsigned int&gt; splitAxis = <a class="code" href="namespacearmnn.html#a8cbabc875597b3bed0ccdc0adb289fde">ComputeSplitAxis</a>(splitterDesc, timeMajorShapeInput);</div>
+<div class="line"><a name="l00130"></a><span class="lineno">  130</span>&#160; </div>
+<div class="line"><a name="l00131"></a><span class="lineno">  131</span>&#160;        std::unique_ptr&lt;arm_compute::NESplit&gt; split_layer(<span class="keyword">new</span> arm_compute::NESplit());</div>
+<div class="line"><a name="l00132"></a><span class="lineno">  132</span>&#160;        <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span>                          aclAxisSplit = CalcAclAxis(splitterDesc.GetNumDimensions(),</div>
+<div class="line"><a name="l00133"></a><span class="lineno">  133</span>&#160;                                                                         *splitAxis.begin());</div>
+<div class="line"><a name="l00134"></a><span class="lineno">  134</span>&#160;        <span class="keywordflow">if</span> (!<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_Parameters.m_TimeMajor)</div>
+<div class="line"><a name="l00135"></a><span class="lineno">  135</span>&#160;        {</div>
+<div class="line"><a name="l00136"></a><span class="lineno">  136</span>&#160;            split_layer-&gt;configure(&amp;m_PermuteFirstOut, m_SplitterOutputs, aclAxisSplit);</div>
+<div class="line"><a name="l00137"></a><span class="lineno">  137</span>&#160;        } <span class="keywordflow">else</span></div>
+<div class="line"><a name="l00138"></a><span class="lineno">  138</span>&#160;        {</div>
+<div class="line"><a name="l00139"></a><span class="lineno">  139</span>&#160;            split_layer-&gt;configure(&amp;input, m_SplitterOutputs, aclAxisSplit);</div>
+<div class="line"><a name="l00140"></a><span class="lineno">  140</span>&#160;        }</div>
+<div class="line"><a name="l00141"></a><span class="lineno">  141</span>&#160; </div>
+<div class="line"><a name="l00142"></a><span class="lineno">  142</span>&#160;        split_layer-&gt;prepare();</div>
+<div class="line"><a name="l00143"></a><span class="lineno">  143</span>&#160;        m_Splitter.reset(split_layer.release());</div>
+<div class="line"><a name="l00144"></a><span class="lineno">  144</span>&#160;    }</div>
+<div class="line"><a name="l00145"></a><span class="lineno">  145</span>&#160; </div>
+<div class="line"><a name="l00146"></a><span class="lineno">  146</span>&#160;    <span class="comment">//</span></div>
+<div class="line"><a name="l00147"></a><span class="lineno">  147</span>&#160;    <span class="comment">// Lstm</span></div>
+<div class="line"><a name="l00148"></a><span class="lineno">  148</span>&#160;    <span class="comment">//</span></div>
+<div class="line"><a name="l00149"></a><span class="lineno">  149</span>&#160;    arm_compute::LSTMParams&lt;arm_compute::ITensor&gt; lstm_param;</div>
+<div class="line"><a name="l00150"></a><span class="lineno">  150</span>&#160; </div>
+<div class="line"><a name="l00151"></a><span class="lineno">  151</span>&#160;    m_InputToForgetWeightsTensor = std::make_unique&lt;arm_compute::Tensor&gt;();</div>
+<div class="line"><a name="l00152"></a><span class="lineno">  152</span>&#160;    BuildArmComputeTensor(*m_InputToForgetWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_InputToForgetWeights-&gt;GetTensorInfo());</div>
+<div class="line"><a name="l00153"></a><span class="lineno">  153</span>&#160; </div>
+<div class="line"><a name="l00154"></a><span class="lineno">  154</span>&#160;    m_InputToCellWeightsTensor = std::make_unique&lt;arm_compute::Tensor&gt;();</div>
+<div class="line"><a name="l00155"></a><span class="lineno">  155</span>&#160;    BuildArmComputeTensor(*m_InputToCellWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_InputToCellWeights-&gt;GetTensorInfo());</div>
+<div class="line"><a name="l00156"></a><span class="lineno">  156</span>&#160; </div>
+<div class="line"><a name="l00157"></a><span class="lineno">  157</span>&#160;    m_InputToOutputWeightsTensor = std::make_unique&lt;arm_compute::Tensor&gt;();</div>
+<div class="line"><a name="l00158"></a><span class="lineno">  158</span>&#160;    BuildArmComputeTensor(*m_InputToOutputWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_InputToOutputWeights-&gt;GetTensorInfo());</div>
+<div class="line"><a name="l00159"></a><span class="lineno">  159</span>&#160; </div>
+<div class="line"><a name="l00160"></a><span class="lineno">  160</span>&#160;    m_RecurrentToForgetWeightsTensor = std::make_unique&lt;arm_compute::Tensor&gt;();</div>
+<div class="line"><a name="l00161"></a><span class="lineno">  161</span>&#160;    BuildArmComputeTensor(*m_RecurrentToForgetWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_RecurrentToForgetWeights-&gt;GetTensorInfo());</div>
+<div class="line"><a name="l00162"></a><span class="lineno">  162</span>&#160; </div>
+<div class="line"><a name="l00163"></a><span class="lineno">  163</span>&#160;    m_RecurrentToCellWeightsTensor = std::make_unique&lt;arm_compute::Tensor&gt;();</div>
+<div class="line"><a name="l00164"></a><span class="lineno">  164</span>&#160;    BuildArmComputeTensor(*m_RecurrentToCellWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_RecurrentToCellWeights-&gt;GetTensorInfo());</div>
+<div class="line"><a name="l00165"></a><span class="lineno">  165</span>&#160; </div>
+<div class="line"><a name="l00166"></a><span class="lineno">  166</span>&#160;    m_RecurrentToOutputWeightsTensor = std::make_unique&lt;arm_compute::Tensor&gt;();</div>
+<div class="line"><a name="l00167"></a><span class="lineno">  167</span>&#160;    BuildArmComputeTensor(*m_RecurrentToOutputWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_RecurrentToOutputWeights-&gt;GetTensorInfo());</div>
+<div class="line"><a name="l00168"></a><span class="lineno">  168</span>&#160; </div>
+<div class="line"><a name="l00169"></a><span class="lineno">  169</span>&#160;    m_ForgetGateBiasTensor = std::make_unique&lt;arm_compute::Tensor&gt;();</div>
+<div class="line"><a name="l00170"></a><span class="lineno">  170</span>&#160;    BuildArmComputeTensor(*m_ForgetGateBiasTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_ForgetGateBias-&gt;GetTensorInfo());</div>
+<div class="line"><a name="l00171"></a><span class="lineno">  171</span>&#160; </div>
+<div class="line"><a name="l00172"></a><span class="lineno">  172</span>&#160;    m_CellBiasTensor = std::make_unique&lt;arm_compute::Tensor&gt;();</div>
+<div class="line"><a name="l00173"></a><span class="lineno">  173</span>&#160;    BuildArmComputeTensor(*m_CellBiasTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_CellBias-&gt;GetTensorInfo());</div>
+<div class="line"><a name="l00174"></a><span class="lineno">  174</span>&#160; </div>
+<div class="line"><a name="l00175"></a><span class="lineno">  175</span>&#160;    m_OutputGateBiasTensor = std::make_unique&lt;arm_compute::Tensor&gt;();</div>
+<div class="line"><a name="l00176"></a><span class="lineno">  176</span>&#160;    BuildArmComputeTensor(*m_OutputGateBiasTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_OutputGateBias-&gt;GetTensorInfo());</div>
+<div class="line"><a name="l00177"></a><span class="lineno">  177</span>&#160; </div>
+<div class="line"><a name="l00178"></a><span class="lineno">  178</span>&#160;    <span class="comment">// for future reference: check the AndroidNN API for the logic here</span></div>
+<div class="line"><a name="l00179"></a><span class="lineno">  179</span>&#160;    <span class="keywordflow">if</span> (!<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_Parameters.m_CifgEnabled)</div>
+<div class="line"><a name="l00180"></a><span class="lineno">  180</span>&#160;    {</div>
+<div class="line"><a name="l00181"></a><span class="lineno">  181</span>&#160;        m_InputToInputWeightsTensor = std::make_unique&lt;arm_compute::Tensor&gt;();</div>
+<div class="line"><a name="l00182"></a><span class="lineno">  182</span>&#160;        BuildArmComputeTensor(*m_InputToInputWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_InputToInputWeights-&gt;GetTensorInfo());</div>
+<div class="line"><a name="l00183"></a><span class="lineno">  183</span>&#160; </div>
+<div class="line"><a name="l00184"></a><span class="lineno">  184</span>&#160;        m_RecurrentToInputWeightsTensor = std::make_unique&lt;arm_compute::Tensor&gt;();</div>
+<div class="line"><a name="l00185"></a><span class="lineno">  185</span>&#160;        BuildArmComputeTensor(*m_RecurrentToInputWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_RecurrentToInputWeights-&gt;GetTensorInfo());</div>
+<div class="line"><a name="l00186"></a><span class="lineno">  186</span>&#160; </div>
+<div class="line"><a name="l00187"></a><span class="lineno">  187</span>&#160;        m_CellToInputWeightsTensor = std::make_unique&lt;arm_compute::Tensor&gt;();</div>
+<div class="line"><a name="l00188"></a><span class="lineno">  188</span>&#160;        <span class="keywordflow">if</span> (<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_CellToInputWeights != <span class="keyword">nullptr</span>)</div>
+<div class="line"><a name="l00189"></a><span class="lineno">  189</span>&#160;        {</div>
+<div class="line"><a name="l00190"></a><span class="lineno">  190</span>&#160;            BuildArmComputeTensor(*m_CellToInputWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_CellToInputWeights-&gt;GetTensorInfo());</div>
+<div class="line"><a name="l00191"></a><span class="lineno">  191</span>&#160;        }</div>
+<div class="line"><a name="l00192"></a><span class="lineno">  192</span>&#160; </div>
+<div class="line"><a name="l00193"></a><span class="lineno">  193</span>&#160;        m_InputGateBiasTensor = std::make_unique&lt;arm_compute::Tensor&gt;();</div>
+<div class="line"><a name="l00194"></a><span class="lineno">  194</span>&#160;        BuildArmComputeTensor(*m_InputGateBiasTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_InputGateBias-&gt;GetTensorInfo());</div>
+<div class="line"><a name="l00195"></a><span class="lineno">  195</span>&#160; </div>
+<div class="line"><a name="l00196"></a><span class="lineno">  196</span>&#160;        lstm_param.set_cifg_params(m_InputToInputWeightsTensor.get(),</div>
+<div class="line"><a name="l00197"></a><span class="lineno">  197</span>&#160;                                   m_RecurrentToInputWeightsTensor.get(),</div>
+<div class="line"><a name="l00198"></a><span class="lineno">  198</span>&#160;                                   <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_CellToInputWeights ? m_CellToInputWeightsTensor.get() : <span class="keyword">nullptr</span>,</div>
+<div class="line"><a name="l00199"></a><span class="lineno">  199</span>&#160;                                   m_InputGateBiasTensor.get());</div>
+<div class="line"><a name="l00200"></a><span class="lineno">  200</span>&#160;    }</div>
+<div class="line"><a name="l00201"></a><span class="lineno">  201</span>&#160; </div>
+<div class="line"><a name="l00202"></a><span class="lineno">  202</span>&#160;    <span class="keywordflow">if</span> (<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_Parameters.m_ProjectionEnabled)</div>
+<div class="line"><a name="l00203"></a><span class="lineno">  203</span>&#160;    {</div>
+<div class="line"><a name="l00204"></a><span class="lineno">  204</span>&#160;        m_ProjectionWeightsTensor = std::make_unique&lt;arm_compute::Tensor&gt;();</div>
+<div class="line"><a name="l00205"></a><span class="lineno">  205</span>&#160;        BuildArmComputeTensor(*m_ProjectionWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_ProjectionWeights-&gt;GetTensorInfo());</div>
+<div class="line"><a name="l00206"></a><span class="lineno">  206</span>&#160; </div>
+<div class="line"><a name="l00207"></a><span class="lineno">  207</span>&#160;        m_ProjectionBiasTensor = std::make_unique&lt;arm_compute::Tensor&gt;();</div>
+<div class="line"><a name="l00208"></a><span class="lineno">  208</span>&#160;        <span class="keywordflow">if</span> (<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_ProjectionBias != <span class="keyword">nullptr</span>)</div>
+<div class="line"><a name="l00209"></a><span class="lineno">  209</span>&#160;        {</div>
+<div class="line"><a name="l00210"></a><span class="lineno">  210</span>&#160;            BuildArmComputeTensor(*m_ProjectionBiasTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_ProjectionBias-&gt;GetTensorInfo());</div>
+<div class="line"><a name="l00211"></a><span class="lineno">  211</span>&#160;        }</div>
+<div class="line"><a name="l00212"></a><span class="lineno">  212</span>&#160; </div>
+<div class="line"><a name="l00213"></a><span class="lineno">  213</span>&#160;        lstm_param.set_projection_params(m_ProjectionWeightsTensor.get(),</div>
+<div class="line"><a name="l00214"></a><span class="lineno">  214</span>&#160;                                         <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_ProjectionBias ? m_ProjectionBiasTensor.get() : <span class="keyword">nullptr</span>);</div>
+<div class="line"><a name="l00215"></a><span class="lineno">  215</span>&#160;    }</div>
+<div class="line"><a name="l00216"></a><span class="lineno">  216</span>&#160; </div>
+<div class="line"><a name="l00217"></a><span class="lineno">  217</span>&#160;    <span class="keywordflow">if</span> (<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_Parameters.m_PeepholeEnabled)</div>
+<div class="line"><a name="l00218"></a><span class="lineno">  218</span>&#160;    {</div>
+<div class="line"><a name="l00219"></a><span class="lineno">  219</span>&#160;        m_CellToForgetWeightsTensor = std::make_unique&lt;arm_compute::Tensor&gt;();</div>
+<div class="line"><a name="l00220"></a><span class="lineno">  220</span>&#160;        BuildArmComputeTensor(*m_CellToForgetWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_CellToForgetWeights-&gt;GetTensorInfo());</div>
+<div class="line"><a name="l00221"></a><span class="lineno">  221</span>&#160; </div>
+<div class="line"><a name="l00222"></a><span class="lineno">  222</span>&#160;        m_CellToOutputWeightsTensor = std::make_unique&lt;arm_compute::Tensor&gt;();</div>
+<div class="line"><a name="l00223"></a><span class="lineno">  223</span>&#160;        BuildArmComputeTensor(*m_CellToOutputWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_CellToOutputWeights-&gt;GetTensorInfo());</div>
+<div class="line"><a name="l00224"></a><span class="lineno">  224</span>&#160; </div>
+<div class="line"><a name="l00225"></a><span class="lineno">  225</span>&#160;        lstm_param.set_peephole_params(m_CellToForgetWeightsTensor.get(), m_CellToOutputWeightsTensor.get());</div>
+<div class="line"><a name="l00226"></a><span class="lineno">  226</span>&#160;    }</div>
+<div class="line"><a name="l00227"></a><span class="lineno">  227</span>&#160; </div>
+<div class="line"><a name="l00228"></a><span class="lineno">  228</span>&#160;    <span class="keywordflow">if</span> (<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_Parameters.m_LayerNormEnabled)</div>
+<div class="line"><a name="l00229"></a><span class="lineno">  229</span>&#160;    {</div>
+<div class="line"><a name="l00230"></a><span class="lineno">  230</span>&#160;        m_InputLayerNormWeightsTensor = std::make_unique&lt;arm_compute::Tensor&gt;();</div>
+<div class="line"><a name="l00231"></a><span class="lineno">  231</span>&#160;        <span class="keywordflow">if</span> (!<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_Parameters.m_CifgEnabled)</div>
+<div class="line"><a name="l00232"></a><span class="lineno">  232</span>&#160;        {</div>
+<div class="line"><a name="l00233"></a><span class="lineno">  233</span>&#160;            BuildArmComputeTensor(*m_InputLayerNormWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_InputLayerNormWeights-&gt;GetTensorInfo());</div>
+<div class="line"><a name="l00234"></a><span class="lineno">  234</span>&#160;        }</div>
+<div class="line"><a name="l00235"></a><span class="lineno">  235</span>&#160; </div>
+<div class="line"><a name="l00236"></a><span class="lineno">  236</span>&#160;        m_ForgetLayerNormWeightsTensor = std::make_unique&lt;arm_compute::Tensor&gt;();</div>
+<div class="line"><a name="l00237"></a><span class="lineno">  237</span>&#160;        BuildArmComputeTensor(*m_ForgetLayerNormWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_ForgetLayerNormWeights-&gt;GetTensorInfo());</div>
+<div class="line"><a name="l00238"></a><span class="lineno">  238</span>&#160; </div>
+<div class="line"><a name="l00239"></a><span class="lineno">  239</span>&#160;        m_CellLayerNormWeightsTensor = std::make_unique&lt;arm_compute::Tensor&gt;();</div>
+<div class="line"><a name="l00240"></a><span class="lineno">  240</span>&#160;        BuildArmComputeTensor(*m_CellLayerNormWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_CellLayerNormWeights-&gt;GetTensorInfo());</div>
+<div class="line"><a name="l00241"></a><span class="lineno">  241</span>&#160; </div>
+<div class="line"><a name="l00242"></a><span class="lineno">  242</span>&#160;        m_OutputLayerNormWeightsTensor = std::make_unique&lt;arm_compute::Tensor&gt;();</div>
+<div class="line"><a name="l00243"></a><span class="lineno">  243</span>&#160;        BuildArmComputeTensor(*m_OutputLayerNormWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_OutputLayerNormWeights-&gt;GetTensorInfo());</div>
+<div class="line"><a name="l00244"></a><span class="lineno">  244</span>&#160; </div>
+<div class="line"><a name="l00245"></a><span class="lineno">  245</span>&#160;        <span class="keyword">auto</span> inputNormWeightTensor = <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_Parameters.m_CifgEnabled ? nullptr : m_InputLayerNormWeightsTensor.get();</div>
+<div class="line"><a name="l00246"></a><span class="lineno">  246</span>&#160;        lstm_param.set_layer_normalization_params(inputNormWeightTensor,</div>
+<div class="line"><a name="l00247"></a><span class="lineno">  247</span>&#160;                                                  m_ForgetLayerNormWeightsTensor.get(),</div>
+<div class="line"><a name="l00248"></a><span class="lineno">  248</span>&#160;                                                  m_CellLayerNormWeightsTensor.get(),</div>
+<div class="line"><a name="l00249"></a><span class="lineno">  249</span>&#160;                                                  m_OutputLayerNormWeightsTensor.get());</div>
+<div class="line"><a name="l00250"></a><span class="lineno">  250</span>&#160;    }</div>
+<div class="line"><a name="l00251"></a><span class="lineno">  251</span>&#160; </div>
+<div class="line"><a name="l00252"></a><span class="lineno">  252</span>&#160;    arm_compute::ITensor&amp; output_state_in = <span class="keyword">static_cast&lt;</span>IAclTensorHandle*<span class="keyword">&gt;</span>(<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_queue_descriptor.html#a4b50e46a6810018f3edecfb68b2a76b3">m_Inputs</a>[1])-&gt;GetTensor();</div>
+<div class="line"><a name="l00253"></a><span class="lineno">  253</span>&#160;    arm_compute::ITensor&amp; cell_state_in   = <span class="keyword">static_cast&lt;</span>IAclTensorHandle*<span class="keyword">&gt;</span>(<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_queue_descriptor.html#a4b50e46a6810018f3edecfb68b2a76b3">m_Inputs</a>[2])-&gt;GetTensor();</div>
+<div class="line"><a name="l00254"></a><span class="lineno">  254</span>&#160; </div>
+<div class="line"><a name="l00255"></a><span class="lineno">  255</span>&#160;    arm_compute::ITensor&amp; output_state_out = <span class="keyword">static_cast&lt;</span>IAclTensorHandle*<span class="keyword">&gt;</span>(<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_queue_descriptor.html#a4b50e46a6810018f3edecfb68b2a76b3">m_Inputs</a>[1])-&gt;GetTensor();</div>
+<div class="line"><a name="l00256"></a><span class="lineno">  256</span>&#160;    arm_compute::ITensor&amp; cell_state_out = <span class="keyword">static_cast&lt;</span>IAclTensorHandle*<span class="keyword">&gt;</span>(<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_queue_descriptor.html#a4b50e46a6810018f3edecfb68b2a76b3">m_Inputs</a>[2])-&gt;GetTensor();</div>
+<div class="line"><a name="l00257"></a><span class="lineno">  257</span>&#160; </div>
+<div class="line"><a name="l00258"></a><span class="lineno">  258</span>&#160;    m_ScratchBuffer = std::make_unique&lt;arm_compute::Tensor&gt;();</div>
+<div class="line"><a name="l00259"></a><span class="lineno">  259</span>&#160;    <span class="keywordflow">if</span> (<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_Parameters.m_CifgEnabled)</div>
+<div class="line"><a name="l00260"></a><span class="lineno">  260</span>&#160;    {</div>
+<div class="line"><a name="l00261"></a><span class="lineno">  261</span>&#160;        <span class="comment">// scratch_buffer [num_units * 3, batch_size] with CIFG</span></div>
+<div class="line"><a name="l00262"></a><span class="lineno">  262</span>&#160;        BuildArmComputeTensor(*m_ScratchBuffer, TensorInfo({batchSize, numUnits * 3}, armnnDataType));</div>
+<div class="line"><a name="l00263"></a><span class="lineno">  263</span>&#160;    }</div>
+<div class="line"><a name="l00264"></a><span class="lineno">  264</span>&#160;    <span class="keywordflow">else</span></div>
+<div class="line"><a name="l00265"></a><span class="lineno">  265</span>&#160;    {</div>
+<div class="line"><a name="l00266"></a><span class="lineno">  266</span>&#160;        <span class="comment">// scratch_buffer [num_units * 4, batch_size] without CIFG</span></div>
+<div class="line"><a name="l00267"></a><span class="lineno">  267</span>&#160;        BuildArmComputeTensor(*m_ScratchBuffer, TensorInfo({batchSize, numUnits * 4}, armnnDataType));</div>
+<div class="line"><a name="l00268"></a><span class="lineno">  268</span>&#160;    }</div>
+<div class="line"><a name="l00269"></a><span class="lineno">  269</span>&#160; </div>
+<div class="line"><a name="l00270"></a><span class="lineno">  270</span>&#160;    <span class="comment">// Need to be set at negative threshold to be compatible for ACL</span></div>
+<div class="line"><a name="l00271"></a><span class="lineno">  271</span>&#160;    <span class="keywordtype">float</span> cell_threshold       = <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_Parameters.m_ClippingThresCell;</div>
+<div class="line"><a name="l00272"></a><span class="lineno">  272</span>&#160;    <span class="keywordtype">float</span> projection_threshold = <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_Parameters.m_ClippingThresProj;</div>
+<div class="line"><a name="l00273"></a><span class="lineno">  273</span>&#160; </div>
+<div class="line"><a name="l00274"></a><span class="lineno">  274</span>&#160;    <span class="comment">// For preparing the object for the class ActivationLayerInfo, consider 5 situations</span></div>
+<div class="line"><a name="l00275"></a><span class="lineno">  275</span>&#160;    arm_compute::ActivationLayerInfo activationLayerInfo =</div>
+<div class="line"><a name="l00276"></a><span class="lineno">  276</span>&#160;        <a class="code" href="namespacearmnn.html#aa1e93ef5f9ee3dbb5e7faa9578f180ae">ConvertLstmActivationFuncToAclLayerInfo</a>(<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_Parameters.m_ActivationFunc);</div>
+<div class="line"><a name="l00277"></a><span class="lineno">  277</span>&#160; </div>
+<div class="line"><a name="l00278"></a><span class="lineno">  278</span>&#160;    <span class="keywordflow">for</span> (<span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> i = 0; i != maxTime; ++i)</div>
+<div class="line"><a name="l00279"></a><span class="lineno">  279</span>&#160;    {</div>
+<div class="line"><a name="l00280"></a><span class="lineno">  280</span>&#160;        <span class="comment">// Set LSTM input and output ITensors depending on:</span></div>
+<div class="line"><a name="l00281"></a><span class="lineno">  281</span>&#160;        <span class="comment">// input format (timeMajor) &amp; number of LSTM batches (maxTime).</span></div>
+<div class="line"><a name="l00282"></a><span class="lineno">  282</span>&#160;        arm_compute::ITensor* outputLSTM;</div>
+<div class="line"><a name="l00283"></a><span class="lineno">  283</span>&#160;        arm_compute::ITensor* inputLSTM;</div>
+<div class="line"><a name="l00284"></a><span class="lineno">  284</span>&#160; </div>
+<div class="line"><a name="l00285"></a><span class="lineno">  285</span>&#160;        <span class="comment">// If there is only one LSTM time major batch, we will not concat OR permute.</span></div>
+<div class="line"><a name="l00286"></a><span class="lineno">  286</span>&#160;        <span class="comment">// Set input of LSTM to be first input ITensor.</span></div>
+<div class="line"><a name="l00287"></a><span class="lineno">  287</span>&#160;        <span class="comment">// Set output of LSTM to be final output ITensor.</span></div>
+<div class="line"><a name="l00288"></a><span class="lineno">  288</span>&#160;        <span class="comment">// LSTM input/output cannot be &gt; 2 dimensions so need to resize its TensorInfo.</span></div>
+<div class="line"><a name="l00289"></a><span class="lineno">  289</span>&#160;        <span class="keywordflow">if</span> (maxTime == 1 &amp;&amp; <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_Parameters.m_TimeMajor)</div>
+<div class="line"><a name="l00290"></a><span class="lineno">  290</span>&#160;        {</div>
+<div class="line"><a name="l00291"></a><span class="lineno">  291</span>&#160;            TensorShape inputShape = <a class="code" href="namespacearmnn_utils.html#ab53d94ea22b51c6bcdf9584644bd67bb">GetTensorShape</a>(input.info()-&gt;tensor_shape(), 1U);</div>
+<div class="line"><a name="l00292"></a><span class="lineno">  292</span>&#160;            TensorShape outputShape = <a class="code" href="namespacearmnn_utils.html#ab53d94ea22b51c6bcdf9584644bd67bb">GetTensorShape</a>((&amp;output)-&gt;<a class="code" href="namespacearmnn.html#a4dc0adc6737b5944e7671bee71788407acaf9b6b99962bf5c2264824231d7a40c">info</a>()-&gt;tensor_shape(), 1U);</div>
+<div class="line"><a name="l00293"></a><span class="lineno">  293</span>&#160; </div>
+<div class="line"><a name="l00294"></a><span class="lineno">  294</span>&#160;            TensorShape inputShapeShrink({inputShape[1], inputShape[2]});</div>
+<div class="line"><a name="l00295"></a><span class="lineno">  295</span>&#160;            TensorShape outputShapeShrink({outputShape[1], outputShape[2]});</div>
+<div class="line"><a name="l00296"></a><span class="lineno">  296</span>&#160; </div>
+<div class="line"><a name="l00297"></a><span class="lineno">  297</span>&#160;            <span class="keyword">auto</span> acl_input_shape_shrink = BuildArmComputeTensorShape(inputShapeShrink);</div>
+<div class="line"><a name="l00298"></a><span class="lineno">  298</span>&#160;            <span class="keyword">auto</span> acl_output_shape_shrink = BuildArmComputeTensorShape(outputShapeShrink);</div>
+<div class="line"><a name="l00299"></a><span class="lineno">  299</span>&#160; </div>
+<div class="line"><a name="l00300"></a><span class="lineno">  300</span>&#160;            input.info()-&gt;set_tensor_shape(acl_input_shape_shrink);</div>
+<div class="line"><a name="l00301"></a><span class="lineno">  301</span>&#160;            inputLSTM = <span class="keyword">const_cast&lt;</span>arm_compute::ITensor*<span class="keyword">&gt;</span>(&amp;input);</div>
+<div class="line"><a name="l00302"></a><span class="lineno">  302</span>&#160; </div>
+<div class="line"><a name="l00303"></a><span class="lineno">  303</span>&#160;            output.info()-&gt;set_tensor_shape(acl_output_shape_shrink);</div>
+<div class="line"><a name="l00304"></a><span class="lineno">  304</span>&#160;            outputLSTM = &amp;output;</div>
+<div class="line"><a name="l00305"></a><span class="lineno">  305</span>&#160;        }</div>
+<div class="line"><a name="l00306"></a><span class="lineno">  306</span>&#160;        <span class="comment">// If there is only one LSTM batch major batch, we will not concat, only permute.</span></div>
+<div class="line"><a name="l00307"></a><span class="lineno">  307</span>&#160;        <span class="comment">// Set input of LSTM to be output of initial permute.</span></div>
+<div class="line"><a name="l00308"></a><span class="lineno">  308</span>&#160;        <span class="comment">// Set output of LSTM to be first element of m_ConcatInputs &amp; use that value later in permute.</span></div>
+<div class="line"><a name="l00309"></a><span class="lineno">  309</span>&#160;        <span class="comment">// LSTM output cannot be &gt; 2 dimensions so need to resize its TensorInfo.</span></div>
+<div class="line"><a name="l00310"></a><span class="lineno">  310</span>&#160;        <span class="keywordflow">else</span> <span class="keywordflow">if</span> (maxTime == 1 &amp;&amp; !<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_Parameters.m_TimeMajor)</div>
+<div class="line"><a name="l00311"></a><span class="lineno">  311</span>&#160;        {</div>
+<div class="line"><a name="l00312"></a><span class="lineno">  312</span>&#160;            TensorShape inputShape = <a class="code" href="namespacearmnn_utils.html#ab53d94ea22b51c6bcdf9584644bd67bb">GetTensorShape</a>(m_PermuteFirstOut.info()-&gt;tensor_shape(), 1U);</div>
+<div class="line"><a name="l00313"></a><span class="lineno">  313</span>&#160;            TensorShape inputShapeShrink({inputShape[1], inputShape[2]});</div>
+<div class="line"><a name="l00314"></a><span class="lineno">  314</span>&#160;            <span class="keyword">auto</span> acl_input_shape_shrink = BuildArmComputeTensorShape(inputShapeShrink);</div>
+<div class="line"><a name="l00315"></a><span class="lineno">  315</span>&#160;            m_PermuteFirstOut.info()-&gt;set_tensor_shape(acl_input_shape_shrink);</div>
+<div class="line"><a name="l00316"></a><span class="lineno">  316</span>&#160;            inputLSTM = &amp;m_PermuteFirstOut;</div>
+<div class="line"><a name="l00317"></a><span class="lineno">  317</span>&#160; </div>
+<div class="line"><a name="l00318"></a><span class="lineno">  318</span>&#160;            outputLSTM = <span class="keyword">const_cast&lt;</span>arm_compute::ITensor*<span class="keyword">&gt;</span>(m_ConcatInputs[i]);</div>
+<div class="line"><a name="l00319"></a><span class="lineno">  319</span>&#160;        }</div>
+<div class="line"><a name="l00320"></a><span class="lineno">  320</span>&#160;        <span class="comment">// Batch major AND/OR 2+ LSTM batches so will use concat AND/OR permute later on.</span></div>
+<div class="line"><a name="l00321"></a><span class="lineno">  321</span>&#160;        <span class="keywordflow">else</span></div>
+<div class="line"><a name="l00322"></a><span class="lineno">  322</span>&#160;        {</div>
+<div class="line"><a name="l00323"></a><span class="lineno">  323</span>&#160;            inputLSTM = m_SplitterOutputs[i];</div>
+<div class="line"><a name="l00324"></a><span class="lineno">  324</span>&#160;            outputLSTM = <span class="keyword">const_cast&lt;</span>arm_compute::ITensor*<span class="keyword">&gt;</span>(m_ConcatInputs[i]);</div>
+<div class="line"><a name="l00325"></a><span class="lineno">  325</span>&#160;        }</div>
+<div class="line"><a name="l00326"></a><span class="lineno">  326</span>&#160; </div>
+<div class="line"><a name="l00327"></a><span class="lineno">  327</span>&#160;        std::unique_ptr&lt;arm_compute::NELSTMLayer&gt; lstm_layer(<span class="keyword">new</span> arm_compute::NELSTMLayer());</div>
+<div class="line"><a name="l00328"></a><span class="lineno">  328</span>&#160;        lstm_layer-&gt;configure(inputLSTM,</div>
+<div class="line"><a name="l00329"></a><span class="lineno">  329</span>&#160;                              m_InputToForgetWeightsTensor.get(),</div>
+<div class="line"><a name="l00330"></a><span class="lineno">  330</span>&#160;                              m_InputToCellWeightsTensor.get(),</div>
+<div class="line"><a name="l00331"></a><span class="lineno">  331</span>&#160;                              m_InputToOutputWeightsTensor.get(),</div>
+<div class="line"><a name="l00332"></a><span class="lineno">  332</span>&#160;                              m_RecurrentToForgetWeightsTensor.get(),</div>
+<div class="line"><a name="l00333"></a><span class="lineno">  333</span>&#160;                              m_RecurrentToCellWeightsTensor.get(),</div>
+<div class="line"><a name="l00334"></a><span class="lineno">  334</span>&#160;                              m_RecurrentToOutputWeightsTensor.get(),</div>
+<div class="line"><a name="l00335"></a><span class="lineno">  335</span>&#160;                              m_ForgetGateBiasTensor.get(),</div>
+<div class="line"><a name="l00336"></a><span class="lineno">  336</span>&#160;                              m_CellBiasTensor.get(),</div>
+<div class="line"><a name="l00337"></a><span class="lineno">  337</span>&#160;                              m_OutputGateBiasTensor.get(),</div>
+<div class="line"><a name="l00338"></a><span class="lineno">  338</span>&#160;                              &amp;output_state_in,</div>
+<div class="line"><a name="l00339"></a><span class="lineno">  339</span>&#160;                              &amp;cell_state_in,</div>
+<div class="line"><a name="l00340"></a><span class="lineno">  340</span>&#160;                              m_ScratchBuffer.get(),</div>
+<div class="line"><a name="l00341"></a><span class="lineno">  341</span>&#160;                              &amp;output_state_out,</div>
+<div class="line"><a name="l00342"></a><span class="lineno">  342</span>&#160;                              &amp;cell_state_out,</div>
+<div class="line"><a name="l00343"></a><span class="lineno">  343</span>&#160;                              outputLSTM,</div>
+<div class="line"><a name="l00344"></a><span class="lineno">  344</span>&#160;                              lstm_param,</div>
+<div class="line"><a name="l00345"></a><span class="lineno">  345</span>&#160;                              activationLayerInfo,</div>
+<div class="line"><a name="l00346"></a><span class="lineno">  346</span>&#160;                              cell_threshold,</div>
+<div class="line"><a name="l00347"></a><span class="lineno">  347</span>&#160;                              projection_threshold);</div>
+<div class="line"><a name="l00348"></a><span class="lineno">  348</span>&#160; </div>
+<div class="line"><a name="l00349"></a><span class="lineno">  349</span>&#160;        m_Layers.emplace_back(std::move(lstm_layer));</div>
+<div class="line"><a name="l00350"></a><span class="lineno">  350</span>&#160;    }</div>
+<div class="line"><a name="l00351"></a><span class="lineno">  351</span>&#160; </div>
+<div class="line"><a name="l00352"></a><span class="lineno">  352</span>&#160;    armcomputetensorutils::InitialiseArmComputeTensorEmpty(*m_ScratchBuffer);</div>
+<div class="line"><a name="l00353"></a><span class="lineno">  353</span>&#160; </div>
+<div class="line"><a name="l00354"></a><span class="lineno">  354</span>&#160;    <a class="code" href="namespacearmnn.html#a611208865d55ea576cc89ac86d7c19b7">InitializeArmComputeTensorData</a>(*m_InputToForgetWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_InputToForgetWeights);</div>
+<div class="line"><a name="l00355"></a><span class="lineno">  355</span>&#160;    <a class="code" href="namespacearmnn.html#a611208865d55ea576cc89ac86d7c19b7">InitializeArmComputeTensorData</a>(*m_InputToCellWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_InputToCellWeights);</div>
+<div class="line"><a name="l00356"></a><span class="lineno">  356</span>&#160;    <a class="code" href="namespacearmnn.html#a611208865d55ea576cc89ac86d7c19b7">InitializeArmComputeTensorData</a>(*m_InputToOutputWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_InputToOutputWeights);</div>
+<div class="line"><a name="l00357"></a><span class="lineno">  357</span>&#160;    <a class="code" href="namespacearmnn.html#a611208865d55ea576cc89ac86d7c19b7">InitializeArmComputeTensorData</a>(*m_RecurrentToForgetWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_RecurrentToForgetWeights);</div>
+<div class="line"><a name="l00358"></a><span class="lineno">  358</span>&#160;    <a class="code" href="namespacearmnn.html#a611208865d55ea576cc89ac86d7c19b7">InitializeArmComputeTensorData</a>(*m_RecurrentToCellWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_RecurrentToCellWeights);</div>
+<div class="line"><a name="l00359"></a><span class="lineno">  359</span>&#160;    <a class="code" href="namespacearmnn.html#a611208865d55ea576cc89ac86d7c19b7">InitializeArmComputeTensorData</a>(*m_RecurrentToOutputWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_RecurrentToOutputWeights);</div>
+<div class="line"><a name="l00360"></a><span class="lineno">  360</span>&#160;    <a class="code" href="namespacearmnn.html#a611208865d55ea576cc89ac86d7c19b7">InitializeArmComputeTensorData</a>(*m_ForgetGateBiasTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_ForgetGateBias);</div>
+<div class="line"><a name="l00361"></a><span class="lineno">  361</span>&#160;    <a class="code" href="namespacearmnn.html#a611208865d55ea576cc89ac86d7c19b7">InitializeArmComputeTensorData</a>(*m_CellBiasTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_CellBias);</div>
+<div class="line"><a name="l00362"></a><span class="lineno">  362</span>&#160;    <a class="code" href="namespacearmnn.html#a611208865d55ea576cc89ac86d7c19b7">InitializeArmComputeTensorData</a>(*m_OutputGateBiasTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_OutputGateBias);</div>
+<div class="line"><a name="l00363"></a><span class="lineno">  363</span>&#160; </div>
+<div class="line"><a name="l00364"></a><span class="lineno">  364</span>&#160;    <span class="keywordflow">if</span> (!<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_Parameters.m_CifgEnabled)</div>
+<div class="line"><a name="l00365"></a><span class="lineno">  365</span>&#160;    {</div>
+<div class="line"><a name="l00366"></a><span class="lineno">  366</span>&#160;        <a class="code" href="namespacearmnn.html#a611208865d55ea576cc89ac86d7c19b7">InitializeArmComputeTensorData</a>(*m_InputToInputWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_InputToInputWeights);</div>
+<div class="line"><a name="l00367"></a><span class="lineno">  367</span>&#160;        <a class="code" href="namespacearmnn.html#a611208865d55ea576cc89ac86d7c19b7">InitializeArmComputeTensorData</a>(*m_RecurrentToInputWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_RecurrentToInputWeights);</div>
+<div class="line"><a name="l00368"></a><span class="lineno">  368</span>&#160;        <span class="keywordflow">if</span> (<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_CellToInputWeights != <span class="keyword">nullptr</span>)</div>
+<div class="line"><a name="l00369"></a><span class="lineno">  369</span>&#160;        {</div>
+<div class="line"><a name="l00370"></a><span class="lineno">  370</span>&#160;            <a class="code" href="namespacearmnn.html#a611208865d55ea576cc89ac86d7c19b7">InitializeArmComputeTensorData</a>(*m_CellToInputWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_CellToInputWeights);</div>
+<div class="line"><a name="l00371"></a><span class="lineno">  371</span>&#160;        }</div>
+<div class="line"><a name="l00372"></a><span class="lineno">  372</span>&#160;        <a class="code" href="namespacearmnn.html#a611208865d55ea576cc89ac86d7c19b7">InitializeArmComputeTensorData</a>(*m_InputGateBiasTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_InputGateBias);</div>
+<div class="line"><a name="l00373"></a><span class="lineno">  373</span>&#160;    }</div>
+<div class="line"><a name="l00374"></a><span class="lineno">  374</span>&#160; </div>
+<div class="line"><a name="l00375"></a><span class="lineno">  375</span>&#160;    <span class="keywordflow">if</span> (<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_Parameters.m_ProjectionEnabled)</div>
+<div class="line"><a name="l00376"></a><span class="lineno">  376</span>&#160;    {</div>
+<div class="line"><a name="l00377"></a><span class="lineno">  377</span>&#160;        <a class="code" href="namespacearmnn.html#a611208865d55ea576cc89ac86d7c19b7">InitializeArmComputeTensorData</a>(*m_ProjectionWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_ProjectionWeights);</div>
+<div class="line"><a name="l00378"></a><span class="lineno">  378</span>&#160;        <span class="keywordflow">if</span> (<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_ProjectionBias != <span class="keyword">nullptr</span>)</div>
+<div class="line"><a name="l00379"></a><span class="lineno">  379</span>&#160;        {</div>
+<div class="line"><a name="l00380"></a><span class="lineno">  380</span>&#160;            <a class="code" href="namespacearmnn.html#a611208865d55ea576cc89ac86d7c19b7">InitializeArmComputeTensorData</a>(*m_ProjectionBiasTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_ProjectionBias);</div>
+<div class="line"><a name="l00381"></a><span class="lineno">  381</span>&#160;        }</div>
+<div class="line"><a name="l00382"></a><span class="lineno">  382</span>&#160;    }</div>
+<div class="line"><a name="l00383"></a><span class="lineno">  383</span>&#160; </div>
+<div class="line"><a name="l00384"></a><span class="lineno">  384</span>&#160;    <span class="keywordflow">if</span> (<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_Parameters.m_PeepholeEnabled)</div>
+<div class="line"><a name="l00385"></a><span class="lineno">  385</span>&#160;    {</div>
+<div class="line"><a name="l00386"></a><span class="lineno">  386</span>&#160;        <a class="code" href="namespacearmnn.html#a611208865d55ea576cc89ac86d7c19b7">InitializeArmComputeTensorData</a>(*m_CellToForgetWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_CellToForgetWeights);</div>
+<div class="line"><a name="l00387"></a><span class="lineno">  387</span>&#160;        <a class="code" href="namespacearmnn.html#a611208865d55ea576cc89ac86d7c19b7">InitializeArmComputeTensorData</a>(*m_CellToOutputWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_CellToOutputWeights);</div>
+<div class="line"><a name="l00388"></a><span class="lineno">  388</span>&#160;    }</div>
+<div class="line"><a name="l00389"></a><span class="lineno">  389</span>&#160; </div>
+<div class="line"><a name="l00390"></a><span class="lineno">  390</span>&#160;    <span class="keywordflow">if</span> (<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_Parameters.m_LayerNormEnabled)</div>
+<div class="line"><a name="l00391"></a><span class="lineno">  391</span>&#160;    {</div>
+<div class="line"><a name="l00392"></a><span class="lineno">  392</span>&#160;        <span class="keywordflow">if</span> (!<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_Parameters.m_CifgEnabled)</div>
+<div class="line"><a name="l00393"></a><span class="lineno">  393</span>&#160;        {</div>
+<div class="line"><a name="l00394"></a><span class="lineno">  394</span>&#160;            <a class="code" href="namespacearmnn.html#a611208865d55ea576cc89ac86d7c19b7">InitializeArmComputeTensorData</a>(*m_InputLayerNormWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_InputLayerNormWeights);</div>
+<div class="line"><a name="l00395"></a><span class="lineno">  395</span>&#160;        }</div>
+<div class="line"><a name="l00396"></a><span class="lineno">  396</span>&#160;        <a class="code" href="namespacearmnn.html#a611208865d55ea576cc89ac86d7c19b7">InitializeArmComputeTensorData</a>(*m_ForgetLayerNormWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_ForgetLayerNormWeights);</div>
+<div class="line"><a name="l00397"></a><span class="lineno">  397</span>&#160;        <a class="code" href="namespacearmnn.html#a611208865d55ea576cc89ac86d7c19b7">InitializeArmComputeTensorData</a>(*m_CellLayerNormWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_CellLayerNormWeights);</div>
+<div class="line"><a name="l00398"></a><span class="lineno">  398</span>&#160;        <a class="code" href="namespacearmnn.html#a611208865d55ea576cc89ac86d7c19b7">InitializeArmComputeTensorData</a>(*m_OutputLayerNormWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_OutputLayerNormWeights);</div>
+<div class="line"><a name="l00399"></a><span class="lineno">  399</span>&#160;    }</div>
+<div class="line"><a name="l00400"></a><span class="lineno">  400</span>&#160; </div>
+<div class="line"><a name="l00401"></a><span class="lineno">  401</span>&#160;    <span class="comment">// Force Compute Library to perform the necessary copying and reshaping.</span></div>
+<div class="line"><a name="l00402"></a><span class="lineno">  402</span>&#160;    <span class="comment">// After which delete all the input tensors that will no longer be needed.</span></div>
+<div class="line"><a name="l00403"></a><span class="lineno">  403</span>&#160;    <span class="keywordflow">for</span> (uint32_t i = 0; i &lt; m_Layers.size(); ++i)</div>
+<div class="line"><a name="l00404"></a><span class="lineno">  404</span>&#160;    {</div>
+<div class="line"><a name="l00405"></a><span class="lineno">  405</span>&#160;        m_Layers[i]-&gt;prepare();</div>
+<div class="line"><a name="l00406"></a><span class="lineno">  406</span>&#160;    }</div>
+<div class="line"><a name="l00407"></a><span class="lineno">  407</span>&#160; </div>
+<div class="line"><a name="l00408"></a><span class="lineno">  408</span>&#160;    <span class="comment">//</span></div>
+<div class="line"><a name="l00409"></a><span class="lineno">  409</span>&#160;    <span class="comment">// Concat</span></div>
+<div class="line"><a name="l00410"></a><span class="lineno">  410</span>&#160;    <span class="comment">//</span></div>
+<div class="line"><a name="l00411"></a><span class="lineno">  411</span>&#160; </div>
+<div class="line"><a name="l00412"></a><span class="lineno">  412</span>&#160;    <span class="comment">// Expand dimensions of LSTM outputs adding one empty dimension to fit concatenate inputs.</span></div>
+<div class="line"><a name="l00413"></a><span class="lineno">  413</span>&#160;    TensorShape shape = <a class="code" href="namespacearmnn_utils.html#ab53d94ea22b51c6bcdf9584644bd67bb">GetTensorShape</a>(m_ConcatInputs[0]-&gt;<a class="code" href="namespacearmnn.html#a4dc0adc6737b5944e7671bee71788407acaf9b6b99962bf5c2264824231d7a40c">info</a>()-&gt;tensor_shape(), 1U);</div>
+<div class="line"><a name="l00414"></a><span class="lineno">  414</span>&#160;    TensorShape shapeExpandTimeMajor({1, shape[0], shape[1]});</div>
+<div class="line"><a name="l00415"></a><span class="lineno">  415</span>&#160;    TensorShape shapeExpandBatchMajor({shape[0], 1, shape[1]});</div>
+<div class="line"><a name="l00416"></a><span class="lineno">  416</span>&#160; </div>
+<div class="line"><a name="l00417"></a><span class="lineno">  417</span>&#160;    <span class="keywordflow">if</span> (maxTime != 1) <span class="comment">// ACL concat does not work with only one element to concatenate.</span></div>
+<div class="line"><a name="l00418"></a><span class="lineno">  418</span>&#160;    {</div>
+<div class="line"><a name="l00419"></a><span class="lineno">  419</span>&#160;        <span class="keywordflow">for</span> (<span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> i = 0; i &lt; maxTime; ++i)</div>
+<div class="line"><a name="l00420"></a><span class="lineno">  420</span>&#160;        {</div>
+<div class="line"><a name="l00421"></a><span class="lineno">  421</span>&#160;            m_ConcatInputs[i]-&gt;info()-&gt;set_tensor_shape(BuildArmComputeTensorShape(shapeExpandTimeMajor));</div>
+<div class="line"><a name="l00422"></a><span class="lineno">  422</span>&#160;        }</div>
+<div class="line"><a name="l00423"></a><span class="lineno">  423</span>&#160; </div>
+<div class="line"><a name="l00424"></a><span class="lineno">  424</span>&#160;        <a class="code" href="namespacearmnn.html#a7863c179ff92feec660c48ab7b95ae55">ConcatDescriptor</a>  concatDescriptor(maxTime, numberDimensions);  <span class="comment">// maxTime = num inputs (aka. number of views).</span></div>
+<div class="line"><a name="l00425"></a><span class="lineno">  425</span>&#160;        <span class="keywordflow">for</span> (<span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> inputIdx = 0u; inputIdx &lt; maxTime; ++inputIdx)</div>
+<div class="line"><a name="l00426"></a><span class="lineno">  426</span>&#160;        {</div>
+<div class="line"><a name="l00427"></a><span class="lineno">  427</span>&#160;            concatDescriptor.SetViewOriginCoord(inputIdx, dimension, inputIdx);</div>
+<div class="line"><a name="l00428"></a><span class="lineno">  428</span>&#160;            concatDescriptor.SetConcatAxis(dimension);</div>
+<div class="line"><a name="l00429"></a><span class="lineno">  429</span>&#160;        }</div>
+<div class="line"><a name="l00430"></a><span class="lineno">  430</span>&#160; </div>
+<div class="line"><a name="l00431"></a><span class="lineno">  431</span>&#160;        m_Concat.reset(<span class="keyword">new</span> arm_compute::NEConcatenateLayer());</div>
+<div class="line"><a name="l00432"></a><span class="lineno">  432</span>&#160;        <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> aclAxisConcat = CalcAclAxis(concatDescriptor.GetNumDimensions(), concatDescriptor.GetConcatAxis());</div>
+<div class="line"><a name="l00433"></a><span class="lineno">  433</span>&#160;        <span class="keywordflow">if</span> (!<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_Parameters.m_TimeMajor)</div>
+<div class="line"><a name="l00434"></a><span class="lineno">  434</span>&#160;        {</div>
+<div class="line"><a name="l00435"></a><span class="lineno">  435</span>&#160;            TensorInfo concatOutputTensorInfo = outputInfo;</div>
+<div class="line"><a name="l00436"></a><span class="lineno">  436</span>&#160;            concatOutputTensorInfo.SetShape(timeMajorShapeOutput);</div>
+<div class="line"><a name="l00437"></a><span class="lineno">  437</span>&#160;            BuildArmComputeTensor(concat_out, concatOutputTensorInfo);</div>
+<div class="line"><a name="l00438"></a><span class="lineno">  438</span>&#160;            armcomputetensorutils::InitialiseArmComputeTensorEmpty(concat_out);</div>
+<div class="line"><a name="l00439"></a><span class="lineno">  439</span>&#160; </div>
+<div class="line"><a name="l00440"></a><span class="lineno">  440</span>&#160;            m_Concat-&gt;configure(m_ConcatInputs, &amp;concat_out, aclAxisConcat);</div>
+<div class="line"><a name="l00441"></a><span class="lineno">  441</span>&#160;        }</div>
+<div class="line"><a name="l00442"></a><span class="lineno">  442</span>&#160;        <span class="keywordflow">else</span></div>
+<div class="line"><a name="l00443"></a><span class="lineno">  443</span>&#160;        {</div>
+<div class="line"><a name="l00444"></a><span class="lineno">  444</span>&#160;            m_Concat-&gt;configure(m_ConcatInputs, &amp;output, aclAxisConcat);</div>
+<div class="line"><a name="l00445"></a><span class="lineno">  445</span>&#160;        }</div>
+<div class="line"><a name="l00446"></a><span class="lineno">  446</span>&#160; </div>
+<div class="line"><a name="l00447"></a><span class="lineno">  447</span>&#160;        m_Concat-&gt;prepare();</div>
+<div class="line"><a name="l00448"></a><span class="lineno">  448</span>&#160;    }</div>
+<div class="line"><a name="l00449"></a><span class="lineno">  449</span>&#160;    <span class="comment">// If only one LSTM batch, we do not concat and/or permute.</span></div>
+<div class="line"><a name="l00450"></a><span class="lineno">  450</span>&#160;    <span class="comment">// Must ensure final output info is expanded to correct batch major dimensions.</span></div>
+<div class="line"><a name="l00451"></a><span class="lineno">  451</span>&#160;    <span class="keywordflow">else</span></div>
+<div class="line"><a name="l00452"></a><span class="lineno">  452</span>&#160;    {</div>
+<div class="line"><a name="l00453"></a><span class="lineno">  453</span>&#160;        <span class="keywordflow">if</span> (!<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_Parameters.m_TimeMajor)</div>
+<div class="line"><a name="l00454"></a><span class="lineno">  454</span>&#160;        {</div>
+<div class="line"><a name="l00455"></a><span class="lineno">  455</span>&#160;            output.info()-&gt;set_tensor_shape(BuildArmComputeTensorShape(shapeExpandBatchMajor));</div>
+<div class="line"><a name="l00456"></a><span class="lineno">  456</span>&#160;        }</div>
+<div class="line"><a name="l00457"></a><span class="lineno">  457</span>&#160;        <span class="keywordflow">else</span></div>
+<div class="line"><a name="l00458"></a><span class="lineno">  458</span>&#160;        {</div>
+<div class="line"><a name="l00459"></a><span class="lineno">  459</span>&#160;            output.info()-&gt;set_tensor_shape(BuildArmComputeTensorShape(shapeExpandTimeMajor));</div>
+<div class="line"><a name="l00460"></a><span class="lineno">  460</span>&#160;        }</div>
+<div class="line"><a name="l00461"></a><span class="lineno">  461</span>&#160;    }</div>
+<div class="line"><a name="l00462"></a><span class="lineno">  462</span>&#160; </div>
+<div class="line"><a name="l00463"></a><span class="lineno">  463</span>&#160;    <span class="comment">//</span></div>
+<div class="line"><a name="l00464"></a><span class="lineno">  464</span>&#160;    <span class="comment">// Permute: only done if input/output are in batch major format.</span></div>
+<div class="line"><a name="l00465"></a><span class="lineno">  465</span>&#160;    <span class="comment">//</span></div>
+<div class="line"><a name="l00466"></a><span class="lineno">  466</span>&#160;    <span class="keywordflow">if</span> (!<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.m_Parameters.m_TimeMajor)</div>
+<div class="line"><a name="l00467"></a><span class="lineno">  467</span>&#160;    {</div>
+<div class="line"><a name="l00468"></a><span class="lineno">  468</span>&#160;        <span class="comment">// Output now time major. Permute output back to batch major.</span></div>
+<div class="line"><a name="l00469"></a><span class="lineno">  469</span>&#160;        std::unique_ptr&lt;arm_compute::NEPermute&gt; layer(<span class="keyword">new</span> arm_compute::NEPermute());</div>
+<div class="line"><a name="l00470"></a><span class="lineno">  470</span>&#160;        <span class="keywordflow">if</span> (maxTime != 1)</div>
+<div class="line"><a name="l00471"></a><span class="lineno">  471</span>&#160;        {</div>
+<div class="line"><a name="l00472"></a><span class="lineno">  472</span>&#160;            layer-&gt;configure(&amp;concat_out, &amp;output, arm_compute::PermutationVector(0U, 2U, 1U));</div>
+<div class="line"><a name="l00473"></a><span class="lineno">  473</span>&#160;        }</div>
+<div class="line"><a name="l00474"></a><span class="lineno">  474</span>&#160;        <span class="keywordflow">else</span></div>
+<div class="line"><a name="l00475"></a><span class="lineno">  475</span>&#160;        {</div>
+<div class="line"><a name="l00476"></a><span class="lineno">  476</span>&#160;            layer-&gt;configure(m_ConcatInputs[0], &amp;output, arm_compute::PermutationVector(0U, 2U, 1U));</div>
+<div class="line"><a name="l00477"></a><span class="lineno">  477</span>&#160;        }</div>
+<div class="line"><a name="l00478"></a><span class="lineno">  478</span>&#160;        m_Permute2.reset(layer.release());</div>
+<div class="line"><a name="l00479"></a><span class="lineno">  479</span>&#160;    }</div>
+<div class="line"><a name="l00480"></a><span class="lineno">  480</span>&#160; </div>
+<div class="line"><a name="l00481"></a><span class="lineno">  481</span>&#160;    FreeUnusedTensors();</div>
+<div class="line"><a name="l00482"></a><span class="lineno">  482</span>&#160;}</div>
+</div><!-- fragment -->
+<p class="reference">References <a class="el" href="_profiling_8hpp_source.html#l00227">ARMNN_REPORT_PROFILING_WORKLOAD_DESC</a>, <a class="el" href="namespacearmnn.html#a4dc0adc6737b5944e7671bee71788407acaf9b6b99962bf5c2264824231d7a40c">armnn::info</a>, and <a class="el" href="_workload_data_8hpp_source.html#l00066">QueueDescriptorWithParameters&lt; LayerDescriptor &gt;::m_Parameters</a>.</p>
+
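+<p>In the listing above the scratch buffer is sized from the CIFG setting, and each per-time-step LSTM output later gains an extra singleton dimension so the outputs can be concatenated. A minimal standalone sketch of those two shape calculations is shown below; the helper names are hypothetical and are not part of Arm NN.</p>
+<pre class="fragment">
+// Illustrative sketch only; helper names are hypothetical, not Arm NN API.
+#include &lt;array&gt;
+#include &lt;cstdint&gt;
+
+// The scratch buffer is [num_units * 3, batch_size] with CIFG (no input gate)
+// and [num_units * 4, batch_size] without CIFG, mirroring the TensorInfo built above.
+std::array&lt;uint32_t, 2&gt; ScratchBufferShape(uint32_t batchSize, uint32_t numUnits, bool cifgEnabled)
+{
+    return { batchSize, cifgEnabled ? numUnits * 3 : numUnits * 4 };
+}
+
+// Each per-time-step LSTM output {shape[0], shape[1]} is expanded with a singleton
+// dimension before concatenation: {1, shape[0], shape[1]} for time major output,
+// {shape[0], 1, shape[1]} for batch major output.
+std::array&lt;uint32_t, 3&gt; ExpandForConcat(uint32_t dim0, uint32_t dim1, bool timeMajor)
+{
+    return timeMajor ? std::array&lt;uint32_t, 3&gt;{ 1u, dim0, dim1 }
+                     : std::array&lt;uint32_t, 3&gt;{ dim0, 1u, dim1 };
+}
+</pre>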
+</div>
+</div>
+<h2 class="groupheader">Member Function Documentation</h2>
+<a id="ae071e8822437c78baea75c3aef3a263a"></a>
+<h2 class="memtitle"><span class="permalink"><a href="#ae071e8822437c78baea75c3aef3a263a">&#9670;&nbsp;</a></span>Execute()</h2>
+
+<div class="memitem">
+<div class="memproto">
+<table class="mlabels">
+  <tr>
+  <td class="mlabels-left">
+      <table class="memname">
+        <tr>
+          <td class="memname">void Execute </td>
+          <td>(</td>
+          <td class="paramname"></td><td>)</td>
+          <td> const</td>
+        </tr>
+      </table>
+  </td>
+  <td class="mlabels-right">
+<span class="mlabels"><span class="mlabel">override</span><span class="mlabel">virtual</span></span>  </td>
+  </tr>
+</table>
+</div><div class="memdoc">
+
+<p>Implements <a class="el" href="classarmnn_1_1_i_workload.html#a72ae00e6604850c8798c5e0d825ee7e4">IWorkload</a>.</p>
+
+<p class="definition">Definition at line <a class="el" href="_neon_unidirectional_sequence_lstm_float_workload_8cpp_source.html#l00484">484</a> of file <a class="el" href="_neon_unidirectional_sequence_lstm_float_workload_8cpp_source.html">NeonUnidirectionalSequenceLstmFloatWorkload.cpp</a>.</p>
+<div class="fragment"><div class="line"><a name="l00485"></a><span class="lineno">  485</span>&#160;{</div>
+<div class="line"><a name="l00486"></a><span class="lineno">  486</span>&#160;    <a class="code" href="_neon_workload_utils_8hpp.html#a7f97eedf3c9436b110df92c947bbb55d">ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID</a>(<span class="stringliteral">&quot;NeonUnidirectionalSequenceLstmFloatWorkload_Execute&quot;</span>);</div>
+<div class="line"><a name="l00487"></a><span class="lineno">  487</span>&#160;    <span class="keywordflow">if</span> (m_Permute1)</div>
+<div class="line"><a name="l00488"></a><span class="lineno">  488</span>&#160;    {</div>
+<div class="line"><a name="l00489"></a><span class="lineno">  489</span>&#160;        m_Permute1-&gt;run();</div>
+<div class="line"><a name="l00490"></a><span class="lineno">  490</span>&#160;    }</div>
+<div class="line"><a name="l00491"></a><span class="lineno">  491</span>&#160;    <span class="keywordflow">if</span> (m_Splitter)</div>
+<div class="line"><a name="l00492"></a><span class="lineno">  492</span>&#160;    {</div>
+<div class="line"><a name="l00493"></a><span class="lineno">  493</span>&#160;        m_Splitter-&gt;run();</div>
+<div class="line"><a name="l00494"></a><span class="lineno">  494</span>&#160;    }</div>
+<div class="line"><a name="l00495"></a><span class="lineno">  495</span>&#160;    <span class="keywordflow">for</span> (uint32_t i = 0; i &lt; m_Layers.size(); ++i)</div>
+<div class="line"><a name="l00496"></a><span class="lineno">  496</span>&#160;    {</div>
+<div class="line"><a name="l00497"></a><span class="lineno">  497</span>&#160;        m_Layers[i]-&gt;run();</div>
+<div class="line"><a name="l00498"></a><span class="lineno">  498</span>&#160;    }</div>
+<div class="line"><a name="l00499"></a><span class="lineno">  499</span>&#160;    <span class="keywordflow">if</span> (m_Concat)</div>
+<div class="line"><a name="l00500"></a><span class="lineno">  500</span>&#160;    {</div>
+<div class="line"><a name="l00501"></a><span class="lineno">  501</span>&#160;        m_Concat-&gt;run();</div>
+<div class="line"><a name="l00502"></a><span class="lineno">  502</span>&#160;    }</div>
+<div class="line"><a name="l00503"></a><span class="lineno">  503</span>&#160;    <span class="keywordflow">if</span> (m_Permute2)</div>
+<div class="line"><a name="l00504"></a><span class="lineno">  504</span>&#160;    {</div>
+<div class="line"><a name="l00505"></a><span class="lineno">  505</span>&#160;        m_Permute2-&gt;run();</div>
+<div class="line"><a name="l00506"></a><span class="lineno">  506</span>&#160;    }</div>
+<div class="line"><a name="l00507"></a><span class="lineno">  507</span>&#160;}</div>
+</div><!-- fragment -->
+<p class="reference">References <a class="el" href="_neon_workload_utils_8hpp_source.html#l00032">ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID</a>.</p>
+
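+<p>Execute() runs each stage only if it was configured during construction, always in the same order: the initial permute, the splitter, one LSTM function per time step, the concatenation and the final permute. A minimal sketch of that pattern, using a generic stage list rather than the actual member functions, is shown below.</p>
+<pre class="fragment">
+// Illustrative sketch only; not the Arm NN implementation.
+#include &lt;functional&gt;
+#include &lt;vector&gt;
+
+void RunConfiguredStages(const std::vector&lt;std::function&lt;void()&gt;&gt;&amp; stages)
+{
+    for (const auto&amp; stage : stages)
+    {
+        // Stages that were never configured (e.g. no concat when there is a
+        // single batch) are left empty and simply skipped.
+        if (stage)
+        {
+            stage();
+        }
+    }
+}
+</pre>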
+</div>
+</div>
+<hr/>The documentation for this class was generated from the following files:<ul>
+<li>src/backends/neon/workloads/<a class="el" href="_neon_unidirectional_sequence_lstm_float_workload_8hpp_source.html">NeonUnidirectionalSequenceLstmFloatWorkload.hpp</a></li>
+<li>src/backends/neon/workloads/<a class="el" href="_neon_unidirectional_sequence_lstm_float_workload_8cpp_source.html">NeonUnidirectionalSequenceLstmFloatWorkload.cpp</a></li>
+</ul>
+</div><!-- contents -->
+</div><!-- doc-content -->
+<div class="ttc" id="anamespacearmnn_html_a7863c179ff92feec660c48ab7b95ae55"><div class="ttname"><a href="namespacearmnn.html#a7863c179ff92feec660c48ab7b95ae55">armnn::ConcatDescriptor</a></div><div class="ttdeci">OriginsDescriptor ConcatDescriptor</div><div class="ttdef"><b>Definition:</b> <a href="_descriptors_fwd_8hpp_source.html#l00059">DescriptorsFwd.hpp:59</a></div></div>
+<div class="ttc" id="anamespacearmnn_html_a8cbabc875597b3bed0ccdc0adb289fde"><div class="ttname"><a href="namespacearmnn.html#a8cbabc875597b3bed0ccdc0adb289fde">armnn::ComputeSplitAxis</a></div><div class="ttdeci">std::set&lt; unsigned int &gt; ComputeSplitAxis(const armnn::SplitterDescriptor &amp;desc, const TensorShape &amp;input)</div><div class="ttdef"><b>Definition:</b> <a href="_arm_compute_utils_8hpp_source.html#l00246">ArmComputeUtils.hpp:246</a></div></div>
+<div class="ttc" id="anamespacearmnn_html_a611208865d55ea576cc89ac86d7c19b7"><div class="ttname"><a href="namespacearmnn.html#a611208865d55ea576cc89ac86d7c19b7">armnn::InitializeArmComputeTensorData</a></div><div class="ttdeci">void InitializeArmComputeTensorData(arm_compute::Tensor &amp;tensor, TensorInfo tensorInfo, const ITensorHandle *handle)</div><div class="ttdef"><b>Definition:</b> <a href="_neon_workload_utils_8hpp_source.html#l00068">NeonWorkloadUtils.hpp:68</a></div></div>
+<div class="ttc" id="anamespacearmnn_html_ad8ed01ff3ff33333d8e19db4d2818bb6"><div class="ttname"><a href="namespacearmnn.html#ad8ed01ff3ff33333d8e19db4d2818bb6">armnn::DataType</a></div><div class="ttdeci">DataType</div><div class="ttdef"><b>Definition:</b> <a href="_types_8hpp_source.html#l00048">Types.hpp:48</a></div></div>
+<div class="ttc" id="anamespacearmnn_html_aa1e93ef5f9ee3dbb5e7faa9578f180ae"><div class="ttname"><a href="namespacearmnn.html#aa1e93ef5f9ee3dbb5e7faa9578f180ae">armnn::ConvertLstmActivationFuncToAclLayerInfo</a></div><div class="ttdeci">arm_compute::ActivationLayerInfo ConvertLstmActivationFuncToAclLayerInfo(uint32_t activationFunction)</div><div class="ttdef"><b>Definition:</b> <a href="_arm_compute_utils_8hpp_source.html#l00118">ArmComputeUtils.hpp:118</a></div></div>
+<div class="ttc" id="anamespacearmnn_html_a4dc0adc6737b5944e7671bee71788407acaf9b6b99962bf5c2264824231d7a40c"><div class="ttname"><a href="namespacearmnn.html#a4dc0adc6737b5944e7671bee71788407acaf9b6b99962bf5c2264824231d7a40c">armnn::BoostLogSeverityMapping::info</a></div><div class="ttdeci">@ info</div></div>
+<div class="ttc" id="astructarmnn_1_1_queue_descriptor_html_a6abd491bb99ffe88bd472c1ae5a1ed1a"><div class="ttname"><a href="structarmnn_1_1_queue_descriptor.html#a6abd491bb99ffe88bd472c1ae5a1ed1a">armnn::QueueDescriptor::m_Outputs</a></div><div class="ttdeci">std::vector&lt; ITensorHandle * &gt; m_Outputs</div><div class="ttdef"><b>Definition:</b> <a href="_workload_data_8hpp_source.html#l00027">WorkloadData.hpp:27</a></div></div>
+<div class="ttc" id="a_profiling_8hpp_html_a786492a3881a4c760ab1eec2149f4aba"><div class="ttname"><a href="_profiling_8hpp.html#a786492a3881a4c760ab1eec2149f4aba">ARMNN_REPORT_PROFILING_WORKLOAD_DESC</a></div><div class="ttdeci">#define ARMNN_REPORT_PROFILING_WORKLOAD_DESC(name, desc, infos, guid)</div><div class="ttdef"><b>Definition:</b> <a href="_profiling_8hpp_source.html#l00227">Profiling.hpp:227</a></div></div>
+<div class="ttc" id="aclassarmnn_1_1_base_workload_html_aaff95a48875d8fb4a616352906660ca9"><div class="ttname"><a href="classarmnn_1_1_base_workload.html#aaff95a48875d8fb4a616352906660ca9">armnn::BaseWorkload::GetGuid</a></div><div class="ttdeci">arm::pipe::ProfilingGuid GetGuid() const final</div><div class="ttdef"><b>Definition:</b> <a href="_workload_8hpp_source.html#l00067">Workload.hpp:67</a></div></div>
+<div class="ttc" id="aclassarmnn_1_1_base_workload_html_afb8d2c8817c75de9d01a4c0e0d5c160b"><div class="ttname"><a href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">armnn::BaseWorkload::m_Data</a></div><div class="ttdeci">QueueDescriptor m_Data</div><div class="ttdef"><b>Definition:</b> <a href="_workload_8hpp_source.html#l00089">Workload.hpp:89</a></div></div>
+<div class="ttc" id="a_neon_workload_utils_8hpp_html_a7f97eedf3c9436b110df92c947bbb55d"><div class="ttname"><a href="_neon_workload_utils_8hpp.html#a7f97eedf3c9436b110df92c947bbb55d">ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID</a></div><div class="ttdeci">#define ARMNN_SCOPED_PROFILING_EVENT_NEON_NAME_GUID(label)</div><div class="ttdoc">Creates a profiling event that uses GetGuid() and GetName() from the calling class.</div><div class="ttdef"><b>Definition:</b> <a href="_neon_workload_utils_8hpp_source.html#l00032">NeonWorkloadUtils.hpp:32</a></div></div>
+<div class="ttc" id="anamespacearmnn_utils_html_ab53d94ea22b51c6bcdf9584644bd67bb"><div class="ttname"><a href="namespacearmnn_utils.html#ab53d94ea22b51c6bcdf9584644bd67bb">armnnUtils::GetTensorShape</a></div><div class="ttdeci">armnn::TensorShape GetTensorShape(unsigned int numberOfBatches, unsigned int numberOfChannels, unsigned int height, unsigned int width, const armnn::DataLayout dataLayout)</div><div class="ttdef"><b>Definition:</b> <a href="_tensor_utils_8cpp_source.html#l00021">TensorUtils.cpp:21</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_queue_descriptor_html_a4b50e46a6810018f3edecfb68b2a76b3"><div class="ttname"><a href="structarmnn_1_1_queue_descriptor.html#a4b50e46a6810018f3edecfb68b2a76b3">armnn::QueueDescriptor::m_Inputs</a></div><div class="ttdeci">std::vector&lt; ITensorHandle * &gt; m_Inputs</div><div class="ttdef"><b>Definition:</b> <a href="_workload_data_8hpp_source.html#l00026">WorkloadData.hpp:26</a></div></div>
+<!-- start footer part -->
+<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
+  <ul>
+    <li class="navelem"><a class="el" href="namespacearmnn.html">armnn</a></li><li class="navelem"><a class="el" href="classarmnn_1_1_neon_unidirectional_sequence_lstm_float_workload.html">NeonUnidirectionalSequenceLstmFloatWorkload</a></li>
+    <li class="footer">Generated on Wed Feb 14 2024 16:36:24 for Arm NN by
+    <a href="http://www.doxygen.org/index.html">
+    <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.17 </li>
+  </ul>
+</div>
+</body>
+</html>