IVGCVSW-7702 Update Doxygen Documentation for 23.08

Signed-off-by: Nikhil Raj <nikhil.raj@arm.com>
Change-Id: I357a9f7e47614589327c1ac5d95b6224ff77103d
diff --git a/latest/_cl_quantized_lstm_workload_8cpp_source.html b/latest/_cl_quantized_lstm_workload_8cpp_source.html
new file mode 100644
index 0000000..3ee4490
--- /dev/null
+++ b/latest/_cl_quantized_lstm_workload_8cpp_source.html
@@ -0,0 +1,324 @@
+<!-- HTML header for doxygen 1.8.17-->
+<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "https://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
+<html xmlns="http://www.w3.org/1999/xhtml">
+<head>
+<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
+<meta http-equiv="X-UA-Compatible" content="IE=9"/>
+<meta name="generator" content="Doxygen 1.8.17"/>
+<meta name="viewport" content="width=device-width, initial-scale=1"/>
+<title>Arm NN: src/backends/cl/workloads/ClQuantizedLstmWorkload.cpp Source File</title>
+<link href="tabs.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="jquery.js"></script>
+<script type="text/javascript" src="dynsections.js"></script>
+<link href="navtree.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="resize.js"></script>
+<script type="text/javascript" src="navtreedata.js"></script>
+<script type="text/javascript" src="navtree.js"></script>
+<link href="search/search.css" rel="stylesheet" type="text/css"/>
+<script type="text/javascript" src="search/searchdata.js"></script>
+<script type="text/javascript" src="search/search.js"></script>
+<script type="text/x-mathjax-config">
+  MathJax.Hub.Config({
+    extensions: ["tex2jax.js"],
+    jax: ["input/TeX","output/HTML-CSS"],
+});
+</script>
+<script type="text/javascript" async="async" src="http://cdn.mathjax.org/mathjax/latest/MathJax.js"></script>
+<link href="doxygen.css" rel="stylesheet" type="text/css" />
+<link href="customdoxygen.css" rel="stylesheet" type="text/css"/>
+</head>
+<body>
+<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
+<div id="titlearea">
+<table cellspacing="0" cellpadding="0">
+ <tbody>
+ <tr style="height: 56px;">
+  <img alt="ArmNN" src="Arm_NN_horizontal_blue.png" style="max-width: 15rem; margin-top: .5rem; margin-left 13px"/>
+  <td id="projectalign" style="padding-left: 0.9em;">
+   <div id="projectname">
+   &#160;<span id="projectnumber">23.08</span>
+   </div>
+  </td>
+ </tr>
+ </tbody>
+</table>
+</div>
+<!-- end header part -->
+<!-- Generated by Doxygen 1.8.17 -->
+<script type="text/javascript">
+/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
+var searchBox = new SearchBox("searchBox", "search",false,'Search');
+/* @license-end */
+</script>
+<script type="text/javascript" src="menudata.js"></script>
+<script type="text/javascript" src="menu.js"></script>
+<script type="text/javascript">
+/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
+$(function() {
+  initMenu('',true,false,'search.php','Search');
+  $(document).ready(function() { init_search(); });
+});
+/* @license-end */</script>
+<div id="main-nav"></div>
+</div><!-- top -->
+<div id="side-nav" class="ui-resizable side-nav-resizable">
+  <div id="nav-tree">
+    <div id="nav-tree-contents">
+      <div id="nav-sync" class="sync"></div>
+    </div>
+  </div>
+  <div id="splitbar" style="-moz-user-select:none;" 
+       class="ui-resizable-handle">
+  </div>
+</div>
+<script type="text/javascript">
+/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
+$(document).ready(function(){initNavTree('_cl_quantized_lstm_workload_8cpp_source.html',''); initResizable(); });
+/* @license-end */
+</script>
+<div id="doc-content">
+<!-- window showing the filter options -->
+<div id="MSearchSelectWindow"
+     onmouseover="return searchBox.OnSearchSelectShow()"
+     onmouseout="return searchBox.OnSearchSelectHide()"
+     onkeydown="return searchBox.OnSearchSelectKey(event)">
+</div>
+
+<!-- iframe showing the search results (closed by default) -->
+<div id="MSearchResultsWindow">
+<iframe src="javascript:void(0)" frameborder="0" 
+        name="MSearchResults" id="MSearchResults">
+</iframe>
+</div>
+
+<div class="header">
+  <div class="headertitle">
+<div class="title">ClQuantizedLstmWorkload.cpp</div>  </div>
+</div><!--header-->
+<div class="contents">
+<a href="_cl_quantized_lstm_workload_8cpp.html">Go to the documentation of this file.</a><div class="fragment"><div class="line"><a name="l00001"></a><span class="lineno">    1</span>&#160;<span class="comment">//</span></div>
+<div class="line"><a name="l00002"></a><span class="lineno">    2</span>&#160;<span class="comment">// Copyright © 2019-2023 Arm Ltd and Contributors. All rights reserved.</span></div>
+<div class="line"><a name="l00003"></a><span class="lineno">    3</span>&#160;<span class="comment">// SPDX-License-Identifier: MIT</span></div>
+<div class="line"><a name="l00004"></a><span class="lineno">    4</span>&#160;<span class="comment">//</span></div>
+<div class="line"><a name="l00005"></a><span class="lineno">    5</span>&#160; </div>
+<div class="line"><a name="l00006"></a><span class="lineno">    6</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_cl_quantized_lstm_workload_8hpp.html">ClQuantizedLstmWorkload.hpp</a>&quot;</span></div>
+<div class="line"><a name="l00007"></a><span class="lineno">    7</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_cl_workload_utils_8hpp.html">ClWorkloadUtils.hpp</a>&quot;</span></div>
+<div class="line"><a name="l00008"></a><span class="lineno">    8</span>&#160; </div>
+<div class="line"><a name="l00009"></a><span class="lineno">    9</span>&#160;<span class="preprocessor">#include &lt;<a class="code" href="_tensor_handle_8hpp.html">armnn/backends/TensorHandle.hpp</a>&gt;</span></div>
+<div class="line"><a name="l00010"></a><span class="lineno">   10</span>&#160;<span class="preprocessor">#include &lt;<a class="code" href="_arm_compute_tensor_utils_8hpp.html">aclCommon/ArmComputeTensorUtils.hpp</a>&gt;</span></div>
+<div class="line"><a name="l00011"></a><span class="lineno">   11</span>&#160;<span class="preprocessor">#include &lt;<a class="code" href="_cl_tensor_handle_8hpp.html">cl/ClTensorHandle.hpp</a>&gt;</span></div>
+<div class="line"><a name="l00012"></a><span class="lineno">   12</span>&#160; </div>
+<div class="line"><a name="l00013"></a><span class="lineno">   13</span>&#160;<span class="keyword">namespace </span><a class="code" href="namespacearmnn.html">armnn</a></div>
+<div class="line"><a name="l00014"></a><span class="lineno">   14</span>&#160;{</div>
+<div class="line"><a name="l00015"></a><span class="lineno">   15</span>&#160; </div>
+<div class="line"><a name="l00016"></a><span class="lineno">   16</span>&#160;<span class="keyword">using namespace </span>armcomputetensorutils;</div>
+<div class="line"><a name="l00017"></a><span class="lineno">   17</span>&#160; </div>
+<div class="line"><a name="l00018"></a><span class="lineno"><a class="line" href="namespacearmnn.html#a5fb7fe07abfb2373103d842b47a24726">   18</a></span>&#160;<a class="code" href="namespacearmnn.html#a67a0db04d321a74b7e7fcfd3f1a3f70b">arm_compute::Status</a> <a class="code" href="namespacearmnn.html#a5fb7fe07abfb2373103d842b47a24726">ClQuantizedLstmWorkloadValidate</a>(<span class="keyword">const</span> <a class="code" href="classarmnn_1_1_tensor_info.html">TensorInfo</a>&amp; input, <span class="keyword">const</span> <a class="code" href="classarmnn_1_1_tensor_info.html">TensorInfo</a>&amp; previousCellStateIn,</div>
+<div class="line"><a name="l00019"></a><span class="lineno">   19</span>&#160;                                                    <span class="keyword">const</span> <a class="code" href="classarmnn_1_1_tensor_info.html">TensorInfo</a>&amp; previousOutputIn, <span class="keyword">const</span> <a class="code" href="classarmnn_1_1_tensor_info.html">TensorInfo</a>&amp; cellStateOut,</div>
+<div class="line"><a name="l00020"></a><span class="lineno">   20</span>&#160;                                                    <span class="keyword">const</span> <a class="code" href="classarmnn_1_1_tensor_info.html">TensorInfo</a>&amp; output,</div>
+<div class="line"><a name="l00021"></a><span class="lineno">   21</span>&#160;                                                    <span class="keyword">const</span> <a class="code" href="structarmnn_1_1_quantized_lstm_input_params_info.html">QuantizedLstmInputParamsInfo</a>&amp; paramsInfo)</div>
+<div class="line"><a name="l00022"></a><span class="lineno">   22</span>&#160;{</div>
+<div class="line"><a name="l00023"></a><span class="lineno">   23</span>&#160;    <span class="comment">// Inputs</span></div>
+<div class="line"><a name="l00024"></a><span class="lineno">   24</span>&#160;    <span class="keyword">const</span> arm_compute::TensorInfo aclInputInfo               = BuildArmComputeTensorInfo(input);</div>
+<div class="line"><a name="l00025"></a><span class="lineno">   25</span>&#160;    <span class="keyword">const</span> arm_compute::TensorInfo aclPreviousCellStateInInfo = BuildArmComputeTensorInfo(previousCellStateIn);</div>
+<div class="line"><a name="l00026"></a><span class="lineno">   26</span>&#160;    <span class="keyword">const</span> arm_compute::TensorInfo aclPreviousOutputInInfo    = BuildArmComputeTensorInfo(previousOutputIn);</div>
+<div class="line"><a name="l00027"></a><span class="lineno">   27</span>&#160; </div>
+<div class="line"><a name="l00028"></a><span class="lineno">   28</span>&#160;    <span class="comment">// Outputs</span></div>
+<div class="line"><a name="l00029"></a><span class="lineno">   29</span>&#160;    <span class="keyword">const</span> arm_compute::TensorInfo aclCellStateOutInfo        = BuildArmComputeTensorInfo(cellStateOut);</div>
+<div class="line"><a name="l00030"></a><span class="lineno">   30</span>&#160;    <span class="keyword">const</span> arm_compute::TensorInfo aclOutputInfo              = BuildArmComputeTensorInfo(output);</div>
+<div class="line"><a name="l00031"></a><span class="lineno">   31</span>&#160; </div>
+<div class="line"><a name="l00032"></a><span class="lineno">   32</span>&#160;    <span class="comment">// Basic parameters</span></div>
+<div class="line"><a name="l00033"></a><span class="lineno">   33</span>&#160;    <span class="keyword">const</span> arm_compute::TensorInfo aclInputToInputWeightsInfo</div>
+<div class="line"><a name="l00034"></a><span class="lineno">   34</span>&#160;                                  = BuildArmComputeTensorInfo(paramsInfo.<a class="code" href="structarmnn_1_1_quantized_lstm_input_params_info.html#afa2b04197a764428a8c3a648de8058fc">GetInputToInputWeights</a>());</div>
+<div class="line"><a name="l00035"></a><span class="lineno">   35</span>&#160;    <span class="keyword">const</span> arm_compute::TensorInfo aclInputToForgetWeightsInfo</div>
+<div class="line"><a name="l00036"></a><span class="lineno">   36</span>&#160;                                  = BuildArmComputeTensorInfo(paramsInfo.<a class="code" href="structarmnn_1_1_quantized_lstm_input_params_info.html#a7dac08f19a1b235d5256d39136848a09">GetInputToForgetWeights</a>());</div>
+<div class="line"><a name="l00037"></a><span class="lineno">   37</span>&#160;    <span class="keyword">const</span> arm_compute::TensorInfo aclInputToCellWeightsInfo</div>
+<div class="line"><a name="l00038"></a><span class="lineno">   38</span>&#160;                                  = BuildArmComputeTensorInfo(paramsInfo.<a class="code" href="structarmnn_1_1_quantized_lstm_input_params_info.html#a3b3c26330a05bf4ea40f8a6b402be354">GetInputToCellWeights</a>());</div>
+<div class="line"><a name="l00039"></a><span class="lineno">   39</span>&#160;    <span class="keyword">const</span> arm_compute::TensorInfo aclInputToOutputWeightsInfo</div>
+<div class="line"><a name="l00040"></a><span class="lineno">   40</span>&#160;                                  = BuildArmComputeTensorInfo(paramsInfo.<a class="code" href="structarmnn_1_1_quantized_lstm_input_params_info.html#a800adf0f61e84d706060f63037c1a336">GetInputToOutputWeights</a>());</div>
+<div class="line"><a name="l00041"></a><span class="lineno">   41</span>&#160;    <span class="keyword">const</span> arm_compute::TensorInfo aclRecurrentToInputWeightsInfo</div>
+<div class="line"><a name="l00042"></a><span class="lineno">   42</span>&#160;                                  = BuildArmComputeTensorInfo(paramsInfo.<a class="code" href="structarmnn_1_1_quantized_lstm_input_params_info.html#ad159f9edbddeeb6cf6ff0ba042481ba8">GetRecurrentToInputWeights</a>());</div>
+<div class="line"><a name="l00043"></a><span class="lineno">   43</span>&#160;    <span class="keyword">const</span> arm_compute::TensorInfo aclRecurrentToForgetWeightsInfo</div>
+<div class="line"><a name="l00044"></a><span class="lineno">   44</span>&#160;                                  = BuildArmComputeTensorInfo(paramsInfo.<a class="code" href="structarmnn_1_1_quantized_lstm_input_params_info.html#a534af7e4f3a6d50a6dab05abc245133d">GetRecurrentToForgetWeights</a>());</div>
+<div class="line"><a name="l00045"></a><span class="lineno">   45</span>&#160;    <span class="keyword">const</span> arm_compute::TensorInfo aclRecurrentToCellWeightsInfo</div>
+<div class="line"><a name="l00046"></a><span class="lineno">   46</span>&#160;                                  = BuildArmComputeTensorInfo(paramsInfo.<a class="code" href="structarmnn_1_1_quantized_lstm_input_params_info.html#ae5bfdd423b16f990c1713ef9f91f947b">GetRecurrentToCellWeights</a>());</div>
+<div class="line"><a name="l00047"></a><span class="lineno">   47</span>&#160;    <span class="keyword">const</span> arm_compute::TensorInfo aclRecurrentToOutputWeightsInfo</div>
+<div class="line"><a name="l00048"></a><span class="lineno">   48</span>&#160;                                  = BuildArmComputeTensorInfo(paramsInfo.<a class="code" href="structarmnn_1_1_quantized_lstm_input_params_info.html#afe4d25acd31b98dee6f6b28d4d756071">GetRecurrentToOutputWeights</a>());</div>
+<div class="line"><a name="l00049"></a><span class="lineno">   49</span>&#160;    <span class="keyword">const</span> arm_compute::TensorInfo aclInputGateBiasInfo  = BuildArmComputeTensorInfo(paramsInfo.<a class="code" href="structarmnn_1_1_quantized_lstm_input_params_info.html#ae1d5a487fcd13852927c8a2b9f9dfeb6">GetInputGateBias</a>());</div>
+<div class="line"><a name="l00050"></a><span class="lineno">   50</span>&#160;    <span class="keyword">const</span> arm_compute::TensorInfo aclForgetGateBiasInfo = BuildArmComputeTensorInfo(paramsInfo.<a class="code" href="structarmnn_1_1_quantized_lstm_input_params_info.html#ac81393ef433b0c7c337f9f0d55f41ae4">GetForgetGateBias</a>());</div>
+<div class="line"><a name="l00051"></a><span class="lineno">   51</span>&#160;    <span class="keyword">const</span> arm_compute::TensorInfo aclCellBiasInfo       = BuildArmComputeTensorInfo(paramsInfo.<a class="code" href="structarmnn_1_1_quantized_lstm_input_params_info.html#ad5f4be37766b41f342dd196cb1c6e141">GetCellBias</a>());</div>
+<div class="line"><a name="l00052"></a><span class="lineno">   52</span>&#160;    <span class="keyword">const</span> arm_compute::TensorInfo aclOutputGateBiasInfo = BuildArmComputeTensorInfo(paramsInfo.<a class="code" href="structarmnn_1_1_quantized_lstm_input_params_info.html#ae0da94ba17ce67b95b5b9d6e5adc4271">GetOutputGateBias</a>());</div>
+<div class="line"><a name="l00053"></a><span class="lineno">   53</span>&#160; </div>
+<div class="line"><a name="l00054"></a><span class="lineno">   54</span>&#160;    <span class="keywordflow">return</span> arm_compute::CLLSTMLayerQuantized::validate(&amp;aclInputInfo, &amp;aclInputToInputWeightsInfo,</div>
+<div class="line"><a name="l00055"></a><span class="lineno">   55</span>&#160;                                                       &amp;aclInputToForgetWeightsInfo, &amp;aclInputToCellWeightsInfo,</div>
+<div class="line"><a name="l00056"></a><span class="lineno">   56</span>&#160;                                                       &amp;aclInputToOutputWeightsInfo, &amp;aclRecurrentToInputWeightsInfo,</div>
+<div class="line"><a name="l00057"></a><span class="lineno">   57</span>&#160;                                                       &amp;aclRecurrentToForgetWeightsInfo, &amp;aclRecurrentToCellWeightsInfo,</div>
+<div class="line"><a name="l00058"></a><span class="lineno">   58</span>&#160;                                                       &amp;aclRecurrentToOutputWeightsInfo, &amp;aclInputGateBiasInfo,</div>
+<div class="line"><a name="l00059"></a><span class="lineno">   59</span>&#160;                                                       &amp;aclForgetGateBiasInfo, &amp;aclCellBiasInfo, &amp;aclOutputGateBiasInfo,</div>
+<div class="line"><a name="l00060"></a><span class="lineno">   60</span>&#160;                                                       &amp;aclPreviousCellStateInInfo, &amp;aclPreviousOutputInInfo,</div>
+<div class="line"><a name="l00061"></a><span class="lineno">   61</span>&#160;                                                       &amp;aclCellStateOutInfo, &amp;aclOutputInfo);</div>
+<div class="line"><a name="l00062"></a><span class="lineno">   62</span>&#160;}</div>
+<div class="line"><a name="l00063"></a><span class="lineno">   63</span>&#160; </div>
+<div class="line"><a name="l00064"></a><span class="lineno"><a class="line" href="classarmnn_1_1_cl_quantized_lstm_workload.html#a004528fc09cdcdec10388f011fbfc184">   64</a></span>&#160;<a class="code" href="classarmnn_1_1_cl_quantized_lstm_workload.html#a004528fc09cdcdec10388f011fbfc184">ClQuantizedLstmWorkload::ClQuantizedLstmWorkload</a>(<span class="keyword">const</span> <a class="code" href="structarmnn_1_1_quantized_lstm_queue_descriptor.html">QuantizedLstmQueueDescriptor</a> &amp;descriptor,</div>
+<div class="line"><a name="l00065"></a><span class="lineno">   65</span>&#160;                                                 <span class="keyword">const</span> <a class="code" href="structarmnn_1_1_workload_info.html">WorkloadInfo</a> &amp;info,</div>
+<div class="line"><a name="l00066"></a><span class="lineno">   66</span>&#160;                                                 <span class="keyword">const</span> arm_compute::CLCompileContext&amp; clCompileContext)</div>
+<div class="line"><a name="l00067"></a><span class="lineno">   67</span>&#160;                                                 : <a class="code" href="classarmnn_1_1_cl_base_workload.html">ClBaseWorkload</a>&lt;<a class="code" href="structarmnn_1_1_quantized_lstm_queue_descriptor.html">QuantizedLstmQueueDescriptor</a>&gt;(descriptor, <a class="code" href="namespacearmnn.html#a4dc0adc6737b5944e7671bee71788407acaf9b6b99962bf5c2264824231d7a40c">info</a>)</div>
+<div class="line"><a name="l00068"></a><span class="lineno">   68</span>&#160;{</div>
+<div class="line"><a name="l00069"></a><span class="lineno">   69</span>&#160;    m_InputToInputWeightsTensor = std::make_unique&lt;arm_compute::CLTensor&gt;();</div>
+<div class="line"><a name="l00070"></a><span class="lineno">   70</span>&#160;    BuildArmComputeTensor(*m_InputToInputWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#ab160eba2493d5fe52185c0986dcb190c">m_InputToInputWeights</a>-&gt;<a class="code" href="classarmnn_1_1_const_tensor_handle.html#a66e8f43a5b42b500871ed96e15419567">GetTensorInfo</a>());</div>
+<div class="line"><a name="l00071"></a><span class="lineno">   71</span>&#160; </div>
+<div class="line"><a name="l00072"></a><span class="lineno">   72</span>&#160;    m_InputToForgetWeightsTensor = std::make_unique&lt;arm_compute::CLTensor&gt;();</div>
+<div class="line"><a name="l00073"></a><span class="lineno">   73</span>&#160;    BuildArmComputeTensor(*m_InputToForgetWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#aab77f54a037658ca9b2bf9cc8a1fadf1">m_InputToForgetWeights</a>-&gt;<a class="code" href="classarmnn_1_1_const_tensor_handle.html#a66e8f43a5b42b500871ed96e15419567">GetTensorInfo</a>());</div>
+<div class="line"><a name="l00074"></a><span class="lineno">   74</span>&#160; </div>
+<div class="line"><a name="l00075"></a><span class="lineno">   75</span>&#160;    m_InputToCellWeightsTensor = std::make_unique&lt;arm_compute::CLTensor&gt;();</div>
+<div class="line"><a name="l00076"></a><span class="lineno">   76</span>&#160;    BuildArmComputeTensor(*m_InputToCellWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#a841439e3b8dc157a7368b19c9ecb7d03">m_InputToCellWeights</a>-&gt;<a class="code" href="classarmnn_1_1_const_tensor_handle.html#a66e8f43a5b42b500871ed96e15419567">GetTensorInfo</a>());</div>
+<div class="line"><a name="l00077"></a><span class="lineno">   77</span>&#160; </div>
+<div class="line"><a name="l00078"></a><span class="lineno">   78</span>&#160;    m_InputToOutputWeightsTensor = std::make_unique&lt;arm_compute::CLTensor&gt;();</div>
+<div class="line"><a name="l00079"></a><span class="lineno">   79</span>&#160;    BuildArmComputeTensor(*m_InputToOutputWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#a17ba1c8bcc71a55a95b2a3913f8cb203">m_InputToOutputWeights</a>-&gt;<a class="code" href="classarmnn_1_1_const_tensor_handle.html#a66e8f43a5b42b500871ed96e15419567">GetTensorInfo</a>());</div>
+<div class="line"><a name="l00080"></a><span class="lineno">   80</span>&#160; </div>
+<div class="line"><a name="l00081"></a><span class="lineno">   81</span>&#160;    m_RecurrentToInputWeightsTensor = std::make_unique&lt;arm_compute::CLTensor&gt;();</div>
+<div class="line"><a name="l00082"></a><span class="lineno">   82</span>&#160;    BuildArmComputeTensor(*m_RecurrentToInputWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#a299587d4f3fca029492700f3e2585bd8">m_RecurrentToInputWeights</a>-&gt;<a class="code" href="classarmnn_1_1_const_tensor_handle.html#a66e8f43a5b42b500871ed96e15419567">GetTensorInfo</a>());</div>
+<div class="line"><a name="l00083"></a><span class="lineno">   83</span>&#160; </div>
+<div class="line"><a name="l00084"></a><span class="lineno">   84</span>&#160;    m_RecurrentToForgetWeightsTensor = std::make_unique&lt;arm_compute::CLTensor&gt;();</div>
+<div class="line"><a name="l00085"></a><span class="lineno">   85</span>&#160;    BuildArmComputeTensor(*m_RecurrentToForgetWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#adf8571dd1867ee91082bd005f94f2610">m_RecurrentToForgetWeights</a>-&gt;<a class="code" href="classarmnn_1_1_const_tensor_handle.html#a66e8f43a5b42b500871ed96e15419567">GetTensorInfo</a>());</div>
+<div class="line"><a name="l00086"></a><span class="lineno">   86</span>&#160; </div>
+<div class="line"><a name="l00087"></a><span class="lineno">   87</span>&#160;    m_RecurrentToCellWeightsTensor = std::make_unique&lt;arm_compute::CLTensor&gt;();</div>
+<div class="line"><a name="l00088"></a><span class="lineno">   88</span>&#160;    BuildArmComputeTensor(*m_RecurrentToCellWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#ac18c8b8b2039267d8282e91b4162d8aa">m_RecurrentToCellWeights</a>-&gt;<a class="code" href="classarmnn_1_1_const_tensor_handle.html#a66e8f43a5b42b500871ed96e15419567">GetTensorInfo</a>());</div>
+<div class="line"><a name="l00089"></a><span class="lineno">   89</span>&#160; </div>
+<div class="line"><a name="l00090"></a><span class="lineno">   90</span>&#160;    m_RecurrentToOutputWeightsTensor = std::make_unique&lt;arm_compute::CLTensor&gt;();</div>
+<div class="line"><a name="l00091"></a><span class="lineno">   91</span>&#160;    BuildArmComputeTensor(*m_RecurrentToOutputWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#a4c27716f61bb68e8ea0bd4e8389ba01a">m_RecurrentToOutputWeights</a>-&gt;<a class="code" href="classarmnn_1_1_const_tensor_handle.html#a66e8f43a5b42b500871ed96e15419567">GetTensorInfo</a>());</div>
+<div class="line"><a name="l00092"></a><span class="lineno">   92</span>&#160; </div>
+<div class="line"><a name="l00093"></a><span class="lineno">   93</span>&#160;    m_InputGateBiasTensor = std::make_unique&lt;arm_compute::CLTensor&gt;();</div>
+<div class="line"><a name="l00094"></a><span class="lineno">   94</span>&#160;    BuildArmComputeTensor(*m_InputGateBiasTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#a44eb7524badcca9b2073359e3814c98b">m_InputGateBias</a>-&gt;<a class="code" href="classarmnn_1_1_const_tensor_handle.html#a66e8f43a5b42b500871ed96e15419567">GetTensorInfo</a>());</div>
+<div class="line"><a name="l00095"></a><span class="lineno">   95</span>&#160; </div>
+<div class="line"><a name="l00096"></a><span class="lineno">   96</span>&#160;    m_ForgetGateBiasTensor = std::make_unique&lt;arm_compute::CLTensor&gt;();</div>
+<div class="line"><a name="l00097"></a><span class="lineno">   97</span>&#160;    BuildArmComputeTensor(*m_ForgetGateBiasTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#a29fa293fffbf9c6f00cd75db1dc0a52a">m_ForgetGateBias</a>-&gt;<a class="code" href="classarmnn_1_1_const_tensor_handle.html#a66e8f43a5b42b500871ed96e15419567">GetTensorInfo</a>());</div>
+<div class="line"><a name="l00098"></a><span class="lineno">   98</span>&#160; </div>
+<div class="line"><a name="l00099"></a><span class="lineno">   99</span>&#160;    m_CellBiasTensor = std::make_unique&lt;arm_compute::CLTensor&gt;();</div>
+<div class="line"><a name="l00100"></a><span class="lineno">  100</span>&#160;    BuildArmComputeTensor(*m_CellBiasTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#a6e9593869b82984de198fed27f72cdcf">m_CellBias</a>-&gt;<a class="code" href="classarmnn_1_1_const_tensor_handle.html#a66e8f43a5b42b500871ed96e15419567">GetTensorInfo</a>());</div>
+<div class="line"><a name="l00101"></a><span class="lineno">  101</span>&#160; </div>
+<div class="line"><a name="l00102"></a><span class="lineno">  102</span>&#160;    m_OutputGateBiasTensor = std::make_unique&lt;arm_compute::CLTensor&gt;();</div>
+<div class="line"><a name="l00103"></a><span class="lineno">  103</span>&#160;    BuildArmComputeTensor(*m_OutputGateBiasTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#a5ff4158b1b363b898d0da04c42d37ce0">m_OutputGateBias</a>-&gt;<a class="code" href="classarmnn_1_1_const_tensor_handle.html#a66e8f43a5b42b500871ed96e15419567">GetTensorInfo</a>());</div>
+<div class="line"><a name="l00104"></a><span class="lineno">  104</span>&#160; </div>
+<div class="line"><a name="l00105"></a><span class="lineno">  105</span>&#160;    <span class="keyword">const</span> arm_compute::ICLTensor&amp; inputTensor         = <span class="keyword">static_cast&lt;</span><a class="code" href="classarmnn_1_1_i_cl_tensor_handle.html">IClTensorHandle</a>*<span class="keyword">&gt;</span>(<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_queue_descriptor.html#a4b50e46a6810018f3edecfb68b2a76b3">m_Inputs</a>[0])-&gt;GetTensor();</div>
+<div class="line"><a name="l00106"></a><span class="lineno">  106</span>&#160;    arm_compute::ICLTensor&amp; cellStateInTensor         = <span class="keyword">static_cast&lt;</span><a class="code" href="classarmnn_1_1_i_cl_tensor_handle.html">IClTensorHandle</a>*<span class="keyword">&gt;</span>(<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_queue_descriptor.html#a4b50e46a6810018f3edecfb68b2a76b3">m_Inputs</a>[1])-&gt;GetTensor();</div>
+<div class="line"><a name="l00107"></a><span class="lineno">  107</span>&#160;    <span class="keyword">const</span> arm_compute::ICLTensor&amp; outputStateInTensor = <span class="keyword">static_cast&lt;</span><a class="code" href="classarmnn_1_1_i_cl_tensor_handle.html">IClTensorHandle</a>*<span class="keyword">&gt;</span>(<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_queue_descriptor.html#a4b50e46a6810018f3edecfb68b2a76b3">m_Inputs</a>[2])-&gt;GetTensor();</div>
+<div class="line"><a name="l00108"></a><span class="lineno">  108</span>&#160; </div>
+<div class="line"><a name="l00109"></a><span class="lineno">  109</span>&#160;    arm_compute::ICLTensor&amp; cellStateOutTensor        = <span class="keyword">static_cast&lt;</span><a class="code" href="classarmnn_1_1_i_cl_tensor_handle.html">IClTensorHandle</a>*<span class="keyword">&gt;</span>(<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_queue_descriptor.html#a6abd491bb99ffe88bd472c1ae5a1ed1a">m_Outputs</a>[0])-&gt;GetTensor();</div>
+<div class="line"><a name="l00110"></a><span class="lineno">  110</span>&#160;    arm_compute::ICLTensor&amp; outputStateOutTensor      = <span class="keyword">static_cast&lt;</span><a class="code" href="classarmnn_1_1_i_cl_tensor_handle.html">IClTensorHandle</a>*<span class="keyword">&gt;</span>(<a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_queue_descriptor.html#a6abd491bb99ffe88bd472c1ae5a1ed1a">m_Outputs</a>[1])-&gt;GetTensor();</div>
+<div class="line"><a name="l00111"></a><span class="lineno">  111</span>&#160; </div>
+<div class="line"><a name="l00112"></a><span class="lineno">  112</span>&#160;    {</div>
+<div class="line"><a name="l00113"></a><span class="lineno">  113</span>&#160;        <a class="code" href="_cl_workload_utils_8hpp.html#a2d57ef1645138f5f8a6dbd2ce92dc072">ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID</a>(<span class="stringliteral">&quot;ClQuantizedLstmWorkload_configure&quot;</span>);</div>
+<div class="line"><a name="l00114"></a><span class="lineno">  114</span>&#160;        m_QuantizedLstmLayer.configure(clCompileContext, &amp;inputTensor, m_InputToInputWeightsTensor.get(),</div>
+<div class="line"><a name="l00115"></a><span class="lineno">  115</span>&#160;                                       m_InputToForgetWeightsTensor.get(),</div>
+<div class="line"><a name="l00116"></a><span class="lineno">  116</span>&#160;                                       m_InputToCellWeightsTensor.get(), m_InputToOutputWeightsTensor.get(),</div>
+<div class="line"><a name="l00117"></a><span class="lineno">  117</span>&#160;                                       m_RecurrentToInputWeightsTensor.get(), m_RecurrentToForgetWeightsTensor.get(),</div>
+<div class="line"><a name="l00118"></a><span class="lineno">  118</span>&#160;                                       m_RecurrentToCellWeightsTensor.get(), m_RecurrentToOutputWeightsTensor.get(),</div>
+<div class="line"><a name="l00119"></a><span class="lineno">  119</span>&#160;                                       m_InputGateBiasTensor.get(), m_ForgetGateBiasTensor.get(),</div>
+<div class="line"><a name="l00120"></a><span class="lineno">  120</span>&#160;                                       m_CellBiasTensor.get(),</div>
+<div class="line"><a name="l00121"></a><span class="lineno">  121</span>&#160;                                       m_OutputGateBiasTensor.get(), &amp;cellStateInTensor, &amp;outputStateInTensor,</div>
+<div class="line"><a name="l00122"></a><span class="lineno">  122</span>&#160;                                       &amp;cellStateOutTensor, &amp;outputStateOutTensor);</div>
+<div class="line"><a name="l00123"></a><span class="lineno">  123</span>&#160;    }</div>
+<div class="line"><a name="l00124"></a><span class="lineno">  124</span>&#160; </div>
+<div class="line"><a name="l00125"></a><span class="lineno">  125</span>&#160;    <a class="code" href="namespacearmnn.html#a0eec4a463a166fad55307d9f26ba3a68">InitializeArmComputeClTensorData</a>(*m_InputToInputWeightsTensor,      <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#ab160eba2493d5fe52185c0986dcb190c">m_InputToInputWeights</a>);</div>
+<div class="line"><a name="l00126"></a><span class="lineno">  126</span>&#160;    <a class="code" href="namespacearmnn.html#a0eec4a463a166fad55307d9f26ba3a68">InitializeArmComputeClTensorData</a>(*m_InputToForgetWeightsTensor,     <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#aab77f54a037658ca9b2bf9cc8a1fadf1">m_InputToForgetWeights</a>);</div>
+<div class="line"><a name="l00127"></a><span class="lineno">  127</span>&#160;    <a class="code" href="namespacearmnn.html#a0eec4a463a166fad55307d9f26ba3a68">InitializeArmComputeClTensorData</a>(*m_InputToCellWeightsTensor,       <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#a841439e3b8dc157a7368b19c9ecb7d03">m_InputToCellWeights</a>);</div>
+<div class="line"><a name="l00128"></a><span class="lineno">  128</span>&#160;    <a class="code" href="namespacearmnn.html#a0eec4a463a166fad55307d9f26ba3a68">InitializeArmComputeClTensorData</a>(*m_InputToOutputWeightsTensor,     <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#a17ba1c8bcc71a55a95b2a3913f8cb203">m_InputToOutputWeights</a>);</div>
+<div class="line"><a name="l00129"></a><span class="lineno">  129</span>&#160;    <a class="code" href="namespacearmnn.html#a0eec4a463a166fad55307d9f26ba3a68">InitializeArmComputeClTensorData</a>(*m_RecurrentToInputWeightsTensor,  <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#a299587d4f3fca029492700f3e2585bd8">m_RecurrentToInputWeights</a>);</div>
+<div class="line"><a name="l00130"></a><span class="lineno">  130</span>&#160;    <a class="code" href="namespacearmnn.html#a0eec4a463a166fad55307d9f26ba3a68">InitializeArmComputeClTensorData</a>(*m_RecurrentToForgetWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#adf8571dd1867ee91082bd005f94f2610">m_RecurrentToForgetWeights</a>);</div>
+<div class="line"><a name="l00131"></a><span class="lineno">  131</span>&#160;    <a class="code" href="namespacearmnn.html#a0eec4a463a166fad55307d9f26ba3a68">InitializeArmComputeClTensorData</a>(*m_RecurrentToCellWeightsTensor,   <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#ac18c8b8b2039267d8282e91b4162d8aa">m_RecurrentToCellWeights</a>);</div>
+<div class="line"><a name="l00132"></a><span class="lineno">  132</span>&#160;    <a class="code" href="namespacearmnn.html#a0eec4a463a166fad55307d9f26ba3a68">InitializeArmComputeClTensorData</a>(*m_RecurrentToOutputWeightsTensor, <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#a4c27716f61bb68e8ea0bd4e8389ba01a">m_RecurrentToOutputWeights</a>);</div>
+<div class="line"><a name="l00133"></a><span class="lineno">  133</span>&#160;    <a class="code" href="namespacearmnn.html#a0eec4a463a166fad55307d9f26ba3a68">InitializeArmComputeClTensorData</a>(*m_InputGateBiasTensor,            <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#a44eb7524badcca9b2073359e3814c98b">m_InputGateBias</a>);</div>
+<div class="line"><a name="l00134"></a><span class="lineno">  134</span>&#160;    <a class="code" href="namespacearmnn.html#a0eec4a463a166fad55307d9f26ba3a68">InitializeArmComputeClTensorData</a>(*m_ForgetGateBiasTensor,           <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#a29fa293fffbf9c6f00cd75db1dc0a52a">m_ForgetGateBias</a>);</div>
+<div class="line"><a name="l00135"></a><span class="lineno">  135</span>&#160;    <a class="code" href="namespacearmnn.html#a0eec4a463a166fad55307d9f26ba3a68">InitializeArmComputeClTensorData</a>(*m_CellBiasTensor,                 <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#a6e9593869b82984de198fed27f72cdcf">m_CellBias</a>);</div>
+<div class="line"><a name="l00136"></a><span class="lineno">  136</span>&#160;    <a class="code" href="namespacearmnn.html#a0eec4a463a166fad55307d9f26ba3a68">InitializeArmComputeClTensorData</a>(*m_OutputGateBiasTensor,           <a class="code" href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">m_Data</a>.<a class="code" href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#a5ff4158b1b363b898d0da04c42d37ce0">m_OutputGateBias</a>);</div>
+<div class="line"><a name="l00137"></a><span class="lineno">  137</span>&#160; </div>
+<div class="line"><a name="l00138"></a><span class="lineno">  138</span>&#160;    m_QuantizedLstmLayer.prepare();</div>
+<div class="line"><a name="l00139"></a><span class="lineno">  139</span>&#160;    FreeUnusedTensors();</div>
+<div class="line"><a name="l00140"></a><span class="lineno">  140</span>&#160;}</div>
+<div class="line"><a name="l00141"></a><span class="lineno">  141</span>&#160; </div>
+<div class="line"><a name="l00142"></a><span class="lineno"><a class="line" href="classarmnn_1_1_cl_quantized_lstm_workload.html#ae071e8822437c78baea75c3aef3a263a">  142</a></span>&#160;<span class="keywordtype">void</span> <a class="code" href="classarmnn_1_1_cl_quantized_lstm_workload.html#ae071e8822437c78baea75c3aef3a263a">ClQuantizedLstmWorkload::Execute</a>()<span class="keyword"> const</span></div>
+<div class="line"><a name="l00143"></a><span class="lineno">  143</span>&#160;<span class="keyword"></span>{</div>
+<div class="line"><a name="l00144"></a><span class="lineno">  144</span>&#160;    <a class="code" href="_cl_workload_utils_8hpp.html#a2d57ef1645138f5f8a6dbd2ce92dc072">ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID</a>(<span class="stringliteral">&quot;ClQuantizedLstmWorkload_Execute&quot;</span>);</div>
+<div class="line"><a name="l00145"></a><span class="lineno">  145</span>&#160;    <a class="code" href="namespacearmnn.html#aff5bee79757341daf750c7dd7c123a15">RunClFunction</a>(m_QuantizedLstmLayer, <a class="code" href="_exceptions_8hpp.html#aa3be76aec4ce713822a5ea1ecbb7bc61">CHECK_LOCATION</a>());</div>
+<div class="line"><a name="l00146"></a><span class="lineno">  146</span>&#160;}</div>
+<div class="line"><a name="l00147"></a><span class="lineno">  147</span>&#160; </div>
+<div class="line"><a name="l00148"></a><span class="lineno">  148</span>&#160;<span class="keywordtype">void</span> ClQuantizedLstmWorkload::FreeUnusedTensors()</div>
+<div class="line"><a name="l00149"></a><span class="lineno">  149</span>&#160;{</div>
+<div class="line"><a name="l00150"></a><span class="lineno">  150</span>&#160;    FreeTensorIfUnused(m_InputToInputWeightsTensor);</div>
+<div class="line"><a name="l00151"></a><span class="lineno">  151</span>&#160;    FreeTensorIfUnused(m_InputToForgetWeightsTensor);</div>
+<div class="line"><a name="l00152"></a><span class="lineno">  152</span>&#160;    FreeTensorIfUnused(m_InputToCellWeightsTensor);</div>
+<div class="line"><a name="l00153"></a><span class="lineno">  153</span>&#160;    FreeTensorIfUnused(m_InputToOutputWeightsTensor);</div>
+<div class="line"><a name="l00154"></a><span class="lineno">  154</span>&#160;    FreeTensorIfUnused(m_RecurrentToInputWeightsTensor);</div>
+<div class="line"><a name="l00155"></a><span class="lineno">  155</span>&#160;    FreeTensorIfUnused(m_RecurrentToForgetWeightsTensor);</div>
+<div class="line"><a name="l00156"></a><span class="lineno">  156</span>&#160;    FreeTensorIfUnused(m_RecurrentToCellWeightsTensor);</div>
+<div class="line"><a name="l00157"></a><span class="lineno">  157</span>&#160;    FreeTensorIfUnused(m_RecurrentToOutputWeightsTensor);</div>
+<div class="line"><a name="l00158"></a><span class="lineno">  158</span>&#160;    FreeTensorIfUnused(m_InputGateBiasTensor);</div>
+<div class="line"><a name="l00159"></a><span class="lineno">  159</span>&#160;    FreeTensorIfUnused(m_ForgetGateBiasTensor);</div>
+<div class="line"><a name="l00160"></a><span class="lineno">  160</span>&#160;    FreeTensorIfUnused(m_CellBiasTensor);</div>
+<div class="line"><a name="l00161"></a><span class="lineno">  161</span>&#160;    FreeTensorIfUnused(m_OutputGateBiasTensor);</div>
+<div class="line"><a name="l00162"></a><span class="lineno">  162</span>&#160;}</div>
+<div class="line"><a name="l00163"></a><span class="lineno">  163</span>&#160; </div>
+<div class="line"><a name="l00164"></a><span class="lineno">  164</span>&#160;} <span class="comment">// namespace armnn</span></div>
+</div><!-- fragment --></div><!-- contents -->
+</div><!-- doc-content -->
+<div class="ttc" id="anamespacearmnn_html_aff5bee79757341daf750c7dd7c123a15"><div class="ttname"><a href="namespacearmnn.html#aff5bee79757341daf750c7dd7c123a15">armnn::RunClFunction</a></div><div class="ttdeci">void RunClFunction(arm_compute::IFunction &amp;function, const CheckLocation &amp;location)</div><div class="ttdef"><b>Definition:</b> <a href="_cl_workload_utils_8hpp_source.html#l00168">ClWorkloadUtils.hpp:168</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_quantized_lstm_queue_descriptor_html_a6e9593869b82984de198fed27f72cdcf"><div class="ttname"><a href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#a6e9593869b82984de198fed27f72cdcf">armnn::QuantizedLstmQueueDescriptor::m_CellBias</a></div><div class="ttdeci">const ConstTensorHandle * m_CellBias</div><div class="ttdef"><b>Definition:</b> <a href="_workload_data_8hpp_source.html#l00640">WorkloadData.hpp:640</a></div></div>
+<div class="ttc" id="aclassarmnn_1_1_cl_quantized_lstm_workload_html_a004528fc09cdcdec10388f011fbfc184"><div class="ttname"><a href="classarmnn_1_1_cl_quantized_lstm_workload.html#a004528fc09cdcdec10388f011fbfc184">armnn::ClQuantizedLstmWorkload::ClQuantizedLstmWorkload</a></div><div class="ttdeci">ClQuantizedLstmWorkload(const QuantizedLstmQueueDescriptor &amp;descriptor, const WorkloadInfo &amp;info, const arm_compute::CLCompileContext &amp;clCompileContext)</div><div class="ttdef"><b>Definition:</b> <a href="_cl_quantized_lstm_workload_8cpp_source.html#l00064">ClQuantizedLstmWorkload.cpp:64</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_quantized_lstm_input_params_info_html_ac81393ef433b0c7c337f9f0d55f41ae4"><div class="ttname"><a href="structarmnn_1_1_quantized_lstm_input_params_info.html#ac81393ef433b0c7c337f9f0d55f41ae4">armnn::QuantizedLstmInputParamsInfo::GetForgetGateBias</a></div><div class="ttdeci">const TensorInfo &amp; GetForgetGateBias() const</div><div class="ttdef"><b>Definition:</b> <a href="_quantized_lstm_params_8hpp_source.html#l00203">QuantizedLstmParams.hpp:203</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_quantized_lstm_queue_descriptor_html_a299587d4f3fca029492700f3e2585bd8"><div class="ttname"><a href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#a299587d4f3fca029492700f3e2585bd8">armnn::QuantizedLstmQueueDescriptor::m_RecurrentToInputWeights</a></div><div class="ttdeci">const ConstTensorHandle * m_RecurrentToInputWeights</div><div class="ttdef"><b>Definition:</b> <a href="_workload_data_8hpp_source.html#l00633">WorkloadData.hpp:633</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_quantized_lstm_input_params_info_html_a7dac08f19a1b235d5256d39136848a09"><div class="ttname"><a href="structarmnn_1_1_quantized_lstm_input_params_info.html#a7dac08f19a1b235d5256d39136848a09">armnn::QuantizedLstmInputParamsInfo::GetInputToForgetWeights</a></div><div class="ttdeci">const TensorInfo &amp; GetInputToForgetWeights() const</div><div class="ttdef"><b>Definition:</b> <a href="_quantized_lstm_params_8hpp_source.html#l00169">QuantizedLstmParams.hpp:169</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_quantized_lstm_queue_descriptor_html_aab77f54a037658ca9b2bf9cc8a1fadf1"><div class="ttname"><a href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#aab77f54a037658ca9b2bf9cc8a1fadf1">armnn::QuantizedLstmQueueDescriptor::m_InputToForgetWeights</a></div><div class="ttdeci">const ConstTensorHandle * m_InputToForgetWeights</div><div class="ttdef"><b>Definition:</b> <a href="_workload_data_8hpp_source.html#l00629">WorkloadData.hpp:629</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_quantized_lstm_input_params_info_html_a534af7e4f3a6d50a6dab05abc245133d"><div class="ttname"><a href="structarmnn_1_1_quantized_lstm_input_params_info.html#a534af7e4f3a6d50a6dab05abc245133d">armnn::QuantizedLstmInputParamsInfo::GetRecurrentToForgetWeights</a></div><div class="ttdeci">const TensorInfo &amp; GetRecurrentToForgetWeights() const</div><div class="ttdef"><b>Definition:</b> <a href="_quantized_lstm_params_8hpp_source.html#l00186">QuantizedLstmParams.hpp:186</a></div></div>
+<div class="ttc" id="a_cl_quantized_lstm_workload_8hpp_html"><div class="ttname"><a href="_cl_quantized_lstm_workload_8hpp.html">ClQuantizedLstmWorkload.hpp</a></div></div>
+<div class="ttc" id="aclassarmnn_1_1_tensor_info_html"><div class="ttname"><a href="classarmnn_1_1_tensor_info.html">armnn::TensorInfo</a></div><div class="ttdef"><b>Definition:</b> <a href="_tensor_8hpp_source.html#l00152">Tensor.hpp:152</a></div></div>
+<div class="ttc" id="a_exceptions_8hpp_html_aa3be76aec4ce713822a5ea1ecbb7bc61"><div class="ttname"><a href="_exceptions_8hpp.html#aa3be76aec4ce713822a5ea1ecbb7bc61">CHECK_LOCATION</a></div><div class="ttdeci">#define CHECK_LOCATION()</div><div class="ttdef"><b>Definition:</b> <a href="_exceptions_8hpp_source.html#l00203">Exceptions.hpp:203</a></div></div>
+<div class="ttc" id="aclassarmnn_1_1_cl_base_workload_html"><div class="ttname"><a href="classarmnn_1_1_cl_base_workload.html">armnn::ClBaseWorkload</a></div><div class="ttdef"><b>Definition:</b> <a href="_cl_base_workload_8hpp_source.html#l00013">ClBaseWorkload.hpp:13</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_quantized_lstm_queue_descriptor_html_a4c27716f61bb68e8ea0bd4e8389ba01a"><div class="ttname"><a href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#a4c27716f61bb68e8ea0bd4e8389ba01a">armnn::QuantizedLstmQueueDescriptor::m_RecurrentToOutputWeights</a></div><div class="ttdeci">const ConstTensorHandle * m_RecurrentToOutputWeights</div><div class="ttdef"><b>Definition:</b> <a href="_workload_data_8hpp_source.html#l00636">WorkloadData.hpp:636</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_quantized_lstm_queue_descriptor_html_a44eb7524badcca9b2073359e3814c98b"><div class="ttname"><a href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#a44eb7524badcca9b2073359e3814c98b">armnn::QuantizedLstmQueueDescriptor::m_InputGateBias</a></div><div class="ttdeci">const ConstTensorHandle * m_InputGateBias</div><div class="ttdef"><b>Definition:</b> <a href="_workload_data_8hpp_source.html#l00638">WorkloadData.hpp:638</a></div></div>
+<div class="ttc" id="aclassarmnn_1_1_const_tensor_handle_html_a66e8f43a5b42b500871ed96e15419567"><div class="ttname"><a href="classarmnn_1_1_const_tensor_handle.html#a66e8f43a5b42b500871ed96e15419567">armnn::ConstTensorHandle::GetTensorInfo</a></div><div class="ttdeci">const TensorInfo &amp; GetTensorInfo() const</div><div class="ttdef"><b>Definition:</b> <a href="_tensor_handle_8hpp_source.html#l00040">TensorHandle.hpp:40</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_quantized_lstm_input_params_info_html_ae1d5a487fcd13852927c8a2b9f9dfeb6"><div class="ttname"><a href="structarmnn_1_1_quantized_lstm_input_params_info.html#ae1d5a487fcd13852927c8a2b9f9dfeb6">armnn::QuantizedLstmInputParamsInfo::GetInputGateBias</a></div><div class="ttdeci">const TensorInfo &amp; GetInputGateBias() const</div><div class="ttdef"><b>Definition:</b> <a href="_quantized_lstm_params_8hpp_source.html#l00199">QuantizedLstmParams.hpp:199</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_quantized_lstm_input_params_info_html"><div class="ttname"><a href="structarmnn_1_1_quantized_lstm_input_params_info.html">armnn::QuantizedLstmInputParamsInfo</a></div><div class="ttdef"><b>Definition:</b> <a href="_quantized_lstm_params_8hpp_source.html#l00119">QuantizedLstmParams.hpp:119</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_quantized_lstm_queue_descriptor_html_ac18c8b8b2039267d8282e91b4162d8aa"><div class="ttname"><a href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#ac18c8b8b2039267d8282e91b4162d8aa">armnn::QuantizedLstmQueueDescriptor::m_RecurrentToCellWeights</a></div><div class="ttdeci">const ConstTensorHandle * m_RecurrentToCellWeights</div><div class="ttdef"><b>Definition:</b> <a href="_workload_data_8hpp_source.html#l00635">WorkloadData.hpp:635</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_quantized_lstm_input_params_info_html_afe4d25acd31b98dee6f6b28d4d756071"><div class="ttname"><a href="structarmnn_1_1_quantized_lstm_input_params_info.html#afe4d25acd31b98dee6f6b28d4d756071">armnn::QuantizedLstmInputParamsInfo::GetRecurrentToOutputWeights</a></div><div class="ttdeci">const TensorInfo &amp; GetRecurrentToOutputWeights() const</div><div class="ttdef"><b>Definition:</b> <a href="_quantized_lstm_params_8hpp_source.html#l00194">QuantizedLstmParams.hpp:194</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_quantized_lstm_input_params_info_html_afa2b04197a764428a8c3a648de8058fc"><div class="ttname"><a href="structarmnn_1_1_quantized_lstm_input_params_info.html#afa2b04197a764428a8c3a648de8058fc">armnn::QuantizedLstmInputParamsInfo::GetInputToInputWeights</a></div><div class="ttdeci">const TensorInfo &amp; GetInputToInputWeights() const</div><div class="ttdef"><b>Definition:</b> <a href="_quantized_lstm_params_8hpp_source.html#l00165">QuantizedLstmParams.hpp:165</a></div></div>
+<div class="ttc" id="a_cl_workload_utils_8hpp_html_a2d57ef1645138f5f8a6dbd2ce92dc072"><div class="ttname"><a href="_cl_workload_utils_8hpp.html#a2d57ef1645138f5f8a6dbd2ce92dc072">ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID</a></div><div class="ttdeci">#define ARMNN_SCOPED_PROFILING_EVENT_CL_NAME_GUID(label)</div><div class="ttdoc">Creates a profiling event that uses GetGuid() and GetName() from the calling class.</div><div class="ttdef"><b>Definition:</b> <a href="_cl_workload_utils_8hpp_source.html#l00036">ClWorkloadUtils.hpp:36</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_quantized_lstm_input_params_info_html_ad5f4be37766b41f342dd196cb1c6e141"><div class="ttname"><a href="structarmnn_1_1_quantized_lstm_input_params_info.html#ad5f4be37766b41f342dd196cb1c6e141">armnn::QuantizedLstmInputParamsInfo::GetCellBias</a></div><div class="ttdeci">const TensorInfo &amp; GetCellBias() const</div><div class="ttdef"><b>Definition:</b> <a href="_quantized_lstm_params_8hpp_source.html#l00207">QuantizedLstmParams.hpp:207</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_quantized_lstm_queue_descriptor_html_ab160eba2493d5fe52185c0986dcb190c"><div class="ttname"><a href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#ab160eba2493d5fe52185c0986dcb190c">armnn::QuantizedLstmQueueDescriptor::m_InputToInputWeights</a></div><div class="ttdeci">const ConstTensorHandle * m_InputToInputWeights</div><div class="ttdef"><b>Definition:</b> <a href="_workload_data_8hpp_source.html#l00628">WorkloadData.hpp:628</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_workload_info_html"><div class="ttname"><a href="structarmnn_1_1_workload_info.html">armnn::WorkloadInfo</a></div><div class="ttdoc">Contains information about TensorInfos of a layer.</div><div class="ttdef"><b>Definition:</b> <a href="_workload_info_8hpp_source.html#l00016">WorkloadInfo.hpp:16</a></div></div>
+<div class="ttc" id="a_cl_workload_utils_8hpp_html"><div class="ttname"><a href="_cl_workload_utils_8hpp.html">ClWorkloadUtils.hpp</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_quantized_lstm_queue_descriptor_html_adf8571dd1867ee91082bd005f94f2610"><div class="ttname"><a href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#adf8571dd1867ee91082bd005f94f2610">armnn::QuantizedLstmQueueDescriptor::m_RecurrentToForgetWeights</a></div><div class="ttdeci">const ConstTensorHandle * m_RecurrentToForgetWeights</div><div class="ttdef"><b>Definition:</b> <a href="_workload_data_8hpp_source.html#l00634">WorkloadData.hpp:634</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_quantized_lstm_input_params_info_html_ae5bfdd423b16f990c1713ef9f91f947b"><div class="ttname"><a href="structarmnn_1_1_quantized_lstm_input_params_info.html#ae5bfdd423b16f990c1713ef9f91f947b">armnn::QuantizedLstmInputParamsInfo::GetRecurrentToCellWeights</a></div><div class="ttdeci">const TensorInfo &amp; GetRecurrentToCellWeights() const</div><div class="ttdef"><b>Definition:</b> <a href="_quantized_lstm_params_8hpp_source.html#l00190">QuantizedLstmParams.hpp:190</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_quantized_lstm_queue_descriptor_html_a841439e3b8dc157a7368b19c9ecb7d03"><div class="ttname"><a href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#a841439e3b8dc157a7368b19c9ecb7d03">armnn::QuantizedLstmQueueDescriptor::m_InputToCellWeights</a></div><div class="ttdeci">const ConstTensorHandle * m_InputToCellWeights</div><div class="ttdef"><b>Definition:</b> <a href="_workload_data_8hpp_source.html#l00630">WorkloadData.hpp:630</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_quantized_lstm_input_params_info_html_a800adf0f61e84d706060f63037c1a336"><div class="ttname"><a href="structarmnn_1_1_quantized_lstm_input_params_info.html#a800adf0f61e84d706060f63037c1a336">armnn::QuantizedLstmInputParamsInfo::GetInputToOutputWeights</a></div><div class="ttdeci">const TensorInfo &amp; GetInputToOutputWeights() const</div><div class="ttdef"><b>Definition:</b> <a href="_quantized_lstm_params_8hpp_source.html#l00177">QuantizedLstmParams.hpp:177</a></div></div>
+<div class="ttc" id="anamespacearmnn_html_a4dc0adc6737b5944e7671bee71788407acaf9b6b99962bf5c2264824231d7a40c"><div class="ttname"><a href="namespacearmnn.html#a4dc0adc6737b5944e7671bee71788407acaf9b6b99962bf5c2264824231d7a40c">armnn::BoostLogSeverityMapping::info</a></div><div class="ttdeci">@ info</div></div>
+<div class="ttc" id="anamespacearmnn_html_a0eec4a463a166fad55307d9f26ba3a68"><div class="ttname"><a href="namespacearmnn.html#a0eec4a463a166fad55307d9f26ba3a68">armnn::InitializeArmComputeClTensorData</a></div><div class="ttdeci">void InitializeArmComputeClTensorData(arm_compute::CLTensor &amp;clTensor, const ConstTensorHandle *handle)</div><div class="ttdef"><b>Definition:</b> <a href="_cl_workload_utils_8hpp_source.html#l00124">ClWorkloadUtils.hpp:124</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_queue_descriptor_html_a6abd491bb99ffe88bd472c1ae5a1ed1a"><div class="ttname"><a href="structarmnn_1_1_queue_descriptor.html#a6abd491bb99ffe88bd472c1ae5a1ed1a">armnn::QueueDescriptor::m_Outputs</a></div><div class="ttdeci">std::vector&lt; ITensorHandle * &gt; m_Outputs</div><div class="ttdef"><b>Definition:</b> <a href="_workload_data_8hpp_source.html#l00027">WorkloadData.hpp:27</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_quantized_lstm_queue_descriptor_html_a17ba1c8bcc71a55a95b2a3913f8cb203"><div class="ttname"><a href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#a17ba1c8bcc71a55a95b2a3913f8cb203">armnn::QuantizedLstmQueueDescriptor::m_InputToOutputWeights</a></div><div class="ttdeci">const ConstTensorHandle * m_InputToOutputWeights</div><div class="ttdef"><b>Definition:</b> <a href="_workload_data_8hpp_source.html#l00631">WorkloadData.hpp:631</a></div></div>
+<div class="ttc" id="aclassarmnn_1_1_cl_quantized_lstm_workload_html_ae071e8822437c78baea75c3aef3a263a"><div class="ttname"><a href="classarmnn_1_1_cl_quantized_lstm_workload.html#ae071e8822437c78baea75c3aef3a263a">armnn::ClQuantizedLstmWorkload::Execute</a></div><div class="ttdeci">void Execute() const override</div><div class="ttdef"><b>Definition:</b> <a href="_cl_quantized_lstm_workload_8cpp_source.html#l00142">ClQuantizedLstmWorkload.cpp:142</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_quantized_lstm_queue_descriptor_html"><div class="ttname"><a href="structarmnn_1_1_quantized_lstm_queue_descriptor.html">armnn::QuantizedLstmQueueDescriptor</a></div><div class="ttdef"><b>Definition:</b> <a href="_workload_data_8hpp_source.html#l00609">WorkloadData.hpp:609</a></div></div>
+<div class="ttc" id="aclassarmnn_1_1_i_cl_tensor_handle_html"><div class="ttname"><a href="classarmnn_1_1_i_cl_tensor_handle.html">armnn::IClTensorHandle</a></div><div class="ttdef"><b>Definition:</b> <a href="_i_cl_tensor_handle_8hpp_source.html#l00013">IClTensorHandle.hpp:13</a></div></div>
+<div class="ttc" id="a_tensor_handle_8hpp_html"><div class="ttname"><a href="_tensor_handle_8hpp.html">TensorHandle.hpp</a></div></div>
+<div class="ttc" id="anamespacearmnn_html_a67a0db04d321a74b7e7fcfd3f1a3f70b"><div class="ttname"><a href="namespacearmnn.html#a67a0db04d321a74b7e7fcfd3f1a3f70b">armnn::Status</a></div><div class="ttdeci">Status</div><div class="ttdef"><b>Definition:</b> <a href="_types_8hpp_source.html#l00042">Types.hpp:42</a></div></div>
+<div class="ttc" id="a_cl_tensor_handle_8hpp_html"><div class="ttname"><a href="_cl_tensor_handle_8hpp.html">ClTensorHandle.hpp</a></div></div>
+<div class="ttc" id="aclassarmnn_1_1_base_workload_html_afb8d2c8817c75de9d01a4c0e0d5c160b"><div class="ttname"><a href="classarmnn_1_1_base_workload.html#afb8d2c8817c75de9d01a4c0e0d5c160b">armnn::BaseWorkload&lt; QuantizedLstmQueueDescriptor &gt;::m_Data</a></div><div class="ttdeci">QuantizedLstmQueueDescriptor m_Data</div><div class="ttdef"><b>Definition:</b> <a href="_workload_8hpp_source.html#l00089">Workload.hpp:89</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_quantized_lstm_input_params_info_html_ae0da94ba17ce67b95b5b9d6e5adc4271"><div class="ttname"><a href="structarmnn_1_1_quantized_lstm_input_params_info.html#ae0da94ba17ce67b95b5b9d6e5adc4271">armnn::QuantizedLstmInputParamsInfo::GetOutputGateBias</a></div><div class="ttdeci">const TensorInfo &amp; GetOutputGateBias() const</div><div class="ttdef"><b>Definition:</b> <a href="_quantized_lstm_params_8hpp_source.html#l00211">QuantizedLstmParams.hpp:211</a></div></div>
+<div class="ttc" id="anamespacearmnn_html_a5fb7fe07abfb2373103d842b47a24726"><div class="ttname"><a href="namespacearmnn.html#a5fb7fe07abfb2373103d842b47a24726">armnn::ClQuantizedLstmWorkloadValidate</a></div><div class="ttdeci">arm_compute::Status ClQuantizedLstmWorkloadValidate(const TensorInfo &amp;input, const TensorInfo &amp;previousCellStateIn, const TensorInfo &amp;previousOutputIn, const TensorInfo &amp;cellStateOut, const TensorInfo &amp;output, const QuantizedLstmInputParamsInfo &amp;paramsInfo)</div><div class="ttdef"><b>Definition:</b> <a href="_cl_quantized_lstm_workload_8cpp_source.html#l00018">ClQuantizedLstmWorkload.cpp:18</a></div></div>
+<div class="ttc" id="anamespacearmnn_html"><div class="ttname"><a href="namespacearmnn.html">armnn</a></div><div class="ttdoc">Copyright (c) 2021 ARM Limited and Contributors.</div><div class="ttdef"><b>Definition:</b> <a href="01__00__quick__start_8dox_source.html#l00006">01_00_quick_start.dox:6</a></div></div>
+<div class="ttc" id="a_arm_compute_tensor_utils_8hpp_html"><div class="ttname"><a href="_arm_compute_tensor_utils_8hpp.html">ArmComputeTensorUtils.hpp</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_quantized_lstm_input_params_info_html_a3b3c26330a05bf4ea40f8a6b402be354"><div class="ttname"><a href="structarmnn_1_1_quantized_lstm_input_params_info.html#a3b3c26330a05bf4ea40f8a6b402be354">armnn::QuantizedLstmInputParamsInfo::GetInputToCellWeights</a></div><div class="ttdeci">const TensorInfo &amp; GetInputToCellWeights() const</div><div class="ttdef"><b>Definition:</b> <a href="_quantized_lstm_params_8hpp_source.html#l00173">QuantizedLstmParams.hpp:173</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_quantized_lstm_queue_descriptor_html_a5ff4158b1b363b898d0da04c42d37ce0"><div class="ttname"><a href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#a5ff4158b1b363b898d0da04c42d37ce0">armnn::QuantizedLstmQueueDescriptor::m_OutputGateBias</a></div><div class="ttdeci">const ConstTensorHandle * m_OutputGateBias</div><div class="ttdef"><b>Definition:</b> <a href="_workload_data_8hpp_source.html#l00641">WorkloadData.hpp:641</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_quantized_lstm_input_params_info_html_ad159f9edbddeeb6cf6ff0ba042481ba8"><div class="ttname"><a href="structarmnn_1_1_quantized_lstm_input_params_info.html#ad159f9edbddeeb6cf6ff0ba042481ba8">armnn::QuantizedLstmInputParamsInfo::GetRecurrentToInputWeights</a></div><div class="ttdeci">const TensorInfo &amp; GetRecurrentToInputWeights() const</div><div class="ttdef"><b>Definition:</b> <a href="_quantized_lstm_params_8hpp_source.html#l00182">QuantizedLstmParams.hpp:182</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_queue_descriptor_html_a4b50e46a6810018f3edecfb68b2a76b3"><div class="ttname"><a href="structarmnn_1_1_queue_descriptor.html#a4b50e46a6810018f3edecfb68b2a76b3">armnn::QueueDescriptor::m_Inputs</a></div><div class="ttdeci">std::vector&lt; ITensorHandle * &gt; m_Inputs</div><div class="ttdef"><b>Definition:</b> <a href="_workload_data_8hpp_source.html#l00026">WorkloadData.hpp:26</a></div></div>
+<div class="ttc" id="astructarmnn_1_1_quantized_lstm_queue_descriptor_html_a29fa293fffbf9c6f00cd75db1dc0a52a"><div class="ttname"><a href="structarmnn_1_1_quantized_lstm_queue_descriptor.html#a29fa293fffbf9c6f00cd75db1dc0a52a">armnn::QuantizedLstmQueueDescriptor::m_ForgetGateBias</a></div><div class="ttdeci">const ConstTensorHandle * m_ForgetGateBias</div><div class="ttdef"><b>Definition:</b> <a href="_workload_data_8hpp_source.html#l00639">WorkloadData.hpp:639</a></div></div>
+<!-- start footer part -->
+<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
+  <ul>
+    <li class="navelem"><a class="el" href="dir_68267d1309a1af8e8297ef4c3efbcdba.html">src</a></li><li class="navelem"><a class="el" href="dir_0f3cdec46afbc61a1ded8e1687c9c9a0.html">backends</a></li><li class="navelem"><a class="el" href="dir_1ad86c6d39ab715a831555571b9e98a5.html">cl</a></li><li class="navelem"><a class="el" href="dir_2d9c087bc7f49a1d7a25fdc615d2f0c9.html">workloads</a></li><li class="navelem"><a class="el" href="_cl_quantized_lstm_workload_8cpp.html">ClQuantizedLstmWorkload.cpp</a></li>
+    <li class="footer">Generated on Tue Aug 22 2023 11:36:59 for Arm NN by
+    <a href="http://www.doxygen.org/index.html">
+    <img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.17 </li>
+  </ul>
+</div>
+</body>
+</html>