<!-- HTML header for doxygen 1.8.17-->
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "https://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen 1.8.17"/>
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<title>Arm NN: FuseBatchNorm&lt; ConvLayer, ArmnnType, T &gt; Class Template Reference</title>
<link href="tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="dynsections.js"></script>
<link href="navtree.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="resize.js"></script>
<script type="text/javascript" src="navtreedata.js"></script>
<script type="text/javascript" src="navtree.js"></script>
<link href="search/search.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="search/searchdata.js"></script>
<script type="text/javascript" src="search/search.js"></script>
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
extensions: ["tex2jax.js"],
jax: ["input/TeX","output/HTML-CSS"],
});
</script>
<script type="text/javascript" async="async" src="http://cdn.mathjax.org/mathjax/latest/MathJax.js"></script>
<link href="doxygen.css" rel="stylesheet" type="text/css" />
<link href="customdoxygen.css" rel="stylesheet" type="text/css"/>
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
<tbody>
<tr style="height: 56px;">
<img alt="ArmNN" src="Arm_NN_horizontal_blue.png" style="max-width: 15rem; margin-top: .5rem; margin-left 13px"/>
<td id="projectalign" style="padding-left: 0.9em;">
<div id="projectname">
&#160;<span id="projectnumber">24.05</span>
</div>
</td>
</tr>
</tbody>
</table>
</div>
<!-- end header part -->
<!-- Generated by Doxygen 1.8.17 -->
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
var searchBox = new SearchBox("searchBox", "search",false,'Search');
/* @license-end */
</script>
<script type="text/javascript" src="menudata.js"></script>
<script type="text/javascript" src="menu.js"></script>
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
$(function() {
initMenu('',true,false,'search.php','Search');
$(document).ready(function() { init_search(); });
});
/* @license-end */</script>
<div id="main-nav"></div>
</div><!-- top -->
<div id="side-nav" class="ui-resizable side-nav-resizable">
<div id="nav-tree">
<div id="nav-tree-contents">
<div id="nav-sync" class="sync"></div>
</div>
</div>
<div id="splitbar" style="-moz-user-select:none;"
class="ui-resizable-handle">
</div>
</div>
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
$(document).ready(function(){initNavTree('classarmnn_1_1optimizations_1_1_fuse_batch_norm.html',''); initResizable(); });
/* @license-end */
</script>
<div id="doc-content">
<!-- window showing the filter options -->
<div id="MSearchSelectWindow"
onmouseover="return searchBox.OnSearchSelectShow()"
onmouseout="return searchBox.OnSearchSelectHide()"
onkeydown="return searchBox.OnSearchSelectKey(event)">
</div>
<!-- iframe showing the search results (closed by default) -->
<div id="MSearchResultsWindow">
<iframe src="javascript:void(0)" frameborder="0"
name="MSearchResults" id="MSearchResults">
</iframe>
</div>
<div class="header">
<div class="summary">
<a href="#pub-methods">Public Member Functions</a> &#124;
<a href="#pro-methods">Protected Member Functions</a> &#124;
<a href="classarmnn_1_1optimizations_1_1_fuse_batch_norm-members.html">List of all members</a> </div>
<div class="headertitle">
<div class="title">FuseBatchNorm&lt; ConvLayer, ArmnnType, T &gt; Class Template Reference</div> </div>
</div><!--header-->
<div class="contents">
<p><code>#include &lt;<a class="el" href="_fuse_batch_norm_8hpp_source.html">FuseBatchNorm.hpp</a>&gt;</code></p>
<table class="memberdecls">
<tr class="heading"><td colspan="2"><h2 class="groupheader"><a name="pub-methods"></a>
Public Member Functions</h2></td></tr>
<tr class="memitem:a5a8476ffc04ce7460bb09ad50d1d23de"><td class="memItemLeft" align="right" valign="top">void&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarmnn_1_1optimizations_1_1_fuse_batch_norm.html#a5a8476ffc04ce7460bb09ad50d1d23de">Run</a> (<a class="el" href="classarmnn_1_1_graph.html">Graph</a> &amp;graph, <a class="el" href="classarmnn_1_1_input_slot.html">InputSlot</a> &amp;connection) const</td></tr>
<tr class="memdesc:a5a8476ffc04ce7460bb09ad50d1d23de"><td class="mdescLeft">&#160;</td><td class="mdescRight">Run for every exclusive connection between any base Convolution layer and a child BatchNorm layer for not quantized layers. <a href="classarmnn_1_1optimizations_1_1_fuse_batch_norm.html#a5a8476ffc04ce7460bb09ad50d1d23de">More...</a><br /></td></tr>
<tr class="separator:a5a8476ffc04ce7460bb09ad50d1d23de"><td class="memSeparator" colspan="2">&#160;</td></tr>
</table><table class="memberdecls">
<tr class="heading"><td colspan="2"><h2 class="groupheader"><a name="pro-methods"></a>
Protected Member Functions</h2></td></tr>
<tr class="memitem:abe49327783cb8bdc12c085c987db14db"><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarmnn_1_1optimizations_1_1_fuse_batch_norm.html#abe49327783cb8bdc12c085c987db14db">FuseBatchNorm</a> ()=default</td></tr>
<tr class="separator:abe49327783cb8bdc12c085c987db14db"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a0ff9a790927b898d90261a8ea0e479e6"><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarmnn_1_1optimizations_1_1_fuse_batch_norm.html#a0ff9a790927b898d90261a8ea0e479e6">~FuseBatchNorm</a> ()=default</td></tr>
<tr class="separator:a0ff9a790927b898d90261a8ea0e479e6"><td class="memSeparator" colspan="2">&#160;</td></tr>
</table>
<a name="details" id="details"></a><h2 class="groupheader">Detailed Description</h2>
<div class="textblock"><h3>template&lt;typename ConvLayer, armnn::DataType ArmnnType, typename T = armnn::ResolveType&lt;ArmnnType&gt;&gt;<br />
class armnn::optimizations::FuseBatchNorm&lt; ConvLayer, ArmnnType, T &gt;</h3>
<p class="definition">Definition at line <a class="el" href="_fuse_batch_norm_8hpp_source.html#l00019">19</a> of file <a class="el" href="_fuse_batch_norm_8hpp_source.html">FuseBatchNorm.hpp</a>.</p>
</div><h2 class="groupheader">Constructor &amp; Destructor Documentation</h2>
<a id="abe49327783cb8bdc12c085c987db14db"></a>
<h2 class="memtitle"><span class="permalink"><a href="#abe49327783cb8bdc12c085c987db14db">&#9670;&nbsp;</a></span>FuseBatchNorm()</h2>
<div class="memitem">
<div class="memproto">
<table class="mlabels">
<tr>
<td class="mlabels-left">
<table class="memname">
<tr>
<td class="memname"><a class="el" href="classarmnn_1_1optimizations_1_1_fuse_batch_norm.html">FuseBatchNorm</a> </td>
<td>(</td>
<td class="paramname"></td><td>)</td>
<td></td>
</tr>
</table>
</td>
<td class="mlabels-right">
<span class="mlabels"><span class="mlabel">protected</span><span class="mlabel">default</span></span> </td>
</tr>
</table>
</div><div class="memdoc">
</div>
</div>
<a id="a0ff9a790927b898d90261a8ea0e479e6"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a0ff9a790927b898d90261a8ea0e479e6">&#9670;&nbsp;</a></span>~FuseBatchNorm()</h2>
<div class="memitem">
<div class="memproto">
<table class="mlabels">
<tr>
<td class="mlabels-left">
<table class="memname">
<tr>
<td class="memname">~<a class="el" href="classarmnn_1_1optimizations_1_1_fuse_batch_norm.html">FuseBatchNorm</a> </td>
<td>(</td>
<td class="paramname"></td><td>)</td>
<td></td>
</tr>
</table>
</td>
<td class="mlabels-right">
<span class="mlabels"><span class="mlabel">protected</span><span class="mlabel">default</span></span> </td>
</tr>
</table>
</div><div class="memdoc">
</div>
</div>
<h2 class="groupheader">Member Function Documentation</h2>
<a id="a5a8476ffc04ce7460bb09ad50d1d23de"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a5a8476ffc04ce7460bb09ad50d1d23de">&#9670;&nbsp;</a></span>Run()</h2>
<div class="memitem">
<div class="memproto">
<table class="mlabels">
<tr>
<td class="mlabels-left">
<table class="memname">
<tr>
<td class="memname">void Run </td>
<td>(</td>
<td class="paramtype"><a class="el" href="classarmnn_1_1_graph.html">Graph</a> &amp;&#160;</td>
<td class="paramname"><em>graph</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype"><a class="el" href="classarmnn_1_1_input_slot.html">InputSlot</a> &amp;&#160;</td>
<td class="paramname"><em>connection</em>&#160;</td>
</tr>
<tr>
<td></td>
<td>)</td>
<td></td><td> const</td>
</tr>
</table>
</td>
<td class="mlabels-right">
<span class="mlabels"><span class="mlabel">inline</span></span> </td>
</tr>
</table>
</div><div class="memdoc">
<p>Runs for every exclusive connection between a base Convolution layer and a child BatchNorm layer, for non-quantized layers. </p>
<p>The child layer is removed, and the base layer is removed if it is left unconnected. A new Convolution layer is added; its weights and bias are calculated from the weights and bias of the base Convolution layer combined with the parameters of the child BatchNorm layer. </p>
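<p>Concretely, the batch-norm parameters collapse into a per-output-channel scale and offset. The sketch below is illustrative only: the helper name and its parameters are hypothetical, and it simply mirrors the arithmetic in the source listing underneath.</p>
<pre class="fragment">
// Illustrative per-output-channel fusion (hypothetical helper, not Arm NN API):
//   fusedWeights = gamma * W / sqrt(variance + epsilon)
//   fusedBias    = gamma * (bias - mean) / sqrt(variance + epsilon) + beta
#include &lt;cmath&gt;
#include &lt;vector&gt;

void FuseChannel(float gamma, float beta, float mean, float variance, float epsilon,
                 std::vector&lt;float&gt;&amp; channelWeights, float&amp; bias)
{
    const float mult = gamma / std::sqrt(variance + epsilon);

    for (float&amp; w : channelWeights)   // scale every weight of this output channel
    {
        w *= mult;
    }

    bias = mult * (bias - mean) + beta;   // fold mean and beta into the bias
}
</pre>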
<p class="definition">Definition at line <a class="el" href="_fuse_batch_norm_8hpp_source.html#l00027">27</a> of file <a class="el" href="_fuse_batch_norm_8hpp_source.html">FuseBatchNorm.hpp</a>.</p>
<div class="fragment"><div class="line"><a name="l00028"></a><span class="lineno"> 28</span>&#160; {</div>
<div class="line"><a name="l00029"></a><span class="lineno"> 29</span>&#160; Layer&amp; base = connection.GetConnectedOutputSlot()-&gt;GetOwningLayer();</div>
<div class="line"><a name="l00030"></a><span class="lineno"> 30</span>&#160; Layer&amp; child = connection.GetOwningLayer();</div>
<div class="line"><a name="l00031"></a><span class="lineno"> 31</span>&#160; </div>
<div class="line"><a name="l00032"></a><span class="lineno"> 32</span>&#160; <span class="keywordtype">bool</span> depthwise = (base.GetType() == <a class="code" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4af97adbfc88b7012a0243215b1076e7e7">LayerType::DepthwiseConvolution2d</a>);</div>
<div class="line"><a name="l00033"></a><span class="lineno"> 33</span>&#160; </div>
<div class="line"><a name="l00034"></a><span class="lineno"> 34</span>&#160; <a class="code" href="_assert_8hpp.html#a5698be69cbd5dfe6c28fcd9867e8cbed">ARMNN_ASSERT</a>(base.GetType() == <a class="code" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4adb033d2f81b68f9a17e8f62de69fed4a">LayerType::Convolution2d</a> || depthwise);</div>
<div class="line"><a name="l00035"></a><span class="lineno"> 35</span>&#160; <a class="code" href="_assert_8hpp.html#a5698be69cbd5dfe6c28fcd9867e8cbed">ARMNN_ASSERT</a>(child.GetType() == <a class="code" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4ae4743c3ec15d1d84169b17264634692e">LayerType::BatchNormalization</a>);</div>
<div class="line"><a name="l00036"></a><span class="lineno"> 36</span>&#160; </div>
<div class="line"><a name="l00037"></a><span class="lineno"> 37</span>&#160; <span class="keywordflow">if</span> (base.GetDataType() == ArmnnType &amp;&amp; child.GetDataType() == ArmnnType)</div>
<div class="line"><a name="l00038"></a><span class="lineno"> 38</span>&#160; {</div>
<div class="line"><a name="l00039"></a><span class="lineno"> 39</span>&#160; OutputSlot* parentOut = base.GetInputSlot(0).GetConnectedOutputSlot();</div>
<div class="line"><a name="l00040"></a><span class="lineno"> 40</span>&#160; <span class="keyword">auto</span> convLayer = PolymorphicDowncast&lt;ConvLayer*&gt;(&amp;base);</div>
<div class="line"><a name="l00041"></a><span class="lineno"> 41</span>&#160; <span class="keyword">auto</span> batchNormLayer = PolymorphicDowncast&lt;BatchNormalizationLayer*&gt;(&amp;child);</div>
<div class="line"><a name="l00042"></a><span class="lineno"> 42</span>&#160; </div>
<div class="line"><a name="l00043"></a><span class="lineno"> 43</span>&#160; <span class="comment">// Read convolution and batch norm parameters</span></div>
<div class="line"><a name="l00044"></a><span class="lineno"> 44</span>&#160; BatchNormalizationDescriptor batchNormDescriptor = batchNormLayer-&gt;GetParameters();</div>
<div class="line"><a name="l00045"></a><span class="lineno"> 45</span>&#160; <span class="keyword">auto</span> epsilon = batchNormDescriptor.m_Eps;</div>
<div class="line"><a name="l00046"></a><span class="lineno"> 46</span>&#160; <a class="code" href="namespacearmnn.html#a44affeeb090c3c6a3062830562672e84">IgnoreUnused</a>(epsilon);</div>
<div class="line"><a name="l00047"></a><span class="lineno"> 47</span>&#160; </div>
<div class="line"><a name="l00048"></a><span class="lineno"> 48</span>&#160; ConstTensor betaTensor(batchNormLayer-&gt;m_Beta-&gt;GetTensorInfo(), batchNormLayer-&gt;m_Beta-&gt;Map(<span class="keyword">true</span>));</div>
<div class="line"><a name="l00049"></a><span class="lineno"> 49</span>&#160; ConstTensor gammaTensor(batchNormLayer-&gt;m_Gamma-&gt;GetTensorInfo(), batchNormLayer-&gt;m_Gamma-&gt;Map(<span class="keyword">true</span>));</div>
<div class="line"><a name="l00050"></a><span class="lineno"> 50</span>&#160; ConstTensor meanTensor(batchNormLayer-&gt;m_Mean-&gt;GetTensorInfo(), batchNormLayer-&gt;m_Mean-&gt;Map(<span class="keyword">true</span>));</div>
<div class="line"><a name="l00051"></a><span class="lineno"> 51</span>&#160; ConstTensor varTensor(batchNormLayer-&gt;m_Variance-&gt;GetTensorInfo(), batchNormLayer-&gt;m_Variance-&gt;Map(<span class="keyword">true</span>));</div>
<div class="line"><a name="l00052"></a><span class="lineno"> 52</span>&#160; </div>
<div class="line"><a name="l00053"></a><span class="lineno"> 53</span>&#160; <span class="keyword">auto</span> convDescriptor = convLayer-&gt;GetParameters();</div>
<div class="line"><a name="l00054"></a><span class="lineno"> 54</span>&#160; ConstTensor weightsTensor;</div>
<div class="line"><a name="l00055"></a><span class="lineno"> 55</span>&#160; <a class="code" href="_assert_8hpp.html#a91c4dfde57907d7698c7531785690a7f">ARMNN_ASSERT_MSG</a>(convLayer-&gt;GetInputSlots()[1].GetConnection() != <span class="keyword">nullptr</span>,</div>
<div class="line"><a name="l00056"></a><span class="lineno"> 56</span>&#160; <span class="stringliteral">&quot;FuseBatchNorm: Weight data should not be null.&quot;</span>);</div>
<div class="line"><a name="l00057"></a><span class="lineno"> 57</span>&#160; </div>
<div class="line"><a name="l00058"></a><span class="lineno"> 58</span>&#160; ConstantLayer* weightLayer = PolymorphicDowncast&lt;ConstantLayer*&gt;(</div>
<div class="line"><a name="l00059"></a><span class="lineno"> 59</span>&#160; &amp;base.GetInputSlot(1).GetConnectedOutputSlot()-&gt;GetOwningLayer());</div>
<div class="line"><a name="l00060"></a><span class="lineno"> 60</span>&#160; </div>
<div class="line"><a name="l00061"></a><span class="lineno"> 61</span>&#160; weightsTensor = ConstTensor(weightLayer-&gt;m_LayerOutput-&gt;GetTensorInfo(),</div>
<div class="line"><a name="l00062"></a><span class="lineno"> 62</span>&#160; weightLayer-&gt;m_LayerOutput-&gt;Map(<span class="keyword">true</span>));</div>
<div class="line"><a name="l00063"></a><span class="lineno"> 63</span>&#160; </div>
<div class="line"><a name="l00064"></a><span class="lineno"> 64</span>&#160; <a class="code" href="classarmnn_utils_1_1_data_layout_indexed.html">armnnUtils::DataLayoutIndexed</a> dataLayout(convDescriptor.m_DataLayout);</div>
<div class="line"><a name="l00065"></a><span class="lineno"> 65</span>&#160; <span class="keyword">auto</span> weightsShape = weightsTensor.GetInfo().GetShape();</div>
<div class="line"><a name="l00066"></a><span class="lineno"> 66</span>&#160; <span class="keyword">const</span> <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> inputChannels = parentOut-&gt;GetTensorInfo().GetShape()[dataLayout.GetChannelsIndex()];</div>
<div class="line"><a name="l00067"></a><span class="lineno"> 67</span>&#160; <span class="keyword">const</span> <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> depthMultiplier = depthwise ? weightsShape[3] / inputChannels : 1;</div>
<div class="line"><a name="l00068"></a><span class="lineno"> 68</span>&#160; <span class="keyword">const</span> <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> outputChannels = depthwise ? weightsShape[3] : weightsShape[0];</div>
<div class="line"><a name="l00069"></a><span class="lineno"> 69</span>&#160; <span class="keyword">const</span> <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> weightsHeight = depthwise ? weightsShape[1] :</div>
<div class="line"><a name="l00070"></a><span class="lineno"> 70</span>&#160; weightsShape[dataLayout.GetHeightIndex()];</div>
<div class="line"><a name="l00071"></a><span class="lineno"> 71</span>&#160; <span class="keyword">const</span> <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> weightsWidth = depthwise ? weightsShape[2] :</div>
<div class="line"><a name="l00072"></a><span class="lineno"> 72</span>&#160; weightsShape[dataLayout.GetWidthIndex()];</div>
<div class="line"><a name="l00073"></a><span class="lineno"> 73</span>&#160; </div>
<div class="line"><a name="l00074"></a><span class="lineno"> 74</span>&#160; <span class="keyword">const</span> <span class="keyword">auto</span>* weightsBuffer = <span class="keyword">static_cast&lt;</span><span class="keyword">const </span>T*<span class="keyword">&gt;</span>(weightsTensor.GetMemoryArea());</div>
<div class="line"><a name="l00075"></a><span class="lineno"> 75</span>&#160; <span class="keyword">const</span> <span class="keyword">auto</span>* betaBuffer = <span class="keyword">static_cast&lt;</span><span class="keyword">const </span>T*<span class="keyword">&gt;</span>(betaTensor.GetMemoryArea());</div>
<div class="line"><a name="l00076"></a><span class="lineno"> 76</span>&#160; <span class="keyword">const</span> <span class="keyword">auto</span>* gammaBuffer = <span class="keyword">static_cast&lt;</span><span class="keyword">const </span>T*<span class="keyword">&gt;</span>(gammaTensor.GetMemoryArea());</div>
<div class="line"><a name="l00077"></a><span class="lineno"> 77</span>&#160; <span class="keyword">const</span> <span class="keyword">auto</span>* meanBuffer = <span class="keyword">static_cast&lt;</span><span class="keyword">const </span>T*<span class="keyword">&gt;</span>(meanTensor.GetMemoryArea());</div>
<div class="line"><a name="l00078"></a><span class="lineno"> 78</span>&#160; <span class="keyword">const</span> <span class="keyword">auto</span>* varBuffer = <span class="keyword">static_cast&lt;</span><span class="keyword">const </span>T*<span class="keyword">&gt;</span>(varTensor.GetMemoryArea());</div>
<div class="line"><a name="l00079"></a><span class="lineno"> 79</span>&#160; </div>
<div class="line"><a name="l00080"></a><span class="lineno"> 80</span>&#160; std::vector&lt;T&gt; weightsVector (weightsBuffer, weightsBuffer + weightsTensor.GetNumElements());</div>
<div class="line"><a name="l00081"></a><span class="lineno"> 81</span>&#160; std::vector&lt;T&gt; betaVector (betaBuffer, betaBuffer + betaTensor.GetNumElements());</div>
<div class="line"><a name="l00082"></a><span class="lineno"> 82</span>&#160; std::vector&lt;T&gt; gammaVector (gammaBuffer, gammaBuffer + gammaTensor.GetNumElements());</div>
<div class="line"><a name="l00083"></a><span class="lineno"> 83</span>&#160; std::vector&lt;T&gt; meanVector (meanBuffer, meanBuffer + meanTensor.GetNumElements());</div>
<div class="line"><a name="l00084"></a><span class="lineno"> 84</span>&#160; std::vector&lt;T&gt; varianceVector(varBuffer, varBuffer + varTensor.GetNumElements());</div>
<div class="line"><a name="l00085"></a><span class="lineno"> 85</span>&#160; </div>
<div class="line"><a name="l00086"></a><span class="lineno"> 86</span>&#160; <span class="comment">// fusedWeights = ( gamma * weights ) / ( std - epsilon);</span></div>
<div class="line"><a name="l00087"></a><span class="lineno"> 87</span>&#160; std::vector&lt;T&gt; fusedWeightsVector(weightsVector.size());</div>
<div class="line"><a name="l00088"></a><span class="lineno"> 88</span>&#160; </div>
<div class="line"><a name="l00089"></a><span class="lineno"> 89</span>&#160; <span class="keywordflow">for</span> (<span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> cInput = 0; cInput &lt; inputChannels; ++cInput)</div>
<div class="line"><a name="l00090"></a><span class="lineno"> 90</span>&#160; {</div>
<div class="line"><a name="l00091"></a><span class="lineno"> 91</span>&#160; <span class="keywordflow">for</span> (<span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> cOut = 0; cOut &lt; outputChannels; ++cOut)</div>
<div class="line"><a name="l00092"></a><span class="lineno"> 92</span>&#160; {</div>
<div class="line"><a name="l00093"></a><span class="lineno"> 93</span>&#160; T mult = gammaVector[cOut] / <span class="keyword">static_cast&lt;</span>T<span class="keyword">&gt;</span>(sqrtf(varianceVector[cOut] + epsilon));</div>
<div class="line"><a name="l00094"></a><span class="lineno"> 94</span>&#160; </div>
<div class="line"><a name="l00095"></a><span class="lineno"> 95</span>&#160; <span class="keywordflow">for</span> (<span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> h = 0; h &lt; weightsHeight; ++h)</div>
<div class="line"><a name="l00096"></a><span class="lineno"> 96</span>&#160; {</div>
<div class="line"><a name="l00097"></a><span class="lineno"> 97</span>&#160; <span class="keywordflow">for</span> (<span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> w = 0; w &lt; weightsWidth; ++w)</div>
<div class="line"><a name="l00098"></a><span class="lineno"> 98</span>&#160; {</div>
<div class="line"><a name="l00099"></a><span class="lineno"> 99</span>&#160; <span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> weightsIdx = 0;</div>
<div class="line"><a name="l00100"></a><span class="lineno"> 100</span>&#160; </div>
<div class="line"><a name="l00101"></a><span class="lineno"> 101</span>&#160; <span class="keywordflow">if</span> (depthwise)</div>
<div class="line"><a name="l00102"></a><span class="lineno"> 102</span>&#160; {</div>
<div class="line"><a name="l00103"></a><span class="lineno"> 103</span>&#160; cInput = cOut / depthMultiplier;</div>
<div class="line"><a name="l00104"></a><span class="lineno"> 104</span>&#160; weightsIdx = w * outputChannels + cOut +</div>
<div class="line"><a name="l00105"></a><span class="lineno"> 105</span>&#160; h * weightsWidth * outputChannels;</div>
<div class="line"><a name="l00106"></a><span class="lineno"> 106</span>&#160; }</div>
<div class="line"><a name="l00107"></a><span class="lineno"> 107</span>&#160; <span class="keywordflow">else</span> <span class="keywordflow">if</span> (convDescriptor.m_DataLayout == <a class="code" href="namespacearmnn.html#ad1d5cce2d9e9a5d61c243e5c989112e0ad066db54b89b0912e7e7c6da51e2da51">DataLayout::NHWC</a>)</div>
<div class="line"><a name="l00108"></a><span class="lineno"> 108</span>&#160; {</div>
<div class="line"><a name="l00109"></a><span class="lineno"> 109</span>&#160; weightsIdx = cOut * weightsHeight * weightsWidth * inputChannels +</div>
<div class="line"><a name="l00110"></a><span class="lineno"> 110</span>&#160; h * weightsWidth * inputChannels +</div>
<div class="line"><a name="l00111"></a><span class="lineno"> 111</span>&#160; w * inputChannels +</div>
<div class="line"><a name="l00112"></a><span class="lineno"> 112</span>&#160; cInput;</div>
<div class="line"><a name="l00113"></a><span class="lineno"> 113</span>&#160; }</div>
<div class="line"><a name="l00114"></a><span class="lineno"> 114</span>&#160; <span class="keywordflow">else</span></div>
<div class="line"><a name="l00115"></a><span class="lineno"> 115</span>&#160; {</div>
<div class="line"><a name="l00116"></a><span class="lineno"> 116</span>&#160; weightsIdx = cOut * weightsWidth * weightsHeight * inputChannels +</div>
<div class="line"><a name="l00117"></a><span class="lineno"> 117</span>&#160; cInput * weightsWidth * weightsHeight +</div>
<div class="line"><a name="l00118"></a><span class="lineno"> 118</span>&#160; h * weightsWidth +</div>
<div class="line"><a name="l00119"></a><span class="lineno"> 119</span>&#160; w;</div>
<div class="line"><a name="l00120"></a><span class="lineno"> 120</span>&#160; }</div>
<div class="line"><a name="l00121"></a><span class="lineno"> 121</span>&#160; fusedWeightsVector[weightsIdx] = mult * weightsVector[weightsIdx];</div>
<div class="line"><a name="l00122"></a><span class="lineno"> 122</span>&#160; }</div>
<div class="line"><a name="l00123"></a><span class="lineno"> 123</span>&#160; }</div>
<div class="line"><a name="l00124"></a><span class="lineno"> 124</span>&#160; }</div>
<div class="line"><a name="l00125"></a><span class="lineno"> 125</span>&#160; }</div>
<div class="line"><a name="l00126"></a><span class="lineno"> 126</span>&#160; ConstTensor fusedWeightsTensor(weightsTensor.GetInfo(), fusedWeightsVector);</div>
<div class="line"><a name="l00127"></a><span class="lineno"> 127</span>&#160; </div>
<div class="line"><a name="l00128"></a><span class="lineno"> 128</span>&#160; <span class="comment">// fusedBias = (gamma * (bias - mean)) / (variance - epsilon) + beta;</span></div>
<div class="line"><a name="l00129"></a><span class="lineno"> 129</span>&#160; std::vector&lt;T&gt; fusedBiasVector(outputChannels);</div>
<div class="line"><a name="l00130"></a><span class="lineno"> 130</span>&#160; <span class="keywordtype">bool</span> biasWasEnabledBeforeOpt = convDescriptor.m_BiasEnabled;</div>
<div class="line"><a name="l00131"></a><span class="lineno"> 131</span>&#160; <span class="keywordflow">if</span> (biasWasEnabledBeforeOpt)</div>
<div class="line"><a name="l00132"></a><span class="lineno"> 132</span>&#160; {</div>
<div class="line"><a name="l00133"></a><span class="lineno"> 133</span>&#160; ConstTensor biasTensor;</div>
<div class="line"><a name="l00134"></a><span class="lineno"> 134</span>&#160; <a class="code" href="_assert_8hpp.html#a91c4dfde57907d7698c7531785690a7f">ARMNN_ASSERT_MSG</a>(convLayer-&gt;GetInputSlots()[2].GetConnection() != <span class="keyword">nullptr</span>,</div>
<div class="line"><a name="l00135"></a><span class="lineno"> 135</span>&#160; <span class="stringliteral">&quot;FuseBatchNorm: Bias data should not be null if bias is enabled.&quot;</span>);</div>
<div class="line"><a name="l00136"></a><span class="lineno"> 136</span>&#160; </div>
<div class="line"><a name="l00137"></a><span class="lineno"> 137</span>&#160; ConstantLayer* biasLayer = PolymorphicDowncast&lt;ConstantLayer*&gt;(</div>
<div class="line"><a name="l00138"></a><span class="lineno"> 138</span>&#160; &amp;base.GetInputSlot(2).GetConnectedOutputSlot()-&gt;GetOwningLayer());</div>
<div class="line"><a name="l00139"></a><span class="lineno"> 139</span>&#160; </div>
<div class="line"><a name="l00140"></a><span class="lineno"> 140</span>&#160; biasTensor = ConstTensor(biasLayer-&gt;m_LayerOutput-&gt;GetTensorInfo(),</div>
<div class="line"><a name="l00141"></a><span class="lineno"> 141</span>&#160; biasLayer-&gt;m_LayerOutput-&gt;Map(<span class="keyword">true</span>));</div>
<div class="line"><a name="l00142"></a><span class="lineno"> 142</span>&#160; </div>
<div class="line"><a name="l00143"></a><span class="lineno"> 143</span>&#160; <span class="keyword">const</span> <span class="keyword">auto</span>* biasBuffer = <span class="keyword">static_cast&lt;</span><span class="keyword">const </span>T*<span class="keyword">&gt;</span>(biasTensor.GetMemoryArea());</div>
<div class="line"><a name="l00144"></a><span class="lineno"> 144</span>&#160; std::vector&lt;T&gt; biasVector(biasBuffer, biasBuffer + biasTensor.GetNumElements());</div>
<div class="line"><a name="l00145"></a><span class="lineno"> 145</span>&#160; </div>
<div class="line"><a name="l00146"></a><span class="lineno"> 146</span>&#160; <span class="keywordflow">for</span> (<span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> cOut = 0; cOut &lt; outputChannels; ++cOut)</div>
<div class="line"><a name="l00147"></a><span class="lineno"> 147</span>&#160; {</div>
<div class="line"><a name="l00148"></a><span class="lineno"> 148</span>&#160; fusedBiasVector[cOut] = ((gammaVector[cOut] * (biasVector[cOut] - meanVector[cOut])) /</div>
<div class="line"><a name="l00149"></a><span class="lineno"> 149</span>&#160; sqrtf(varianceVector[cOut] + epsilon)) + betaVector[cOut];</div>
<div class="line"><a name="l00150"></a><span class="lineno"> 150</span>&#160; }</div>
<div class="line"><a name="l00151"></a><span class="lineno"> 151</span>&#160; }</div>
<div class="line"><a name="l00152"></a><span class="lineno"> 152</span>&#160; <span class="keywordflow">else</span></div>
<div class="line"><a name="l00153"></a><span class="lineno"> 153</span>&#160; {</div>
<div class="line"><a name="l00154"></a><span class="lineno"> 154</span>&#160; convDescriptor.m_BiasEnabled = <span class="keyword">true</span>;</div>
<div class="line"><a name="l00155"></a><span class="lineno"> 155</span>&#160; std::vector&lt;T&gt; biasVector(outputChannels, T(0));</div>
<div class="line"><a name="l00156"></a><span class="lineno"> 156</span>&#160; </div>
<div class="line"><a name="l00157"></a><span class="lineno"> 157</span>&#160; <span class="keywordflow">for</span> (<span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> cOut = 0; cOut &lt; outputChannels; ++cOut)</div>
<div class="line"><a name="l00158"></a><span class="lineno"> 158</span>&#160; {</div>
<div class="line"><a name="l00159"></a><span class="lineno"> 159</span>&#160; fusedBiasVector[cOut] = ((gammaVector[cOut] * (biasVector[cOut] - meanVector[cOut])) /</div>
<div class="line"><a name="l00160"></a><span class="lineno"> 160</span>&#160; sqrtf(varianceVector[cOut] + epsilon)) + betaVector[cOut];</div>
<div class="line"><a name="l00161"></a><span class="lineno"> 161</span>&#160; }</div>
<div class="line"><a name="l00162"></a><span class="lineno"> 162</span>&#160; }</div>
<div class="line"><a name="l00163"></a><span class="lineno"> 163</span>&#160; ConstTensor fusedBiasTensor(TensorInfo({outputChannels}, ArmnnType, 0.0f, 0, <span class="keyword">true</span>), fusedBiasVector);</div>
<div class="line"><a name="l00164"></a><span class="lineno"> 164</span>&#160; </div>
<div class="line"><a name="l00165"></a><span class="lineno"> 165</span>&#160; <span class="comment">// Insert the new convolution layer that has batch norm parameters fused into</span></div>
<div class="line"><a name="l00166"></a><span class="lineno"> 166</span>&#160; <span class="keyword">const</span> std::string name = std::string(<span class="stringliteral">&quot;fused-&quot;</span>) + child.GetName() + std::string(<span class="stringliteral">&quot;-into-&quot;</span>) + base.GetName();</div>
<div class="line"><a name="l00167"></a><span class="lineno"> 167</span>&#160; <span class="keyword">auto</span>&amp; newConv2dLayer = *graph.InsertNewLayer&lt;ConvLayer&gt;(base.GetInputSlot(0),</div>
<div class="line"><a name="l00168"></a><span class="lineno"> 168</span>&#160; convDescriptor,</div>
<div class="line"><a name="l00169"></a><span class="lineno"> 169</span>&#160; name.c_str());</div>
<div class="line"><a name="l00170"></a><span class="lineno"> 170</span>&#160; </div>
<div class="line"><a name="l00171"></a><span class="lineno"> 171</span>&#160; <span class="comment">// Connect weights and bias from old to new Conv2d layer</span></div>
<div class="line"><a name="l00172"></a><span class="lineno"> 172</span>&#160; <span class="comment">// This optimization will always have 3 input slots on the Conv2d base layer</span></div>
<div class="line"><a name="l00173"></a><span class="lineno"> 173</span>&#160; <span class="keywordflow">if</span> (newConv2dLayer.GetNumInputSlots() &gt; 1)</div>
<div class="line"><a name="l00174"></a><span class="lineno"> 174</span>&#160; {</div>
<div class="line"><a name="l00175"></a><span class="lineno"> 175</span>&#160; <span class="comment">// Remove old connection and connect to new layer2d</span></div>
<div class="line"><a name="l00176"></a><span class="lineno"> 176</span>&#160; weightLayer-&gt;GetOutputSlot(0).Disconnect(base.GetInputSlot(1));</div>
<div class="line"><a name="l00177"></a><span class="lineno"> 177</span>&#160; weightLayer-&gt;GetOutputSlot(0).Connect(newConv2dLayer.GetInputSlot(1));</div>
<div class="line"><a name="l00178"></a><span class="lineno"> 178</span>&#160; weightLayer-&gt;m_LayerOutput = std::make_unique&lt;ScopedTensorHandle&gt;(fusedWeightsTensor);</div>
<div class="line"><a name="l00179"></a><span class="lineno"> 179</span>&#160; </div>
<div class="line"><a name="l00180"></a><span class="lineno"> 180</span>&#160; <span class="comment">// Move bias const layers as normal if it was enabled before the optimisation</span></div>
<div class="line"><a name="l00181"></a><span class="lineno"> 181</span>&#160; ConstantLayer* biasLayer;</div>
<div class="line"><a name="l00182"></a><span class="lineno"> 182</span>&#160; <span class="keywordflow">if</span> (biasWasEnabledBeforeOpt)</div>
<div class="line"><a name="l00183"></a><span class="lineno"> 183</span>&#160; {</div>
<div class="line"><a name="l00184"></a><span class="lineno"> 184</span>&#160; biasLayer = PolymorphicDowncast&lt;ConstantLayer*&gt;(</div>
<div class="line"><a name="l00185"></a><span class="lineno"> 185</span>&#160; &amp;base.GetInputSlot(2).GetConnectedOutputSlot()-&gt;GetOwningLayer());</div>
<div class="line"><a name="l00186"></a><span class="lineno"> 186</span>&#160; <span class="comment">// Remove old connection and connect to new layer2d</span></div>
<div class="line"><a name="l00187"></a><span class="lineno"> 187</span>&#160; biasLayer-&gt;GetOutputSlot(0).Disconnect(base.GetInputSlot(2));</div>
<div class="line"><a name="l00188"></a><span class="lineno"> 188</span>&#160; biasLayer-&gt;GetOutputSlot(0).Connect(newConv2dLayer.GetInputSlot(2));</div>
<div class="line"><a name="l00189"></a><span class="lineno"> 189</span>&#160; </div>
<div class="line"><a name="l00190"></a><span class="lineno"> 190</span>&#160; }</div>
<div class="line"><a name="l00191"></a><span class="lineno"> 191</span>&#160; <span class="comment">// Otherwise create a new bias layer and add to the new convolution2d</span></div>
<div class="line"><a name="l00192"></a><span class="lineno"> 192</span>&#160; <span class="keywordflow">else</span></div>
<div class="line"><a name="l00193"></a><span class="lineno"> 193</span>&#160; {</div>
<div class="line"><a name="l00194"></a><span class="lineno"> 194</span>&#160; <span class="comment">// Add in bias constant layer</span></div>
<div class="line"><a name="l00195"></a><span class="lineno"> 195</span>&#160; biasLayer = graph.AddLayer&lt;ConstantLayer&gt;(<span class="stringliteral">&quot;Bias&quot;</span>);</div>
<div class="line"><a name="l00196"></a><span class="lineno"> 196</span>&#160; biasLayer-&gt;GetOutputSlot(0).SetTensorInfo(fusedBiasTensor.GetInfo());</div>
<div class="line"><a name="l00197"></a><span class="lineno"> 197</span>&#160; biasLayer-&gt;GetOutputSlot(0).Connect(newConv2dLayer.GetInputSlot(2));</div>
<div class="line"><a name="l00198"></a><span class="lineno"> 198</span>&#160; }</div>
<div class="line"><a name="l00199"></a><span class="lineno"> 199</span>&#160; biasLayer-&gt;m_LayerOutput = std::make_unique&lt;ScopedTensorHandle&gt;(ConstTensor(fusedBiasTensor));</div>
<div class="line"><a name="l00200"></a><span class="lineno"> 200</span>&#160; }</div>
<div class="line"><a name="l00201"></a><span class="lineno"> 201</span>&#160; </div>
<div class="line"><a name="l00202"></a><span class="lineno"> 202</span>&#160; </div>
<div class="line"><a name="l00203"></a><span class="lineno"> 203</span>&#160; <span class="comment">// Reconnects with original parent.</span></div>
<div class="line"><a name="l00204"></a><span class="lineno"> 204</span>&#160; newConv2dLayer.GetOutputSlot().MoveAllConnections(*parentOut);</div>
<div class="line"><a name="l00205"></a><span class="lineno"> 205</span>&#160; <span class="comment">// Parent is now the new convolution2d layer.</span></div>
<div class="line"><a name="l00206"></a><span class="lineno"> 206</span>&#160; parentOut = &amp;newConv2dLayer.GetOutputSlot();</div>
<div class="line"><a name="l00207"></a><span class="lineno"> 207</span>&#160; </div>
<div class="line"><a name="l00208"></a><span class="lineno"> 208</span>&#160; <span class="comment">// Moves connections in child output to parent layer.</span></div>
<div class="line"><a name="l00209"></a><span class="lineno"> 209</span>&#160; <span class="comment">// Child layer will be removed as it&#39;s left unconnected.</span></div>
<div class="line"><a name="l00210"></a><span class="lineno"> 210</span>&#160; <span class="comment">// Base layer will be removed if left unconnected.</span></div>
<div class="line"><a name="l00211"></a><span class="lineno"> 211</span>&#160; child.GetOutputSlot().MoveAllConnections(*parentOut);</div>
<div class="line"><a name="l00212"></a><span class="lineno"> 212</span>&#160; }</div>
<div class="line"><a name="l00213"></a><span class="lineno"> 213</span>&#160; }</div>
</div><!-- fragment -->
<p class="reference">References <a class="el" href="_graph_8hpp_source.html#l00466">Graph::AddLayer()</a>, <a class="el" href="_assert_8hpp_source.html#l00014">ARMNN_ASSERT</a>, <a class="el" href="_assert_8hpp_source.html#l00015">ARMNN_ASSERT_MSG</a>, <a class="el" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4ae4743c3ec15d1d84169b17264634692e">armnn::BatchNormalization</a>, <a class="el" href="_layer_8cpp_source.html#l00123">OutputSlot::Connect()</a>, <a class="el" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4adb033d2f81b68f9a17e8f62de69fed4a">armnn::Convolution2d</a>, <a class="el" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4af97adbfc88b7012a0243215b1076e7e7">armnn::DepthwiseConvolution2d</a>, <a class="el" href="_layer_8cpp_source.html#l00131">OutputSlot::Disconnect()</a>, <a class="el" href="_data_layout_indexed_8hpp_source.html#l00023">DataLayoutIndexed::GetChannelsIndex()</a>, <a class="el" href="_layer_8hpp_source.html#l00056">InputSlot::GetConnectedOutputSlot()</a>, <a class="el" href="_layer_8cpp_source.html#l00345">Layer::GetDataType()</a>, <a class="el" href="_data_layout_indexed_8hpp_source.html#l00024">DataLayoutIndexed::GetHeightIndex()</a>, <a class="el" href="_tensor_8hpp_source.html#l00297">BaseTensor&lt; MemoryType &gt;::GetInfo()</a>, <a class="el" href="_layer_8hpp_source.html#l00337">Layer::GetInputSlot()</a>, <a class="el" href="_tensor_8hpp_source.html#l00307">BaseTensor&lt; MemoryType &gt;::GetMemoryArea()</a>, <a class="el" href="_layer_8hpp_source.html#l00332">Layer::GetName()</a>, <a class="el" href="_tensor_8hpp_source.html#l00305">BaseTensor&lt; MemoryType &gt;::GetNumElements()</a>, <a class="el" href="_layer_8hpp_source.html#l00339">Layer::GetOutputSlot()</a>, <a class="el" href="_layer_8hpp_source.html#l00053">InputSlot::GetOwningLayer()</a>, <a class="el" href="_layer_8hpp_source.html#l00132">OutputSlot::GetOwningLayer()</a>, <a class="el" href="_tensor_8hpp_source.html#l00193">TensorInfo::GetShape()</a>, <a class="el" href="_layer_8cpp_source.html#l00100">OutputSlot::GetTensorInfo()</a>, <a class="el" href="_layer_8hpp_source.html#l00286">Layer::GetType()</a>, <a class="el" href="_data_layout_indexed_8hpp_source.html#l00025">DataLayoutIndexed::GetWidthIndex()</a>, <a class="el" href="_ignore_unused_8hpp_source.html#l00014">armnn::IgnoreUnused()</a>, <a class="el" href="_graph_8hpp_source.html#l00481">Graph::InsertNewLayer()</a>, <a class="el" href="_descriptors_8hpp_source.html#l00841">BatchNormalizationDescriptor::m_Eps</a>, <a class="el" href="_constant_layer_8hpp_source.html#l00046">ConstantLayer::m_LayerOutput</a>, <a class="el" href="_layer_8cpp_source.html#l00156">OutputSlot::MoveAllConnections()</a>, <a class="el" href="namespacearmnn.html#ad1d5cce2d9e9a5d61c243e5c989112e0ad066db54b89b0912e7e7c6da51e2da51">armnn::NHWC</a>, and <a class="el" href="_layer_8cpp_source.html#l00095">OutputSlot::SetTensorInfo()</a>.</p>
</div>
</div>
<hr/>The documentation for this class was generated from the following file:<ul>
<li>src/armnn/optimizations/<a class="el" href="_fuse_batch_norm_8hpp_source.html">FuseBatchNorm.hpp</a></li>
</ul>
</div><!-- contents -->
</div><!-- doc-content -->
<div class="ttc" id="a_assert_8hpp_html_a5698be69cbd5dfe6c28fcd9867e8cbed"><div class="ttname"><a href="_assert_8hpp.html#a5698be69cbd5dfe6c28fcd9867e8cbed">ARMNN_ASSERT</a></div><div class="ttdeci">#define ARMNN_ASSERT(COND)</div><div class="ttdef"><b>Definition:</b> <a href="_assert_8hpp_source.html#l00014">Assert.hpp:14</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a56943a0946e5f15e5e58054b8e7a04a4ae4743c3ec15d1d84169b17264634692e"><div class="ttname"><a href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4ae4743c3ec15d1d84169b17264634692e">armnn::LayerType::BatchNormalization</a></div><div class="ttdeci">@ BatchNormalization</div></div>
<div class="ttc" id="anamespacearmnn_html_ad1d5cce2d9e9a5d61c243e5c989112e0ad066db54b89b0912e7e7c6da51e2da51"><div class="ttname"><a href="namespacearmnn.html#ad1d5cce2d9e9a5d61c243e5c989112e0ad066db54b89b0912e7e7c6da51e2da51">armnn::DataLayout::NHWC</a></div><div class="ttdeci">@ NHWC</div></div>
<div class="ttc" id="aclassarmnn_utils_1_1_data_layout_indexed_html"><div class="ttname"><a href="classarmnn_utils_1_1_data_layout_indexed.html">armnnUtils::DataLayoutIndexed</a></div><div class="ttdoc">Provides access to the appropriate indexes for Channels, Height and Width based on DataLayout.</div><div class="ttdef"><b>Definition:</b> <a href="_data_layout_indexed_8hpp_source.html#l00017">DataLayoutIndexed.hpp:17</a></div></div>
<div class="ttc" id="a_assert_8hpp_html_a91c4dfde57907d7698c7531785690a7f"><div class="ttname"><a href="_assert_8hpp.html#a91c4dfde57907d7698c7531785690a7f">ARMNN_ASSERT_MSG</a></div><div class="ttdeci">#define ARMNN_ASSERT_MSG(COND, MSG)</div><div class="ttdef"><b>Definition:</b> <a href="_assert_8hpp_source.html#l00015">Assert.hpp:15</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a56943a0946e5f15e5e58054b8e7a04a4af97adbfc88b7012a0243215b1076e7e7"><div class="ttname"><a href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4af97adbfc88b7012a0243215b1076e7e7">armnn::LayerType::DepthwiseConvolution2d</a></div><div class="ttdeci">@ DepthwiseConvolution2d</div></div>
<div class="ttc" id="anamespacearmnn_html_a44affeeb090c3c6a3062830562672e84"><div class="ttname"><a href="namespacearmnn.html#a44affeeb090c3c6a3062830562672e84">armnn::IgnoreUnused</a></div><div class="ttdeci">void IgnoreUnused(Ts &amp;&amp;...)</div><div class="ttdef"><b>Definition:</b> <a href="_ignore_unused_8hpp_source.html#l00014">IgnoreUnused.hpp:14</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a56943a0946e5f15e5e58054b8e7a04a4adb033d2f81b68f9a17e8f62de69fed4a"><div class="ttname"><a href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4adb033d2f81b68f9a17e8f62de69fed4a">armnn::LayerType::Convolution2d</a></div><div class="ttdeci">@ Convolution2d</div></div>
<!-- start footer part -->
<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
<ul>
<li class="navelem"><a class="el" href="namespacearmnn.html">armnn</a></li><li class="navelem"><a class="el" href="namespacearmnn_1_1optimizations.html">optimizations</a></li><li class="navelem"><a class="el" href="classarmnn_1_1optimizations_1_1_fuse_batch_norm.html">FuseBatchNorm</a></li>
<li class="footer">Generated on Thu May 16 2024 09:32:00 for Arm NN by
<a href="http://www.doxygen.org/index.html">
<img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.17 </li>
</ul>
</div>
</body>
</html>