<!-- HTML header for doxygen 1.8.17-->
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "https://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen 1.8.17"/>
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<title>Arm NN: src/backends/gpuFsa/GpuFsaBackend.cpp Source File</title>
<link href="tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="dynsections.js"></script>
<link href="navtree.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="resize.js"></script>
<script type="text/javascript" src="navtreedata.js"></script>
<script type="text/javascript" src="navtree.js"></script>
<link href="search/search.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="search/searchdata.js"></script>
<script type="text/javascript" src="search/search.js"></script>
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
extensions: ["tex2jax.js"],
jax: ["input/TeX","output/HTML-CSS"],
});
</script>
<script type="text/javascript" async="async" src="http://cdn.mathjax.org/mathjax/latest/MathJax.js"></script>
<link href="doxygen.css" rel="stylesheet" type="text/css" />
<link href="customdoxygen.css" rel="stylesheet" type="text/css"/>
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
<tbody>
<tr style="height: 56px;">
<img alt="ArmNN" src="Arm_NN_horizontal_blue.png" style="max-width: 15rem; margin-top: .5rem; margin-left 13px"/>
<td id="projectalign" style="padding-left: 0.9em;">
<div id="projectname">
&#160;<span id="projectnumber">24.05</span>
</div>
</td>
</tr>
</tbody>
</table>
</div>
<!-- end header part -->
<!-- Generated by Doxygen 1.8.17 -->
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
var searchBox = new SearchBox("searchBox", "search",false,'Search');
/* @license-end */
</script>
<script type="text/javascript" src="menudata.js"></script>
<script type="text/javascript" src="menu.js"></script>
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
$(function() {
initMenu('',true,false,'search.php','Search');
$(document).ready(function() { init_search(); });
});
/* @license-end */</script>
<div id="main-nav"></div>
</div><!-- top -->
<div id="side-nav" class="ui-resizable side-nav-resizable">
<div id="nav-tree">
<div id="nav-tree-contents">
<div id="nav-sync" class="sync"></div>
</div>
</div>
<div id="splitbar" style="-moz-user-select:none;"
class="ui-resizable-handle">
</div>
</div>
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
$(document).ready(function(){initNavTree('_gpu_fsa_backend_8cpp_source.html',''); initResizable(); });
/* @license-end */
</script>
<div id="doc-content">
<!-- window showing the filter options -->
<div id="MSearchSelectWindow"
onmouseover="return searchBox.OnSearchSelectShow()"
onmouseout="return searchBox.OnSearchSelectHide()"
onkeydown="return searchBox.OnSearchSelectKey(event)">
</div>
<!-- iframe showing the search results (closed by default) -->
<div id="MSearchResultsWindow">
<iframe src="javascript:void(0)" frameborder="0"
name="MSearchResults" id="MSearchResults">
</iframe>
</div>
<div class="header">
<div class="headertitle">
<div class="title">GpuFsaBackend.cpp</div> </div>
</div><!--header-->
<div class="contents">
<a href="_gpu_fsa_backend_8cpp.html">Go to the documentation of this file.</a><div class="fragment"><div class="line"><a name="l00001"></a><span class="lineno"> 1</span>&#160;<span class="comment">//</span></div>
<div class="line"><a name="l00002"></a><span class="lineno"> 2</span>&#160;<span class="comment">// Copyright © 2022-2024 Arm Ltd and Contributors. All rights reserved.</span></div>
<div class="line"><a name="l00003"></a><span class="lineno"> 3</span>&#160;<span class="comment">// SPDX-License-Identifier: MIT</span></div>
<div class="line"><a name="l00004"></a><span class="lineno"> 4</span>&#160;<span class="comment">//</span></div>
<div class="line"><a name="l00005"></a><span class="lineno"> 5</span>&#160; </div>
<div class="line"><a name="l00006"></a><span class="lineno"> 6</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_gpu_fsa_backend_8hpp.html">GpuFsaBackend.hpp</a>&quot;</span></div>
<div class="line"><a name="l00007"></a><span class="lineno"> 7</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_gpu_fsa_backend_context_8hpp.html">GpuFsaBackendContext.hpp</a>&quot;</span></div>
<div class="line"><a name="l00008"></a><span class="lineno"> 8</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_gpu_fsa_backend_default_allocator_8hpp.html">GpuFsaBackendDefaultAllocator.hpp</a>&quot;</span></div>
<div class="line"><a name="l00009"></a><span class="lineno"> 9</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_gpu_fsa_backend_id_8hpp.html">GpuFsaBackendId.hpp</a>&quot;</span></div>
<div class="line"><a name="l00010"></a><span class="lineno"> 10</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_gpu_fsa_layer_support_8hpp.html">GpuFsaLayerSupport.hpp</a>&quot;</span></div>
<div class="line"><a name="l00011"></a><span class="lineno"> 11</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_gpu_fsa_tensor_handle_factory_8hpp.html">GpuFsaTensorHandleFactory.hpp</a>&quot;</span></div>
<div class="line"><a name="l00012"></a><span class="lineno"> 12</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_gpu_fsa_workload_factory_8hpp.html">GpuFsaWorkloadFactory.hpp</a>&quot;</span></div>
<div class="line"><a name="l00013"></a><span class="lineno"> 13</span>&#160; </div>
<div class="line"><a name="l00014"></a><span class="lineno"> 14</span>&#160;<span class="preprocessor">#include &lt;<a class="code" href="_i_backend_context_8hpp.html">armnn/backends/IBackendContext.hpp</a>&gt;</span></div>
<div class="line"><a name="l00015"></a><span class="lineno"> 15</span>&#160;<span class="preprocessor">#include &lt;<a class="code" href="_i_memory_manager_8hpp.html">armnn/backends/IMemoryManager.hpp</a>&gt;</span></div>
<div class="line"><a name="l00016"></a><span class="lineno"> 16</span>&#160;<span class="preprocessor">#include &lt;<a class="code" href="_subgraph_utils_8hpp.html">backendsCommon/SubgraphUtils.hpp</a>&gt;</span></div>
<div class="line"><a name="l00017"></a><span class="lineno"> 17</span>&#160;<span class="preprocessor">#include &lt;<a class="code" href="_optimizer_8hpp.html">Optimizer.hpp</a>&gt;</span></div>
<div class="line"><a name="l00018"></a><span class="lineno"> 18</span>&#160; </div>
<div class="line"><a name="l00019"></a><span class="lineno"> 19</span>&#160;<span class="preprocessor">#include &lt;arm_compute/core/CL/CLKernelLibrary.h&gt;</span></div>
<div class="line"><a name="l00020"></a><span class="lineno"> 20</span>&#160;<span class="preprocessor">#include &lt;arm_compute/runtime/CL/CLBufferAllocator.h&gt;</span></div>
<div class="line"><a name="l00021"></a><span class="lineno"> 21</span>&#160; </div>
<div class="line"><a name="l00022"></a><span class="lineno"> 22</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_gpu_fsa_activation_8hpp.html">layers/GpuFsaActivation.hpp</a>&quot;</span></div>
<div class="line"><a name="l00023"></a><span class="lineno"> 23</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_gpu_fsa_batch_mat_mul_8hpp.html">layers/GpuFsaBatchMatMul.hpp</a>&quot;</span></div>
<div class="line"><a name="l00024"></a><span class="lineno"> 24</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_gpu_fsa_cast_8hpp.html">layers/GpuFsaCast.hpp</a>&quot;</span></div>
<div class="line"><a name="l00025"></a><span class="lineno"> 25</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_gpu_fsa_convolution2d_8hpp.html">layers/GpuFsaConvolution2d.hpp</a>&quot;</span></div>
<div class="line"><a name="l00026"></a><span class="lineno"> 26</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_gpu_fsa_depthwise_convolution2d_8hpp.html">layers/GpuFsaDepthwiseConvolution2d.hpp</a>&quot;</span></div>
<div class="line"><a name="l00027"></a><span class="lineno"> 27</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_gpu_fsa_elementwise_binary_8hpp.html">layers/GpuFsaElementwiseBinary.hpp</a>&quot;</span></div>
<div class="line"><a name="l00028"></a><span class="lineno"> 28</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_gpu_fsa_pooling2d_8hpp.html">layers/GpuFsaPooling2d.hpp</a>&quot;</span></div>
<div class="line"><a name="l00029"></a><span class="lineno"> 29</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_gpu_fsa_reshape_8hpp.html">layers/GpuFsaReshape.hpp</a>&quot;</span></div>
<div class="line"><a name="l00030"></a><span class="lineno"> 30</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_gpu_fsa_resize_8hpp.html">layers/GpuFsaResize.hpp</a>&quot;</span></div>
<div class="line"><a name="l00031"></a><span class="lineno"> 31</span>&#160;<span class="preprocessor">#include &quot;<a class="code" href="_gpu_fsa_softmax_8hpp.html">layers/GpuFsaSoftmax.hpp</a>&quot;</span></div>
<div class="line"><a name="l00032"></a><span class="lineno"> 32</span>&#160; </div>
<div class="line"><a name="l00033"></a><span class="lineno"> 33</span>&#160;<span class="keyword">namespace </span><a class="code" href="namespacearmnn.html">armnn</a></div>
<div class="line"><a name="l00034"></a><span class="lineno"> 34</span>&#160;{</div>
<div class="line"><a name="l00035"></a><span class="lineno"> 35</span>&#160; </div>
<div class="line"><a name="l00036"></a><span class="lineno"> 36</span>&#160;<span class="keyword">template</span> &lt;<span class="keyword">typename</span> T&gt;</div>
<div class="line"><a name="l00037"></a><span class="lineno"><a class="line" href="namespacearmnn.html#a7dacd5f6b52fd93bfd536976d27f8293"> 37</a></span>&#160;<span class="keyword">inline</span> <span class="keywordtype">void</span> <a class="code" href="namespacearmnn.html#a7dacd5f6b52fd93bfd536976d27f8293">DeleteAsType</a>(<span class="keyword">const</span> <span class="keywordtype">void</span>* <span class="keyword">const</span> blob)</div>
<div class="line"><a name="l00038"></a><span class="lineno"> 38</span>&#160;{</div>
<div class="line"><a name="l00039"></a><span class="lineno"> 39</span>&#160; <span class="keyword">delete</span> <span class="keyword">static_cast&lt;</span><span class="keyword">const </span>T*<span class="keyword">&gt;</span>(blob);</div>
<div class="line"><a name="l00040"></a><span class="lineno"> 40</span>&#160;}</div>
<div class="line"><a name="l00041"></a><span class="lineno"> 41</span>&#160; </div>
<div class="line"><a name="l00042"></a><span class="lineno"><a class="line" href="namespacearmnn.html#a3f6dfcc7396ea476f96658ae6cb02b54"> 42</a></span>&#160;<span class="keyword">inline</span> <a class="code" href="classarmnn_1_1_subgraph_view.html#a5cc65e15002dbc33a5c8a7d6680e9a9d">SubgraphView::InputSlots</a> <a class="code" href="namespacearmnn.html#a3f6dfcc7396ea476f96658ae6cb02b54">CreateInputsFrom</a>(<a class="code" href="classarmnn_1_1_layer.html">Layer</a>* layer)</div>
<div class="line"><a name="l00043"></a><span class="lineno"> 43</span>&#160;{</div>
<div class="line"><a name="l00044"></a><span class="lineno"> 44</span>&#160; <a class="code" href="classarmnn_1_1_subgraph_view.html#a5cc65e15002dbc33a5c8a7d6680e9a9d">SubgraphView::InputSlots</a> result;</div>
<div class="line"><a name="l00045"></a><span class="lineno"> 45</span>&#160; <span class="keywordflow">for</span> (<span class="keyword">auto</span>&amp;&amp; it = layer-&gt;<a class="code" href="classarmnn_1_1_layer.html#af6cb8de21ef0da269ec9b67755ae92a0">BeginInputSlots</a>(); it != layer-&gt;<a class="code" href="classarmnn_1_1_layer.html#a9752e12d6b79e18da1a25f76159d2a72">EndInputSlots</a>(); ++it)</div>
<div class="line"><a name="l00046"></a><span class="lineno"> 46</span>&#160; {</div>
<div class="line"><a name="l00047"></a><span class="lineno"> 47</span>&#160; result.push_back(&amp;(*it));</div>
<div class="line"><a name="l00048"></a><span class="lineno"> 48</span>&#160; }</div>
<div class="line"><a name="l00049"></a><span class="lineno"> 49</span>&#160; <span class="keywordflow">return</span> result;</div>
<div class="line"><a name="l00050"></a><span class="lineno"> 50</span>&#160;}</div>
<div class="line"><a name="l00051"></a><span class="lineno"> 51</span>&#160; </div>
<div class="line"><a name="l00052"></a><span class="lineno"><a class="line" href="namespacearmnn.html#a047e95685b63fedaa1d2ebb3b9428ff5"> 52</a></span>&#160;<span class="keyword">inline</span> <a class="code" href="classarmnn_1_1_subgraph_view.html#a78293334750ec5279eb9c96d56deaf08">SubgraphView::OutputSlots</a> <a class="code" href="namespacearmnn.html#a047e95685b63fedaa1d2ebb3b9428ff5">CreateOutputsFrom</a>(<a class="code" href="classarmnn_1_1_layer.html">Layer</a>* layer)</div>
<div class="line"><a name="l00053"></a><span class="lineno"> 53</span>&#160;{</div>
<div class="line"><a name="l00054"></a><span class="lineno"> 54</span>&#160; <a class="code" href="classarmnn_1_1_subgraph_view.html#a78293334750ec5279eb9c96d56deaf08">SubgraphView::OutputSlots</a> result;</div>
<div class="line"><a name="l00055"></a><span class="lineno"> 55</span>&#160; <span class="keywordflow">for</span> (<span class="keyword">auto</span>&amp;&amp; it = layer-&gt;<a class="code" href="classarmnn_1_1_layer.html#a817d4be6dd88f532d36f51748ec14185">BeginOutputSlots</a>(); it != layer-&gt;<a class="code" href="classarmnn_1_1_layer.html#a55f76d98fcd2f5cdac3e2b14536cb7ab">EndOutputSlots</a>(); ++it)</div>
<div class="line"><a name="l00056"></a><span class="lineno"> 56</span>&#160; {</div>
<div class="line"><a name="l00057"></a><span class="lineno"> 57</span>&#160; result.push_back(&amp;(*it));</div>
<div class="line"><a name="l00058"></a><span class="lineno"> 58</span>&#160; }</div>
<div class="line"><a name="l00059"></a><span class="lineno"> 59</span>&#160; <span class="keywordflow">return</span> result;</div>
<div class="line"><a name="l00060"></a><span class="lineno"> 60</span>&#160;}</div>
<div class="line"><a name="l00061"></a><span class="lineno"> 61</span>&#160; </div>
<div class="line"><a name="l00062"></a><span class="lineno"><a class="line" href="namespacearmnn.html#a5ce134e586578a4c448a128f95b4d7af"> 62</a></span>&#160;<span class="keyword">inline</span> <a class="code" href="classarmnn_1_1_subgraph_view.html#ad5fc1b5213dcb72c0d4ac9dfb46ef677">SubgraphView::SubgraphViewPtr</a> <a class="code" href="namespacearmnn.html#a5ce134e586578a4c448a128f95b4d7af">CreateSubgraphViewFrom</a>(<a class="code" href="classarmnn_1_1_subgraph_view.html#a5cc65e15002dbc33a5c8a7d6680e9a9d">SubgraphView::InputSlots</a>&amp;&amp; inputs,</div>
<div class="line"><a name="l00063"></a><span class="lineno"> 63</span>&#160; <a class="code" href="classarmnn_1_1_subgraph_view.html#a78293334750ec5279eb9c96d56deaf08">SubgraphView::OutputSlots</a>&amp;&amp; outputs,</div>
<div class="line"><a name="l00064"></a><span class="lineno"> 64</span>&#160; <a class="code" href="classarmnn_1_1_subgraph_view.html#a74798938fdaeae75c8adfa4a7439e7f9">SubgraphView::Layers</a>&amp;&amp; layers)</div>
<div class="line"><a name="l00065"></a><span class="lineno"> 65</span>&#160;{</div>
<div class="line"><a name="l00066"></a><span class="lineno"> 66</span>&#160; <span class="keywordflow">return</span> std::make_unique&lt;SubgraphView&gt;(std::move(inputs), std::move(outputs), std::move(layers));</div>
<div class="line"><a name="l00067"></a><span class="lineno"> 67</span>&#160;}</div>
<div class="line"><a name="l00068"></a><span class="lineno"> 68</span>&#160; </div>
<div class="line"><a name="l00069"></a><span class="lineno"><a class="line" href="classarmnn_1_1_gpu_fsa_backend.html#a177af502214bbc8123fbb4a3c4f0a1b8"> 69</a></span>&#160;<span class="keyword">const</span> <a class="code" href="classarmnn_1_1_backend_id.html">BackendId</a>&amp; <a class="code" href="classarmnn_1_1_gpu_fsa_backend.html#a177af502214bbc8123fbb4a3c4f0a1b8">GpuFsaBackend::GetIdStatic</a>()</div>
<div class="line"><a name="l00070"></a><span class="lineno"> 70</span>&#160;{</div>
<div class="line"><a name="l00071"></a><span class="lineno"> 71</span>&#160; <span class="keyword">static</span> <span class="keyword">const</span> <a class="code" href="classarmnn_1_1_backend_id.html">BackendId</a> s_Id{<a class="code" href="namespacearmnn.html#ab912656f08118f8b81f8261b5e0dc3a9">GpuFsaBackendId</a>()};</div>
<div class="line"><a name="l00072"></a><span class="lineno"> 72</span>&#160; <span class="keywordflow">return</span> s_Id;</div>
<div class="line"><a name="l00073"></a><span class="lineno"> 73</span>&#160;}</div>
<div class="line"><a name="l00074"></a><span class="lineno"> 74</span>&#160; </div>
<div class="line"><a name="l00075"></a><span class="lineno"><a class="line" href="classarmnn_1_1_gpu_fsa_backend.html#a93fcb3bff141d8c77b53466a44b58eee"> 75</a></span>&#160;<a class="code" href="classarmnn_1_1_i_backend_internal.html#a12bff6d51d63dac1375c89bc8415dc46">IBackendInternal::IMemoryManagerUniquePtr</a> <a class="code" href="classarmnn_1_1_gpu_fsa_backend.html#a93fcb3bff141d8c77b53466a44b58eee">GpuFsaBackend::CreateMemoryManager</a>()<span class="keyword"> const</span></div>
<div class="line"><a name="l00076"></a><span class="lineno"> 76</span>&#160;<span class="keyword"></span>{</div>
<div class="line"><a name="l00077"></a><span class="lineno"> 77</span>&#160; <span class="keywordflow">if</span> (<a class="code" href="classarmnn_1_1_gpu_fsa_backend.html#adda2579a4649278c81f18a1d3915b3a0">m_UsingCustomAllocator</a>)</div>
<div class="line"><a name="l00078"></a><span class="lineno"> 78</span>&#160; {</div>
<div class="line"><a name="l00079"></a><span class="lineno"> 79</span>&#160; <span class="keywordflow">return</span> std::make_unique&lt;GpuFsaMemoryManager&gt;(<a class="code" href="classarmnn_1_1_gpu_fsa_backend.html#afe31515f8502943018e1a6244beea1a2">m_CustomAllocator</a>);</div>
<div class="line"><a name="l00080"></a><span class="lineno"> 80</span>&#160; }</div>
<div class="line"><a name="l00081"></a><span class="lineno"> 81</span>&#160; <span class="keywordflow">return</span> std::make_unique&lt;GpuFsaMemoryManager&gt;(std::make_unique&lt;arm_compute::CLBufferAllocator&gt;());</div>
<div class="line"><a name="l00082"></a><span class="lineno"> 82</span>&#160;}</div>
<div class="line"><a name="l00083"></a><span class="lineno"> 83</span>&#160; </div>
<div class="line"><a name="l00084"></a><span class="lineno"><a class="line" href="classarmnn_1_1_gpu_fsa_backend.html#a8e19e275c8162e34e6d8d10a9245dbc9"> 84</a></span>&#160;<a class="code" href="classarmnn_1_1_i_backend_internal.html#a72ca1cf423bda4b0a9ffb789627126de">IBackendInternal::IWorkloadFactoryPtr</a> <a class="code" href="classarmnn_1_1_gpu_fsa_backend.html#a8e19e275c8162e34e6d8d10a9245dbc9">GpuFsaBackend::CreateWorkloadFactory</a>(</div>
<div class="line"><a name="l00085"></a><span class="lineno"> 85</span>&#160; <span class="keyword">const</span> <a class="code" href="classarmnn_1_1_i_backend_internal.html#a693b40e6b94e958836aeb0410ca186bd">IBackendInternal::IMemoryManagerSharedPtr</a>&amp; memoryManager)<span class="keyword"> const</span></div>
<div class="line"><a name="l00086"></a><span class="lineno"> 86</span>&#160;<span class="keyword"></span>{</div>
<div class="line"><a name="l00087"></a><span class="lineno"> 87</span>&#160; <span class="keywordflow">return</span> std::make_unique&lt;GpuFsaWorkloadFactory&gt;(PolymorphicPointerDowncast&lt;GpuFsaMemoryManager&gt;(memoryManager));</div>
<div class="line"><a name="l00088"></a><span class="lineno"> 88</span>&#160;}</div>
<div class="line"><a name="l00089"></a><span class="lineno"> 89</span>&#160; </div>
<div class="line"><a name="l00090"></a><span class="lineno"><a class="line" href="classarmnn_1_1_gpu_fsa_backend.html#aa2d432e503780e146c08729323da519a"> 90</a></span>&#160;<a class="code" href="classarmnn_1_1_i_backend_internal.html#a72ca1cf423bda4b0a9ffb789627126de">IBackendInternal::IWorkloadFactoryPtr</a> <a class="code" href="classarmnn_1_1_gpu_fsa_backend.html#a8e19e275c8162e34e6d8d10a9245dbc9">GpuFsaBackend::CreateWorkloadFactory</a>(</div>
<div class="line"><a name="l00091"></a><span class="lineno"> 91</span>&#160; <a class="code" href="classarmnn_1_1_tensor_handle_factory_registry.html">TensorHandleFactoryRegistry</a>&amp; registry)<span class="keyword"> const</span></div>
<div class="line"><a name="l00092"></a><span class="lineno"> 92</span>&#160;<span class="keyword"></span>{</div>
<div class="line"><a name="l00093"></a><span class="lineno"> 93</span>&#160; std::shared_ptr&lt;GpuFsaMemoryManager&gt; memoryManager;</div>
<div class="line"><a name="l00094"></a><span class="lineno"> 94</span>&#160; <span class="keywordflow">if</span> (<a class="code" href="classarmnn_1_1_gpu_fsa_backend.html#adda2579a4649278c81f18a1d3915b3a0">m_UsingCustomAllocator</a>)</div>
<div class="line"><a name="l00095"></a><span class="lineno"> 95</span>&#160; {</div>
<div class="line"><a name="l00096"></a><span class="lineno"> 96</span>&#160; memoryManager = std::make_shared&lt;GpuFsaMemoryManager&gt;(<a class="code" href="classarmnn_1_1_gpu_fsa_backend.html#afe31515f8502943018e1a6244beea1a2">m_CustomAllocator</a>);</div>
<div class="line"><a name="l00097"></a><span class="lineno"> 97</span>&#160; }</div>
<div class="line"><a name="l00098"></a><span class="lineno"> 98</span>&#160; <span class="keywordflow">else</span></div>
<div class="line"><a name="l00099"></a><span class="lineno"> 99</span>&#160; {</div>
<div class="line"><a name="l00100"></a><span class="lineno"> 100</span>&#160; memoryManager = std::make_shared&lt;GpuFsaMemoryManager&gt;(std::make_unique&lt;arm_compute::CLBufferAllocator&gt;());</div>
<div class="line"><a name="l00101"></a><span class="lineno"> 101</span>&#160; }</div>
<div class="line"><a name="l00102"></a><span class="lineno"> 102</span>&#160; </div>
<div class="line"><a name="l00103"></a><span class="lineno"> 103</span>&#160; std::unique_ptr&lt;ITensorHandleFactory&gt; factory = std::make_unique&lt;GpuFsaTensorHandleFactory&gt;(memoryManager);</div>
<div class="line"><a name="l00104"></a><span class="lineno"> 104</span>&#160; </div>
<div class="line"><a name="l00105"></a><span class="lineno"> 105</span>&#160; registry.<a class="code" href="classarmnn_1_1_tensor_handle_factory_registry.html#a958ab0c60b6bfdfba5cc075211edec37">RegisterMemoryManager</a>(memoryManager);</div>
<div class="line"><a name="l00106"></a><span class="lineno"> 106</span>&#160; registry.<a class="code" href="classarmnn_1_1_tensor_handle_factory_registry.html#a05f82bd846630bb3aa8afe22ef6f15fc">RegisterFactory</a>(std::move(factory));</div>
<div class="line"><a name="l00107"></a><span class="lineno"> 107</span>&#160; </div>
<div class="line"><a name="l00108"></a><span class="lineno"> 108</span>&#160; <span class="keywordflow">return</span> std::make_unique&lt;GpuFsaWorkloadFactory&gt;(PolymorphicPointerDowncast&lt;GpuFsaMemoryManager&gt;(memoryManager));</div>
<div class="line"><a name="l00109"></a><span class="lineno"> 109</span>&#160;}</div>
<div class="line"><a name="l00110"></a><span class="lineno"> 110</span>&#160; </div>
<div class="line"><a name="l00111"></a><span class="lineno"><a class="line" href="classarmnn_1_1_gpu_fsa_backend.html#a5362d5d9510627da3c9d5db2e669d64a"> 111</a></span>&#160;<a class="code" href="classarmnn_1_1_i_backend_internal.html#a72ca1cf423bda4b0a9ffb789627126de">IBackendInternal::IWorkloadFactoryPtr</a> <a class="code" href="classarmnn_1_1_gpu_fsa_backend.html#a8e19e275c8162e34e6d8d10a9245dbc9">GpuFsaBackend::CreateWorkloadFactory</a>(</div>
<div class="line"><a name="l00112"></a><span class="lineno"> 112</span>&#160; <a class="code" href="classarmnn_1_1_tensor_handle_factory_registry.html">TensorHandleFactoryRegistry</a>&amp; registry,</div>
<div class="line"><a name="l00113"></a><span class="lineno"> 113</span>&#160; <span class="keyword">const</span> <a class="code" href="namespacearmnn.html#a5b6893cda5b69359a4244c06054da18f">ModelOptions</a>&amp;,</div>
<div class="line"><a name="l00114"></a><span class="lineno"> 114</span>&#160; <a class="code" href="namespacearmnn.html#a5b05f3b7208ec7cea3338e30057c0bac">MemorySourceFlags</a> inputFlags,</div>
<div class="line"><a name="l00115"></a><span class="lineno"> 115</span>&#160; <a class="code" href="namespacearmnn.html#a5b05f3b7208ec7cea3338e30057c0bac">MemorySourceFlags</a> outputFlags)<span class="keyword"> const</span></div>
<div class="line"><a name="l00116"></a><span class="lineno"> 116</span>&#160;<span class="keyword"></span>{</div>
<div class="line"><a name="l00117"></a><span class="lineno"> 117</span>&#160; </div>
<div class="line"><a name="l00118"></a><span class="lineno"> 118</span>&#160; <span class="comment">// To allow force import if inputFlags/outputFlags are Undefined, set it as Malloc</span></div>
<div class="line"><a name="l00119"></a><span class="lineno"> 119</span>&#160; <span class="keywordflow">if</span> (inputFlags == <span class="keyword">static_cast&lt;</span><a class="code" href="namespacearmnn.html#a5b05f3b7208ec7cea3338e30057c0bac">MemorySourceFlags</a><span class="keyword">&gt;</span>(<a class="code" href="namespacearmnn.html#a14fcd7f88d11cea0a018269dca5f9277aec0fc0100c4fc1ce4eea230c3dc10360">MemorySource::Undefined</a>))</div>
<div class="line"><a name="l00120"></a><span class="lineno"> 120</span>&#160; {</div>
<div class="line"><a name="l00121"></a><span class="lineno"> 121</span>&#160; inputFlags = <span class="keyword">static_cast&lt;</span><a class="code" href="namespacearmnn.html#a5b05f3b7208ec7cea3338e30057c0bac">MemorySourceFlags</a><span class="keyword">&gt;</span>(<a class="code" href="namespacearmnn.html#a14fcd7f88d11cea0a018269dca5f9277a1131a914388fac73e5f07b0ba0aad523">MemorySource::Malloc</a>);</div>
<div class="line"><a name="l00122"></a><span class="lineno"> 122</span>&#160; }</div>
<div class="line"><a name="l00123"></a><span class="lineno"> 123</span>&#160; <span class="keywordflow">if</span> (outputFlags == <span class="keyword">static_cast&lt;</span><a class="code" href="namespacearmnn.html#a5b05f3b7208ec7cea3338e30057c0bac">MemorySourceFlags</a><span class="keyword">&gt;</span>(<a class="code" href="namespacearmnn.html#a14fcd7f88d11cea0a018269dca5f9277aec0fc0100c4fc1ce4eea230c3dc10360">MemorySource::Undefined</a>))</div>
<div class="line"><a name="l00124"></a><span class="lineno"> 124</span>&#160; {</div>
<div class="line"><a name="l00125"></a><span class="lineno"> 125</span>&#160; outputFlags = <span class="keyword">static_cast&lt;</span><a class="code" href="namespacearmnn.html#a5b05f3b7208ec7cea3338e30057c0bac">MemorySourceFlags</a><span class="keyword">&gt;</span>(<a class="code" href="namespacearmnn.html#a14fcd7f88d11cea0a018269dca5f9277a1131a914388fac73e5f07b0ba0aad523">MemorySource::Malloc</a>);</div>
<div class="line"><a name="l00126"></a><span class="lineno"> 126</span>&#160; }</div>
<div class="line"><a name="l00127"></a><span class="lineno"> 127</span>&#160; </div>
<div class="line"><a name="l00128"></a><span class="lineno"> 128</span>&#160; std::shared_ptr&lt;GpuFsaMemoryManager&gt; memoryManager;</div>
<div class="line"><a name="l00129"></a><span class="lineno"> 129</span>&#160; <span class="keywordflow">if</span> (<a class="code" href="classarmnn_1_1_gpu_fsa_backend.html#adda2579a4649278c81f18a1d3915b3a0">m_UsingCustomAllocator</a>)</div>
<div class="line"><a name="l00130"></a><span class="lineno"> 130</span>&#160; {</div>
<div class="line"><a name="l00131"></a><span class="lineno"> 131</span>&#160; memoryManager = std::make_shared&lt;GpuFsaMemoryManager&gt;(<a class="code" href="classarmnn_1_1_gpu_fsa_backend.html#afe31515f8502943018e1a6244beea1a2">m_CustomAllocator</a>);</div>
<div class="line"><a name="l00132"></a><span class="lineno"> 132</span>&#160; }</div>
<div class="line"><a name="l00133"></a><span class="lineno"> 133</span>&#160; <span class="keywordflow">else</span></div>
<div class="line"><a name="l00134"></a><span class="lineno"> 134</span>&#160; {</div>
<div class="line"><a name="l00135"></a><span class="lineno"> 135</span>&#160; memoryManager = std::make_shared&lt;GpuFsaMemoryManager&gt;(std::make_unique&lt;arm_compute::CLBufferAllocator&gt;());</div>
<div class="line"><a name="l00136"></a><span class="lineno"> 136</span>&#160; }</div>
<div class="line"><a name="l00137"></a><span class="lineno"> 137</span>&#160; </div>
<div class="line"><a name="l00138"></a><span class="lineno"> 138</span>&#160; std::unique_ptr&lt;ITensorHandleFactory&gt; factory = std::make_unique&lt;GpuFsaTensorHandleFactory&gt;(memoryManager);</div>
<div class="line"><a name="l00139"></a><span class="lineno"> 139</span>&#160; </div>
<div class="line"><a name="l00140"></a><span class="lineno"> 140</span>&#160; registry.<a class="code" href="classarmnn_1_1_tensor_handle_factory_registry.html#a958ab0c60b6bfdfba5cc075211edec37">RegisterMemoryManager</a>(memoryManager);</div>
<div class="line"><a name="l00141"></a><span class="lineno"> 141</span>&#160; registry.<a class="code" href="classarmnn_1_1_tensor_handle_factory_registry.html#a05f82bd846630bb3aa8afe22ef6f15fc">RegisterFactory</a>(std::move(factory));</div>
<div class="line"><a name="l00142"></a><span class="lineno"> 142</span>&#160; </div>
<div class="line"><a name="l00143"></a><span class="lineno"> 143</span>&#160; <span class="keywordflow">return</span> std::make_unique&lt;GpuFsaWorkloadFactory&gt;(PolymorphicPointerDowncast&lt;GpuFsaMemoryManager&gt;(memoryManager));</div>
<div class="line"><a name="l00144"></a><span class="lineno"> 144</span>&#160;}</div>
<div class="line"><a name="l00145"></a><span class="lineno"> 145</span>&#160; </div>
<div class="line"><a name="l00146"></a><span class="lineno"><a class="line" href="classarmnn_1_1_gpu_fsa_backend.html#a63559c7f206c265f5fff5ffcc8a58e3e"> 146</a></span>&#160;std::vector&lt;ITensorHandleFactory::FactoryId&gt; <a class="code" href="classarmnn_1_1_gpu_fsa_backend.html#a63559c7f206c265f5fff5ffcc8a58e3e">GpuFsaBackend::GetHandleFactoryPreferences</a>()<span class="keyword"> const</span></div>
<div class="line"><a name="l00147"></a><span class="lineno"> 147</span>&#160;<span class="keyword"></span>{</div>
<div class="line"><a name="l00148"></a><span class="lineno"> 148</span>&#160; <span class="keywordflow">return</span> std::vector&lt;ITensorHandleFactory::FactoryId&gt; { <a class="code" href="classarmnn_1_1_gpu_fsa_tensor_handle_factory.html#acdecb5b442434112c2cc8fc48c0ea922">GpuFsaTensorHandleFactory::GetIdStatic</a>() };</div>
<div class="line"><a name="l00149"></a><span class="lineno"> 149</span>&#160;}</div>
<div class="line"><a name="l00150"></a><span class="lineno"> 150</span>&#160; </div>
<div class="line"><a name="l00151"></a><span class="lineno"><a class="line" href="classarmnn_1_1_gpu_fsa_backend.html#a531177ce89c53c6af616175b0b05e5c0"> 151</a></span>&#160;<span class="keywordtype">void</span> <a class="code" href="classarmnn_1_1_gpu_fsa_backend.html#a531177ce89c53c6af616175b0b05e5c0">GpuFsaBackend::RegisterTensorHandleFactories</a>(<a class="code" href="classarmnn_1_1_tensor_handle_factory_registry.html">TensorHandleFactoryRegistry</a>&amp; registry)</div>
<div class="line"><a name="l00152"></a><span class="lineno"> 152</span>&#160;{</div>
<div class="line"><a name="l00153"></a><span class="lineno"> 153</span>&#160; std::shared_ptr&lt;GpuFsaMemoryManager&gt; memoryManager;</div>
<div class="line"><a name="l00154"></a><span class="lineno"> 154</span>&#160; <span class="keywordflow">if</span> (<a class="code" href="classarmnn_1_1_gpu_fsa_backend.html#adda2579a4649278c81f18a1d3915b3a0">m_UsingCustomAllocator</a>)</div>
<div class="line"><a name="l00155"></a><span class="lineno"> 155</span>&#160; {</div>
<div class="line"><a name="l00156"></a><span class="lineno"> 156</span>&#160; memoryManager = std::make_shared&lt;GpuFsaMemoryManager&gt;(<a class="code" href="classarmnn_1_1_gpu_fsa_backend.html#afe31515f8502943018e1a6244beea1a2">m_CustomAllocator</a>);</div>
<div class="line"><a name="l00157"></a><span class="lineno"> 157</span>&#160; }</div>
<div class="line"><a name="l00158"></a><span class="lineno"> 158</span>&#160; <span class="keywordflow">else</span></div>
<div class="line"><a name="l00159"></a><span class="lineno"> 159</span>&#160; {</div>
<div class="line"><a name="l00160"></a><span class="lineno"> 160</span>&#160; memoryManager = std::make_shared&lt;GpuFsaMemoryManager&gt;(std::make_unique&lt;arm_compute::CLBufferAllocator&gt;());</div>
<div class="line"><a name="l00161"></a><span class="lineno"> 161</span>&#160; }</div>
<div class="line"><a name="l00162"></a><span class="lineno"> 162</span>&#160; </div>
<div class="line"><a name="l00163"></a><span class="lineno"> 163</span>&#160; std::unique_ptr&lt;ITensorHandleFactory&gt; factory = std::make_unique&lt;GpuFsaTensorHandleFactory&gt;(memoryManager);</div>
<div class="line"><a name="l00164"></a><span class="lineno"> 164</span>&#160; registry.<a class="code" href="classarmnn_1_1_tensor_handle_factory_registry.html#a958ab0c60b6bfdfba5cc075211edec37">RegisterMemoryManager</a>(memoryManager);</div>
<div class="line"><a name="l00165"></a><span class="lineno"> 165</span>&#160; registry.<a class="code" href="classarmnn_1_1_tensor_handle_factory_registry.html#a05f82bd846630bb3aa8afe22ef6f15fc">RegisterFactory</a>(std::move(factory));</div>
<div class="line"><a name="l00166"></a><span class="lineno"> 166</span>&#160; </div>
<div class="line"><a name="l00167"></a><span class="lineno"> 167</span>&#160;}</div>
<div class="line"><a name="l00168"></a><span class="lineno"> 168</span>&#160; </div>
<div class="line"><a name="l00169"></a><span class="lineno"><a class="line" href="classarmnn_1_1_gpu_fsa_backend.html#a5f7312e595427f00078975f71c61dd8f"> 169</a></span>&#160;<span class="keywordtype">void</span> <a class="code" href="classarmnn_1_1_gpu_fsa_backend.html#a531177ce89c53c6af616175b0b05e5c0">GpuFsaBackend::RegisterTensorHandleFactories</a>(<a class="code" href="classarmnn_1_1_tensor_handle_factory_registry.html">TensorHandleFactoryRegistry</a>&amp; registry,</div>
<div class="line"><a name="l00170"></a><span class="lineno"> 170</span>&#160; <a class="code" href="namespacearmnn.html#a5b05f3b7208ec7cea3338e30057c0bac">MemorySourceFlags</a> inputFlags,</div>
<div class="line"><a name="l00171"></a><span class="lineno"> 171</span>&#160; <a class="code" href="namespacearmnn.html#a5b05f3b7208ec7cea3338e30057c0bac">MemorySourceFlags</a> outputFlags)</div>
<div class="line"><a name="l00172"></a><span class="lineno"> 172</span>&#160;{</div>
<div class="line"><a name="l00173"></a><span class="lineno"> 173</span>&#160; <span class="comment">// To allow force import if inputFlags/outputFlags are Undefined, set it as Malloc</span></div>
<div class="line"><a name="l00174"></a><span class="lineno"> 174</span>&#160; <span class="keywordflow">if</span> (inputFlags == <span class="keyword">static_cast&lt;</span><a class="code" href="namespacearmnn.html#a5b05f3b7208ec7cea3338e30057c0bac">MemorySourceFlags</a><span class="keyword">&gt;</span>(<a class="code" href="namespacearmnn.html#a14fcd7f88d11cea0a018269dca5f9277aec0fc0100c4fc1ce4eea230c3dc10360">MemorySource::Undefined</a>))</div>
<div class="line"><a name="l00175"></a><span class="lineno"> 175</span>&#160; {</div>
<div class="line"><a name="l00176"></a><span class="lineno"> 176</span>&#160; inputFlags = <span class="keyword">static_cast&lt;</span><a class="code" href="namespacearmnn.html#a5b05f3b7208ec7cea3338e30057c0bac">MemorySourceFlags</a><span class="keyword">&gt;</span>(<a class="code" href="namespacearmnn.html#a14fcd7f88d11cea0a018269dca5f9277a1131a914388fac73e5f07b0ba0aad523">MemorySource::Malloc</a>);</div>
<div class="line"><a name="l00177"></a><span class="lineno"> 177</span>&#160; }</div>
<div class="line"><a name="l00178"></a><span class="lineno"> 178</span>&#160; <span class="keywordflow">if</span> (outputFlags == <span class="keyword">static_cast&lt;</span><a class="code" href="namespacearmnn.html#a5b05f3b7208ec7cea3338e30057c0bac">MemorySourceFlags</a><span class="keyword">&gt;</span>(<a class="code" href="namespacearmnn.html#a14fcd7f88d11cea0a018269dca5f9277aec0fc0100c4fc1ce4eea230c3dc10360">MemorySource::Undefined</a>))</div>
<div class="line"><a name="l00179"></a><span class="lineno"> 179</span>&#160; {</div>
<div class="line"><a name="l00180"></a><span class="lineno"> 180</span>&#160; outputFlags = <span class="keyword">static_cast&lt;</span><a class="code" href="namespacearmnn.html#a5b05f3b7208ec7cea3338e30057c0bac">MemorySourceFlags</a><span class="keyword">&gt;</span>(<a class="code" href="namespacearmnn.html#a14fcd7f88d11cea0a018269dca5f9277a1131a914388fac73e5f07b0ba0aad523">MemorySource::Malloc</a>);</div>
<div class="line"><a name="l00181"></a><span class="lineno"> 181</span>&#160; }</div>
<div class="line"><a name="l00182"></a><span class="lineno"> 182</span>&#160; </div>
<div class="line"><a name="l00183"></a><span class="lineno"> 183</span>&#160; std::shared_ptr&lt;GpuFsaMemoryManager&gt; memoryManager;</div>
<div class="line"><a name="l00184"></a><span class="lineno"> 184</span>&#160; <span class="keywordflow">if</span> (<a class="code" href="classarmnn_1_1_gpu_fsa_backend.html#adda2579a4649278c81f18a1d3915b3a0">m_UsingCustomAllocator</a>)</div>
<div class="line"><a name="l00185"></a><span class="lineno"> 185</span>&#160; {</div>
<div class="line"><a name="l00186"></a><span class="lineno"> 186</span>&#160; memoryManager = std::make_shared&lt;GpuFsaMemoryManager&gt;(<a class="code" href="classarmnn_1_1_gpu_fsa_backend.html#afe31515f8502943018e1a6244beea1a2">m_CustomAllocator</a>);</div>
<div class="line"><a name="l00187"></a><span class="lineno"> 187</span>&#160; }</div>
<div class="line"><a name="l00188"></a><span class="lineno"> 188</span>&#160; <span class="keywordflow">else</span></div>
<div class="line"><a name="l00189"></a><span class="lineno"> 189</span>&#160; {</div>
<div class="line"><a name="l00190"></a><span class="lineno"> 190</span>&#160; memoryManager = std::make_shared&lt;GpuFsaMemoryManager&gt;(std::make_unique&lt;arm_compute::CLBufferAllocator&gt;());</div>
<div class="line"><a name="l00191"></a><span class="lineno"> 191</span>&#160; }</div>
<div class="line"><a name="l00192"></a><span class="lineno"> 192</span>&#160; </div>
<div class="line"><a name="l00193"></a><span class="lineno"> 193</span>&#160; std::unique_ptr&lt;ITensorHandleFactory&gt; factory = std::make_unique&lt;GpuFsaTensorHandleFactory&gt;(memoryManager);</div>
<div class="line"><a name="l00194"></a><span class="lineno"> 194</span>&#160; registry.<a class="code" href="classarmnn_1_1_tensor_handle_factory_registry.html#a958ab0c60b6bfdfba5cc075211edec37">RegisterMemoryManager</a>(memoryManager);</div>
<div class="line"><a name="l00195"></a><span class="lineno"> 195</span>&#160; registry.<a class="code" href="classarmnn_1_1_tensor_handle_factory_registry.html#a05f82bd846630bb3aa8afe22ef6f15fc">RegisterFactory</a>(std::move(factory));</div>
<div class="line"><a name="l00196"></a><span class="lineno"> 196</span>&#160;}</div>
<div class="line"><a name="l00197"></a><span class="lineno"> 197</span>&#160; </div>
<div class="line"><a name="l00198"></a><span class="lineno"><a class="line" href="classarmnn_1_1_gpu_fsa_backend.html#a801cf3170dc777aca3e6f926d1bd70a5"> 198</a></span>&#160;<a class="code" href="classarmnn_1_1_i_backend_internal.html#ada6d56575c0fe53cf23c7ae4610c6367">IBackendInternal::IBackendContextPtr</a> <a class="code" href="classarmnn_1_1_gpu_fsa_backend.html#a801cf3170dc777aca3e6f926d1bd70a5">GpuFsaBackend::CreateBackendContext</a>(<span class="keyword">const</span> <a class="code" href="structarmnn_1_1_i_runtime_1_1_creation_options.html">IRuntime::CreationOptions</a>&amp; options)<span class="keyword"> const</span></div>
<div class="line"><a name="l00199"></a><span class="lineno"> 199</span>&#160;<span class="keyword"></span>{</div>
<div class="line"><a name="l00200"></a><span class="lineno"> 200</span>&#160; <span class="keywordflow">return</span> <a class="code" href="classarmnn_1_1_i_backend_internal.html#ada6d56575c0fe53cf23c7ae4610c6367">IBackendContextPtr</a>{<span class="keyword">new</span> <a class="code" href="classarmnn_1_1_gpu_fsa_backend_context.html">GpuFsaBackendContext</a>{options}};</div>
<div class="line"><a name="l00201"></a><span class="lineno"> 201</span>&#160;}</div>
<div class="line"><a name="l00202"></a><span class="lineno"> 202</span>&#160; </div>
<div class="line"><a name="l00203"></a><span class="lineno"><a class="line" href="classarmnn_1_1_gpu_fsa_backend.html#a68c2ef244261cc9649799284774af132"> 203</a></span>&#160;<a class="code" href="classarmnn_1_1_i_backend_internal.html#ae44a82b0e485e551a0f77150b1076e06">IBackendInternal::IBackendProfilingContextPtr</a> <a class="code" href="classarmnn_1_1_gpu_fsa_backend.html#a68c2ef244261cc9649799284774af132">GpuFsaBackend::CreateBackendProfilingContext</a>(</div>
<div class="line"><a name="l00204"></a><span class="lineno"> 204</span>&#160; <span class="keyword">const</span> <a class="code" href="structarmnn_1_1_i_runtime_1_1_creation_options.html">IRuntime::CreationOptions</a>&amp;, <a class="code" href="classarmnn_1_1_i_backend_internal.html#a335964abd41c91f7e6ef5c65865a7b98">IBackendProfilingPtr</a>&amp;)</div>
<div class="line"><a name="l00205"></a><span class="lineno"> 205</span>&#160;{</div>
<div class="line"><a name="l00206"></a><span class="lineno"> 206</span>&#160; <span class="keywordflow">return</span> <a class="code" href="classarmnn_1_1_i_backend_internal.html#ae44a82b0e485e551a0f77150b1076e06">IBackendProfilingContextPtr</a>{};</div>
<div class="line"><a name="l00207"></a><span class="lineno"> 207</span>&#160;}</div>
<div class="line"><a name="l00208"></a><span class="lineno"> 208</span>&#160; </div>
<div class="line"><a name="l00209"></a><span class="lineno"><a class="line" href="classarmnn_1_1_gpu_fsa_backend.html#a93d4285a3ea5e4e3b35578484d889daa"> 209</a></span>&#160;<a class="code" href="classarmnn_1_1_i_backend_internal.html#a11fa919c11fe46aad613b2e960fcfe90">IBackendInternal::ILayerSupportSharedPtr</a> <a class="code" href="classarmnn_1_1_gpu_fsa_backend.html#a93d4285a3ea5e4e3b35578484d889daa">GpuFsaBackend::GetLayerSupport</a>()<span class="keyword"> const</span></div>
<div class="line"><a name="l00210"></a><span class="lineno"> 210</span>&#160;<span class="keyword"></span>{</div>
<div class="line"><a name="l00211"></a><span class="lineno"> 211</span>&#160; <span class="keyword">static</span> <a class="code" href="classarmnn_1_1_i_backend_internal.html#a11fa919c11fe46aad613b2e960fcfe90">ILayerSupportSharedPtr</a> layerSupport{<span class="keyword">new</span> <a class="code" href="classarmnn_1_1_gpu_fsa_layer_support.html">GpuFsaLayerSupport</a>};</div>
<div class="line"><a name="l00212"></a><span class="lineno"> 212</span>&#160; <span class="keywordflow">return</span> layerSupport;</div>
<div class="line"><a name="l00213"></a><span class="lineno"> 213</span>&#160;}</div>
<div class="line"><a name="l00214"></a><span class="lineno"> 214</span>&#160; </div>
<div class="line"><a name="l00215"></a><span class="lineno"><a class="line" href="classarmnn_1_1_gpu_fsa_backend.html#afb75bad43612f5314622c4fa8a16e63d"> 215</a></span>&#160;std::unique_ptr&lt;ICustomAllocator&gt; <a class="code" href="classarmnn_1_1_gpu_fsa_backend.html#afb75bad43612f5314622c4fa8a16e63d">GpuFsaBackend::GetDefaultAllocator</a>()<span class="keyword"> const</span></div>
<div class="line"><a name="l00216"></a><span class="lineno"> 216</span>&#160;<span class="keyword"></span>{</div>
<div class="line"><a name="l00217"></a><span class="lineno"> 217</span>&#160; <span class="keywordflow">return</span> std::make_unique&lt;GpuFsaBackendDefaultAllocator&gt;();</div>
<div class="line"><a name="l00218"></a><span class="lineno"> 218</span>&#160;}</div>
<div class="line"><a name="l00219"></a><span class="lineno"> 219</span>&#160; </div>
<div class="line"><a name="l00220"></a><span class="lineno"><a class="line" href="classarmnn_1_1_gpu_fsa_backend.html#a339efc194a1738cd93fcdde4d948a5cd"> 220</a></span>&#160;<a class="code" href="classarmnn_1_1_optimization_views.html">OptimizationViews</a> <a class="code" href="classarmnn_1_1_gpu_fsa_backend.html#a339efc194a1738cd93fcdde4d948a5cd">GpuFsaBackend::OptimizeSubgraphView</a>(<span class="keyword">const</span> <a class="code" href="classarmnn_1_1_subgraph_view.html">SubgraphView</a>&amp; subgraph,</div>
<div class="line"><a name="l00221"></a><span class="lineno"> 221</span>&#160; <span class="keyword">const</span> <a class="code" href="namespacearmnn.html#a5b6893cda5b69359a4244c06054da18f">ModelOptions</a>&amp; modelOptions)<span class="keyword"> const</span></div>
<div class="line"><a name="l00222"></a><span class="lineno"> 222</span>&#160;<span class="keyword"></span>{</div>
<div class="line"><a name="l00223"></a><span class="lineno"> 223</span>&#160; <a class="code" href="classarmnn_1_1_optimization_views.html">OptimizationViews</a> optimizationViews(modelOptions);</div>
<div class="line"><a name="l00224"></a><span class="lineno"> 224</span>&#160; </div>
<div class="line"><a name="l00225"></a><span class="lineno"> 225</span>&#160; <span class="keyword">using namespace </span>arm_compute::experimental::dynamic_fusion;</div>
<div class="line"><a name="l00226"></a><span class="lineno"> 226</span>&#160; </div>
<div class="line"><a name="l00227"></a><span class="lineno"> 227</span>&#160; <span class="keyword">auto</span> it = subgraph.<a class="code" href="classarmnn_1_1_subgraph_view.html#afc40490fb4f488bd2a211e81c06a6971">end</a>();</div>
<div class="line"><a name="l00228"></a><span class="lineno"> 228</span>&#160; std::map&lt;LayerGuid, Layer*&gt; untouched;</div>
<div class="line"><a name="l00229"></a><span class="lineno"> 229</span>&#160; <span class="keywordflow">while</span> (it != subgraph.<a class="code" href="classarmnn_1_1_subgraph_view.html#ad2570202bb366163f8b4660bfe78c49d">begin</a>())</div>
<div class="line"><a name="l00230"></a><span class="lineno"> 230</span>&#160; {</div>
<div class="line"><a name="l00231"></a><span class="lineno"> 231</span>&#160; --it;</div>
<div class="line"><a name="l00232"></a><span class="lineno"> 232</span>&#160; <a class="code" href="classarmnn_1_1_layer.html">Layer</a>&amp; base = *(PolymorphicDowncast&lt;Layer*&gt;(*it));</div>
<div class="line"><a name="l00233"></a><span class="lineno"> 233</span>&#160; untouched.insert({base.<a class="code" href="classarmnn_1_1_layer.html#a8dc12f0ee5b232d397bd18ced1a72a64">GetGuid</a>(), &amp;base});</div>
<div class="line"><a name="l00234"></a><span class="lineno"> 234</span>&#160; }</div>
<div class="line"><a name="l00235"></a><span class="lineno"> 235</span>&#160; </div>
<div class="line"><a name="l00236"></a><span class="lineno"> 236</span>&#160; <a class="code" href="classarmnn_1_1_gpu_fsa_layer_support.html">GpuFsaLayerSupport</a> supportChecker;</div>
<div class="line"><a name="l00237"></a><span class="lineno"> 237</span>&#160; it = subgraph.<a class="code" href="classarmnn_1_1_subgraph_view.html#afc40490fb4f488bd2a211e81c06a6971">end</a>();</div>
<div class="line"><a name="l00238"></a><span class="lineno"> 238</span>&#160; arm_compute::CLCompileContext* compileCtx = &amp;(arm_compute::CLKernelLibrary::get().get_compile_context());</div>
<div class="line"><a name="l00239"></a><span class="lineno"> 239</span>&#160; </div>
<div class="line"><a name="l00240"></a><span class="lineno"> 240</span>&#160; <span class="comment">// Setup the GpuWokloadContext which will exist for the lifetime of the Graph. This contains the TensorInfos</span></div>
<div class="line"><a name="l00241"></a><span class="lineno"> 241</span>&#160; std::shared_ptr&lt;GpuWorkloadContext&gt; workloadContext = std::make_shared&lt;GpuWorkloadContext&gt;(compileCtx);</div>
<div class="line"><a name="l00242"></a><span class="lineno"> 242</span>&#160; <span class="keywordflow">while</span> (it != subgraph.<a class="code" href="classarmnn_1_1_subgraph_view.html#ad2570202bb366163f8b4660bfe78c49d">begin</a>())</div>
<div class="line"><a name="l00243"></a><span class="lineno"> 243</span>&#160; {</div>
<div class="line"><a name="l00244"></a><span class="lineno"> 244</span>&#160; --it;</div>
<div class="line"><a name="l00245"></a><span class="lineno"> 245</span>&#160; <a class="code" href="classarmnn_1_1_layer.html">Layer</a>&amp; base = *(PolymorphicDowncast&lt;Layer*&gt;(*it));</div>
<div class="line"><a name="l00246"></a><span class="lineno"> 246</span>&#160; <span class="comment">// Create a GpuFsaPreCompiledBlob, this contains all of the information needed to execute an operator</span></div>
<div class="line"><a name="l00247"></a><span class="lineno"> 247</span>&#160; <a class="code" href="structarmnn_1_1_gpu_fsa_pre_compiled_blob.html">GpuFsaPreCompiledBlob</a>* preCompiledBlobPtr = <span class="keyword">new</span> <a class="code" href="structarmnn_1_1_gpu_fsa_pre_compiled_blob.html">GpuFsaPreCompiledBlob</a>();</div>
<div class="line"><a name="l00248"></a><span class="lineno"> 248</span>&#160; preCompiledBlobPtr-&gt;<a class="code" href="structarmnn_1_1_gpu_fsa_pre_compiled_blob.html#a255c9012137b149ffb46d83c23f2df43">workloadContext</a> = workloadContext;</div>
<div class="line"><a name="l00249"></a><span class="lineno"> 249</span>&#160; preCompiledBlobPtr-&gt;<a class="code" href="structarmnn_1_1_gpu_fsa_pre_compiled_blob.html#a14f92a9f65e32c3da896e7b1d45abd02">sketch</a> = std::make_unique&lt;GpuWorkloadSketch&gt;(workloadContext.get());</div>
<div class="line"><a name="l00250"></a><span class="lineno"> 250</span>&#160; </div>
<div class="line"><a name="l00251"></a><span class="lineno"> 251</span>&#160; <span class="comment">// Configure and setup the sketch for each supported op. Their data will be wrapped into a PreCompiled layer</span></div>
<div class="line"><a name="l00252"></a><span class="lineno"> 252</span>&#160; <span class="keywordflow">switch</span> (base.<a class="code" href="classarmnn_1_1_layer.html#ad8e15c530c929ab823d89ae9fd2d3f11">GetType</a>())</div>
<div class="line"><a name="l00253"></a><span class="lineno"> 253</span>&#160; {</div>
<div class="line"><a name="l00254"></a><span class="lineno"> 254</span>&#160; <span class="keywordflow">case</span> (<a class="code" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4aa9a62e70841c4d06dd16306a85700d36">LayerType::Activation</a>):</div>
<div class="line"><a name="l00255"></a><span class="lineno"> 255</span>&#160; {</div>
<div class="line"><a name="l00256"></a><span class="lineno"> 256</span>&#160; <span class="keyword">auto</span> desc = PolymorphicDowncast&lt;const ActivationDescriptor*&gt;(&amp;base.<a class="code" href="classarmnn_1_1_layer.html#a94a487f29157eeec10e2f9a372487bcc">GetParameters</a>());</div>
<div class="line"><a name="l00257"></a><span class="lineno"> 257</span>&#160; <span class="keyword">auto</span> input = base.<a class="code" href="classarmnn_1_1_layer.html#acf8b8e23bf647836592982f97088d375">GetInputSlot</a>(0).<a class="code" href="classarmnn_1_1_input_slot.html#a9effd325a6d512a3f8ff4bd207d53255">GetConnectedOutputSlot</a>()-&gt;<a class="code" href="classarmnn_1_1_output_slot.html#ada2ad7d1caeeb4ef6195c8925fad6a65">GetTensorInfo</a>();</div>
<div class="line"><a name="l00258"></a><span class="lineno"> 258</span>&#160; <a class="code" href="namespacearmnn.html#a40f8d250a71183a6cd1b700b287fc32c">GpuFsaActivationCreateOp</a>(preCompiledBlobPtr, input, *desc);</div>
<div class="line"><a name="l00259"></a><span class="lineno"> 259</span>&#160; <span class="keywordflow">break</span>;</div>
<div class="line"><a name="l00260"></a><span class="lineno"> 260</span>&#160; }</div>
<div class="line"><a name="l00261"></a><span class="lineno"> 261</span>&#160; <span class="keywordflow">case</span> (<a class="code" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a4cd9f3996d60790cd11c04f842ebc43c">LayerType::Cast</a>):</div>
<div class="line"><a name="l00262"></a><span class="lineno"> 262</span>&#160; {</div>
<div class="line"><a name="l00263"></a><span class="lineno"> 263</span>&#160; <span class="keyword">auto</span> input = base.<a class="code" href="classarmnn_1_1_layer.html#acf8b8e23bf647836592982f97088d375">GetInputSlot</a>(0).<a class="code" href="classarmnn_1_1_input_slot.html#a9effd325a6d512a3f8ff4bd207d53255">GetConnectedOutputSlot</a>()-&gt;<a class="code" href="classarmnn_1_1_output_slot.html#ada2ad7d1caeeb4ef6195c8925fad6a65">GetTensorInfo</a>();</div>
<div class="line"><a name="l00264"></a><span class="lineno"> 264</span>&#160; <span class="keyword">auto</span> output = base.<a class="code" href="classarmnn_1_1_layer.html#a0e36688a43c35668d8db5257274c68fe">GetOutputSlot</a>(0).<a class="code" href="classarmnn_1_1_output_slot.html#ada2ad7d1caeeb4ef6195c8925fad6a65">GetTensorInfo</a>();</div>
<div class="line"><a name="l00265"></a><span class="lineno"> 265</span>&#160; <a class="code" href="namespacearmnn.html#aa2889978c1d194097838a2a0e671da60">GpuFsaCastCreateOp</a>(preCompiledBlobPtr, input, output);</div>
<div class="line"><a name="l00266"></a><span class="lineno"> 266</span>&#160; <span class="keywordflow">break</span>;</div>
<div class="line"><a name="l00267"></a><span class="lineno"> 267</span>&#160; }</div>
<div class="line"><a name="l00268"></a><span class="lineno"> 268</span>&#160; <span class="keywordflow">case</span> (<a class="code" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4adb033d2f81b68f9a17e8f62de69fed4a">LayerType::Convolution2d</a>):</div>
<div class="line"><a name="l00269"></a><span class="lineno"> 269</span>&#160; {</div>
<div class="line"><a name="l00270"></a><span class="lineno"> 270</span>&#160; <span class="keyword">auto</span> input = base.<a class="code" href="classarmnn_1_1_layer.html#acf8b8e23bf647836592982f97088d375">GetInputSlot</a>(0).<a class="code" href="classarmnn_1_1_input_slot.html#a9effd325a6d512a3f8ff4bd207d53255">GetConnectedOutputSlot</a>()-&gt;<a class="code" href="classarmnn_1_1_output_slot.html#ada2ad7d1caeeb4ef6195c8925fad6a65">GetTensorInfo</a>();</div>
<div class="line"><a name="l00271"></a><span class="lineno"> 271</span>&#160; <span class="keyword">auto</span> weights = base.<a class="code" href="classarmnn_1_1_layer.html#acf8b8e23bf647836592982f97088d375">GetInputSlot</a>(1).<a class="code" href="classarmnn_1_1_input_slot.html#a9effd325a6d512a3f8ff4bd207d53255">GetConnectedOutputSlot</a>()-&gt;<a class="code" href="classarmnn_1_1_output_slot.html#ada2ad7d1caeeb4ef6195c8925fad6a65">GetTensorInfo</a>();</div>
<div class="line"><a name="l00272"></a><span class="lineno"> 272</span>&#160; </div>
<div class="line"><a name="l00273"></a><span class="lineno"> 273</span>&#160; <span class="keyword">auto</span> desc = PolymorphicDowncast&lt;const Convolution2dDescriptor*&gt;(&amp;base.<a class="code" href="classarmnn_1_1_layer.html#a94a487f29157eeec10e2f9a372487bcc">GetParameters</a>());</div>
<div class="line"><a name="l00274"></a><span class="lineno"> 274</span>&#160; <span class="keywordflow">if</span> (desc-&gt;m_BiasEnabled)</div>
<div class="line"><a name="l00275"></a><span class="lineno"> 275</span>&#160; {</div>
<div class="line"><a name="l00276"></a><span class="lineno"> 276</span>&#160; <span class="keyword">auto</span> bias = base.<a class="code" href="classarmnn_1_1_layer.html#acf8b8e23bf647836592982f97088d375">GetInputSlot</a>(2).<a class="code" href="classarmnn_1_1_input_slot.html#a9effd325a6d512a3f8ff4bd207d53255">GetConnectedOutputSlot</a>()-&gt;<a class="code" href="classarmnn_1_1_output_slot.html#ada2ad7d1caeeb4ef6195c8925fad6a65">GetTensorInfo</a>();</div>
<div class="line"><a name="l00277"></a><span class="lineno"> 277</span>&#160; <a class="code" href="namespacearmnn.html#ab8a797269fd9db3b8832998f10ad9688">GpuFsaConvolution2dCreateOp</a>(preCompiledBlobPtr,</div>
<div class="line"><a name="l00278"></a><span class="lineno"> 278</span>&#160; input,</div>
<div class="line"><a name="l00279"></a><span class="lineno"> 279</span>&#160; *desc,</div>
<div class="line"><a name="l00280"></a><span class="lineno"> 280</span>&#160; weights,</div>
<div class="line"><a name="l00281"></a><span class="lineno"> 281</span>&#160; bias);</div>
<div class="line"><a name="l00282"></a><span class="lineno"> 282</span>&#160; }</div>
<div class="line"><a name="l00283"></a><span class="lineno"> 283</span>&#160; <span class="keywordflow">else</span></div>
<div class="line"><a name="l00284"></a><span class="lineno"> 284</span>&#160; {</div>
<div class="line"><a name="l00285"></a><span class="lineno"> 285</span>&#160; <a class="code" href="namespacearmnn.html#ab8a797269fd9db3b8832998f10ad9688">GpuFsaConvolution2dCreateOp</a>(preCompiledBlobPtr,</div>
<div class="line"><a name="l00286"></a><span class="lineno"> 286</span>&#160; input,</div>
<div class="line"><a name="l00287"></a><span class="lineno"> 287</span>&#160; *desc,</div>
<div class="line"><a name="l00288"></a><span class="lineno"> 288</span>&#160; weights,</div>
<div class="line"><a name="l00289"></a><span class="lineno"> 289</span>&#160; <a class="code" href="structarmnn_1_1_empty_optional.html">EmptyOptional</a>());</div>
<div class="line"><a name="l00290"></a><span class="lineno"> 290</span>&#160; }</div>
<div class="line"><a name="l00291"></a><span class="lineno"> 291</span>&#160; <span class="keywordflow">break</span>;</div>
<div class="line"><a name="l00292"></a><span class="lineno"> 292</span>&#160; }</div>
<div class="line"><a name="l00293"></a><span class="lineno"> 293</span>&#160; <span class="keywordflow">case</span> (<a class="code" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a9882ff3cfed27d6161c20a305e7a3484">LayerType::BatchMatMul</a>):</div>
<div class="line"><a name="l00294"></a><span class="lineno"> 294</span>&#160; {</div>
<div class="line"><a name="l00295"></a><span class="lineno"> 295</span>&#160; <span class="keyword">auto</span> input0 = base.<a class="code" href="classarmnn_1_1_layer.html#acf8b8e23bf647836592982f97088d375">GetInputSlot</a>(0).<a class="code" href="classarmnn_1_1_input_slot.html#a9effd325a6d512a3f8ff4bd207d53255">GetConnectedOutputSlot</a>()-&gt;<a class="code" href="classarmnn_1_1_output_slot.html#ada2ad7d1caeeb4ef6195c8925fad6a65">GetTensorInfo</a>();</div>
<div class="line"><a name="l00296"></a><span class="lineno"> 296</span>&#160; <span class="keyword">auto</span> input1 = base.<a class="code" href="classarmnn_1_1_layer.html#acf8b8e23bf647836592982f97088d375">GetInputSlot</a>(1).<a class="code" href="classarmnn_1_1_input_slot.html#a9effd325a6d512a3f8ff4bd207d53255">GetConnectedOutputSlot</a>()-&gt;<a class="code" href="classarmnn_1_1_output_slot.html#ada2ad7d1caeeb4ef6195c8925fad6a65">GetTensorInfo</a>();</div>
<div class="line"><a name="l00297"></a><span class="lineno"> 297</span>&#160; <span class="keyword">auto</span> desc = PolymorphicDowncast&lt;const BatchMatMulDescriptor*&gt;(&amp;base.<a class="code" href="classarmnn_1_1_layer.html#a94a487f29157eeec10e2f9a372487bcc">GetParameters</a>());</div>
<div class="line"><a name="l00298"></a><span class="lineno"> 298</span>&#160; <a class="code" href="namespacearmnn.html#a1334119d4ee3de23f273ee91aa256cbd">GpuFsaBatchMatMulCreateOp</a>(preCompiledBlobPtr, input0, input1, *desc);</div>
<div class="line"><a name="l00299"></a><span class="lineno"> 299</span>&#160; <span class="keywordflow">break</span>;</div>
<div class="line"><a name="l00300"></a><span class="lineno"> 300</span>&#160; }</div>
<div class="line"><a name="l00301"></a><span class="lineno"> 301</span>&#160; <span class="keywordflow">case</span> (<a class="code" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4af97adbfc88b7012a0243215b1076e7e7">LayerType::DepthwiseConvolution2d</a>):</div>
<div class="line"><a name="l00302"></a><span class="lineno"> 302</span>&#160; {</div>
<div class="line"><a name="l00303"></a><span class="lineno"> 303</span>&#160; <span class="keyword">auto</span> input = base.<a class="code" href="classarmnn_1_1_layer.html#acf8b8e23bf647836592982f97088d375">GetInputSlot</a>(0).<a class="code" href="classarmnn_1_1_input_slot.html#a9effd325a6d512a3f8ff4bd207d53255">GetConnectedOutputSlot</a>()-&gt;<a class="code" href="classarmnn_1_1_output_slot.html#ada2ad7d1caeeb4ef6195c8925fad6a65">GetTensorInfo</a>();</div>
<div class="line"><a name="l00304"></a><span class="lineno"> 304</span>&#160; <span class="keyword">auto</span> weights = base.<a class="code" href="classarmnn_1_1_layer.html#acf8b8e23bf647836592982f97088d375">GetInputSlot</a>(1).<a class="code" href="classarmnn_1_1_input_slot.html#a9effd325a6d512a3f8ff4bd207d53255">GetConnectedOutputSlot</a>()-&gt;<a class="code" href="classarmnn_1_1_output_slot.html#ada2ad7d1caeeb4ef6195c8925fad6a65">GetTensorInfo</a>();</div>
<div class="line"><a name="l00305"></a><span class="lineno"> 305</span>&#160; </div>
<div class="line"><a name="l00306"></a><span class="lineno"> 306</span>&#160; <span class="keyword">auto</span> desc = PolymorphicDowncast&lt;const DepthwiseConvolution2dDescriptor*&gt;(&amp;base.<a class="code" href="classarmnn_1_1_layer.html#a94a487f29157eeec10e2f9a372487bcc">GetParameters</a>());</div>
<div class="line"><a name="l00307"></a><span class="lineno"> 307</span>&#160; <span class="keywordflow">if</span> (desc-&gt;m_BiasEnabled)</div>
<div class="line"><a name="l00308"></a><span class="lineno"> 308</span>&#160; {</div>
<div class="line"><a name="l00309"></a><span class="lineno"> 309</span>&#160; <span class="keyword">auto</span> bias = base.<a class="code" href="classarmnn_1_1_layer.html#acf8b8e23bf647836592982f97088d375">GetInputSlot</a>(2).<a class="code" href="classarmnn_1_1_input_slot.html#a9effd325a6d512a3f8ff4bd207d53255">GetConnectedOutputSlot</a>()-&gt;<a class="code" href="classarmnn_1_1_output_slot.html#ada2ad7d1caeeb4ef6195c8925fad6a65">GetTensorInfo</a>();</div>
<div class="line"><a name="l00310"></a><span class="lineno"> 310</span>&#160; <a class="code" href="namespacearmnn.html#a5bdf4240a1fbda27b5fc84baba721781">GpuFsaDepthwiseConvolution2dCreateOp</a>(preCompiledBlobPtr,</div>
<div class="line"><a name="l00311"></a><span class="lineno"> 311</span>&#160; input,</div>
<div class="line"><a name="l00312"></a><span class="lineno"> 312</span>&#160; *desc,</div>
<div class="line"><a name="l00313"></a><span class="lineno"> 313</span>&#160; weights,</div>
<div class="line"><a name="l00314"></a><span class="lineno"> 314</span>&#160; bias);</div>
<div class="line"><a name="l00315"></a><span class="lineno"> 315</span>&#160; }</div>
<div class="line"><a name="l00316"></a><span class="lineno"> 316</span>&#160; <span class="keywordflow">else</span></div>
<div class="line"><a name="l00317"></a><span class="lineno"> 317</span>&#160; {</div>
<div class="line"><a name="l00318"></a><span class="lineno"> 318</span>&#160; <a class="code" href="namespacearmnn.html#a5bdf4240a1fbda27b5fc84baba721781">GpuFsaDepthwiseConvolution2dCreateOp</a>(preCompiledBlobPtr,</div>
<div class="line"><a name="l00319"></a><span class="lineno"> 319</span>&#160; input,</div>
<div class="line"><a name="l00320"></a><span class="lineno"> 320</span>&#160; *desc,</div>
<div class="line"><a name="l00321"></a><span class="lineno"> 321</span>&#160; weights,</div>
<div class="line"><a name="l00322"></a><span class="lineno"> 322</span>&#160; <a class="code" href="structarmnn_1_1_empty_optional.html">EmptyOptional</a>());</div>
<div class="line"><a name="l00323"></a><span class="lineno"> 323</span>&#160; }</div>
<div class="line"><a name="l00324"></a><span class="lineno"> 324</span>&#160; <span class="keywordflow">break</span>;</div>
<div class="line"><a name="l00325"></a><span class="lineno"> 325</span>&#160; }</div>
<div class="line"><a name="l00326"></a><span class="lineno"> 326</span>&#160; <span class="keywordflow">case</span> <a class="code" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a699bcffd93aff3022014b9efc9eaefd1">LayerType::ElementwiseBinary</a>:</div>
<div class="line"><a name="l00327"></a><span class="lineno"> 327</span>&#160; {</div>
<div class="line"><a name="l00328"></a><span class="lineno"> 328</span>&#160; <span class="keyword">auto</span> desc = PolymorphicDowncast&lt;const ElementwiseBinaryDescriptor *&gt;(&amp;base.<a class="code" href="classarmnn_1_1_layer.html#a94a487f29157eeec10e2f9a372487bcc">GetParameters</a>());</div>
<div class="line"><a name="l00329"></a><span class="lineno"> 329</span>&#160; <span class="keyword">auto</span> input0 = base.<a class="code" href="classarmnn_1_1_layer.html#acf8b8e23bf647836592982f97088d375">GetInputSlot</a>(0).<a class="code" href="classarmnn_1_1_input_slot.html#a9effd325a6d512a3f8ff4bd207d53255">GetConnectedOutputSlot</a>()-&gt;<a class="code" href="classarmnn_1_1_output_slot.html#ada2ad7d1caeeb4ef6195c8925fad6a65">GetTensorInfo</a>();</div>
<div class="line"><a name="l00330"></a><span class="lineno"> 330</span>&#160; <span class="keyword">auto</span> input1 = base.<a class="code" href="classarmnn_1_1_layer.html#acf8b8e23bf647836592982f97088d375">GetInputSlot</a>(1).<a class="code" href="classarmnn_1_1_input_slot.html#a9effd325a6d512a3f8ff4bd207d53255">GetConnectedOutputSlot</a>()-&gt;<a class="code" href="classarmnn_1_1_output_slot.html#ada2ad7d1caeeb4ef6195c8925fad6a65">GetTensorInfo</a>();</div>
<div class="line"><a name="l00331"></a><span class="lineno"> 331</span>&#160; <a class="code" href="namespacearmnn.html#ab9fda8f44398cd4c531c9ac136f2a1bf">GpuFsaElementwiseBinaryCreateOp</a>(preCompiledBlobPtr, input0, input1, *desc);</div>
<div class="line"><a name="l00332"></a><span class="lineno"> 332</span>&#160; <span class="keywordflow">break</span>;</div>
<div class="line"><a name="l00333"></a><span class="lineno"> 333</span>&#160; }</div>
<div class="line"><a name="l00334"></a><span class="lineno"> 334</span>&#160; <span class="keywordflow">case</span> (<a class="code" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4ad662867a41bfb30b9f75dda2b5849001">LayerType::Pooling2d</a>):</div>
<div class="line"><a name="l00335"></a><span class="lineno"> 335</span>&#160; {</div>
<div class="line"><a name="l00336"></a><span class="lineno"> 336</span>&#160; <span class="keyword">auto</span> input = base.<a class="code" href="classarmnn_1_1_layer.html#acf8b8e23bf647836592982f97088d375">GetInputSlot</a>(0).<a class="code" href="classarmnn_1_1_input_slot.html#a9effd325a6d512a3f8ff4bd207d53255">GetConnectedOutputSlot</a>()-&gt;<a class="code" href="classarmnn_1_1_output_slot.html#ada2ad7d1caeeb4ef6195c8925fad6a65">GetTensorInfo</a>();</div>
<div class="line"><a name="l00337"></a><span class="lineno"> 337</span>&#160; <span class="keyword">auto</span> desc = PolymorphicDowncast&lt;const Pooling2dDescriptor*&gt;(&amp;base.<a class="code" href="classarmnn_1_1_layer.html#a94a487f29157eeec10e2f9a372487bcc">GetParameters</a>());</div>
<div class="line"><a name="l00338"></a><span class="lineno"> 338</span>&#160; <a class="code" href="namespacearmnn.html#abcce06cfe2568f392b05010579e83ae0">GpuFsaPooling2dCreateOp</a>(preCompiledBlobPtr, input, *desc);</div>
<div class="line"><a name="l00339"></a><span class="lineno"> 339</span>&#160; <span class="keywordflow">break</span>;</div>
<div class="line"><a name="l00340"></a><span class="lineno"> 340</span>&#160; }</div>
<div class="line"><a name="l00341"></a><span class="lineno"> 341</span>&#160; <span class="keywordflow">case</span> <a class="code" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4aa7c59ccedc6a3bd90c17f3b990afefad">LayerType::Reshape</a>:</div>
<div class="line"><a name="l00342"></a><span class="lineno"> 342</span>&#160; {</div>
<div class="line"><a name="l00343"></a><span class="lineno"> 343</span>&#160; <span class="keyword">auto</span> input = base.<a class="code" href="classarmnn_1_1_layer.html#acf8b8e23bf647836592982f97088d375">GetInputSlot</a>(0).<a class="code" href="classarmnn_1_1_input_slot.html#a9effd325a6d512a3f8ff4bd207d53255">GetConnectedOutputSlot</a>()-&gt;<a class="code" href="classarmnn_1_1_output_slot.html#ada2ad7d1caeeb4ef6195c8925fad6a65">GetTensorInfo</a>();</div>
<div class="line"><a name="l00344"></a><span class="lineno"> 344</span>&#160; <span class="keyword">auto</span> desc = PolymorphicDowncast&lt;const ReshapeDescriptor*&gt;(&amp;base.<a class="code" href="classarmnn_1_1_layer.html#a94a487f29157eeec10e2f9a372487bcc">GetParameters</a>());</div>
<div class="line"><a name="l00345"></a><span class="lineno"> 345</span>&#160; <a class="code" href="namespacearmnn.html#a4cc1900a15abbcc19a632e99c7c2120f">GpuFsaReshapeCreateOp</a>(preCompiledBlobPtr, input, *desc);</div>
<div class="line"><a name="l00346"></a><span class="lineno"> 346</span>&#160; </div>
<div class="line"><a name="l00347"></a><span class="lineno"> 347</span>&#160; <span class="keywordflow">break</span>;</div>
<div class="line"><a name="l00348"></a><span class="lineno"> 348</span>&#160; }</div>
<div class="line"><a name="l00349"></a><span class="lineno"> 349</span>&#160; <span class="keywordflow">case</span> (<a class="code" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a9d723d04c40bfd81835c0766a698cf63">LayerType::Resize</a>):</div>
<div class="line"><a name="l00350"></a><span class="lineno"> 350</span>&#160; {</div>
<div class="line"><a name="l00351"></a><span class="lineno"> 351</span>&#160; <span class="keyword">auto</span> input = base.<a class="code" href="classarmnn_1_1_layer.html#acf8b8e23bf647836592982f97088d375">GetInputSlot</a>(0).<a class="code" href="classarmnn_1_1_input_slot.html#a9effd325a6d512a3f8ff4bd207d53255">GetConnectedOutputSlot</a>()-&gt;<a class="code" href="classarmnn_1_1_output_slot.html#ada2ad7d1caeeb4ef6195c8925fad6a65">GetTensorInfo</a>();</div>
<div class="line"><a name="l00352"></a><span class="lineno"> 352</span>&#160; <span class="keyword">auto</span> desc = PolymorphicDowncast&lt;const ResizeDescriptor*&gt;(&amp;base.<a class="code" href="classarmnn_1_1_layer.html#a94a487f29157eeec10e2f9a372487bcc">GetParameters</a>());</div>
<div class="line"><a name="l00353"></a><span class="lineno"> 353</span>&#160; <a class="code" href="namespacearmnn.html#a67faae0ae4e9d94069de01964d3fa0a0">GpuFsaResizeCreateOp</a>(preCompiledBlobPtr, input, *desc);</div>
<div class="line"><a name="l00354"></a><span class="lineno"> 354</span>&#160; <span class="keywordflow">break</span>;</div>
<div class="line"><a name="l00355"></a><span class="lineno"> 355</span>&#160; }</div>
<div class="line"><a name="l00356"></a><span class="lineno"> 356</span>&#160; <span class="keywordflow">case</span> (<a class="code" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a31d953b9d49a6b4378f45097047976d0">LayerType::Softmax</a>):</div>
<div class="line"><a name="l00357"></a><span class="lineno"> 357</span>&#160; {</div>
<div class="line"><a name="l00358"></a><span class="lineno"> 358</span>&#160; <span class="keyword">auto</span> input = base.<a class="code" href="classarmnn_1_1_layer.html#acf8b8e23bf647836592982f97088d375">GetInputSlot</a>(0).<a class="code" href="classarmnn_1_1_input_slot.html#a9effd325a6d512a3f8ff4bd207d53255">GetConnectedOutputSlot</a>()-&gt;<a class="code" href="classarmnn_1_1_output_slot.html#ada2ad7d1caeeb4ef6195c8925fad6a65">GetTensorInfo</a>();</div>
<div class="line"><a name="l00359"></a><span class="lineno"> 359</span>&#160; <span class="keyword">auto</span> output = base.<a class="code" href="classarmnn_1_1_layer.html#a0e36688a43c35668d8db5257274c68fe">GetOutputSlot</a>(0).<a class="code" href="classarmnn_1_1_output_slot.html#ada2ad7d1caeeb4ef6195c8925fad6a65">GetTensorInfo</a>();</div>
<div class="line"><a name="l00360"></a><span class="lineno"> 360</span>&#160; </div>
<div class="line"><a name="l00361"></a><span class="lineno"> 361</span>&#160; <span class="keyword">auto</span> desc = PolymorphicDowncast&lt;const SoftmaxDescriptor*&gt;(&amp;base.<a class="code" href="classarmnn_1_1_layer.html#a94a487f29157eeec10e2f9a372487bcc">GetParameters</a>());</div>
<div class="line"><a name="l00362"></a><span class="lineno"> 362</span>&#160; <a class="code" href="namespacearmnn.html#af347e46317f7427d35f87c6fae065bcb">GpuFsaSoftmaxCreateOp</a>(preCompiledBlobPtr,</div>
<div class="line"><a name="l00363"></a><span class="lineno"> 363</span>&#160; input,</div>
<div class="line"><a name="l00364"></a><span class="lineno"> 364</span>&#160; output,</div>
<div class="line"><a name="l00365"></a><span class="lineno"> 365</span>&#160; *desc);</div>
<div class="line"><a name="l00366"></a><span class="lineno"> 366</span>&#160; <span class="keywordflow">break</span>;</div>
<div class="line"><a name="l00367"></a><span class="lineno"> 367</span>&#160; }</div>
<div class="line"><a name="l00368"></a><span class="lineno"> 368</span>&#160; <span class="keywordflow">default</span>:</div>
<div class="line"><a name="l00369"></a><span class="lineno"> 369</span>&#160; <span class="comment">// unsupported layer for GpuFsa backend</span></div>
<div class="line"><a name="l00370"></a><span class="lineno"> 370</span>&#160; <span class="keywordflow">continue</span>;</div>
<div class="line"><a name="l00371"></a><span class="lineno"> 371</span>&#160; }</div>
<div class="line"><a name="l00372"></a><span class="lineno"> 372</span>&#160; </div>
<div class="line"><a name="l00373"></a><span class="lineno"> 373</span>&#160; <span class="keyword">auto</span> compiledBlob =</div>
<div class="line"><a name="l00374"></a><span class="lineno"> 374</span>&#160; std::make_unique&lt;PreCompiledObjectPtr&gt;(preCompiledBlobPtr, DeleteAsType&lt;GpuFsaPreCompiledBlob&gt;);</div>
<div class="line"><a name="l00375"></a><span class="lineno"> 375</span>&#160; </div>
<div class="line"><a name="l00376"></a><span class="lineno"> 376</span>&#160; <a class="code" href="classarmnn_1_1_i_connectable_layer.html">IConnectableLayer</a>* preCompiledLayer = optimizationViews.<a class="code" href="classarmnn_1_1_optimization_views.html#ad04187fe81f68558b15b6049b2da9cf9">GetINetwork</a>()-&gt;<a class="code" href="classarmnn_1_1_i_network.html#aee3a15d2fa419f50a8ac45e6d3c11e16">AddPrecompiledLayer</a>(</div>
<div class="line"><a name="l00377"></a><span class="lineno"> 377</span>&#160; <a class="code" href="structarmnn_1_1_pre_compiled_descriptor.html">PreCompiledDescriptor</a>(base.<a class="code" href="classarmnn_1_1_layer.html#abc0660dc440c8a285b456c9ef6383c26">GetNumInputSlots</a>(), base.<a class="code" href="classarmnn_1_1_layer.html#a1594bddc87d6477df300317658f566bb">GetNumOutputSlots</a>()),</div>
<div class="line"><a name="l00378"></a><span class="lineno"> 378</span>&#160; std::move(*compiledBlob),</div>
<div class="line"><a name="l00379"></a><span class="lineno"> 379</span>&#160; <a class="code" href="classarmnn_1_1_optional.html">armnn::Optional&lt;BackendId&gt;</a>(<a class="code" href="classarmnn_1_1_gpu_fsa_backend.html#a6c6c3d137a7792e264a89cc40ea94bb0">GetId</a>()),</div>
<div class="line"><a name="l00380"></a><span class="lineno"> 380</span>&#160; <span class="stringliteral">&quot;GpuFsa_Pre_Compiled_Layer&quot;</span>);</div>
<div class="line"><a name="l00381"></a><span class="lineno"> 381</span>&#160; </div>
<div class="line"><a name="l00382"></a><span class="lineno"> 382</span>&#160; <span class="comment">// Copy the output tensor infos from sub-graph</span></div>
<div class="line"><a name="l00383"></a><span class="lineno"> 383</span>&#160; <span class="keywordflow">for</span> (<span class="keywordtype">unsigned</span> <span class="keywordtype">int</span> i = 0; i &lt; subgraph.<a class="code" href="classarmnn_1_1_subgraph_view.html#a59a4100374e80a3504f4bb4d13695d0b">GetNumOutputSlots</a>(); i++)</div>
<div class="line"><a name="l00384"></a><span class="lineno"> 384</span>&#160; {</div>
<div class="line"><a name="l00385"></a><span class="lineno"> 385</span>&#160; preCompiledLayer-&gt;<a class="code" href="classarmnn_1_1_i_connectable_layer.html#a80ac4eda2e7f2757ec9dd96fc96dbd16">GetOutputSlot</a>(i).<a class="code" href="classarmnn_1_1_i_output_slot.html#a5ee4a6c9a2481245487b1b1a70d20fd0">SetTensorInfo</a>(base.<a class="code" href="classarmnn_1_1_layer.html#a0e36688a43c35668d8db5257274c68fe">GetOutputSlot</a>(i).<a class="code" href="classarmnn_1_1_output_slot.html#ada2ad7d1caeeb4ef6195c8925fad6a65">GetTensorInfo</a>());</div>
<div class="line"><a name="l00386"></a><span class="lineno"> 386</span>&#160; }</div>
<div class="line"><a name="l00387"></a><span class="lineno"> 387</span>&#160; </div>
<div class="line"><a name="l00388"></a><span class="lineno"> 388</span>&#160; <a class="code" href="classarmnn_1_1_subgraph_view.html#ad5fc1b5213dcb72c0d4ac9dfb46ef677">SubgraphView::SubgraphViewPtr</a> substituteSubgraph =</div>
<div class="line"><a name="l00389"></a><span class="lineno"> 389</span>&#160; <a class="code" href="namespacearmnn.html#a5ce134e586578a4c448a128f95b4d7af">CreateSubgraphViewFrom</a>(<a class="code" href="namespacearmnn.html#a3f6dfcc7396ea476f96658ae6cb02b54">CreateInputsFrom</a>(&amp;base),</div>
<div class="line"><a name="l00390"></a><span class="lineno"> 390</span>&#160; <a class="code" href="namespacearmnn.html#a047e95685b63fedaa1d2ebb3b9428ff5">CreateOutputsFrom</a>(&amp;base),</div>
<div class="line"><a name="l00391"></a><span class="lineno"> 391</span>&#160; {&amp;base});</div>
<div class="line"><a name="l00392"></a><span class="lineno"> 392</span>&#160; </div>
<div class="line"><a name="l00393"></a><span class="lineno"> 393</span>&#160; optimizationViews.<a class="code" href="classarmnn_1_1_optimization_views.html#a57340147dcb1c0ae05fdf14ff7af8a01">AddSubstitution</a>({ std::move(*substituteSubgraph), <a class="code" href="classarmnn_1_1_subgraph_view.html">SubgraphView</a>(preCompiledLayer) });</div>
<div class="line"><a name="l00394"></a><span class="lineno"> 394</span>&#160; </div>
<div class="line"><a name="l00395"></a><span class="lineno"> 395</span>&#160; untouched.erase(base.<a class="code" href="classarmnn_1_1_layer.html#a8dc12f0ee5b232d397bd18ced1a72a64">GetGuid</a>());</div>
<div class="line"><a name="l00396"></a><span class="lineno"> 396</span>&#160; }</div>
<div class="line"><a name="l00397"></a><span class="lineno"> 397</span>&#160; </div>
<div class="line"><a name="l00398"></a><span class="lineno"> 398</span>&#160; <span class="keywordflow">if</span> (optimizationViews.<a class="code" href="classarmnn_1_1_optimization_views.html#a9a1555f25af4a0ae2c0a1fc0ed9aded8">GetSubstitutions</a>().empty())</div>
<div class="line"><a name="l00399"></a><span class="lineno"> 399</span>&#160; {</div>
<div class="line"><a name="l00400"></a><span class="lineno"> 400</span>&#160; optimizationViews.<a class="code" href="classarmnn_1_1_optimization_views.html#a28e41bdd6b719a3d60a1a0de2e1ebc95">AddUntouchedSubgraph</a>(<a class="code" href="classarmnn_1_1_subgraph_view.html">SubgraphView</a>(subgraph));</div>
<div class="line"><a name="l00401"></a><span class="lineno"> 401</span>&#160; }</div>
<div class="line"><a name="l00402"></a><span class="lineno"> 402</span>&#160; <span class="keywordflow">else</span></div>
<div class="line"><a name="l00403"></a><span class="lineno"> 403</span>&#160; {</div>
<div class="line"><a name="l00404"></a><span class="lineno"> 404</span>&#160; <a class="code" href="namespacearmnn.html#a5afd10e5e84ebf5cb8cfc8707492eda7">ReportUntouchedLayers</a>(optimizationViews, untouched);</div>
<div class="line"><a name="l00405"></a><span class="lineno"> 405</span>&#160; }</div>
<div class="line"><a name="l00406"></a><span class="lineno"> 406</span>&#160; </div>
<div class="line"><a name="l00407"></a><span class="lineno"> 407</span>&#160; </div>
<div class="line"><a name="l00408"></a><span class="lineno"> 408</span>&#160; <span class="keywordflow">return</span> optimizationViews;</div>
<div class="line"><a name="l00409"></a><span class="lineno"> 409</span>&#160;}</div>
<div class="line"><a name="l00410"></a><span class="lineno"> 410</span>&#160; </div>
<div class="line"><a name="l00411"></a><span class="lineno"> 411</span>&#160;} <span class="comment">// namespace armnn</span></div>
</div><!-- fragment --></div><!-- contents -->
</div><!-- doc-content -->
<div class="ttc" id="anamespacearmnn_html_a14fcd7f88d11cea0a018269dca5f9277a1131a914388fac73e5f07b0ba0aad523"><div class="ttname"><a href="namespacearmnn.html#a14fcd7f88d11cea0a018269dca5f9277a1131a914388fac73e5f07b0ba0aad523">armnn::MemorySource::Malloc</a></div><div class="ttdeci">@ Malloc</div></div>
<div class="ttc" id="aclassarmnn_1_1_optimization_views_html_a28e41bdd6b719a3d60a1a0de2e1ebc95"><div class="ttname"><a href="classarmnn_1_1_optimization_views.html#a28e41bdd6b719a3d60a1a0de2e1ebc95">armnn::OptimizationViews::AddUntouchedSubgraph</a></div><div class="ttdeci">void AddUntouchedSubgraph(SubgraphView &amp;&amp;subgraph)</div><div class="ttdef"><b>Definition:</b> <a href="_optimization_views_8hpp_source.html#l00048">OptimizationViews.hpp:48</a></div></div>
<div class="ttc" id="anamespacearmnn_html_af347e46317f7427d35f87c6fae065bcb"><div class="ttname"><a href="namespacearmnn.html#af347e46317f7427d35f87c6fae065bcb">armnn::GpuFsaSoftmaxCreateOp</a></div><div class="ttdeci">void GpuFsaSoftmaxCreateOp(GpuFsaPreCompiledBlob *blob, const TensorInfo &amp;input, const TensorInfo &amp;output, const SoftmaxDescriptor &amp;descriptor)</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_softmax_8cpp_source.html#l00063">GpuFsaSoftmax.cpp:63</a></div></div>
<div class="ttc" id="anamespacearmnn_html_ab9fda8f44398cd4c531c9ac136f2a1bf"><div class="ttname"><a href="namespacearmnn.html#ab9fda8f44398cd4c531c9ac136f2a1bf">armnn::GpuFsaElementwiseBinaryCreateOp</a></div><div class="ttdeci">void GpuFsaElementwiseBinaryCreateOp(GpuFsaPreCompiledBlob *blob, const TensorInfo &amp;input0, const TensorInfo &amp;input1, const ElementwiseBinaryDescriptor &amp;descriptor)</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_elementwise_binary_8cpp_source.html#l00063">GpuFsaElementwiseBinary.cpp:63</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_optional_html"><div class="ttname"><a href="classarmnn_1_1_optional.html">armnn::Optional</a></div><div class="ttdef"><b>Definition:</b> <a href="_optional_8hpp_source.html#l00270">Optional.hpp:270</a></div></div>
<div class="ttc" id="anamespacearmnn_html_abcce06cfe2568f392b05010579e83ae0"><div class="ttname"><a href="namespacearmnn.html#abcce06cfe2568f392b05010579e83ae0">armnn::GpuFsaPooling2dCreateOp</a></div><div class="ttdeci">void GpuFsaPooling2dCreateOp(GpuFsaPreCompiledBlob *blob, const TensorInfo &amp;input, const Pooling2dDescriptor &amp;descriptor)</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_pooling2d_8cpp_source.html#l00040">GpuFsaPooling2d.cpp:40</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_gpu_fsa_layer_support_html"><div class="ttname"><a href="classarmnn_1_1_gpu_fsa_layer_support.html">armnn::GpuFsaLayerSupport</a></div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_layer_support_8hpp_source.html#l00013">GpuFsaLayerSupport.hpp:13</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_output_slot_html_ada2ad7d1caeeb4ef6195c8925fad6a65"><div class="ttname"><a href="classarmnn_1_1_output_slot.html#ada2ad7d1caeeb4ef6195c8925fad6a65">armnn::OutputSlot::GetTensorInfo</a></div><div class="ttdeci">const TensorInfo &amp; GetTensorInfo() const override</div><div class="ttdef"><b>Definition:</b> <a href="_layer_8cpp_source.html#l00100">Layer.cpp:100</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_gpu_fsa_backend_html_a531177ce89c53c6af616175b0b05e5c0"><div class="ttname"><a href="classarmnn_1_1_gpu_fsa_backend.html#a531177ce89c53c6af616175b0b05e5c0">armnn::GpuFsaBackend::RegisterTensorHandleFactories</a></div><div class="ttdeci">void RegisterTensorHandleFactories(TensorHandleFactoryRegistry &amp;registry) override</div><div class="ttdoc">(Optional) Register TensorHandleFactories Either this method or CreateMemoryManager() and IWorkloadFa...</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_backend_8cpp_source.html#l00151">GpuFsaBackend.cpp:151</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_gpu_fsa_backend_html_a177af502214bbc8123fbb4a3c4f0a1b8"><div class="ttname"><a href="classarmnn_1_1_gpu_fsa_backend.html#a177af502214bbc8123fbb4a3c4f0a1b8">armnn::GpuFsaBackend::GetIdStatic</a></div><div class="ttdeci">static const BackendId &amp; GetIdStatic()</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_backend_8cpp_source.html#l00069">GpuFsaBackend.cpp:69</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a1334119d4ee3de23f273ee91aa256cbd"><div class="ttname"><a href="namespacearmnn.html#a1334119d4ee3de23f273ee91aa256cbd">armnn::GpuFsaBatchMatMulCreateOp</a></div><div class="ttdeci">void GpuFsaBatchMatMulCreateOp(GpuFsaPreCompiledBlob *blob, const TensorInfo &amp;input0, const TensorInfo &amp;input1, const BatchMatMulDescriptor &amp;descriptor)</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_batch_mat_mul_8cpp_source.html#l00051">GpuFsaBatchMatMul.cpp:51</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_gpu_fsa_backend_html_adda2579a4649278c81f18a1d3915b3a0"><div class="ttname"><a href="classarmnn_1_1_gpu_fsa_backend.html#adda2579a4649278c81f18a1d3915b3a0">armnn::GpuFsaBackend::m_UsingCustomAllocator</a></div><div class="ttdeci">bool m_UsingCustomAllocator</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_backend_8hpp_source.html#l00304">GpuFsaBackend.hpp:304</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_i_backend_internal_html_a693b40e6b94e958836aeb0410ca186bd"><div class="ttname"><a href="classarmnn_1_1_i_backend_internal.html#a693b40e6b94e958836aeb0410ca186bd">armnn::IBackendInternal::IMemoryManagerSharedPtr</a></div><div class="ttdeci">std::shared_ptr&lt; IMemoryManager &gt; IMemoryManagerSharedPtr</div><div class="ttdef"><b>Definition:</b> <a href="_i_backend_internal_8hpp_source.html#l00099">IBackendInternal.hpp:99</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_tensor_handle_factory_registry_html"><div class="ttname"><a href="classarmnn_1_1_tensor_handle_factory_registry.html">armnn::TensorHandleFactoryRegistry</a></div><div class="ttdef"><b>Definition:</b> <a href="_tensor_handle_factory_registry_8hpp_source.html#l00023">TensorHandleFactoryRegistry.hpp:23</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_gpu_fsa_backend_html_a93d4285a3ea5e4e3b35578484d889daa"><div class="ttname"><a href="classarmnn_1_1_gpu_fsa_backend.html#a93d4285a3ea5e4e3b35578484d889daa">armnn::GpuFsaBackend::GetLayerSupport</a></div><div class="ttdeci">IBackendInternal::ILayerSupportSharedPtr GetLayerSupport() const override</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_backend_8cpp_source.html#l00209">GpuFsaBackend.cpp:209</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_subgraph_view_html_a74798938fdaeae75c8adfa4a7439e7f9"><div class="ttname"><a href="classarmnn_1_1_subgraph_view.html#a74798938fdaeae75c8adfa4a7439e7f9">armnn::SubgraphView::Layers</a></div><div class="ttdeci">std::list&lt; Layer * &gt; Layers</div><div class="ttdef"><b>Definition:</b> <a href="_subgraph_view_8hpp_source.html#l00061">SubgraphView.hpp:61</a></div></div>
<div class="ttc" id="a_gpu_fsa_elementwise_binary_8hpp_html"><div class="ttname"><a href="_gpu_fsa_elementwise_binary_8hpp.html">GpuFsaElementwiseBinary.hpp</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_gpu_fsa_tensor_handle_factory_html_acdecb5b442434112c2cc8fc48c0ea922"><div class="ttname"><a href="classarmnn_1_1_gpu_fsa_tensor_handle_factory.html#acdecb5b442434112c2cc8fc48c0ea922">armnn::GpuFsaTensorHandleFactory::GetIdStatic</a></div><div class="ttdeci">static const FactoryId &amp; GetIdStatic()</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_tensor_handle_factory_8cpp_source.html#l00086">GpuFsaTensorHandleFactory.cpp:86</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a40f8d250a71183a6cd1b700b287fc32c"><div class="ttname"><a href="namespacearmnn.html#a40f8d250a71183a6cd1b700b287fc32c">armnn::GpuFsaActivationCreateOp</a></div><div class="ttdeci">void GpuFsaActivationCreateOp(GpuFsaPreCompiledBlob *blob, const TensorInfo &amp;input, const ActivationDescriptor &amp;descriptor)</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_activation_8cpp_source.html#l00058">GpuFsaActivation.cpp:58</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a5b05f3b7208ec7cea3338e30057c0bac"><div class="ttname"><a href="namespacearmnn.html#a5b05f3b7208ec7cea3338e30057c0bac">armnn::MemorySourceFlags</a></div><div class="ttdeci">unsigned int MemorySourceFlags</div><div class="ttdef"><b>Definition:</b> <a href="_memory_sources_8hpp_source.html#l00015">MemorySources.hpp:15</a></div></div>
<div class="ttc" id="a_gpu_fsa_backend_default_allocator_8hpp_html"><div class="ttname"><a href="_gpu_fsa_backend_default_allocator_8hpp.html">GpuFsaBackendDefaultAllocator.hpp</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_layer_html_a0e36688a43c35668d8db5257274c68fe"><div class="ttname"><a href="classarmnn_1_1_layer.html#a0e36688a43c35668d8db5257274c68fe">armnn::Layer::GetOutputSlot</a></div><div class="ttdeci">const OutputSlot &amp; GetOutputSlot(unsigned int index=0) const override</div><div class="ttdoc">Get the const output slot handle by slot index.</div><div class="ttdef"><b>Definition:</b> <a href="_layer_8hpp_source.html#l00339">Layer.hpp:339</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_tensor_handle_factory_registry_html_a958ab0c60b6bfdfba5cc075211edec37"><div class="ttname"><a href="classarmnn_1_1_tensor_handle_factory_registry.html#a958ab0c60b6bfdfba5cc075211edec37">armnn::TensorHandleFactoryRegistry::RegisterMemoryManager</a></div><div class="ttdeci">void RegisterMemoryManager(std::shared_ptr&lt; IMemoryManager &gt; memoryManger)</div><div class="ttdoc">Register a memory manager with shared ownership.</div><div class="ttdef"><b>Definition:</b> <a href="_tensor_handle_factory_registry_8cpp_source.html#l00034">TensorHandleFactoryRegistry.cpp:34</a></div></div>
<div class="ttc" id="anamespacearmnn_html_ab912656f08118f8b81f8261b5e0dc3a9"><div class="ttname"><a href="namespacearmnn.html#ab912656f08118f8b81f8261b5e0dc3a9">armnn::GpuFsaBackendId</a></div><div class="ttdeci">constexpr const char * GpuFsaBackendId()</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_backend_id_8hpp_source.html#l00010">GpuFsaBackendId.hpp:10</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_gpu_fsa_backend_html_a339efc194a1738cd93fcdde4d948a5cd"><div class="ttname"><a href="classarmnn_1_1_gpu_fsa_backend.html#a339efc194a1738cd93fcdde4d948a5cd">armnn::GpuFsaBackend::OptimizeSubgraphView</a></div><div class="ttdeci">OptimizationViews OptimizeSubgraphView(const SubgraphView &amp;subgraph, const ModelOptions &amp;modelOptions) const override</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_backend_8cpp_source.html#l00220">GpuFsaBackend.cpp:220</a></div></div>
<div class="ttc" id="a_gpu_fsa_backend_context_8hpp_html"><div class="ttname"><a href="_gpu_fsa_backend_context_8hpp.html">GpuFsaBackendContext.hpp</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_i_backend_internal_html_ada6d56575c0fe53cf23c7ae4610c6367"><div class="ttname"><a href="classarmnn_1_1_i_backend_internal.html#ada6d56575c0fe53cf23c7ae4610c6367">armnn::IBackendInternal::IBackendContextPtr</a></div><div class="ttdeci">std::unique_ptr&lt; IBackendContext &gt; IBackendContextPtr</div><div class="ttdef"><b>Definition:</b> <a href="_i_backend_internal_8hpp_source.html#l00090">IBackendInternal.hpp:90</a></div></div>
<div class="ttc" id="a_optimizer_8hpp_html"><div class="ttname"><a href="_optimizer_8hpp.html">Optimizer.hpp</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_gpu_fsa_backend_html_afb75bad43612f5314622c4fa8a16e63d"><div class="ttname"><a href="classarmnn_1_1_gpu_fsa_backend.html#afb75bad43612f5314622c4fa8a16e63d">armnn::GpuFsaBackend::GetDefaultAllocator</a></div><div class="ttdeci">std::unique_ptr&lt; ICustomAllocator &gt; GetDefaultAllocator() const override</div><div class="ttdoc">Returns the default memory allocator for the backend.</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_backend_8cpp_source.html#l00215">GpuFsaBackend.cpp:215</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_layer_html_acf8b8e23bf647836592982f97088d375"><div class="ttname"><a href="classarmnn_1_1_layer.html#acf8b8e23bf647836592982f97088d375">armnn::Layer::GetInputSlot</a></div><div class="ttdeci">const InputSlot &amp; GetInputSlot(unsigned int index) const override</div><div class="ttdoc">Get a const input slot handle by slot index.</div><div class="ttdef"><b>Definition:</b> <a href="_layer_8hpp_source.html#l00337">Layer.hpp:337</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a56943a0946e5f15e5e58054b8e7a04a4a699bcffd93aff3022014b9efc9eaefd1"><div class="ttname"><a href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a699bcffd93aff3022014b9efc9eaefd1">armnn::LayerType::ElementwiseBinary</a></div><div class="ttdeci">@ ElementwiseBinary</div></div>
<div class="ttc" id="aclassarmnn_1_1_gpu_fsa_backend_html_a63559c7f206c265f5fff5ffcc8a58e3e"><div class="ttname"><a href="classarmnn_1_1_gpu_fsa_backend.html#a63559c7f206c265f5fff5ffcc8a58e3e">armnn::GpuFsaBackend::GetHandleFactoryPreferences</a></div><div class="ttdeci">std::vector&lt; ITensorHandleFactory::FactoryId &gt; GetHandleFactoryPreferences() const override</div><div class="ttdoc">(Optional) Returns a vector of supported TensorHandleFactory ids in preference order.</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_backend_8cpp_source.html#l00146">GpuFsaBackend.cpp:146</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_subgraph_view_html_a5cc65e15002dbc33a5c8a7d6680e9a9d"><div class="ttname"><a href="classarmnn_1_1_subgraph_view.html#a5cc65e15002dbc33a5c8a7d6680e9a9d">armnn::SubgraphView::InputSlots</a></div><div class="ttdeci">std::vector&lt; InputSlot * &gt; InputSlots</div><div class="ttdef"><b>Definition:</b> <a href="_subgraph_view_8hpp_source.html#l00057">SubgraphView.hpp:57</a></div></div>
<div class="ttc" id="a_gpu_fsa_workload_factory_8hpp_html"><div class="ttname"><a href="_gpu_fsa_workload_factory_8hpp.html">GpuFsaWorkloadFactory.hpp</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a4cc1900a15abbcc19a632e99c7c2120f"><div class="ttname"><a href="namespacearmnn.html#a4cc1900a15abbcc19a632e99c7c2120f">armnn::GpuFsaReshapeCreateOp</a></div><div class="ttdeci">void GpuFsaReshapeCreateOp(GpuFsaPreCompiledBlob *blob, const TensorInfo &amp;input, const ReshapeDescriptor &amp;descriptor)</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_reshape_8cpp_source.html#l00049">GpuFsaReshape.cpp:49</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_layer_html"><div class="ttname"><a href="classarmnn_1_1_layer.html">armnn::Layer</a></div><div class="ttdef"><b>Definition:</b> <a href="_layer_8hpp_source.html#l00230">Layer.hpp:230</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_gpu_fsa_backend_html_a93fcb3bff141d8c77b53466a44b58eee"><div class="ttname"><a href="classarmnn_1_1_gpu_fsa_backend.html#a93fcb3bff141d8c77b53466a44b58eee">armnn::GpuFsaBackend::CreateMemoryManager</a></div><div class="ttdeci">IBackendInternal::IMemoryManagerUniquePtr CreateMemoryManager() const override</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_backend_8cpp_source.html#l00075">GpuFsaBackend.cpp:75</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_gpu_fsa_backend_html_a801cf3170dc777aca3e6f926d1bd70a5"><div class="ttname"><a href="classarmnn_1_1_gpu_fsa_backend.html#a801cf3170dc777aca3e6f926d1bd70a5">armnn::GpuFsaBackend::CreateBackendContext</a></div><div class="ttdeci">IBackendInternal::IBackendContextPtr CreateBackendContext(const IRuntime::CreationOptions &amp;) const override</div><div class="ttdoc">Create the runtime context of the backend.</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_backend_8cpp_source.html#l00198">GpuFsaBackend.cpp:198</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_subgraph_view_html_ad2570202bb366163f8b4660bfe78c49d"><div class="ttname"><a href="classarmnn_1_1_subgraph_view.html#ad2570202bb366163f8b4660bfe78c49d">armnn::SubgraphView::begin</a></div><div class="ttdeci">IConnectableLayerIterator begin()</div><div class="ttdef"><b>Definition:</b> <a href="_subgraph_view_8cpp_source.html#l00286">SubgraphView.cpp:286</a></div></div>
<div class="ttc" id="a_gpu_fsa_softmax_8hpp_html"><div class="ttname"><a href="_gpu_fsa_softmax_8hpp.html">GpuFsaSoftmax.hpp</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_i_network_html_aee3a15d2fa419f50a8ac45e6d3c11e16"><div class="ttname"><a href="classarmnn_1_1_i_network.html#aee3a15d2fa419f50a8ac45e6d3c11e16">armnn::INetwork::AddPrecompiledLayer</a></div><div class="ttdeci">IConnectableLayer * AddPrecompiledLayer(const PreCompiledDescriptor &amp;preCompiledDescriptor, CompiledBlobPtr compiledBlobPtr, const Optional&lt; BackendId &gt; &amp;backend, const char *name=nullptr)</div><div class="ttdoc">Adds a Precompiled layer to the network.</div><div class="ttdef"><b>Definition:</b> <a href="_network_8cpp_source.html#l00368">Network.cpp:368</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_i_output_slot_html_a5ee4a6c9a2481245487b1b1a70d20fd0"><div class="ttname"><a href="classarmnn_1_1_i_output_slot.html#a5ee4a6c9a2481245487b1b1a70d20fd0">armnn::IOutputSlot::SetTensorInfo</a></div><div class="ttdeci">virtual void SetTensorInfo(const TensorInfo &amp;tensorInfo)=0</div></div>
<div class="ttc" id="anamespacearmnn_html_a14fcd7f88d11cea0a018269dca5f9277aec0fc0100c4fc1ce4eea230c3dc10360"><div class="ttname"><a href="namespacearmnn.html#a14fcd7f88d11cea0a018269dca5f9277aec0fc0100c4fc1ce4eea230c3dc10360">armnn::MemorySource::Undefined</a></div><div class="ttdeci">@ Undefined</div></div>
<div class="ttc" id="a_i_backend_context_8hpp_html"><div class="ttname"><a href="_i_backend_context_8hpp.html">IBackendContext.hpp</a></div></div>
<div class="ttc" id="astructarmnn_1_1_empty_optional_html"><div class="ttname"><a href="structarmnn_1_1_empty_optional.html">armnn::EmptyOptional</a></div><div class="ttdoc">EmptyOptional is used to initialize the Optional class in case we want to have default value for an O...</div><div class="ttdef"><b>Definition:</b> <a href="_optional_8hpp_source.html#l00032">Optional.hpp:32</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a56943a0946e5f15e5e58054b8e7a04a4a31d953b9d49a6b4378f45097047976d0"><div class="ttname"><a href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a31d953b9d49a6b4378f45097047976d0">armnn::LayerType::Softmax</a></div><div class="ttdeci">@ Softmax</div></div>
<div class="ttc" id="astructarmnn_1_1_gpu_fsa_pre_compiled_blob_html_a14f92a9f65e32c3da896e7b1d45abd02"><div class="ttname"><a href="structarmnn_1_1_gpu_fsa_pre_compiled_blob.html#a14f92a9f65e32c3da896e7b1d45abd02">armnn::GpuFsaPreCompiledBlob::sketch</a></div><div class="ttdeci">std::unique_ptr&lt; arm_compute::experimental::dynamic_fusion::GpuWorkloadSketch &gt; sketch</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_backend_8hpp_source.html#l00034">GpuFsaBackend.hpp:34</a></div></div>
<div class="ttc" id="a_gpu_fsa_convolution2d_8hpp_html"><div class="ttname"><a href="_gpu_fsa_convolution2d_8hpp.html">GpuFsaConvolution2d.hpp</a></div></div>
<div class="ttc" id="a_gpu_fsa_layer_support_8hpp_html"><div class="ttname"><a href="_gpu_fsa_layer_support_8hpp.html">GpuFsaLayerSupport.hpp</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_subgraph_view_html_ad5fc1b5213dcb72c0d4ac9dfb46ef677"><div class="ttname"><a href="classarmnn_1_1_subgraph_view.html#ad5fc1b5213dcb72c0d4ac9dfb46ef677">armnn::SubgraphView::SubgraphViewPtr</a></div><div class="ttdeci">std::shared_ptr&lt; SubgraphView &gt; SubgraphViewPtr</div><div class="ttdef"><b>Definition:</b> <a href="_subgraph_view_8hpp_source.html#l00056">SubgraphView.hpp:56</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a5bdf4240a1fbda27b5fc84baba721781"><div class="ttname"><a href="namespacearmnn.html#a5bdf4240a1fbda27b5fc84baba721781">armnn::GpuFsaDepthwiseConvolution2dCreateOp</a></div><div class="ttdeci">void GpuFsaDepthwiseConvolution2dCreateOp(GpuFsaPreCompiledBlob *blob, const TensorInfo &amp;input, const DepthwiseConvolution2dDescriptor &amp;descriptor, const TensorInfo &amp;weights, const Optional&lt; TensorInfo &gt; &amp;biases)</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_depthwise_convolution2d_8cpp_source.html#l00089">GpuFsaDepthwiseConvolution2d.cpp:89</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_layer_html_a9752e12d6b79e18da1a25f76159d2a72"><div class="ttname"><a href="classarmnn_1_1_layer.html#a9752e12d6b79e18da1a25f76159d2a72">armnn::Layer::EndInputSlots</a></div><div class="ttdeci">std::vector&lt; InputSlot &gt;::iterator EndInputSlots()</div><div class="ttdef"><b>Definition:</b> <a href="_layer_8hpp_source.html#l00263">Layer.hpp:263</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_layer_html_a8dc12f0ee5b232d397bd18ced1a72a64"><div class="ttname"><a href="classarmnn_1_1_layer.html#a8dc12f0ee5b232d397bd18ced1a72a64">armnn::Layer::GetGuid</a></div><div class="ttdeci">LayerGuid GetGuid() const final</div><div class="ttdoc">Returns the unique id of the layer.</div><div class="ttdef"><b>Definition:</b> <a href="_layer_8hpp_source.html#l00343">Layer.hpp:343</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_layer_html_a1594bddc87d6477df300317658f566bb"><div class="ttname"><a href="classarmnn_1_1_layer.html#a1594bddc87d6477df300317658f566bb">armnn::Layer::GetNumOutputSlots</a></div><div class="ttdeci">unsigned int GetNumOutputSlots() const override</div><div class="ttdoc">Returns the number of connectable output slots.</div><div class="ttdef"><b>Definition:</b> <a href="_layer_8hpp_source.html#l00335">Layer.hpp:335</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_subgraph_view_html"><div class="ttname"><a href="classarmnn_1_1_subgraph_view.html">armnn::SubgraphView</a></div><div class="ttdoc">The SubgraphView class represents a subgraph of a Graph.</div><div class="ttdef"><b>Definition:</b> <a href="_subgraph_view_8hpp_source.html#l00031">SubgraphView.hpp:31</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_optimization_views_html"><div class="ttname"><a href="classarmnn_1_1_optimization_views.html">armnn::OptimizationViews</a></div><div class="ttdef"><b>Definition:</b> <a href="_optimization_views_8hpp_source.html#l00017">OptimizationViews.hpp:17</a></div></div>
<div class="ttc" id="a_gpu_fsa_activation_8hpp_html"><div class="ttname"><a href="_gpu_fsa_activation_8hpp.html">GpuFsaActivation.hpp</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_gpu_fsa_backend_html_a68c2ef244261cc9649799284774af132"><div class="ttname"><a href="classarmnn_1_1_gpu_fsa_backend.html#a68c2ef244261cc9649799284774af132">armnn::GpuFsaBackend::CreateBackendProfilingContext</a></div><div class="ttdeci">IBackendInternal::IBackendProfilingContextPtr CreateBackendProfilingContext(const IRuntime::CreationOptions &amp;, IBackendProfilingPtr &amp;backendProfiling) override</div><div class="ttdoc">Create context specifically used for profiling interaction from backends.</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_backend_8cpp_source.html#l00203">GpuFsaBackend.cpp:203</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_gpu_fsa_backend_html_a8e19e275c8162e34e6d8d10a9245dbc9"><div class="ttname"><a href="classarmnn_1_1_gpu_fsa_backend.html#a8e19e275c8162e34e6d8d10a9245dbc9">armnn::GpuFsaBackend::CreateWorkloadFactory</a></div><div class="ttdeci">IBackendInternal::IWorkloadFactoryPtr CreateWorkloadFactory(const IBackendInternal::IMemoryManagerSharedPtr &amp;memoryManager=nullptr) const override</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_backend_8cpp_source.html#l00084">GpuFsaBackend.cpp:84</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a5ce134e586578a4c448a128f95b4d7af"><div class="ttname"><a href="namespacearmnn.html#a5ce134e586578a4c448a128f95b4d7af">armnn::CreateSubgraphViewFrom</a></div><div class="ttdeci">SubgraphView::SubgraphViewPtr CreateSubgraphViewFrom(SubgraphView::InputSlots &amp;&amp;inputs, SubgraphView::OutputSlots &amp;&amp;outputs, SubgraphView::Layers &amp;&amp;layers)</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_backend_8cpp_source.html#l00062">GpuFsaBackend.cpp:62</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_optimization_views_html_a57340147dcb1c0ae05fdf14ff7af8a01"><div class="ttname"><a href="classarmnn_1_1_optimization_views.html#a57340147dcb1c0ae05fdf14ff7af8a01">armnn::OptimizationViews::AddSubstitution</a></div><div class="ttdeci">void AddSubstitution(SubstitutionPair &amp;&amp;substitution)</div><div class="ttdef"><b>Definition:</b> <a href="_optimization_views_8hpp_source.html#l00038">OptimizationViews.hpp:38</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_layer_html_af6cb8de21ef0da269ec9b67755ae92a0"><div class="ttname"><a href="classarmnn_1_1_layer.html#af6cb8de21ef0da269ec9b67755ae92a0">armnn::Layer::BeginInputSlots</a></div><div class="ttdeci">std::vector&lt; InputSlot &gt;::iterator BeginInputSlots()</div><div class="ttdef"><b>Definition:</b> <a href="_layer_8hpp_source.html#l00262">Layer.hpp:262</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_gpu_fsa_backend_html_afe31515f8502943018e1a6244beea1a2"><div class="ttname"><a href="classarmnn_1_1_gpu_fsa_backend.html#afe31515f8502943018e1a6244beea1a2">armnn::GpuFsaBackend::m_CustomAllocator</a></div><div class="ttdeci">std::shared_ptr&lt; GpuFsaBackendCustomAllocatorWrapper &gt; m_CustomAllocator</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_backend_8hpp_source.html#l00303">GpuFsaBackend.hpp:303</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a56943a0946e5f15e5e58054b8e7a04a4ad662867a41bfb30b9f75dda2b5849001"><div class="ttname"><a href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4ad662867a41bfb30b9f75dda2b5849001">armnn::LayerType::Pooling2d</a></div><div class="ttdeci">@ Pooling2d</div></div>
<div class="ttc" id="aclassarmnn_1_1_i_backend_internal_html_ae44a82b0e485e551a0f77150b1076e06"><div class="ttname"><a href="classarmnn_1_1_i_backend_internal.html#ae44a82b0e485e551a0f77150b1076e06">armnn::IBackendInternal::IBackendProfilingContextPtr</a></div><div class="ttdeci">std::shared_ptr&lt; arm::pipe::IBackendProfilingContext &gt; IBackendProfilingContextPtr</div><div class="ttdoc">This is the bridge between backend and backend profiling we'll keep it in the backend namespace.</div><div class="ttdef"><b>Definition:</b> <a href="_i_backend_internal_8hpp_source.html#l00092">IBackendInternal.hpp:92</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_layer_html_abc0660dc440c8a285b456c9ef6383c26"><div class="ttname"><a href="classarmnn_1_1_layer.html#abc0660dc440c8a285b456c9ef6383c26">armnn::Layer::GetNumInputSlots</a></div><div class="ttdeci">unsigned int GetNumInputSlots() const override</div><div class="ttdoc">Returns the number of connectable input slots.</div><div class="ttdef"><b>Definition:</b> <a href="_layer_8hpp_source.html#l00334">Layer.hpp:334</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a67faae0ae4e9d94069de01964d3fa0a0"><div class="ttname"><a href="namespacearmnn.html#a67faae0ae4e9d94069de01964d3fa0a0">armnn::GpuFsaResizeCreateOp</a></div><div class="ttdeci">void GpuFsaResizeCreateOp(GpuFsaPreCompiledBlob *blob, const TensorInfo &amp;input, const ResizeDescriptor &amp;descriptor)</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_resize_8cpp_source.html#l00039">GpuFsaResize.cpp:39</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_layer_html_a94a487f29157eeec10e2f9a372487bcc"><div class="ttname"><a href="classarmnn_1_1_layer.html#a94a487f29157eeec10e2f9a372487bcc">armnn::Layer::GetParameters</a></div><div class="ttdeci">virtual const BaseDescriptor &amp; GetParameters() const override</div><div class="ttdoc">If the layer has a descriptor return it.</div><div class="ttdef"><b>Definition:</b> <a href="_layer_8hpp_source.html#l00378">Layer.hpp:378</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a7dacd5f6b52fd93bfd536976d27f8293"><div class="ttname"><a href="namespacearmnn.html#a7dacd5f6b52fd93bfd536976d27f8293">armnn::DeleteAsType</a></div><div class="ttdeci">void DeleteAsType(const void *const blob)</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_backend_8cpp_source.html#l00037">GpuFsaBackend.cpp:37</a></div></div>
<div class="ttc" id="a_gpu_fsa_resize_8hpp_html"><div class="ttname"><a href="_gpu_fsa_resize_8hpp.html">GpuFsaResize.hpp</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a56943a0946e5f15e5e58054b8e7a04a4a9882ff3cfed27d6161c20a305e7a3484"><div class="ttname"><a href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a9882ff3cfed27d6161c20a305e7a3484">armnn::LayerType::BatchMatMul</a></div><div class="ttdeci">@ BatchMatMul</div></div>
<div class="ttc" id="a_gpu_fsa_depthwise_convolution2d_8hpp_html"><div class="ttname"><a href="_gpu_fsa_depthwise_convolution2d_8hpp.html">GpuFsaDepthwiseConvolution2d.hpp</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a56943a0946e5f15e5e58054b8e7a04a4af97adbfc88b7012a0243215b1076e7e7"><div class="ttname"><a href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4af97adbfc88b7012a0243215b1076e7e7">armnn::LayerType::DepthwiseConvolution2d</a></div><div class="ttdeci">@ DepthwiseConvolution2d</div></div>
<div class="ttc" id="anamespacearmnn_html_a56943a0946e5f15e5e58054b8e7a04a4a4cd9f3996d60790cd11c04f842ebc43c"><div class="ttname"><a href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a4cd9f3996d60790cd11c04f842ebc43c">armnn::LayerType::Cast</a></div><div class="ttdeci">@ Cast</div></div>
<div class="ttc" id="aclassarmnn_1_1_layer_html_ad8e15c530c929ab823d89ae9fd2d3f11"><div class="ttname"><a href="classarmnn_1_1_layer.html#ad8e15c530c929ab823d89ae9fd2d3f11">armnn::Layer::GetType</a></div><div class="ttdeci">LayerType GetType() const override</div><div class="ttdoc">Returns the armnn::LayerType of this layer.</div><div class="ttdef"><b>Definition:</b> <a href="_layer_8hpp_source.html#l00286">Layer.hpp:286</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_subgraph_view_html_a59a4100374e80a3504f4bb4d13695d0b"><div class="ttname"><a href="classarmnn_1_1_subgraph_view.html#a59a4100374e80a3504f4bb4d13695d0b">armnn::SubgraphView::GetNumOutputSlots</a></div><div class="ttdeci">unsigned int GetNumOutputSlots() const</div><div class="ttdef"><b>Definition:</b> <a href="_subgraph_view_8cpp_source.html#l00276">SubgraphView.cpp:276</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_i_backend_internal_html_a335964abd41c91f7e6ef5c65865a7b98"><div class="ttname"><a href="classarmnn_1_1_i_backend_internal.html#a335964abd41c91f7e6ef5c65865a7b98">armnn::IBackendInternal::IBackendProfilingPtr</a></div><div class="ttdeci">std::unique_ptr&lt; arm::pipe::IBackendProfiling &gt; IBackendProfilingPtr</div><div class="ttdef"><b>Definition:</b> <a href="_i_backend_internal_8hpp_source.html#l00093">IBackendInternal.hpp:93</a></div></div>
<div class="ttc" id="astructarmnn_1_1_i_runtime_1_1_creation_options_html"><div class="ttname"><a href="structarmnn_1_1_i_runtime_1_1_creation_options.html">armnn::IRuntime::CreationOptions</a></div><div class="ttdef"><b>Definition:</b> <a href="_i_runtime_8hpp_source.html#l00078">IRuntime.hpp:78</a></div></div>
<div class="ttc" id="a_gpu_fsa_backend_8hpp_html"><div class="ttname"><a href="_gpu_fsa_backend_8hpp.html">GpuFsaBackend.hpp</a></div></div>
<div class="ttc" id="anamespacearmnn_html_aa2889978c1d194097838a2a0e671da60"><div class="ttname"><a href="namespacearmnn.html#aa2889978c1d194097838a2a0e671da60">armnn::GpuFsaCastCreateOp</a></div><div class="ttdeci">void GpuFsaCastCreateOp(GpuFsaPreCompiledBlob *blob, const TensorInfo &amp;input, const TensorInfo &amp;output)</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_cast_8cpp_source.html#l00061">GpuFsaCast.cpp:61</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a56943a0946e5f15e5e58054b8e7a04a4aa7c59ccedc6a3bd90c17f3b990afefad"><div class="ttname"><a href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4aa7c59ccedc6a3bd90c17f3b990afefad">armnn::LayerType::Reshape</a></div><div class="ttdeci">@ Reshape</div></div>
<div class="ttc" id="aclassarmnn_1_1_layer_html_a817d4be6dd88f532d36f51748ec14185"><div class="ttname"><a href="classarmnn_1_1_layer.html#a817d4be6dd88f532d36f51748ec14185">armnn::Layer::BeginOutputSlots</a></div><div class="ttdeci">std::vector&lt; OutputSlot &gt;::iterator BeginOutputSlots()</div><div class="ttdef"><b>Definition:</b> <a href="_layer_8hpp_source.html#l00266">Layer.hpp:266</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_gpu_fsa_backend_html_a6c6c3d137a7792e264a89cc40ea94bb0"><div class="ttname"><a href="classarmnn_1_1_gpu_fsa_backend.html#a6c6c3d137a7792e264a89cc40ea94bb0">armnn::GpuFsaBackend::GetId</a></div><div class="ttdeci">const BackendId &amp; GetId() const override</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_backend_8hpp_source.html#l00067">GpuFsaBackend.hpp:67</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_i_backend_internal_html_a12bff6d51d63dac1375c89bc8415dc46"><div class="ttname"><a href="classarmnn_1_1_i_backend_internal.html#a12bff6d51d63dac1375c89bc8415dc46">armnn::IBackendInternal::IMemoryManagerUniquePtr</a></div><div class="ttdeci">std::unique_ptr&lt; IMemoryManager &gt; IMemoryManagerUniquePtr</div><div class="ttdef"><b>Definition:</b> <a href="_i_backend_internal_8hpp_source.html#l00098">IBackendInternal.hpp:98</a></div></div>
<div class="ttc" id="a_gpu_fsa_cast_8hpp_html"><div class="ttname"><a href="_gpu_fsa_cast_8hpp.html">GpuFsaCast.hpp</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_optimization_views_html_a9a1555f25af4a0ae2c0a1fc0ed9aded8"><div class="ttname"><a href="classarmnn_1_1_optimization_views.html#a9a1555f25af4a0ae2c0a1fc0ed9aded8">armnn::OptimizationViews::GetSubstitutions</a></div><div class="ttdeci">const Substitutions &amp; GetSubstitutions() const</div><div class="ttdef"><b>Definition:</b> <a href="_optimization_views_8hpp_source.html#l00058">OptimizationViews.hpp:58</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_subgraph_view_html_afc40490fb4f488bd2a211e81c06a6971"><div class="ttname"><a href="classarmnn_1_1_subgraph_view.html#afc40490fb4f488bd2a211e81c06a6971">armnn::SubgraphView::end</a></div><div class="ttdeci">IConnectableLayerIterator end()</div><div class="ttdef"><b>Definition:</b> <a href="_subgraph_view_8cpp_source.html#l00291">SubgraphView.cpp:291</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_backend_id_html"><div class="ttname"><a href="classarmnn_1_1_backend_id.html">armnn::BackendId</a></div><div class="ttdef"><b>Definition:</b> <a href="_backend_id_8hpp_source.html#l00075">BackendId.hpp:75</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_subgraph_view_html_a78293334750ec5279eb9c96d56deaf08"><div class="ttname"><a href="classarmnn_1_1_subgraph_view.html#a78293334750ec5279eb9c96d56deaf08">armnn::SubgraphView::OutputSlots</a></div><div class="ttdeci">std::vector&lt; OutputSlot * &gt; OutputSlots</div><div class="ttdef"><b>Definition:</b> <a href="_subgraph_view_8hpp_source.html#l00059">SubgraphView.hpp:59</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_input_slot_html_a9effd325a6d512a3f8ff4bd207d53255"><div class="ttname"><a href="classarmnn_1_1_input_slot.html#a9effd325a6d512a3f8ff4bd207d53255">armnn::InputSlot::GetConnectedOutputSlot</a></div><div class="ttdeci">const OutputSlot * GetConnectedOutputSlot() const</div><div class="ttdef"><b>Definition:</b> <a href="_layer_8hpp_source.html#l00056">Layer.hpp:56</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_i_connectable_layer_html_a80ac4eda2e7f2757ec9dd96fc96dbd16"><div class="ttname"><a href="classarmnn_1_1_i_connectable_layer.html#a80ac4eda2e7f2757ec9dd96fc96dbd16">armnn::IConnectableLayer::GetOutputSlot</a></div><div class="ttdeci">virtual const IOutputSlot &amp; GetOutputSlot(unsigned int index) const =0</div><div class="ttdoc">Get the const output slot handle by slot index.</div></div>
<div class="ttc" id="anamespacearmnn_html"><div class="ttname"><a href="namespacearmnn.html">armnn</a></div><div class="ttdoc">Copyright (c) 2021 ARM Limited and Contributors.</div><div class="ttdef"><b>Definition:</b> <a href="01__00__quick__start_8dox_source.html#l00006">01_00_quick_start.dox:6</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_tensor_handle_factory_registry_html_a05f82bd846630bb3aa8afe22ef6f15fc"><div class="ttname"><a href="classarmnn_1_1_tensor_handle_factory_registry.html#a05f82bd846630bb3aa8afe22ef6f15fc">armnn::TensorHandleFactoryRegistry::RegisterFactory</a></div><div class="ttdeci">void RegisterFactory(std::unique_ptr&lt; ITensorHandleFactory &gt; allocator)</div><div class="ttdoc">Register a TensorHandleFactory and transfer ownership.</div><div class="ttdef"><b>Definition:</b> <a href="_tensor_handle_factory_registry_8cpp_source.html#l00012">TensorHandleFactoryRegistry.cpp:12</a></div></div>
<div class="ttc" id="a_gpu_fsa_batch_mat_mul_8hpp_html"><div class="ttname"><a href="_gpu_fsa_batch_mat_mul_8hpp.html">GpuFsaBatchMatMul.hpp</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_optimization_views_html_ad04187fe81f68558b15b6049b2da9cf9"><div class="ttname"><a href="classarmnn_1_1_optimization_views.html#ad04187fe81f68558b15b6049b2da9cf9">armnn::OptimizationViews::GetINetwork</a></div><div class="ttdeci">INetwork * GetINetwork()</div><div class="ttdef"><b>Definition:</b> <a href="_optimization_views_8hpp_source.html#l00069">OptimizationViews.hpp:69</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_gpu_fsa_backend_context_html"><div class="ttname"><a href="classarmnn_1_1_gpu_fsa_backend_context.html">armnn::GpuFsaBackendContext</a></div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_backend_context_8hpp_source.html#l00017">GpuFsaBackendContext.hpp:17</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_i_backend_internal_html_a11fa919c11fe46aad613b2e960fcfe90"><div class="ttname"><a href="classarmnn_1_1_i_backend_internal.html#a11fa919c11fe46aad613b2e960fcfe90">armnn::IBackendInternal::ILayerSupportSharedPtr</a></div><div class="ttdeci">std::shared_ptr&lt; ILayerSupport &gt; ILayerSupportSharedPtr</div><div class="ttdef"><b>Definition:</b> <a href="_i_backend_internal_8hpp_source.html#l00094">IBackendInternal.hpp:94</a></div></div>
<div class="ttc" id="astructarmnn_1_1_gpu_fsa_pre_compiled_blob_html_a255c9012137b149ffb46d83c23f2df43"><div class="ttname"><a href="structarmnn_1_1_gpu_fsa_pre_compiled_blob.html#a255c9012137b149ffb46d83c23f2df43">armnn::GpuFsaPreCompiledBlob::workloadContext</a></div><div class="ttdeci">std::shared_ptr&lt; arm_compute::experimental::dynamic_fusion::GpuWorkloadContext &gt; workloadContext</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_backend_8hpp_source.html#l00035">GpuFsaBackend.hpp:35</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_i_connectable_layer_html"><div class="ttname"><a href="classarmnn_1_1_i_connectable_layer.html">armnn::IConnectableLayer</a></div><div class="ttdoc">Interface for a layer that is connectable to other layers via InputSlots and OutputSlots.</div><div class="ttdef"><b>Definition:</b> <a href="_i_network_8hpp_source.html#l00080">INetwork.hpp:80</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a5b6893cda5b69359a4244c06054da18f"><div class="ttname"><a href="namespacearmnn.html#a5b6893cda5b69359a4244c06054da18f">armnn::ModelOptions</a></div><div class="ttdeci">std::vector&lt; BackendOptions &gt; ModelOptions</div><div class="ttdef"><b>Definition:</b> <a href="_backend_options_8hpp_source.html#l00018">BackendOptions.hpp:18</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a56943a0946e5f15e5e58054b8e7a04a4a9d723d04c40bfd81835c0766a698cf63"><div class="ttname"><a href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a9d723d04c40bfd81835c0766a698cf63">armnn::LayerType::Resize</a></div><div class="ttdeci">@ Resize</div></div>
<div class="ttc" id="astructarmnn_1_1_gpu_fsa_pre_compiled_blob_html"><div class="ttname"><a href="structarmnn_1_1_gpu_fsa_pre_compiled_blob.html">armnn::GpuFsaPreCompiledBlob</a></div><div class="ttdoc">A structure which contains all the elements needed to execute a fused workload in the GpuFsa Backend.</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_backend_8hpp_source.html#l00032">GpuFsaBackend.hpp:32</a></div></div>
<div class="ttc" id="astructarmnn_1_1_pre_compiled_descriptor_html"><div class="ttname"><a href="structarmnn_1_1_pre_compiled_descriptor.html">armnn::PreCompiledDescriptor</a></div><div class="ttdoc">A PreCompiledDescriptor for the PreCompiledLayer.</div><div class="ttdef"><b>Definition:</b> <a href="_descriptors_8hpp_source.html#l01367">Descriptors.hpp:1367</a></div></div>
<div class="ttc" id="a_gpu_fsa_backend_id_8hpp_html"><div class="ttname"><a href="_gpu_fsa_backend_id_8hpp.html">GpuFsaBackendId.hpp</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a5afd10e5e84ebf5cb8cfc8707492eda7"><div class="ttname"><a href="namespacearmnn.html#a5afd10e5e84ebf5cb8cfc8707492eda7">armnn::ReportUntouchedLayers</a></div><div class="ttdeci">void ReportUntouchedLayers(OptimizationViews &amp;optimizationViews, std::map&lt; LayerGuid, Layer * &gt; untouched)</div><div class="ttdef"><b>Definition:</b> <a href="_subgraph_utils_8hpp_source.html#l00220">SubgraphUtils.hpp:220</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a56943a0946e5f15e5e58054b8e7a04a4adb033d2f81b68f9a17e8f62de69fed4a"><div class="ttname"><a href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4adb033d2f81b68f9a17e8f62de69fed4a">armnn::LayerType::Convolution2d</a></div><div class="ttdeci">@ Convolution2d</div></div>
<div class="ttc" id="anamespacearmnn_html_ab8a797269fd9db3b8832998f10ad9688"><div class="ttname"><a href="namespacearmnn.html#ab8a797269fd9db3b8832998f10ad9688">armnn::GpuFsaConvolution2dCreateOp</a></div><div class="ttdeci">void GpuFsaConvolution2dCreateOp(GpuFsaPreCompiledBlob *blob, const TensorInfo &amp;input, const Convolution2dDescriptor &amp;descriptor, const TensorInfo &amp;weights, const Optional&lt; TensorInfo &gt; &amp;biases)</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_convolution2d_8cpp_source.html#l00070">GpuFsaConvolution2d.cpp:70</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a56943a0946e5f15e5e58054b8e7a04a4aa9a62e70841c4d06dd16306a85700d36"><div class="ttname"><a href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4aa9a62e70841c4d06dd16306a85700d36">armnn::LayerType::Activation</a></div><div class="ttdeci">@ Activation</div></div>
<div class="ttc" id="aclassarmnn_1_1_i_backend_internal_html_a72ca1cf423bda4b0a9ffb789627126de"><div class="ttname"><a href="classarmnn_1_1_i_backend_internal.html#a72ca1cf423bda4b0a9ffb789627126de">armnn::IBackendInternal::IWorkloadFactoryPtr</a></div><div class="ttdeci">std::unique_ptr&lt; IWorkloadFactory &gt; IWorkloadFactoryPtr</div><div class="ttdef"><b>Definition:</b> <a href="_i_backend_internal_8hpp_source.html#l00089">IBackendInternal.hpp:89</a></div></div>
<div class="ttc" id="a_subgraph_utils_8hpp_html"><div class="ttname"><a href="_subgraph_utils_8hpp.html">SubgraphUtils.hpp</a></div></div>
<div class="ttc" id="a_gpu_fsa_reshape_8hpp_html"><div class="ttname"><a href="_gpu_fsa_reshape_8hpp.html">GpuFsaReshape.hpp</a></div></div>
<div class="ttc" id="a_gpu_fsa_tensor_handle_factory_8hpp_html"><div class="ttname"><a href="_gpu_fsa_tensor_handle_factory_8hpp.html">GpuFsaTensorHandleFactory.hpp</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a047e95685b63fedaa1d2ebb3b9428ff5"><div class="ttname"><a href="namespacearmnn.html#a047e95685b63fedaa1d2ebb3b9428ff5">armnn::CreateOutputsFrom</a></div><div class="ttdeci">SubgraphView::OutputSlots CreateOutputsFrom(Layer *layer)</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_backend_8cpp_source.html#l00052">GpuFsaBackend.cpp:52</a></div></div>
<div class="ttc" id="a_gpu_fsa_pooling2d_8hpp_html"><div class="ttname"><a href="_gpu_fsa_pooling2d_8hpp.html">GpuFsaPooling2d.hpp</a></div></div>
<div class="ttc" id="aclassarmnn_1_1_layer_html_a55f76d98fcd2f5cdac3e2b14536cb7ab"><div class="ttname"><a href="classarmnn_1_1_layer.html#a55f76d98fcd2f5cdac3e2b14536cb7ab">armnn::Layer::EndOutputSlots</a></div><div class="ttdeci">std::vector&lt; OutputSlot &gt;::iterator EndOutputSlots()</div><div class="ttdef"><b>Definition:</b> <a href="_layer_8hpp_source.html#l00267">Layer.hpp:267</a></div></div>
<div class="ttc" id="a_i_memory_manager_8hpp_html"><div class="ttname"><a href="_i_memory_manager_8hpp.html">IMemoryManager.hpp</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a3f6dfcc7396ea476f96658ae6cb02b54"><div class="ttname"><a href="namespacearmnn.html#a3f6dfcc7396ea476f96658ae6cb02b54">armnn::CreateInputsFrom</a></div><div class="ttdeci">SubgraphView::InputSlots CreateInputsFrom(Layer *layer)</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_backend_8cpp_source.html#l00042">GpuFsaBackend.cpp:42</a></div></div>
<!-- start footer part -->
<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
<ul>
<li class="navelem"><a class="el" href="dir_68267d1309a1af8e8297ef4c3efbcdba.html">src</a></li><li class="navelem"><a class="el" href="dir_0f3cdec46afbc61a1ded8e1687c9c9a0.html">backends</a></li><li class="navelem"><a class="el" href="dir_dd5880bc3520e42c5318e86a9fdc97f6.html">gpuFsa</a></li><li class="navelem"><a class="el" href="_gpu_fsa_backend_8cpp.html">GpuFsaBackend.cpp</a></li>
<li class="footer">Generated on Thu May 16 2024 09:31:47 for Arm NN by
<a href="http://www.doxygen.org/index.html">
<img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.17 </li>
</ul>
</div>
</body>
</html>