<!-- HTML header for doxygen 1.8.17-->
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "https://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="Content-Type" content="text/xhtml;charset=UTF-8"/>
<meta http-equiv="X-UA-Compatible" content="IE=9"/>
<meta name="generator" content="Doxygen 1.8.17"/>
<meta name="viewport" content="width=device-width, initial-scale=1"/>
<title>Arm NN: GpuFsaLayerSupport Class Reference</title>
<link href="tabs.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="jquery.js"></script>
<script type="text/javascript" src="dynsections.js"></script>
<link href="navtree.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="resize.js"></script>
<script type="text/javascript" src="navtreedata.js"></script>
<script type="text/javascript" src="navtree.js"></script>
<link href="search/search.css" rel="stylesheet" type="text/css"/>
<script type="text/javascript" src="search/searchdata.js"></script>
<script type="text/javascript" src="search/search.js"></script>
<script type="text/x-mathjax-config">
MathJax.Hub.Config({
extensions: ["tex2jax.js"],
jax: ["input/TeX","output/HTML-CSS"],
});
</script>
<script type="text/javascript" async="async" src="http://cdn.mathjax.org/mathjax/latest/MathJax.js"></script>
<link href="doxygen.css" rel="stylesheet" type="text/css" />
<link href="customdoxygen.css" rel="stylesheet" type="text/css"/>
</head>
<body>
<div id="top"><!-- do not remove this div, it is closed by doxygen! -->
<div id="titlearea">
<table cellspacing="0" cellpadding="0">
<tbody>
<tr style="height: 56px;">
<img alt="ArmNN" src="Arm_NN_horizontal_blue.png" style="max-width: 15rem; margin-top: .5rem; margin-left 13px"/>
<td id="projectalign" style="padding-left: 0.9em;">
<div id="projectname">
&#160;<span id="projectnumber">24.05</span>
</div>
</td>
</tr>
</tbody>
</table>
</div>
<!-- end header part -->
<!-- Generated by Doxygen 1.8.17 -->
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
var searchBox = new SearchBox("searchBox", "search",false,'Search');
/* @license-end */
</script>
<script type="text/javascript" src="menudata.js"></script>
<script type="text/javascript" src="menu.js"></script>
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
$(function() {
initMenu('',true,false,'search.php','Search');
$(document).ready(function() { init_search(); });
});
/* @license-end */</script>
<div id="main-nav"></div>
</div><!-- top -->
<div id="side-nav" class="ui-resizable side-nav-resizable">
<div id="nav-tree">
<div id="nav-tree-contents">
<div id="nav-sync" class="sync"></div>
</div>
</div>
<div id="splitbar" style="-moz-user-select:none;"
class="ui-resizable-handle">
</div>
</div>
<script type="text/javascript">
/* @license magnet:?xt=urn:btih:cf05388f2679ee054f2beb29a391d25f4e673ac3&amp;dn=gpl-2.0.txt GPL-v2 */
$(document).ready(function(){initNavTree('classarmnn_1_1_gpu_fsa_layer_support.html',''); initResizable(); });
/* @license-end */
</script>
<div id="doc-content">
<!-- window showing the filter options -->
<div id="MSearchSelectWindow"
onmouseover="return searchBox.OnSearchSelectShow()"
onmouseout="return searchBox.OnSearchSelectHide()"
onkeydown="return searchBox.OnSearchSelectKey(event)">
</div>
<!-- iframe showing the search results (closed by default) -->
<div id="MSearchResultsWindow">
<iframe src="javascript:void(0)" frameborder="0"
name="MSearchResults" id="MSearchResults">
</iframe>
</div>
<div class="header">
<div class="summary">
<a href="#pub-methods">Public Member Functions</a> &#124;
<a href="classarmnn_1_1_gpu_fsa_layer_support-members.html">List of all members</a> </div>
<div class="headertitle">
<div class="title">GpuFsaLayerSupport Class Reference</div> </div>
</div><!--header-->
<div class="contents">
<p><code>#include &lt;<a class="el" href="_gpu_fsa_layer_support_8hpp_source.html">GpuFsaLayerSupport.hpp</a>&gt;</code></p>
<div class="dynheader">
Inheritance diagram for GpuFsaLayerSupport:</div>
<div class="dyncontent">
<div class="center"><iframe scrolling="no" frameborder="0" src="classarmnn_1_1_gpu_fsa_layer_support__inherit__graph.svg" width="168" height="112"><p><b>This browser is not able to show SVG: try Firefox, Chrome, Safari, or Opera instead.</b></p></iframe>
</div>
<center><span class="legend">[<a target="top" href="graph_legend.html">legend</a>]</span></center></div>
<div class="dynheader">
Collaboration diagram for GpuFsaLayerSupport:</div>
<div class="dyncontent">
<div class="center"><iframe scrolling="no" frameborder="0" src="classarmnn_1_1_gpu_fsa_layer_support__coll__graph.svg" width="168" height="112"><p><b>This browser is not able to show SVG: try Firefox, Chrome, Safari, or Opera instead.</b></p></iframe>
</div>
<center><span class="legend">[<a target="top" href="graph_legend.html">legend</a>]</span></center></div>
<table class="memberdecls">
<tr class="heading"><td colspan="2"><h2 class="groupheader"><a name="pub-methods"></a>
Public Member Functions</h2></td></tr>
<tr class="memitem:a1e128c847b11f20fdedc72ec2951424f"><td class="memItemLeft" align="right" valign="top">bool&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarmnn_1_1_gpu_fsa_layer_support.html#a1e128c847b11f20fdedc72ec2951424f">IsLayerSupported</a> (const <a class="el" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4">LayerType</a> &amp;type, const std::vector&lt; <a class="el" href="classarmnn_1_1_tensor_info.html">TensorInfo</a> &gt; &amp;infos, const <a class="el" href="structarmnn_1_1_base_descriptor.html">BaseDescriptor</a> &amp;descriptor, const <a class="el" href="classarmnn_1_1_optional.html">Optional</a>&lt; <a class="el" href="structarmnn_1_1_lstm_input_params_info.html">LstmInputParamsInfo</a> &gt; &amp;lstmParamsInfo, const <a class="el" href="classarmnn_1_1_optional.html">Optional</a>&lt; <a class="el" href="structarmnn_1_1_quantized_lstm_input_params_info.html">QuantizedLstmInputParamsInfo</a> &gt; &amp;, <a class="el" href="classarmnn_1_1_optional.html">Optional</a>&lt; std::string &amp; &gt; reasonIfUnsupported) const override</td></tr>
<tr class="memdesc:a1e128c847b11f20fdedc72ec2951424f"><td class="mdescLeft">&#160;</td><td class="mdescRight">Default implementation of the <a class="el" href="classarmnn_1_1_i_layer_support.html">ILayerSupport</a> interface, Backends should implement this as a switch statement for each of their LayerTypes calling their specific backend implementation of IsXXXLayerSupported. <a href="classarmnn_1_1_gpu_fsa_layer_support.html#a1e128c847b11f20fdedc72ec2951424f">More...</a><br /></td></tr>
<tr class="separator:a1e128c847b11f20fdedc72ec2951424f"><td class="memSeparator" colspan="2">&#160;</td></tr>
</table><table class="memberdecls">
<tr class="heading"><td colspan="2"><h2 class="groupheader"><a name="inherited"></a>
Additional Inherited Members</h2></td></tr>
<tr class="inherit_header pro_methods_classarmnn_1_1_i_layer_support"><td colspan="2" onclick="javascript:toggleInherit('pro_methods_classarmnn_1_1_i_layer_support')"><img src="closed.png" alt="-"/>&#160;Protected Member Functions inherited from <a class="el" href="classarmnn_1_1_i_layer_support.html">ILayerSupport</a></td></tr>
<tr class="memitem:aab2a487bc88047443d8a0f87c3c40b8d inherit pro_methods_classarmnn_1_1_i_layer_support"><td class="memItemLeft" align="right" valign="top">&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarmnn_1_1_i_layer_support.html#aab2a487bc88047443d8a0f87c3c40b8d">ILayerSupport</a> ()</td></tr>
<tr class="separator:aab2a487bc88047443d8a0f87c3c40b8d inherit pro_methods_classarmnn_1_1_i_layer_support"><td class="memSeparator" colspan="2">&#160;</td></tr>
<tr class="memitem:a9e7f4f28576904804cd759e49173beeb inherit pro_methods_classarmnn_1_1_i_layer_support"><td class="memItemLeft" align="right" valign="top">virtual&#160;</td><td class="memItemRight" valign="bottom"><a class="el" href="classarmnn_1_1_i_layer_support.html#a9e7f4f28576904804cd759e49173beeb">~ILayerSupport</a> ()</td></tr>
<tr class="separator:a9e7f4f28576904804cd759e49173beeb inherit pro_methods_classarmnn_1_1_i_layer_support"><td class="memSeparator" colspan="2">&#160;</td></tr>
</table>
<a name="details" id="details"></a><h2 class="groupheader">Detailed Description</h2>
<div class="textblock">
<p class="definition">Definition at line <a class="el" href="_gpu_fsa_layer_support_8hpp_source.html#l00013">13</a> of file <a class="el" href="_gpu_fsa_layer_support_8hpp_source.html">GpuFsaLayerSupport.hpp</a>.</p>
</div><h2 class="groupheader">Member Function Documentation</h2>
<a id="a1e128c847b11f20fdedc72ec2951424f"></a>
<h2 class="memtitle"><span class="permalink"><a href="#a1e128c847b11f20fdedc72ec2951424f">&#9670;&nbsp;</a></span>IsLayerSupported()</h2>
<div class="memitem">
<div class="memproto">
<table class="mlabels">
<tr>
<td class="mlabels-left">
<table class="memname">
<tr>
<td class="memname">bool IsLayerSupported </td>
<td>(</td>
<td class="paramtype">const <a class="el" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4">LayerType</a> &amp;&#160;</td>
<td class="paramname"><em>type</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const std::vector&lt; <a class="el" href="classarmnn_1_1_tensor_info.html">TensorInfo</a> &gt; &amp;&#160;</td>
<td class="paramname"><em>infos</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="structarmnn_1_1_base_descriptor.html">BaseDescriptor</a> &amp;&#160;</td>
<td class="paramname"><em>descriptor</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarmnn_1_1_optional.html">Optional</a>&lt; <a class="el" href="structarmnn_1_1_lstm_input_params_info.html">LstmInputParamsInfo</a> &gt; &amp;&#160;</td>
<td class="paramname"><em>lstmParamsInfo</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype">const <a class="el" href="classarmnn_1_1_optional.html">Optional</a>&lt; <a class="el" href="structarmnn_1_1_quantized_lstm_input_params_info.html">QuantizedLstmInputParamsInfo</a> &gt; &amp;&#160;</td>
<td class="paramname"><em>quantizedLstmParamsInfo</em>, </td>
</tr>
<tr>
<td class="paramkey"></td>
<td></td>
<td class="paramtype"><a class="el" href="classarmnn_1_1_optional.html">Optional</a>&lt; std::string &amp; &gt;&#160;</td>
<td class="paramname"><em>reasonIfUnsupported</em>&#160;</td>
</tr>
<tr>
<td></td>
<td>)</td>
<td></td><td> const</td>
</tr>
</table>
</td>
<td class="mlabels-right">
<span class="mlabels"><span class="mlabel">override</span><span class="mlabel">virtual</span></span> </td>
</tr>
</table>
</div><div class="memdoc">
<p>Default implementation of the <a class="el" href="classarmnn_1_1_i_layer_support.html">ILayerSupport</a> interface. Backends should implement this as a switch statement over each of their LayerTypes, calling their specific backend implementation of IsXXXLayerSupported. </p>
<p>Reimplemented from <a class="el" href="classarmnn_1_1_i_layer_support.html#ade82d7df4e18bdd77969958a8fccf10a">ILayerSupport</a>.</p>
<p class="definition">Definition at line <a class="el" href="_gpu_fsa_layer_support_8cpp_source.html#l00071">71</a> of file <a class="el" href="_gpu_fsa_layer_support_8cpp_source.html">GpuFsaLayerSupport.cpp</a>.</p>
<div class="fragment"><div class="line"><a name="l00077"></a><span class="lineno"> 77</span>&#160;{</div>
<div class="line"><a name="l00078"></a><span class="lineno"> 78</span>&#160; <a class="code" href="namespacearmnn.html#a44affeeb090c3c6a3062830562672e84">IgnoreUnused</a>(lstmParamsInfo);</div>
<div class="line"><a name="l00079"></a><span class="lineno"> 79</span>&#160; <a class="code" href="namespacearmnn.html#a44affeeb090c3c6a3062830562672e84">IgnoreUnused</a>(quantizedLstmInputParamsInfo);</div>
<div class="line"><a name="l00080"></a><span class="lineno"> 80</span>&#160; </div>
<div class="line"><a name="l00081"></a><span class="lineno"> 81</span>&#160; <span class="keywordflow">switch</span> (type)</div>
<div class="line"><a name="l00082"></a><span class="lineno"> 82</span>&#160; {</div>
<div class="line"><a name="l00083"></a><span class="lineno"> 83</span>&#160; <span class="keywordflow">case</span> <a class="code" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4aa9a62e70841c4d06dd16306a85700d36">LayerType::Activation</a>:</div>
<div class="line"><a name="l00084"></a><span class="lineno"> 84</span>&#160; {</div>
<div class="line"><a name="l00085"></a><span class="lineno"> 85</span>&#160; <span class="keywordflow">if</span> (infos.size() != 2)</div>
<div class="line"><a name="l00086"></a><span class="lineno"> 86</span>&#160; {</div>
<div class="line"><a name="l00087"></a><span class="lineno"> 87</span>&#160; <span class="keywordflow">throw</span> InvalidArgumentException(<span class="stringliteral">&quot;Invalid number of Activation TensorInfos. &quot;</span></div>
<div class="line"><a name="l00088"></a><span class="lineno"> 88</span>&#160; <span class="stringliteral">&quot;TensorInfos should be of format: {input, output}.&quot;</span>);</div>
<div class="line"><a name="l00089"></a><span class="lineno"> 89</span>&#160; }</div>
<div class="line"><a name="l00090"></a><span class="lineno"> 90</span>&#160; </div>
<div class="line"><a name="l00091"></a><span class="lineno"> 91</span>&#160; <span class="keyword">auto</span> desc = PolymorphicDowncast&lt;const ActivationDescriptor*&gt;(&amp;descriptor);</div>
<div class="line"><a name="l00092"></a><span class="lineno"> 92</span>&#160; <a class="code" href="_gpu_fsa_layer_support_8cpp.html#aa24b6a243a6845480abc307561039d4a">FORWARD_LAYER_VALIDATE_FUNC</a>(<a class="code" href="namespacearmnn.html#aa8d17ff94651485b0aa4b58e1d4ef5b5">GpuFsaActivationValidate</a>,</div>
<div class="line"><a name="l00093"></a><span class="lineno"> 93</span>&#160; reasonIfUnsupported,</div>
<div class="line"><a name="l00094"></a><span class="lineno"> 94</span>&#160; infos[0],</div>
<div class="line"><a name="l00095"></a><span class="lineno"> 95</span>&#160; *desc);</div>
<div class="line"><a name="l00096"></a><span class="lineno"> 96</span>&#160; }</div>
<div class="line"><a name="l00097"></a><span class="lineno"> 97</span>&#160; <span class="keywordflow">case</span> <a class="code" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a9882ff3cfed27d6161c20a305e7a3484">LayerType::BatchMatMul</a>:</div>
<div class="line"><a name="l00098"></a><span class="lineno"> 98</span>&#160; {</div>
<div class="line"><a name="l00099"></a><span class="lineno"> 99</span>&#160; <span class="keywordflow">if</span> (infos.size() != 3)</div>
<div class="line"><a name="l00100"></a><span class="lineno"> 100</span>&#160; {</div>
<div class="line"><a name="l00101"></a><span class="lineno"> 101</span>&#160; <span class="keywordflow">throw</span> InvalidArgumentException(<span class="stringliteral">&quot;Invalid number of BatchMatMul TensorInfos. &quot;</span></div>
<div class="line"><a name="l00102"></a><span class="lineno"> 102</span>&#160; <span class="stringliteral">&quot;TensorInfos should be of format: {input0, input1 output}.&quot;</span>);</div>
<div class="line"><a name="l00103"></a><span class="lineno"> 103</span>&#160; }</div>
<div class="line"><a name="l00104"></a><span class="lineno"> 104</span>&#160; </div>
<div class="line"><a name="l00105"></a><span class="lineno"> 105</span>&#160; <span class="keyword">auto</span> desc = PolymorphicDowncast&lt;const BatchMatMulDescriptor*&gt;(&amp;descriptor);</div>
<div class="line"><a name="l00106"></a><span class="lineno"> 106</span>&#160; <a class="code" href="_gpu_fsa_layer_support_8cpp.html#aa24b6a243a6845480abc307561039d4a">FORWARD_LAYER_VALIDATE_FUNC</a>(<a class="code" href="namespacearmnn.html#a47e56eb9bcbb170dddd202d770ed2cd8">GpuFsaBatchMatMulValidate</a>,</div>
<div class="line"><a name="l00107"></a><span class="lineno"> 107</span>&#160; reasonIfUnsupported,</div>
<div class="line"><a name="l00108"></a><span class="lineno"> 108</span>&#160; infos[0],</div>
<div class="line"><a name="l00109"></a><span class="lineno"> 109</span>&#160; infos[1],</div>
<div class="line"><a name="l00110"></a><span class="lineno"> 110</span>&#160; *desc);</div>
<div class="line"><a name="l00111"></a><span class="lineno"> 111</span>&#160; }</div>
<div class="line"><a name="l00112"></a><span class="lineno"> 112</span>&#160; <span class="keywordflow">case</span> <a class="code" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a4cd9f3996d60790cd11c04f842ebc43c">LayerType::Cast</a>:</div>
<div class="line"><a name="l00113"></a><span class="lineno"> 113</span>&#160; {</div>
<div class="line"><a name="l00114"></a><span class="lineno"> 114</span>&#160; <span class="keywordflow">if</span> (infos.size() != 2)</div>
<div class="line"><a name="l00115"></a><span class="lineno"> 115</span>&#160; {</div>
<div class="line"><a name="l00116"></a><span class="lineno"> 116</span>&#160; <span class="keywordflow">throw</span> InvalidArgumentException(<span class="stringliteral">&quot;Invalid number of cast TensorInfos. &quot;</span></div>
<div class="line"><a name="l00117"></a><span class="lineno"> 117</span>&#160; <span class="stringliteral">&quot;TensorInfos should be of format: {input, output}.&quot;</span>);</div>
<div class="line"><a name="l00118"></a><span class="lineno"> 118</span>&#160; }</div>
<div class="line"><a name="l00119"></a><span class="lineno"> 119</span>&#160; </div>
<div class="line"><a name="l00120"></a><span class="lineno"> 120</span>&#160; <a class="code" href="_gpu_fsa_layer_support_8cpp.html#aa24b6a243a6845480abc307561039d4a">FORWARD_LAYER_VALIDATE_FUNC</a>(<a class="code" href="namespacearmnn.html#a10d20c479436685e213150471a4a395b">GpuFsaCastValidate</a>,</div>
<div class="line"><a name="l00121"></a><span class="lineno"> 121</span>&#160; reasonIfUnsupported,</div>
<div class="line"><a name="l00122"></a><span class="lineno"> 122</span>&#160; infos[0],</div>
<div class="line"><a name="l00123"></a><span class="lineno"> 123</span>&#160; infos[1]);</div>
<div class="line"><a name="l00124"></a><span class="lineno"> 124</span>&#160; }</div>
<div class="line"><a name="l00125"></a><span class="lineno"> 125</span>&#160; <span class="keywordflow">case</span> <a class="code" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4adb033d2f81b68f9a17e8f62de69fed4a">LayerType::Convolution2d</a>:</div>
<div class="line"><a name="l00126"></a><span class="lineno"> 126</span>&#160; {</div>
<div class="line"><a name="l00127"></a><span class="lineno"> 127</span>&#160; <span class="keywordflow">if</span> (infos.size() != 4)</div>
<div class="line"><a name="l00128"></a><span class="lineno"> 128</span>&#160; {</div>
<div class="line"><a name="l00129"></a><span class="lineno"> 129</span>&#160; <span class="keywordflow">throw</span> InvalidArgumentException(<span class="stringliteral">&quot;Invalid number of Convolution2d TensorInfos. &quot;</span></div>
<div class="line"><a name="l00130"></a><span class="lineno"> 130</span>&#160; <span class="stringliteral">&quot;TensorInfos should be of format: {input, output, weights, biases}.&quot;</span>);</div>
<div class="line"><a name="l00131"></a><span class="lineno"> 131</span>&#160; }</div>
<div class="line"><a name="l00132"></a><span class="lineno"> 132</span>&#160; </div>
<div class="line"><a name="l00133"></a><span class="lineno"> 133</span>&#160; <span class="keyword">auto</span> desc = PolymorphicDowncast&lt;const Convolution2dDescriptor*&gt;(&amp;descriptor);</div>
<div class="line"><a name="l00134"></a><span class="lineno"> 134</span>&#160; <span class="keywordflow">if</span> (infos[3] == TensorInfo())</div>
<div class="line"><a name="l00135"></a><span class="lineno"> 135</span>&#160; {</div>
<div class="line"><a name="l00136"></a><span class="lineno"> 136</span>&#160; <a class="code" href="_gpu_fsa_layer_support_8cpp.html#aa24b6a243a6845480abc307561039d4a">FORWARD_LAYER_VALIDATE_FUNC</a>(<a class="code" href="namespacearmnn.html#a1c42ef784299460fb9db004dd395d5e1">GpuFsaConvolution2dValidate</a>,</div>
<div class="line"><a name="l00137"></a><span class="lineno"> 137</span>&#160; reasonIfUnsupported,</div>
<div class="line"><a name="l00138"></a><span class="lineno"> 138</span>&#160; infos[0],</div>
<div class="line"><a name="l00139"></a><span class="lineno"> 139</span>&#160; *desc,</div>
<div class="line"><a name="l00140"></a><span class="lineno"> 140</span>&#160; infos[2],</div>
<div class="line"><a name="l00141"></a><span class="lineno"> 141</span>&#160; EmptyOptional());</div>
<div class="line"><a name="l00142"></a><span class="lineno"> 142</span>&#160; }</div>
<div class="line"><a name="l00143"></a><span class="lineno"> 143</span>&#160; <span class="keywordflow">else</span></div>
<div class="line"><a name="l00144"></a><span class="lineno"> 144</span>&#160; {</div>
<div class="line"><a name="l00145"></a><span class="lineno"> 145</span>&#160; <a class="code" href="_gpu_fsa_layer_support_8cpp.html#aa24b6a243a6845480abc307561039d4a">FORWARD_LAYER_VALIDATE_FUNC</a>(<a class="code" href="namespacearmnn.html#a1c42ef784299460fb9db004dd395d5e1">GpuFsaConvolution2dValidate</a>,</div>
<div class="line"><a name="l00146"></a><span class="lineno"> 146</span>&#160; reasonIfUnsupported,</div>
<div class="line"><a name="l00147"></a><span class="lineno"> 147</span>&#160; infos[0],</div>
<div class="line"><a name="l00148"></a><span class="lineno"> 148</span>&#160; *desc,</div>
<div class="line"><a name="l00149"></a><span class="lineno"> 149</span>&#160; infos[2],</div>
<div class="line"><a name="l00150"></a><span class="lineno"> 150</span>&#160; infos[3]);</div>
<div class="line"><a name="l00151"></a><span class="lineno"> 151</span>&#160; }</div>
<div class="line"><a name="l00152"></a><span class="lineno"> 152</span>&#160; }</div>
<div class="line"><a name="l00153"></a><span class="lineno"> 153</span>&#160; <span class="keywordflow">case</span> <a class="code" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4af97adbfc88b7012a0243215b1076e7e7">LayerType::DepthwiseConvolution2d</a>:</div>
<div class="line"><a name="l00154"></a><span class="lineno"> 154</span>&#160; {</div>
<div class="line"><a name="l00155"></a><span class="lineno"> 155</span>&#160; <span class="keywordflow">if</span> (infos.size() != 4)</div>
<div class="line"><a name="l00156"></a><span class="lineno"> 156</span>&#160; {</div>
<div class="line"><a name="l00157"></a><span class="lineno"> 157</span>&#160; <span class="keywordflow">throw</span> InvalidArgumentException(<span class="stringliteral">&quot;Invalid number of DepthwiseConvolution2dDescriptor TensorInfos. &quot;</span></div>
<div class="line"><a name="l00158"></a><span class="lineno"> 158</span>&#160; <span class="stringliteral">&quot;TensorInfos should be of format: {input, output, weights, biases}.&quot;</span>);</div>
<div class="line"><a name="l00159"></a><span class="lineno"> 159</span>&#160; }</div>
<div class="line"><a name="l00160"></a><span class="lineno"> 160</span>&#160; </div>
<div class="line"><a name="l00161"></a><span class="lineno"> 161</span>&#160; <span class="keyword">auto</span> desc = PolymorphicDowncast&lt;const DepthwiseConvolution2dDescriptor*&gt;(&amp;descriptor);</div>
<div class="line"><a name="l00162"></a><span class="lineno"> 162</span>&#160; <span class="keywordflow">if</span> (infos[3] == TensorInfo())</div>
<div class="line"><a name="l00163"></a><span class="lineno"> 163</span>&#160; {</div>
<div class="line"><a name="l00164"></a><span class="lineno"> 164</span>&#160; <a class="code" href="_gpu_fsa_layer_support_8cpp.html#aa24b6a243a6845480abc307561039d4a">FORWARD_LAYER_VALIDATE_FUNC</a>(<a class="code" href="namespacearmnn.html#af2016aed3575b7302e79c72830c23025">GpuFsaDepthwiseConvolution2dValidate</a>,</div>
<div class="line"><a name="l00165"></a><span class="lineno"> 165</span>&#160; reasonIfUnsupported,</div>
<div class="line"><a name="l00166"></a><span class="lineno"> 166</span>&#160; infos[0],</div>
<div class="line"><a name="l00167"></a><span class="lineno"> 167</span>&#160; *desc,</div>
<div class="line"><a name="l00168"></a><span class="lineno"> 168</span>&#160; infos[2],</div>
<div class="line"><a name="l00169"></a><span class="lineno"> 169</span>&#160; EmptyOptional());</div>
<div class="line"><a name="l00170"></a><span class="lineno"> 170</span>&#160; }</div>
<div class="line"><a name="l00171"></a><span class="lineno"> 171</span>&#160; <span class="keywordflow">else</span></div>
<div class="line"><a name="l00172"></a><span class="lineno"> 172</span>&#160; {</div>
<div class="line"><a name="l00173"></a><span class="lineno"> 173</span>&#160; <a class="code" href="_gpu_fsa_layer_support_8cpp.html#aa24b6a243a6845480abc307561039d4a">FORWARD_LAYER_VALIDATE_FUNC</a>(<a class="code" href="namespacearmnn.html#af2016aed3575b7302e79c72830c23025">GpuFsaDepthwiseConvolution2dValidate</a>,</div>
<div class="line"><a name="l00174"></a><span class="lineno"> 174</span>&#160; reasonIfUnsupported,</div>
<div class="line"><a name="l00175"></a><span class="lineno"> 175</span>&#160; infos[0],</div>
<div class="line"><a name="l00176"></a><span class="lineno"> 176</span>&#160; *desc,</div>
<div class="line"><a name="l00177"></a><span class="lineno"> 177</span>&#160; infos[2],</div>
<div class="line"><a name="l00178"></a><span class="lineno"> 178</span>&#160; infos[3]);</div>
<div class="line"><a name="l00179"></a><span class="lineno"> 179</span>&#160; }</div>
<div class="line"><a name="l00180"></a><span class="lineno"> 180</span>&#160; }</div>
<div class="line"><a name="l00181"></a><span class="lineno"> 181</span>&#160; <span class="keywordflow">case</span> <a class="code" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a699bcffd93aff3022014b9efc9eaefd1">LayerType::ElementwiseBinary</a>:</div>
<div class="line"><a name="l00182"></a><span class="lineno"> 182</span>&#160; {</div>
<div class="line"><a name="l00183"></a><span class="lineno"> 183</span>&#160; <span class="keywordflow">if</span> (infos.size() != 3)</div>
<div class="line"><a name="l00184"></a><span class="lineno"> 184</span>&#160; {</div>
<div class="line"><a name="l00185"></a><span class="lineno"> 185</span>&#160; <span class="keywordflow">throw</span> InvalidArgumentException(<span class="stringliteral">&quot;Invalid number of ElementwiseBinary TensorInfos. &quot;</span></div>
<div class="line"><a name="l00186"></a><span class="lineno"> 186</span>&#160; <span class="stringliteral">&quot;TensorInfos should be of format: {input0, input1, output}.&quot;</span>);</div>
<div class="line"><a name="l00187"></a><span class="lineno"> 187</span>&#160; }</div>
<div class="line"><a name="l00188"></a><span class="lineno"> 188</span>&#160; </div>
<div class="line"><a name="l00189"></a><span class="lineno"> 189</span>&#160; <span class="keyword">auto</span> desc = PolymorphicDowncast&lt;const ElementwiseBinaryDescriptor*&gt;(&amp;descriptor);</div>
<div class="line"><a name="l00190"></a><span class="lineno"> 190</span>&#160; <a class="code" href="_gpu_fsa_layer_support_8cpp.html#aa24b6a243a6845480abc307561039d4a">FORWARD_LAYER_VALIDATE_FUNC</a>(<a class="code" href="namespacearmnn.html#abd83d555d05a33057f9e80475522a631">GpuFsaElementwiseBinaryValidate</a>,</div>
<div class="line"><a name="l00191"></a><span class="lineno"> 191</span>&#160; reasonIfUnsupported,</div>
<div class="line"><a name="l00192"></a><span class="lineno"> 192</span>&#160; infos[0],</div>
<div class="line"><a name="l00193"></a><span class="lineno"> 193</span>&#160; infos[1],</div>
<div class="line"><a name="l00194"></a><span class="lineno"> 194</span>&#160; *desc);</div>
<div class="line"><a name="l00195"></a><span class="lineno"> 195</span>&#160; }</div>
<div class="line"><a name="l00196"></a><span class="lineno"> 196</span>&#160; <span class="keywordflow">case</span> <a class="code" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4ad662867a41bfb30b9f75dda2b5849001">LayerType::Pooling2d</a>:</div>
<div class="line"><a name="l00197"></a><span class="lineno"> 197</span>&#160; {</div>
<div class="line"><a name="l00198"></a><span class="lineno"> 198</span>&#160; <span class="keywordflow">if</span> (infos.size() != 2)</div>
<div class="line"><a name="l00199"></a><span class="lineno"> 199</span>&#160; {</div>
<div class="line"><a name="l00200"></a><span class="lineno"> 200</span>&#160; <span class="keywordflow">throw</span> InvalidArgumentException(<span class="stringliteral">&quot;Invalid number of Pooling2d TensorInfos. &quot;</span></div>
<div class="line"><a name="l00201"></a><span class="lineno"> 201</span>&#160; <span class="stringliteral">&quot;TensorInfos should be of format: {input, output}.&quot;</span>);</div>
<div class="line"><a name="l00202"></a><span class="lineno"> 202</span>&#160; }</div>
<div class="line"><a name="l00203"></a><span class="lineno"> 203</span>&#160; </div>
<div class="line"><a name="l00204"></a><span class="lineno"> 204</span>&#160; <span class="keyword">auto</span> desc = PolymorphicDowncast&lt;const Pooling2dDescriptor*&gt;(&amp;descriptor);</div>
<div class="line"><a name="l00205"></a><span class="lineno"> 205</span>&#160; <a class="code" href="_gpu_fsa_layer_support_8cpp.html#aa24b6a243a6845480abc307561039d4a">FORWARD_LAYER_VALIDATE_FUNC</a>(<a class="code" href="namespacearmnn.html#a7aff81ee45420f659fc30fa239ccc7c8">GpuFsaPooling2dValidate</a>,</div>
<div class="line"><a name="l00206"></a><span class="lineno"> 206</span>&#160; reasonIfUnsupported,</div>
<div class="line"><a name="l00207"></a><span class="lineno"> 207</span>&#160; infos[0],</div>
<div class="line"><a name="l00208"></a><span class="lineno"> 208</span>&#160; *desc);</div>
<div class="line"><a name="l00209"></a><span class="lineno"> 209</span>&#160; }</div>
<div class="line"><a name="l00210"></a><span class="lineno"> 210</span>&#160; <span class="keywordflow">case</span> <a class="code" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4aa7c59ccedc6a3bd90c17f3b990afefad">LayerType::Reshape</a>:</div>
<div class="line"><a name="l00211"></a><span class="lineno"> 211</span>&#160; {</div>
<div class="line"><a name="l00212"></a><span class="lineno"> 212</span>&#160; <span class="keywordflow">if</span> (infos.size() != 2)</div>
<div class="line"><a name="l00213"></a><span class="lineno"> 213</span>&#160; {</div>
<div class="line"><a name="l00214"></a><span class="lineno"> 214</span>&#160; <span class="keywordflow">throw</span> InvalidArgumentException(<span class="stringliteral">&quot;Invalid number of Reshape TensorInfos. &quot;</span></div>
<div class="line"><a name="l00215"></a><span class="lineno"> 215</span>&#160; <span class="stringliteral">&quot;TensorInfos should be of format: { input, output }.&quot;</span>);</div>
<div class="line"><a name="l00216"></a><span class="lineno"> 216</span>&#160; }</div>
<div class="line"><a name="l00217"></a><span class="lineno"> 217</span>&#160; </div>
<div class="line"><a name="l00218"></a><span class="lineno"> 218</span>&#160; <span class="keyword">auto</span> desc = PolymorphicDowncast&lt;const ReshapeDescriptor*&gt;(&amp;descriptor);</div>
<div class="line"><a name="l00219"></a><span class="lineno"> 219</span>&#160; </div>
<div class="line"><a name="l00220"></a><span class="lineno"> 220</span>&#160; <a class="code" href="_gpu_fsa_layer_support_8cpp.html#aa24b6a243a6845480abc307561039d4a">FORWARD_LAYER_VALIDATE_FUNC</a>(<a class="code" href="namespacearmnn.html#a75837bce7e31ee2315b702d16833c0ff">GpuFsaReshapeValidate</a>,</div>
<div class="line"><a name="l00221"></a><span class="lineno"> 221</span>&#160; reasonIfUnsupported,</div>
<div class="line"><a name="l00222"></a><span class="lineno"> 222</span>&#160; infos[0],</div>
<div class="line"><a name="l00223"></a><span class="lineno"> 223</span>&#160; *desc);</div>
<div class="line"><a name="l00224"></a><span class="lineno"> 224</span>&#160; }</div>
<div class="line"><a name="l00225"></a><span class="lineno"> 225</span>&#160; <span class="keywordflow">case</span> <a class="code" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a9d723d04c40bfd81835c0766a698cf63">LayerType::Resize</a>:</div>
<div class="line"><a name="l00226"></a><span class="lineno"> 226</span>&#160; {</div>
<div class="line"><a name="l00227"></a><span class="lineno"> 227</span>&#160; <span class="keywordflow">if</span> (infos.size() != 2)</div>
<div class="line"><a name="l00228"></a><span class="lineno"> 228</span>&#160; {</div>
<div class="line"><a name="l00229"></a><span class="lineno"> 229</span>&#160; <span class="keywordflow">throw</span> InvalidArgumentException(<span class="stringliteral">&quot;Invalid number of Resize TensorInfos. &quot;</span></div>
<div class="line"><a name="l00230"></a><span class="lineno"> 230</span>&#160; <span class="stringliteral">&quot;TensorInfos should be of format: {input, output}.&quot;</span>);</div>
<div class="line"><a name="l00231"></a><span class="lineno"> 231</span>&#160; }</div>
<div class="line"><a name="l00232"></a><span class="lineno"> 232</span>&#160; </div>
<div class="line"><a name="l00233"></a><span class="lineno"> 233</span>&#160; <span class="keyword">auto</span> desc = PolymorphicDowncast&lt;const ResizeDescriptor*&gt;(&amp;descriptor);</div>
<div class="line"><a name="l00234"></a><span class="lineno"> 234</span>&#160; <a class="code" href="_gpu_fsa_layer_support_8cpp.html#aa24b6a243a6845480abc307561039d4a">FORWARD_LAYER_VALIDATE_FUNC</a>(<a class="code" href="namespacearmnn.html#a7a26e6ad4d26aa9c32cfbd1d0d9110be">GpuFsaResizeValidate</a>,</div>
<div class="line"><a name="l00235"></a><span class="lineno"> 235</span>&#160; reasonIfUnsupported,</div>
<div class="line"><a name="l00236"></a><span class="lineno"> 236</span>&#160; infos[0],</div>
<div class="line"><a name="l00237"></a><span class="lineno"> 237</span>&#160; *desc);</div>
<div class="line"><a name="l00238"></a><span class="lineno"> 238</span>&#160; }</div>
<div class="line"><a name="l00239"></a><span class="lineno"> 239</span>&#160; <span class="keywordflow">case</span> <a class="code" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a31d953b9d49a6b4378f45097047976d0">LayerType::Softmax</a>:</div>
<div class="line"><a name="l00240"></a><span class="lineno"> 240</span>&#160; {</div>
<div class="line"><a name="l00241"></a><span class="lineno"> 241</span>&#160; <span class="keywordflow">if</span> (infos.size() != 2)</div>
<div class="line"><a name="l00242"></a><span class="lineno"> 242</span>&#160; {</div>
<div class="line"><a name="l00243"></a><span class="lineno"> 243</span>&#160; <span class="keywordflow">throw</span> InvalidArgumentException(<span class="stringliteral">&quot;Invalid number of Softmax TensorInfos. &quot;</span></div>
<div class="line"><a name="l00244"></a><span class="lineno"> 244</span>&#160; <span class="stringliteral">&quot;TensorInfos should be of format: {input, output}.&quot;</span>);</div>
<div class="line"><a name="l00245"></a><span class="lineno"> 245</span>&#160; }</div>
<div class="line"><a name="l00246"></a><span class="lineno"> 246</span>&#160; </div>
<div class="line"><a name="l00247"></a><span class="lineno"> 247</span>&#160; <span class="keyword">auto</span> desc = PolymorphicDowncast&lt;const SoftmaxDescriptor*&gt;(&amp;descriptor);</div>
<div class="line"><a name="l00248"></a><span class="lineno"> 248</span>&#160; <a class="code" href="_gpu_fsa_layer_support_8cpp.html#aa24b6a243a6845480abc307561039d4a">FORWARD_LAYER_VALIDATE_FUNC</a>(<a class="code" href="namespacearmnn.html#ad0672c046428b5b75115d2c91d0162eb">GpuFsaSoftmaxValidate</a>,</div>
<div class="line"><a name="l00249"></a><span class="lineno"> 249</span>&#160; reasonIfUnsupported,</div>
<div class="line"><a name="l00250"></a><span class="lineno"> 250</span>&#160; infos[0],</div>
<div class="line"><a name="l00251"></a><span class="lineno"> 251</span>&#160; infos[1],</div>
<div class="line"><a name="l00252"></a><span class="lineno"> 252</span>&#160; *desc);</div>
<div class="line"><a name="l00253"></a><span class="lineno"> 253</span>&#160; }</div>
<div class="line"><a name="l00254"></a><span class="lineno"> 254</span>&#160; <span class="keywordflow">case</span> <a class="code" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4acb17869fe51048b5a5c4c6106551a255">LayerType::Constant</a>:</div>
<div class="line"><a name="l00255"></a><span class="lineno"> 255</span>&#160; <span class="keywordflow">case</span> <a class="code" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a324118a6721dd6b8a9b9f4e327df2bf5">LayerType::Input</a>:</div>
<div class="line"><a name="l00256"></a><span class="lineno"> 256</span>&#160; <span class="keywordflow">case</span> <a class="code" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a29c2c02a361c9d7028472e5d92cd4a54">LayerType::Output</a>:</div>
<div class="line"><a name="l00257"></a><span class="lineno"> 257</span>&#160; <span class="keywordflow">return</span> <a class="code" href="namespacearmnn.html#a78c4767bc9e09daae610861fd00007db">IsGpuFsaBackendSupported</a>(reasonIfUnsupported, infos[0]);</div>
<div class="line"><a name="l00258"></a><span class="lineno"> 258</span>&#160; <span class="keywordflow">default</span>:</div>
<div class="line"><a name="l00259"></a><span class="lineno"> 259</span>&#160; <span class="comment">// Layers not supported in the GpuFsa backend.</span></div>
<div class="line"><a name="l00260"></a><span class="lineno"> 260</span>&#160; <span class="keywordflow">return</span> <span class="keyword">false</span>;</div>
<div class="line"><a name="l00261"></a><span class="lineno"> 261</span>&#160; }</div>
<div class="line"><a name="l00262"></a><span class="lineno"> 262</span>&#160;}</div>
</div><!-- fragment -->
<p class="reference">References <a class="el" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4aa9a62e70841c4d06dd16306a85700d36">armnn::Activation</a>, <a class="el" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a9882ff3cfed27d6161c20a305e7a3484">armnn::BatchMatMul</a>, <a class="el" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a4cd9f3996d60790cd11c04f842ebc43c">armnn::Cast</a>, <a class="el" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4acb17869fe51048b5a5c4c6106551a255">armnn::Constant</a>, <a class="el" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4adb033d2f81b68f9a17e8f62de69fed4a">armnn::Convolution2d</a>, <a class="el" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4af97adbfc88b7012a0243215b1076e7e7">armnn::DepthwiseConvolution2d</a>, <a class="el" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a699bcffd93aff3022014b9efc9eaefd1">armnn::ElementwiseBinary</a>, <a class="el" href="_gpu_fsa_layer_support_8cpp_source.html#l00067">FORWARD_LAYER_VALIDATE_FUNC</a>, <a class="el" href="_gpu_fsa_activation_8cpp_source.html#l00022">armnn::GpuFsaActivationValidate()</a>, <a class="el" href="_gpu_fsa_batch_mat_mul_8cpp_source.html#l00022">armnn::GpuFsaBatchMatMulValidate()</a>, <a class="el" href="_gpu_fsa_cast_8cpp_source.html#l00033">armnn::GpuFsaCastValidate()</a>, <a class="el" href="_gpu_fsa_convolution2d_8cpp_source.html#l00024">armnn::GpuFsaConvolution2dValidate()</a>, <a class="el" href="_gpu_fsa_depthwise_convolution2d_8cpp_source.html#l00026">armnn::GpuFsaDepthwiseConvolution2dValidate()</a>, <a class="el" href="_gpu_fsa_elementwise_binary_8cpp_source.html#l00024">armnn::GpuFsaElementwiseBinaryValidate()</a>, <a class="el" href="_gpu_fsa_pooling2d_8cpp_source.html#l00022">armnn::GpuFsaPooling2dValidate()</a>, <a class="el" href="_gpu_fsa_reshape_8cpp_source.html#l00022">armnn::GpuFsaReshapeValidate()</a>, <a class="el" href="_gpu_fsa_resize_8cpp_source.html#l00022">armnn::GpuFsaResizeValidate()</a>, <a class="el" href="_gpu_fsa_softmax_8cpp_source.html#l00022">armnn::GpuFsaSoftmaxValidate()</a>, <a class="el" href="_ignore_unused_8hpp_source.html#l00014">armnn::IgnoreUnused()</a>, <a class="el" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a324118a6721dd6b8a9b9f4e327df2bf5">armnn::Input</a>, <a class="el" href="_gpu_fsa_layer_support_8cpp_source.html#l00031">armnn::IsGpuFsaBackendSupported()</a>, <a class="el" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a29c2c02a361c9d7028472e5d92cd4a54">armnn::Output</a>, <a class="el" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4ad662867a41bfb30b9f75dda2b5849001">armnn::Pooling2d</a>, <a class="el" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4aa7c59ccedc6a3bd90c17f3b990afefad">armnn::Reshape</a>, <a class="el" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a9d723d04c40bfd81835c0766a698cf63">armnn::Resize</a>, and <a class="el" href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a31d953b9d49a6b4378f45097047976d0">armnn::Softmax</a>.</p>
</div>
</div>
<hr/>The documentation for this class was generated from the following files:<ul>
<li>src/backends/gpuFsa/<a class="el" href="_gpu_fsa_layer_support_8hpp_source.html">GpuFsaLayerSupport.hpp</a></li>
<li>src/backends/gpuFsa/<a class="el" href="_gpu_fsa_layer_support_8cpp_source.html">GpuFsaLayerSupport.cpp</a></li>
</ul>
</div><!-- contents -->
</div><!-- doc-content -->
<div class="ttc" id="anamespacearmnn_html_a10d20c479436685e213150471a4a395b"><div class="ttname"><a href="namespacearmnn.html#a10d20c479436685e213150471a4a395b">armnn::GpuFsaCastValidate</a></div><div class="ttdeci">arm_compute::Status GpuFsaCastValidate(const TensorInfo &amp;input, const TensorInfo &amp;output)</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_cast_8cpp_source.html#l00033">GpuFsaCast.cpp:33</a></div></div>
<div class="ttc" id="a_gpu_fsa_layer_support_8cpp_html_aa24b6a243a6845480abc307561039d4a"><div class="ttname"><a href="_gpu_fsa_layer_support_8cpp.html#aa24b6a243a6845480abc307561039d4a">FORWARD_LAYER_VALIDATE_FUNC</a></div><div class="ttdeci">#define FORWARD_LAYER_VALIDATE_FUNC(func, reasonIfUnsupported,...)</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_layer_support_8cpp_source.html#l00067">GpuFsaLayerSupport.cpp:67</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a7aff81ee45420f659fc30fa239ccc7c8"><div class="ttname"><a href="namespacearmnn.html#a7aff81ee45420f659fc30fa239ccc7c8">armnn::GpuFsaPooling2dValidate</a></div><div class="ttdeci">arm_compute::Status GpuFsaPooling2dValidate(const TensorInfo &amp;input, const Pooling2dDescriptor &amp;descriptor)</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_pooling2d_8cpp_source.html#l00022">GpuFsaPooling2d.cpp:22</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a78c4767bc9e09daae610861fd00007db"><div class="ttname"><a href="namespacearmnn.html#a78c4767bc9e09daae610861fd00007db">armnn::IsGpuFsaBackendSupported</a></div><div class="ttdeci">bool IsGpuFsaBackendSupported(Optional&lt; std::string &amp; &gt; reasonIfUnsupported, Args... args)</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_layer_support_8cpp_source.html#l00031">GpuFsaLayerSupport.cpp:31</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a56943a0946e5f15e5e58054b8e7a04a4a699bcffd93aff3022014b9efc9eaefd1"><div class="ttname"><a href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a699bcffd93aff3022014b9efc9eaefd1">armnn::LayerType::ElementwiseBinary</a></div><div class="ttdeci">@ ElementwiseBinary</div></div>
<div class="ttc" id="anamespacearmnn_html_a7a26e6ad4d26aa9c32cfbd1d0d9110be"><div class="ttname"><a href="namespacearmnn.html#a7a26e6ad4d26aa9c32cfbd1d0d9110be">armnn::GpuFsaResizeValidate</a></div><div class="ttdeci">arm_compute::Status GpuFsaResizeValidate(const TensorInfo &amp;input, const ResizeDescriptor &amp;descriptor)</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_resize_8cpp_source.html#l00022">GpuFsaResize.cpp:22</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a56943a0946e5f15e5e58054b8e7a04a4a31d953b9d49a6b4378f45097047976d0"><div class="ttname"><a href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a31d953b9d49a6b4378f45097047976d0">armnn::LayerType::Softmax</a></div><div class="ttdeci">@ Softmax</div></div>
<div class="ttc" id="anamespacearmnn_html_a75837bce7e31ee2315b702d16833c0ff"><div class="ttname"><a href="namespacearmnn.html#a75837bce7e31ee2315b702d16833c0ff">armnn::GpuFsaReshapeValidate</a></div><div class="ttdeci">arm_compute::Status GpuFsaReshapeValidate(const TensorInfo &amp;input, const ReshapeDescriptor &amp;descriptor)</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_reshape_8cpp_source.html#l00022">GpuFsaReshape.cpp:22</a></div></div>
<div class="ttc" id="anamespacearmnn_html_aa8d17ff94651485b0aa4b58e1d4ef5b5"><div class="ttname"><a href="namespacearmnn.html#aa8d17ff94651485b0aa4b58e1d4ef5b5">armnn::GpuFsaActivationValidate</a></div><div class="ttdeci">arm_compute::Status GpuFsaActivationValidate(const TensorInfo &amp;input, const ActivationDescriptor &amp;descriptor)</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_activation_8cpp_source.html#l00022">GpuFsaActivation.cpp:22</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a56943a0946e5f15e5e58054b8e7a04a4ad662867a41bfb30b9f75dda2b5849001"><div class="ttname"><a href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4ad662867a41bfb30b9f75dda2b5849001">armnn::LayerType::Pooling2d</a></div><div class="ttdeci">@ Pooling2d</div></div>
<div class="ttc" id="anamespacearmnn_html_a56943a0946e5f15e5e58054b8e7a04a4a9882ff3cfed27d6161c20a305e7a3484"><div class="ttname"><a href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a9882ff3cfed27d6161c20a305e7a3484">armnn::LayerType::BatchMatMul</a></div><div class="ttdeci">@ BatchMatMul</div></div>
<div class="ttc" id="anamespacearmnn_html_a56943a0946e5f15e5e58054b8e7a04a4af97adbfc88b7012a0243215b1076e7e7"><div class="ttname"><a href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4af97adbfc88b7012a0243215b1076e7e7">armnn::LayerType::DepthwiseConvolution2d</a></div><div class="ttdeci">@ DepthwiseConvolution2d</div></div>
<div class="ttc" id="anamespacearmnn_html_a56943a0946e5f15e5e58054b8e7a04a4a4cd9f3996d60790cd11c04f842ebc43c"><div class="ttname"><a href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a4cd9f3996d60790cd11c04f842ebc43c">armnn::LayerType::Cast</a></div><div class="ttdeci">@ Cast</div></div>
<div class="ttc" id="anamespacearmnn_html_a1c42ef784299460fb9db004dd395d5e1"><div class="ttname"><a href="namespacearmnn.html#a1c42ef784299460fb9db004dd395d5e1">armnn::GpuFsaConvolution2dValidate</a></div><div class="ttdeci">arm_compute::Status GpuFsaConvolution2dValidate(const TensorInfo &amp;input, const Convolution2dDescriptor &amp;descriptor, const TensorInfo &amp;weights, const Optional&lt; TensorInfo &gt; &amp;biases)</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_convolution2d_8cpp_source.html#l00024">GpuFsaConvolution2d.cpp:24</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a56943a0946e5f15e5e58054b8e7a04a4aa7c59ccedc6a3bd90c17f3b990afefad"><div class="ttname"><a href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4aa7c59ccedc6a3bd90c17f3b990afefad">armnn::LayerType::Reshape</a></div><div class="ttdeci">@ Reshape</div></div>
<div class="ttc" id="anamespacearmnn_html_a44affeeb090c3c6a3062830562672e84"><div class="ttname"><a href="namespacearmnn.html#a44affeeb090c3c6a3062830562672e84">armnn::IgnoreUnused</a></div><div class="ttdeci">void IgnoreUnused(Ts &amp;&amp;...)</div><div class="ttdef"><b>Definition:</b> <a href="_ignore_unused_8hpp_source.html#l00014">IgnoreUnused.hpp:14</a></div></div>
<div class="ttc" id="anamespacearmnn_html_abd83d555d05a33057f9e80475522a631"><div class="ttname"><a href="namespacearmnn.html#abd83d555d05a33057f9e80475522a631">armnn::GpuFsaElementwiseBinaryValidate</a></div><div class="ttdeci">arm_compute::Status GpuFsaElementwiseBinaryValidate(const TensorInfo &amp;input0, const TensorInfo &amp;input1, const ElementwiseBinaryDescriptor &amp;descriptor)</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_elementwise_binary_8cpp_source.html#l00024">GpuFsaElementwiseBinary.cpp:24</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a56943a0946e5f15e5e58054b8e7a04a4a324118a6721dd6b8a9b9f4e327df2bf5"><div class="ttname"><a href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a324118a6721dd6b8a9b9f4e327df2bf5">armnn::LayerType::Input</a></div><div class="ttdeci">@ Input</div></div>
<div class="ttc" id="anamespacearmnn_html_a56943a0946e5f15e5e58054b8e7a04a4a9d723d04c40bfd81835c0766a698cf63"><div class="ttname"><a href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a9d723d04c40bfd81835c0766a698cf63">armnn::LayerType::Resize</a></div><div class="ttdeci">@ Resize</div></div>
<div class="ttc" id="anamespacearmnn_html_a56943a0946e5f15e5e58054b8e7a04a4adb033d2f81b68f9a17e8f62de69fed4a"><div class="ttname"><a href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4adb033d2f81b68f9a17e8f62de69fed4a">armnn::LayerType::Convolution2d</a></div><div class="ttdeci">@ Convolution2d</div></div>
<div class="ttc" id="anamespacearmnn_html_af2016aed3575b7302e79c72830c23025"><div class="ttname"><a href="namespacearmnn.html#af2016aed3575b7302e79c72830c23025">armnn::GpuFsaDepthwiseConvolution2dValidate</a></div><div class="ttdeci">arm_compute::Status GpuFsaDepthwiseConvolution2dValidate(const TensorInfo &amp;input, const DepthwiseConvolution2dDescriptor &amp;descriptor, const TensorInfo &amp;weights, const Optional&lt; TensorInfo &gt; &amp;biases)</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_depthwise_convolution2d_8cpp_source.html#l00026">GpuFsaDepthwiseConvolution2d.cpp:26</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a56943a0946e5f15e5e58054b8e7a04a4aa9a62e70841c4d06dd16306a85700d36"><div class="ttname"><a href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4aa9a62e70841c4d06dd16306a85700d36">armnn::LayerType::Activation</a></div><div class="ttdeci">@ Activation</div></div>
<div class="ttc" id="anamespacearmnn_html_ad0672c046428b5b75115d2c91d0162eb"><div class="ttname"><a href="namespacearmnn.html#ad0672c046428b5b75115d2c91d0162eb">armnn::GpuFsaSoftmaxValidate</a></div><div class="ttdeci">arm_compute::Status GpuFsaSoftmaxValidate(const TensorInfo &amp;input, const TensorInfo &amp;output, const SoftmaxDescriptor &amp;descriptor)</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_softmax_8cpp_source.html#l00022">GpuFsaSoftmax.cpp:22</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a47e56eb9bcbb170dddd202d770ed2cd8"><div class="ttname"><a href="namespacearmnn.html#a47e56eb9bcbb170dddd202d770ed2cd8">armnn::GpuFsaBatchMatMulValidate</a></div><div class="ttdeci">arm_compute::Status GpuFsaBatchMatMulValidate(const TensorInfo &amp;input0, const TensorInfo &amp;input1, const BatchMatMulDescriptor &amp;descriptor)</div><div class="ttdef"><b>Definition:</b> <a href="_gpu_fsa_batch_mat_mul_8cpp_source.html#l00022">GpuFsaBatchMatMul.cpp:22</a></div></div>
<div class="ttc" id="anamespacearmnn_html_a56943a0946e5f15e5e58054b8e7a04a4a29c2c02a361c9d7028472e5d92cd4a54"><div class="ttname"><a href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4a29c2c02a361c9d7028472e5d92cd4a54">armnn::LayerType::Output</a></div><div class="ttdeci">@ Output</div></div>
<div class="ttc" id="anamespacearmnn_html_a56943a0946e5f15e5e58054b8e7a04a4acb17869fe51048b5a5c4c6106551a255"><div class="ttname"><a href="namespacearmnn.html#a56943a0946e5f15e5e58054b8e7a04a4acb17869fe51048b5a5c4c6106551a255">armnn::LayerType::Constant</a></div><div class="ttdeci">@ Constant</div></div>
<!-- start footer part -->
<div id="nav-path" class="navpath"><!-- id is needed for treeview function! -->
<ul>
<li class="navelem"><a class="el" href="namespacearmnn.html">armnn</a></li><li class="navelem"><a class="el" href="classarmnn_1_1_gpu_fsa_layer_support.html">GpuFsaLayerSupport</a></li>
<li class="footer">Generated on Thu May 16 2024 09:31:56 for Arm NN by
<a href="http://www.doxygen.org/index.html">
<img class="footer" src="doxygen.png" alt="doxygen"/></a> 1.8.17 </li>
</ul>
</div>
</body>
</html>