<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE tosa SYSTEM "tosa.dtd">
<tosa>
<version major="1" minor="1" patch="0" draft="true"/>
<profiles>
<profile profile="Base Inference" name="BI" status="Complete" description="Integer operations, primarily 8 and 32-bit values"/>
<profile profile="Main Inference" name="MI" status="Complete" description="FP16 and FP32 operations"/>
</profiles>
<profile_extensions>
<profile_extension name="EXT-INT16" description="16-bit integer operations" status="Complete">
<profile_supported>BI</profile_supported>
</profile_extension>
<profile_extension name="EXT-INT4" description="4-bit integer weights" status="Complete">
<profile_supported>BI</profile_supported>
</profile_extension>
<profile_extension name="EXT-BF16" description="BFloat16 operations" status="Complete">
<profile_supported>MI</profile_supported>
</profile_extension>
<profile_extension name="EXT-FP8E4M3" description="8-bit floating-point operations E4M3" status="Unstable">
<profile_supported>MI</profile_supported>
</profile_extension>
<profile_extension name="EXT-FP8E5M2" description="8-bit floating-point operations E5M2" status="Unstable">
<profile_supported>MI</profile_supported>
</profile_extension>
<profile_extension name="EXT-FFT" description="Fast Fourier Transform operations" status="Complete">
<profile_supported>MI</profile_supported>
</profile_extension>
<profile_extension name="EXT-VARIABLE" description="Stateful variable operations" status="Unstable">
<profile_supported>BI</profile_supported>
<profile_supported>MI</profile_supported>
</profile_extension>
</profile_extensions>
<levels>
<level name="none" max_rank="32" max_kernel="2147483647" max_stride="2147483647" max_scale="2048" max_log2_size="63" max_nesting="256" max_tensor_list_size="256">No level</level>
<level name="8K" max_rank="6" max_kernel="8192" max_stride="8192" max_scale="256" max_log2_size="31" max_nesting="6" max_tensor_list_size="64">Level 8K</level>
</levels>
<operators>
<operatorgroup name="tensor">
<operator>
<name>ARGMAX</name>
<arguments>
<argument category="input" name="input" type="tensor_t" shape="shape1" tensor-element-type="in_t">
<description>Input tensor</description>
<levellimit value="rank(shape1)" limit="MAX_RANK"/>
<rank min="1" max="MAX_RANK"/>
</argument>
<argument category="attribute" name="axis" type="tensor_t" shape="-" tensor-element-type="i32_t">
<description>Axis in range from 0 to rank(shape1) - 1</description>
<rank min="0" max="0"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="out_t">
<description>Output tensor, with rank = rank(shape1) - 1</description>
<rank min="0" max="MAX_RANK - 1"/>
</argument>
</arguments>
<types>
<type name='in_t' />
<type name='out_t' />
</types>
<typesupport mode="signed 8" in_t="i8_t" out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16" in_t="i16_t" out_t="i32_t" version_added="1.0">
<op_profile name="EXT-INT16"/>
</typesupport>
<typesupport mode="fp8e4m3" in_t="fp8e4m3_t" out_t="i32_t" version_added="1.0">
<op_profile name="EXT-FP8E4M3"/>
</typesupport>
<typesupport mode="fp8e5m2" in_t="fp8e5m2_t" out_t="i32_t" version_added="1.0">
<op_profile name="EXT-FP8E5M2"/>
</typesupport>
<typesupport mode="fp16" in_t="fp16_t" out_t="i32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_t="bf16_t" out_t="i32_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_t="fp32_t" out_t="i32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>AVG_POOL2D</name>
<arguments>
<argument category="input" name="input" type="tensor_t" shape="[N,IH,IW,C]" tensor-element-type="in_out_t">
<description>Input tensor</description>
<rank min="4" max="4"/>
</argument>
<argument category="attribute" name="kernel" type="tensor_t" shape="[2]" tensor-element-type="i32_t">
<description>[kernel_y, kernel_x]</description>
<levellimit value="kernel_y" limit="MAX_KERNEL"/>
<levellimit value="kernel_x" limit="MAX_KERNEL"/>
<rank min="1" max="1"/>
</argument>
<argument category="attribute" name="stride" type="tensor_t" shape="[2]" tensor-element-type="i32_t">
<description>[stride_y, stride_x]</description>
<levellimit value="stride_y" limit="MAX_STRIDE"/>
<levellimit value="stride_x" limit="MAX_STRIDE"/>
<rank min="1" max="1"/>
</argument>
<argument category="attribute" name="pad" type="tensor_t" shape="[4]" tensor-element-type="i32_t">
<description>[pad_top, pad_bottom, pad_left, pad_right]</description>
<levellimit value="pad_top" limit="MAX_KERNEL"/>
<levellimit value="pad_bottom" limit="MAX_KERNEL"/>
<levellimit value="pad_left" limit="MAX_KERNEL"/>
<levellimit value="pad_right" limit="MAX_KERNEL"/>
<rank min="1" max="1"/>
</argument>
<argument category="attribute" name="acc_type" type="tensor_t" shape="-" tensor-element-type="acc_type_t">
<description>Enumerated type, must be one of INT32, FP16, FP32, as defined in the Supported Data Types table for this operation</description>
<rank min="0" max="0"/>
</argument>
<argument category="attribute" name="input_zp" type="tensor_t" shape="-" tensor-element-type="in_out_t">
<description>Input tensor zero point. Must be zero for non-int8 types.</description>
<rank min="0" max="0"/>
</argument>
<argument category="attribute" name="output_zp" type="tensor_t" shape="-" tensor-element-type="in_out_t">
<description>Output tensor zero point. Must be zero for non-int8 types.</description>
<rank min="0" max="0"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="[N,OH,OW,C]" tensor-element-type="in_out_t">
<description>Output tensor 4D</description>
<rank min="4" max="4"/>
</argument>
</arguments>
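<!-- Illustrative note (not part of the normative definition): assuming the usual TOSA pooling arithmetic,
     the output spatial dimensions are expected to satisfy
       OH = (IH + pad_top + pad_bottom - kernel_y) / stride_y + 1
       OW = (IW + pad_left + pad_right - kernel_x) / stride_x + 1
     with both divisions required to be exact. -->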
<types>
<type name='in_out_t' />
<type name='acc_t' />
</types>
<typesupport mode="signed 8 with int32 accumulate" in_out_t="i8_t" acc_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16 with int32 accumulate" in_out_t="i16_t" acc_t="i32_t" version_added="1.0">
<op_profile name="EXT-INT16"/>
</typesupport>
<typesupport mode="fp8e4m3 with fp16 accumulate" in_out_t="fp8e4m3_t" acc_t="fp16_t" version_added="1.0">
<op_profile name="EXT-FP8E4M3"/>
</typesupport>
<typesupport mode="fp8e5m2 with fp16 accumulate" in_out_t="fp8e5m2_t" acc_t="fp16_t" version_added="1.0">
<op_profile name="EXT-FP8E5M2"/>
</typesupport>
<typesupport mode="fp16 with fp16 accumulate" in_out_t="fp16_t" acc_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="fp16 with fp32 accumulate" in_out_t="fp16_t" acc_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16 with fp32 accumulate" in_out_t="bf16_t" acc_t="fp32_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32 with fp32 accumulate" in_out_t="fp32_t" acc_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>CONV2D</name>
<arguments>
<argument category="input" name="input" type="tensor_t" shape="[N,IH,IW,IC]" tensor-element-type="in_t">
<description>Input tensor</description>
<rank min="4" max="4"/>
</argument>
<argument category="input" name="weight" type="tensor_t" shape="[OC,KH,KW,IC]" tensor-element-type="weight_t">
<description>Weight kernel size KH x KW</description>
<levellimit value="dilation_y * KH" limit="MAX_KERNEL"/>
<levellimit value="dilation_x * KW" limit="MAX_KERNEL"/>
<rank min="4" max="4"/>
</argument>
<argument category="input" name="bias" type="tensor_t" shape="[BC]" tensor-element-type="out_t">
<description>Per output channel bias data. +
Bias data will be broadcast if BC == 1.</description>
<rank min="1" max="1"/>
</argument>
<argument category="attribute" name="pad" type="tensor_t" shape="[4]" tensor-element-type="i32_t">
<description>[pad_top, pad_bottom, pad_left, pad_right]</description>
<levellimit value="pad_top" limit="MAX_KERNEL"/>
<levellimit value="pad_bottom" limit="MAX_KERNEL"/>
<levellimit value="pad_left" limit="MAX_KERNEL"/>
<levellimit value="pad_right" limit="MAX_KERNEL"/>
<rank min="1" max="1"/>
</argument>
<argument category="attribute" name="stride" type="tensor_t" shape="[2]" tensor-element-type="i32_t">
<description>[stride_y, stride_x]</description>
<levellimit value="stride_y" limit="MAX_STRIDE"/>
<levellimit value="stride_x" limit="MAX_STRIDE"/>
<rank min="1" max="1"/>
</argument>
<argument category="attribute" name="dilation" type="tensor_t" shape="[2]" tensor-element-type="i32_t">
<description>[dilation_y, dilation_x]</description>
<rank min="1" max="1"/>
</argument>
<argument category="attribute" name="acc_type" type="tensor_t" shape="-" tensor-element-type="acc_type_t">
<description>Enumerated type, must be one of INT32, INT48, FP16, FP32, as defined in the Supported Data Types table for this operation</description>
<rank min="0" max="0"/>
</argument>
<argument category="attribute" name="input_zp" type="tensor_t" shape="-" tensor-element-type="in_t">
<description>Input tensor zero point. Must be zero for non-int8 types.</description>
<rank min="0" max="0"/>
</argument>
<argument category="attribute" name="weight_zp" type="tensor_t" shape="-" tensor-element-type="weight_t">
<description>Weight zero point. Must be zero for non-int8 types.</description>
<rank min="0" max="0"/>
</argument>
<argument category="attribute" name="local_bound" type="tensor_t" shape="-" tensor-element-type="bool_t" optional="true">
<description>
This optional attribute affects the floating-point compliance error bound.
The default of false allows for direct and transform based, fast convolution algorithms.
Only set to true if direct dot-product calculation precision is required.
</description>
<rank min="0" max="0"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="[N,OH,OW,OC]" tensor-element-type="out_t">
<description>Output tensor</description>
<rank min="4" max="4"/>
</argument>
</arguments>
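<!-- Illustrative note (not part of the normative definition): assuming the usual TOSA convolution arithmetic,
     the output spatial dimensions are expected to satisfy
       OH = (IH - 1 + pad_top + pad_bottom - (KH - 1) * dilation_y) / stride_y + 1
       OW = (IW - 1 + pad_left + pad_right - (KW - 1) * dilation_x) / stride_x + 1
     with both divisions required to be exact. -->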
<types>
<type name='in_t' />
<type name='weight_t' />
<type name='out_t' />
<type name='acc_t' />
</types>
<typesupport mode="signed 8x8 with int32 accumulate" in_t="i8_t" weight_t="i8_t" out_t="i32_t" acc_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 8x4 with int32 accumulate" in_t="i8_t" weight_t="i4_t" out_t="i32_t" acc_t="i32_t" version_added="1.0">
<op_profile name="EXT-INT4"/>
</typesupport>
<typesupport mode="signed 16x8 with int48 accumulate" in_t="i16_t" weight_t="i8_t" out_t="i48_t" acc_t="i48_t" version_added="1.0">
<op_profile name="EXT-INT16"/>
</typesupport>
<typesupport mode="fp8e4m3 with fp16 accumulate" in_t="fp8e4m3_t" weight_t="fp8e4m3_t" out_t="fp16_t" acc_t="fp16_t" version_added="1.0">
<op_profile name="EXT-FP8E4M3"/>
</typesupport>
<typesupport mode="fp8e5m2 with fp16 accumulate" in_t="fp8e5m2_t" weight_t="fp8e5m2_t" out_t="fp16_t" acc_t="fp16_t" version_added="1.0">
<op_profile name="EXT-FP8E5M2"/>
</typesupport>
<typesupport mode="fp16 with fp16 accumulate" in_t="fp16_t" weight_t="fp16_t" out_t="fp16_t" acc_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="fp16 with fp32 accumulate" in_t="fp16_t" weight_t="fp16_t" out_t="fp16_t" acc_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16 with fp32 accumulate" in_t="bf16_t" weight_t="bf16_t" out_t="bf16_t" acc_t="fp32_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32 with fp32 accumulate" in_t="fp32_t" weight_t="fp32_t" out_t="fp32_t" acc_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>CONV3D</name>
<arguments>
<argument category="input" name="input" type="tensor_t" shape="[N,ID,IH,IW,IC]" tensor-element-type="in_t">
<description>Input tensor</description>
<rank min="5" max="5"/>
</argument>
<argument category="input" name="weight" type="tensor_t" shape="[OC,KD,KH,KW,IC]" tensor-element-type="weight_t">
<description>Weight kernel size KD x KH x KW</description>
<levellimit value="dilation_d * KD" limit="MAX_KERNEL"/>
<levellimit value="dilation_y * KH" limit="MAX_KERNEL"/>
<levellimit value="dilation_x * KW" limit="MAX_KERNEL"/>
<rank min="5" max="5"/>
</argument>
<argument category="input" name="bias" type="tensor_t" shape="[BC]" tensor-element-type="out_t">
<description>Per output channel bias data. +
Bias data will be broadcast if BC == 1.</description>
<rank min="1" max="1"/>
</argument>
<argument category="attribute" name="pad" type="tensor_t" shape="[6]" tensor-element-type="i32_t">
<description>[pad_d0, pad_d1, pad_top, pad_bottom, pad_left, pad_right]</description>
<levellimit value="pad_d0" limit="MAX_KERNEL"/>
<levellimit value="pad_d1" limit="MAX_KERNEL"/>
<levellimit value="pad_top" limit="MAX_KERNEL"/>
<levellimit value="pad_bottom" limit="MAX_KERNEL"/>
<levellimit value="pad_left" limit="MAX_KERNEL"/>
<levellimit value="pad_right" limit="MAX_KERNEL"/>
<rank min="1" max="1"/>
</argument>
<argument category="attribute" name="stride" type="tensor_t" shape="[3]" tensor-element-type="i32_t">
<description>[stride_d, stride_y, stride_x]</description>
<levellimit value="stride_y" limit="MAX_STRIDE"/>
<levellimit value="stride_x" limit="MAX_STRIDE"/>
<levellimit value="stride_d" limit="MAX_STRIDE"/>
<rank min="1" max="1"/>
</argument>
<argument category="attribute" name="dilation" type="tensor_t" shape="[3]" tensor-element-type="i32_t">
<description>[dilation_d, dilation_y, dilation_x]</description>
<rank min="1" max="1"/>
</argument>
<argument category="attribute" name="acc_type" type="tensor_t" shape="-" tensor-element-type="acc_type_t">
<description>Enumerated type, must be one of INT32, INT48, FP16, FP32, as defined in the Supported Data Types table for this operation</description>
<rank min="0" max="0"/>
</argument>
<argument category="attribute" name="input_zp" type="tensor_t" shape="-" tensor-element-type="in_t">
<description>Input tensor zero point. Must be zero for non-int8 types.</description>
<rank min="0" max="0"/>
</argument>
<argument category="attribute" name="weight_zp" type="tensor_t" shape="-" tensor-element-type="weight_t">
<description>Weight zero point. Must be zero for non-int8 types.</description>
<rank min="0" max="0"/>
</argument>
<argument category="attribute" name="local_bound" type="tensor_t" shape="-" tensor-element-type="bool_t" optional="true">
<description>
This optional attribute affects the floating-point compliance error bound.
The default of false allows for direct and transform based, fast convolution algorithms.
Only set to true if direct dot-product calculation precision is required.
</description>
<rank min="0" max="0"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="[N,OD,OH,OW,OC]" tensor-element-type="out_t">
<description>Output tensor</description>
<rank min="5" max="5"/>
</argument>
</arguments>
<types>
<type name='in_t' />
<type name='weight_t' />
<type name='out_t' />
<type name='acc_t' />
</types>
<typesupport mode="signed 8x8 with int32 accumulate" in_t="i8_t" weight_t="i8_t" out_t="i32_t" acc_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 8x4 with int32 accumulate" in_t="i8_t" weight_t="i4_t" out_t="i32_t" acc_t="i32_t" version_added="1.0">
<op_profile name="EXT-INT4"/>
</typesupport>
<typesupport mode="signed 16x8 with int48 accumulate" in_t="i16_t" weight_t="i8_t" out_t="i48_t" acc_t="i48_t" version_added="1.0">
<op_profile name="EXT-INT16"/>
</typesupport>
<typesupport mode="fp8e4m3 with fp16 accumulate" in_t="fp8e4m3_t" weight_t="fp8e4m3_t" out_t="fp16_t" acc_t="fp16_t" version_added="1.0">
<op_profile name="EXT-FP8E4M3"/>
</typesupport>
<typesupport mode="fp8e5m2 with fp16 accumulate" in_t="fp8e5m2_t" weight_t="fp8e5m2_t" out_t="fp16_t" acc_t="fp16_t" version_added="1.0">
<op_profile name="EXT-FP8E5M2"/>
</typesupport>
<typesupport mode="fp16 with fp16 accumulate" in_t="fp16_t" weight_t="fp16_t" out_t="fp16_t" acc_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="fp16 with fp32 accumulate" in_t="fp16_t" weight_t="fp16_t" out_t="fp16_t" acc_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16 with fp32 accumulate" in_t="bf16_t" weight_t="bf16_t" out_t="bf16_t" acc_t="fp32_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32 with fp32 accumulate" in_t="fp32_t" weight_t="fp32_t" out_t="fp32_t" acc_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>DEPTHWISE_CONV2D</name>
<arguments>
<argument category="input" name="input" type="tensor_t" shape="[N,IH,IW,C]" tensor-element-type="in_t">
<description>Input tensor</description>
<rank min="4" max="4"/>
</argument>
<argument category="input" name="weight" type="tensor_t" shape="[KH,KW,C,M]" tensor-element-type="weight_t">
<description>Weight kernel size KH x KW</description>
<levellimit value="dilation_y * KH" limit="MAX_KERNEL"/>
<levellimit value="dilation_x * KW" limit="MAX_KERNEL"/>
<rank min="4" max="4"/>
</argument>
<argument category="input" name="bias" type="tensor_t" shape="[BC]" tensor-element-type="out_t">
<description>Per output channel bias data. +
Bias data will be broadcast if BC == 1.</description>
<rank min="1" max="1"/>
</argument>
<argument category="attribute" name="pad" type="tensor_t" shape="[4]" tensor-element-type="i32_t">
<description>[pad_top, pad_bottom, pad_left, pad_right]</description>
<levellimit value="pad_top" limit="MAX_KERNEL"/>
<levellimit value="pad_bottom" limit="MAX_KERNEL"/>
<levellimit value="pad_left" limit="MAX_KERNEL"/>
<levellimit value="pad_right" limit="MAX_KERNEL"/>
<rank min="1" max="1"/>
</argument>
<argument category="attribute" name="stride" type="tensor_t" shape="[2]" tensor-element-type="i32_t">
<description>[stride_y, stride_x]</description>
<levellimit value="stride_y" limit="MAX_STRIDE"/>
<levellimit value="stride_x" limit="MAX_STRIDE"/>
<rank min="1" max="1"/>
</argument>
<argument category="attribute" name="dilation" type="tensor_t" shape="[2]" tensor-element-type="i32_t">
<description>[dilation_y, dilation_x]</description>
<rank min="1" max="1"/>
</argument>
<argument category="attribute" name="acc_type" type="tensor_t" shape="-" tensor-element-type="acc_type_t">
<description>Enumerated type, must be one of INT32, INT48, FP16, FP32, as defined in the Supported Data Types table for this operation</description>
<rank min="0" max="0"/>
</argument>
<argument category="attribute" name="input_zp" type="tensor_t" shape="-" tensor-element-type="in_t">
<description>Input tensor zero point. Must be zero for non-int8 types.</description>
<rank min="0" max="0"/>
</argument>
<argument category="attribute" name="weight_zp" type="tensor_t" shape="-" tensor-element-type="weight_t">
<description>Weight zero point. Must be zero for non-int8 types.</description>
<rank min="0" max="0"/>
</argument>
<argument category="attribute" name="local_bound" type="tensor_t" shape="-" tensor-element-type="bool_t" optional="true">
<description>
This optional attribute affects the floating-point compliance error bound.
The default of false allows for direct and transform based, fast convolution algorithms.
Only set to true if direct dot-product calculation precision is required.
</description>
<rank min="0" max="0"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="[N,OH,OW,C*M]" tensor-element-type="out_t">
<description>Output tensor</description>
<rank min="4" max="4"/>
</argument>
</arguments>
<types>
<type name='in_t' />
<type name='weight_t' />
<type name='out_t' />
<type name='acc_t' />
</types>
<typesupport mode="signed 8x8 with int32 accumulate" in_t="i8_t" weight_t="i8_t" out_t="i32_t" acc_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 8x4 with int32 accumulate" in_t="i8_t" weight_t="i4_t" out_t="i32_t" acc_t="i32_t" version_added="1.0">
<op_profile name="EXT-INT4"/>
</typesupport>
<typesupport mode="signed 16x8 with int48 accumulate" in_t="i16_t" weight_t="i8_t" out_t="i48_t" acc_t="i48_t" version_added="1.0">
<op_profile name="EXT-INT16"/>
</typesupport>
<typesupport mode="fp8e4m3 with fp16 accumulate" in_t="fp8e4m3_t" weight_t="fp8e4m3_t" out_t="fp16_t" acc_t="fp16_t" version_added="1.0">
<op_profile name="EXT-FP8E4M3"/>
</typesupport>
<typesupport mode="fp8e5m2 with fp16 accumulate" in_t="fp8e5m2_t" weight_t="fp8e5m2_t" out_t="fp16_t" acc_t="fp16_t" version_added="1.0">
<op_profile name="EXT-FP8E5M2"/>
</typesupport>
<typesupport mode="fp16 with fp16 accumulate" in_t="fp16_t" weight_t="fp16_t" out_t="fp16_t" acc_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="fp16 with fp32 accumulate" in_t="fp16_t" weight_t="fp16_t" out_t="fp16_t" acc_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16 with fp32 accumulate" in_t="bf16_t" weight_t="bf16_t" out_t="bf16_t" acc_t="fp32_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32 with fp32 accumulate" in_t="fp32_t" weight_t="fp32_t" out_t="fp32_t" acc_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>FFT2D</name>
<arguments>
<argument category="input" name="input_real" type="tensor_t" shape="[N,H,W]" tensor-element-type="in_out_t">
<description>Real part of the complex input. H,W must be powers of two.</description>
<levellimit value="H" limit="MAX_KERNEL"/>
<levellimit value="W" limit="MAX_KERNEL"/>
<rank min="3" max="3"/>
</argument>
<argument category="input" name="input_imag" type="tensor_t" shape="[N,H,W]" tensor-element-type="in_out_t">
<description>Imaginary part of the complex input. H,W must be powers of two.</description>
<rank min="3" max="3"/>
</argument>
<argument category="attribute" name="inverse" type="tensor_t" shape="-" tensor-element-type="bool_t">
<description>false for forward FFT, true for inverse FFT</description>
<rank min="0" max="0"/>
</argument>
<argument category="output" name="output_real" type="tensor_t" shape="[N,H,W]" tensor-element-type="in_out_t">
<description>Real part of the complex output.</description>
<rank min="3" max="3"/>
</argument>
<argument category="attribute" name="local_bound" type="tensor_t" shape="-" tensor-element-type="bool_t" optional="true">
<description>
This optional attribute affects the floating-point compliance error bound.
The default of false allows for direct and transform based, fast convolution algorithms.
Only set to true if direct dot-product calculation precision is required.
</description>
<rank min="0" max="0"/>
</argument>
<argument category="output" name="output_imag" type="tensor_t" shape="[N,H,W]" tensor-element-type="in_out_t">
<description>Imaginary part of the complex output.</description>
<rank min="3" max="3"/>
</argument>
</arguments>
<types>
<type name='in_out_t' />
</types>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="EXT-FFT"/>
</typesupport>
</operator>
<operator>
<name>FULLY_CONNECTED</name>
<arguments>
<argument category="input" name="input" type="tensor_t" shape="[N,IC]" tensor-element-type="in_t">
<description>Input tensor</description>
<rank min="2" max="2"/>
</argument>
<argument category="input" name="weight" type="tensor_t" shape="[OC,IC]" tensor-element-type="weight_t">
<description>Weights</description>
<rank min="2" max="2"/>
</argument>
<argument category="input" name="bias" type="tensor_t" shape="[BC]" tensor-element-type="out_t">
<description>Per output channel bias data. +
Bias data will be broadcast if BC == 1.</description>
<rank min="1" max="1"/>
</argument>
<argument category="attribute" name="input_zp" type="tensor_t" shape="-" tensor-element-type="in_t">
<description>Input tensor zero point. Must be zero for non-int8 types.</description>
<rank min="0" max="0"/>
</argument>
<argument category="attribute" name="weight_zp" type="tensor_t" shape="-" tensor-element-type="weight_t">
<description>Weight zero point. Must be zero for non-int8 types.</description>
<rank min="0" max="0"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="[N,OC]" tensor-element-type="out_t">
<description>Output tensor</description>
<rank min="2" max="2"/>
</argument>
</arguments>
<types>
<type name='in_t' />
<type name='weight_t' />
<type name='out_t' />
</types>
<typesupport mode="signed 8x8 with int32 accumulate" in_t="i8_t" weight_t="i8_t" out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 8x4 with int32 accumulate" in_t="i8_t" weight_t="i4_t" out_t="i32_t" version_added="1.0">
<op_profile name="EXT-INT4"/>
</typesupport>
<typesupport mode="signed 16x8 with int48 accumulate" in_t="i16_t" weight_t="i8_t" out_t="i48_t" version_added="1.0">
<op_profile name="EXT-INT16"/>
</typesupport>
<typesupport mode="fp16 with fp16 accumulate" in_t="fp16_t" weight_t="fp16_t" out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="fp16 with fp32 accumulate" in_t="fp16_t" weight_t="fp16_t" out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16 with fp32 accumulate" in_t="bf16_t" weight_t="bf16_t" out_t="fp32_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32 with fp32 accumulate" in_t="fp32_t" weight_t="fp32_t" out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>MATMUL</name>
<arguments>
<argument category="input" name="A" type="tensor_t" shape="[N,H,C]" tensor-element-type="in_t">
<description>Input tensor A, N matrices of size HxC</description>
<rank min="3" max="3"/>
</argument>
<argument category="input" name="B" type="tensor_t" shape="[N,C,W]" tensor-element-type="in_t">
<description>Input tensor B, N matrices of size CxW</description>
<rank min="3" max="3"/>
</argument>
<argument category="attribute" name="A_zp" type="tensor_t" shape="-" tensor-element-type="in_t">
<description>Input tensor A zero point. Must be zero for non-int8 types.</description>
<rank min="0" max="0"/>
</argument>
<argument category="attribute" name="B_zp" type="tensor_t" shape="-" tensor-element-type="in_t">
<description>Input tensor B zero point. Must be zero for non-int8 types.</description>
<rank min="0" max="0"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="[N,H,W]" tensor-element-type="out_t">
<description>Output tensor, N matrices of size HxW</description>
<rank min="3" max="3"/>
</argument>
</arguments>
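<!-- Illustrative note (not part of the normative definition): the computation is assumed to be the
     zero-point-adjusted batched matrix product
       output[n, h, w] = sum_c (A[n, h, c] - A_zp) * (B[n, c, w] - B_zp)
     accumulated in the wider accumulator type listed for each mode below. -->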
<types>
<type name='in_t' />
<type name='out_t' />
</types>
<typesupport mode="signed 8x8 with int32 accumulate" in_t="i8_t" out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16x16 with int48 accumulate" in_t="i16_t" out_t="i48_t" version_added="1.0">
<op_profile name="EXT-INT16"/>
</typesupport>
<typesupport mode="fp8e4m3 with fp16 accumulate" in_t="fp8e4m3_t" out_t="fp16_t" version_added="1.0">
<op_profile name="EXT-FP8E4M3"/>
</typesupport>
<typesupport mode="fp8e5m2" in_t="fp8e5m2_t" out_t="fp16_t" version_added="1.0">
<op_profile name="EXT-FP8E5M2"/>
</typesupport>
<typesupport mode="fp16 with fp16 accumulate" in_t="fp16_t" out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="fp16 with fp32 accumulate" in_t="fp16_t" out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16 with fp32 accumulate" in_t="bf16_t" out_t="fp32_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32 with fp32 accumulate" in_t="fp32_t" out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>MAX_POOL2D</name>
<arguments>
<argument category="input" name="input" type="tensor_t" shape="[N,IH,IW,C]" tensor-element-type="in_out_t">
<description>Input tensor 4D</description>
<rank min="4" max="4"/>
</argument>
<argument category="attribute" name="kernel" type="tensor_t" shape="[2]" tensor-element-type="i32_t">
<description>[kernel_y, kernel_x]</description>
<levellimit value="kernel_y" limit="MAX_KERNEL"/>
<levellimit value="kernel_x" limit="MAX_KERNEL"/>
<rank min="1" max="1"/>
</argument>
<argument category="attribute" name="stride" type="tensor_t" shape="[2]" tensor-element-type="i32_t">
<description>[stride_y, stride_x]</description>
<levellimit value="stride_y" limit="MAX_STRIDE"/>
<levellimit value="stride_x" limit="MAX_STRIDE"/>
<rank min="1" max="1"/>
</argument>
<argument category="attribute" name="pad" type="tensor_t" shape="[4]" tensor-element-type="i32_t">
<description>[pad_top, pad_bottom, pad_left, pad_right]</description>
<levellimit value="pad_top" limit="MAX_KERNEL"/>
<levellimit value="pad_bottom" limit="MAX_KERNEL"/>
<levellimit value="pad_left" limit="MAX_KERNEL"/>
<levellimit value="pad_right" limit="MAX_KERNEL"/>
<rank min="1" max="1"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="[N,OH,OW,C]" tensor-element-type="in_out_t">
<description>Output tensor 4D</description>
<rank min="4" max="4"/>
</argument>
</arguments>
<types>
<type name='in_out_t' />
</types>
<typesupport mode="signed 8" in_out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16" in_out_t="i16_t" version_added="1.0">
<op_profile name="EXT-INT16"/>
</typesupport>
<typesupport mode="fp8e4m3" in_out_t="fp8e4m3_t" version_added="1.0">
<op_profile name="EXT-FP8E4M3"/>
</typesupport>
<typesupport mode="fp8e5m2" in_out_t="fp8e5m2_t" version_added="1.0">
<op_profile name="EXT-FP8E5M2"/>
</typesupport>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>RFFT2D</name>
<arguments>
<argument category="input" name="input_real" type="tensor_t" shape="[N,H,W]" tensor-element-type="in_out_t">
<description>Real input. H,W must be powers of two.</description>
<levellimit value="H" limit="MAX_KERNEL"/>
<levellimit value="W" limit="MAX_KERNEL"/>
<rank min="3" max="3"/>
</argument>
<argument category="output" name="output_real" type="tensor_t" shape="[N,H,W/2 + 1]" tensor-element-type="in_out_t">
<description>Real part of the complex output.</description>
<rank min="3" max="3"/>
</argument>
<argument category="attribute" name="local_bound" type="tensor_t" shape="-" tensor-element-type="bool_t" optional="true">
<description>
This optional attribute affects the floating-point compliance error bound.
The default of false allows for direct and transform based, fast convolution algorithms.
Only set to true if direct dot-product calculation precision is required.
</description>
<rank min="0" max="0"/>
</argument>
<argument category="output" name="output_imag" type="tensor_t" shape="[N,H,W/2 + 1]" tensor-element-type="in_out_t">
<description>Imaginary part of the complex output.</description>
<rank min="3" max="3"/>
</argument>
</arguments>
<types>
<type name='in_out_t' />
</types>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="EXT-FFT"/>
</typesupport>
</operator>
<operator>
<name>TRANSPOSE_CONV2D</name>
<arguments>
<argument category="input" name="input" type="tensor_t" shape="[N,IH,IW,IC]" tensor-element-type="in_t">
<description>Input tensor</description>
<rank min="4" max="4"/>
</argument>
<argument category="input" name="weight" type="tensor_t" shape="[OC,KH,KW,IC]" tensor-element-type="weight_t">
<description>Weight kernel size KH x KW</description>
<levellimit value="KH" limit="MAX_KERNEL"/>
<levellimit value="KW" limit="MAX_KERNEL"/>
<rank min="4" max="4"/>
</argument>
<argument category="input" name="bias" type="tensor_t" shape="[BC]" tensor-element-type="out_t">
<description>Per output channel bias data. +
Bias data will be broadcast if BC == 1.</description>
<rank min="1" max="1"/>
</argument>
<argument category="attribute" name="out_pad" type="tensor_t" shape="[4]" tensor-element-type="i32_t">
<description>[out_pad_top, out_pad_bottom, out_pad_left, out_pad_right]</description>
<levellimit value="out_pad_top" limit="MAX_KERNEL"/>
<levellimit value="out_pad_bottom" limit="MAX_KERNEL"/>
<levellimit value="out_pad_left" limit="MAX_KERNEL"/>
<levellimit value="out_pad_right" limit="MAX_KERNEL"/>
<rank min="1" max="1"/>
</argument>
<argument category="attribute" name="stride" type="tensor_t" shape="[2]" tensor-element-type="i32_t">
<description>[stride_y, stride_x]</description>
<levellimit value="stride_y" limit="MAX_STRIDE"/>
<levellimit value="stride_x" limit="MAX_STRIDE"/>
<rank min="1" max="1"/>
</argument>
<argument category="attribute" name="acc_type" type="tensor_t" shape="-" tensor-element-type="acc_type_t">
<description>Enumerated type, must be one of INT32, INT48, FP16, FP32, as defined in the Supported Data Types table for this operation</description>
<rank min="0" max="0"/>
</argument>
<argument category="attribute" name="input_zp" type="tensor_t" shape="-" tensor-element-type="in_t">
<description>Input tensor zero point. Must be zero for non-int8 types.</description>
<rank min="0" max="0"/>
</argument>
<argument category="attribute" name="weight_zp" type="tensor_t" shape="-" tensor-element-type="weight_t">
<description>Weight zero point. Must be zero for non-int8 types.</description>
<rank min="0" max="0"/>
</argument>
<argument category="attribute" name="local_bound" type="tensor_t" shape="-" tensor-element-type="bool_t" optional="true">
<description>
This optional attribute affects the floating-point compliance error bound.
The default of false allows for direct and transform based, fast convolution algorithms.
Only set to true if direct dot-product calculation precision is required.
</description>
<rank min="0" max="0"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="[N,OH,OW,OC]" tensor-element-type="out_t">
<description>Output tensor</description>
<rank min="4" max="4"/>
</argument>
</arguments>
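<!-- Illustrative note (not part of the normative definition): assuming the usual TOSA transpose-convolution
     arithmetic, the output spatial dimensions are expected to satisfy
       OH = (IH - 1) * stride_y + out_pad_top + out_pad_bottom + KH
       OW = (IW - 1) * stride_x + out_pad_left + out_pad_right + KW -->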
<types>
<type name='in_t' />
<type name='weight_t' />
<type name='out_t' />
<type name='acc_t' />
</types>
<typesupport mode="signed 8x8 with int32 accumulate" in_t="i8_t" weight_t="i8_t" out_t="i32_t" acc_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 8x4 with int32 accumulate" in_t="i8_t" weight_t="i4_t" out_t="i32_t" acc_t="i32_t" version_added="1.0">
<op_profile name="EXT-INT4"/>
</typesupport>
<typesupport mode="signed 16x8 with int48 accumulate" in_t="i16_t" weight_t="i8_t" out_t="i48_t" acc_t="i48_t" version_added="1.0">
<op_profile name="EXT-INT16"/>
</typesupport>
<typesupport mode="fp8e4m3 with fp16 accumulate" in_t="fp8e4m3_t" out_t="fp16_t" version_added="1.0">
<op_profile name="EXT-FP8E4M3"/>
</typesupport>
<typesupport mode="fp8e5m2" in_t="fp8e5m2_t" out_t="fp16_t" acc_t="fp16_t" version_added="1.0">
<op_profile name="EXT-FP8E5M2"/>
</typesupport>
<typesupport mode="fp16 with fp16 accumulate" in_t="fp16_t" weight_t="fp16_t" out_t="fp16_t" acc_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="fp16 with fp32 accumulate" in_t="fp16_t" weight_t="fp16_t" out_t="fp16_t" acc_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16 with fp32 accumulate" in_t="bf16_t" weight_t="bf16_t" out_t="bf16_t" acc_t="fp32_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32 with fp32 accumulate" in_t="fp32_t" weight_t="fp32_t" out_t="fp32_t" acc_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
</operatorgroup>
<operatorgroup name="activation">
<operator>
<name>CLAMP</name>
<arguments>
<argument category="input" name="input" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Input tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="attribute" name="min_val" type="tensor_t" shape="-" tensor-element-type="in_out_t">
<description>Minimum clip value</description>
<rank min="0" max="0"/>
</argument>
<argument category="attribute" name="max_val" type="tensor_t" shape="-" tensor-element-type="in_out_t">
<description>Maximum clip value</description>
<rank min="0" max="0"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor of same type and shape as input</description>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="signed 8" in_out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16" in_out_t="i16_t" version_added="1.0">
<op_profile name="EXT-INT16"/>
</typesupport>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>ERF</name>
<arguments>
<argument category="input" name="input" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Input tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor of same type and shape as input</description>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>SIGMOID</name>
<arguments>
<argument category="input" name="input" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Input tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor of same type and shape as input</description>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>TANH</name>
<arguments>
<argument category="input" name="input" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Input tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor of same type and shape as input</description>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
</operatorgroup>
<operatorgroup name="elementwise-binary">
<operator>
<name>ADD</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape1" tensor-element-type="in_out_t">
<description>Input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="input" name="input2" type="tensor_t" shape="shape2" tensor-element-type="in_out_t">
<description>Input tensor with the same rank as input1</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="signed 32" in_out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>ARITHMETIC_RIGHT_SHIFT</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape1" tensor-element-type="in_out_t">
<description>Input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="input" name="input2" type="tensor_t" shape="shape2" tensor-element-type="in_out_t">
<description>Input tensor with the same rank as input1</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="attribute" name="round" type="tensor_t" shape="-" tensor-element-type="bool_t">
<description>If true then the shift is rounded</description>
<rank min="0" max="0"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="signed 8" in_out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16" in_out_t="i16_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 32" in_out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
</operator>
<operator>
<name>BITWISE_AND</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape1" tensor-element-type="in_out_t">
<description>Input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="input" name="input2" type="tensor_t" shape="shape2" tensor-element-type="in_out_t">
<description>Input tensor with the same rank as input1</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="signed 8" in_out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16" in_out_t="i16_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 32" in_out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
</operator>
<operator>
<name>BITWISE_OR</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape1" tensor-element-type="in_out_t">
<description>Input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="input" name="input2" type="tensor_t" shape="shape2" tensor-element-type="in_out_t">
<description>Input tensor with the same rank as input1</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="signed 8" in_out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16" in_out_t="i16_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 32" in_out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
</operator>
<operator>
<name>BITWISE_XOR</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape1" tensor-element-type="in_out_t">
<description>Input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="input" name="input2" type="tensor_t" shape="shape2" tensor-element-type="in_out_t">
<description>Input tensor with the same rank as input1</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="signed 8" in_out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16" in_out_t="i16_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 32" in_out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
</operator>
<operator>
<name>INTDIV</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape1" tensor-element-type="in_out_t">
<description>Input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="input" name="input2" type="tensor_t" shape="shape2" tensor-element-type="in_out_t">
<description>Input tensor with the same rank as input1</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="signed 32" in_out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>LOGICAL_AND</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape1" tensor-element-type="in_out_t">
<description>Input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="input" name="input2" type="tensor_t" shape="shape2" tensor-element-type="in_out_t">
<description>Input tensor with the same rank as input1</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="boolean" in_out_t="bool_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>LOGICAL_LEFT_SHIFT</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape1" tensor-element-type="in_out_t">
<description>Input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="input" name="input2" type="tensor_t" shape="shape2" tensor-element-type="in_out_t">
<description>Input tensor with the same rank as input1</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="signed 8" in_out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
<typesupport mode="signed 16" in_out_t="i16_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
<typesupport mode="signed 32" in_out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>LOGICAL_RIGHT_SHIFT</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape1" tensor-element-type="in_out_t">
<description>Input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="input" name="input2" type="tensor_t" shape="shape2" tensor-element-type="in_out_t">
<description>Input tensor with the same rank as input1</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="signed 8" in_out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
<typesupport mode="signed 16" in_out_t="i16_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
<typesupport mode="signed 32" in_out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>LOGICAL_OR</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape1" tensor-element-type="in_out_t">
<description>Input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="input" name="input2" type="tensor_t" shape="shape2" tensor-element-type="in_out_t">
<description>Input tensor with the same rank as input1</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="boolean" in_out_t="bool_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>LOGICAL_XOR</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape1" tensor-element-type="in_out_t">
<description>Input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="input" name="input2" type="tensor_t" shape="shape2" tensor-element-type="in_out_t">
<description>Input tensor with the same rank as input1</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="boolean" in_out_t="bool_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>MAXIMUM</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape1" tensor-element-type="in_out_t">
<description>Input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="input" name="input2" type="tensor_t" shape="shape2" tensor-element-type="in_out_t">
<description>Input tensor with the same rank as input1</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="signed 32" in_out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>MINIMUM</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape1" tensor-element-type="in_out_t">
<description>Input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="input" name="input2" type="tensor_t" shape="shape2" tensor-element-type="in_out_t">
<description>Input tensor with the same rank as input1</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="signed 32" in_out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>MUL</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape1" tensor-element-type="in_t">
<description>Input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="input" name="input2" type="tensor_t" shape="shape2" tensor-element-type="in_t">
<description>Input tensor with the same rank as input1</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="input" name="shift" type="tensor_t" shape="-" tensor-element-type="i8_t">
<description>Result right shift (i32_t data type only)</description>
<rank min="0" max="0"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="out_t">
<description>Output tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_t'/>
<type name='out_t'/>
</types>
<typesupport mode="signed 8" in_t="i8_t" out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16" in_t="i16_t" out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 32" in_t="i32_t" out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
<typesupport mode="fp16" in_t="fp16_t" out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_t="bf16_t" out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_t="fp32_t" out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>POW</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape1" tensor-element-type="in_out_t">
<description>Input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="input" name="input2" type="tensor_t" shape="shape2" tensor-element-type="in_out_t">
<description>Input tensor with the same rank as input1</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>SUB</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape1" tensor-element-type="in_out_t">
<description>Input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="input" name="input2" type="tensor_t" shape="shape2" tensor-element-type="in_out_t">
<description>Input tensor with the same rank as input1</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="signed 32" in_out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>TABLE</name>
<arguments>
<argument category="input" name="input" type="tensor_t" shape="shape" tensor-element-type="in_t">
<description>Input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="input" name="table" type="tensor_t" shape="[TABLE_SIZE]" tensor-element-type="table_t">
<description>Lookup table tensor</description>
<rank min="1" max="1"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="out_t">
<description>Output tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_t'/>
<type name='table_t'/>
<type name='out_t'/>
<type name='TABLE_SIZE'/>
</types>
<typesupport mode="signed 8" in_t="i8_t" table_t="i8_t" TABLE_SIZE="256" out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16" in_t="i16_t" table_t="i16_t" TABLE_SIZE="513" out_t="i32_t" version_added="1.0">
<op_profile name="EXT-INT16"/>
</typesupport>
</operator>
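<!--
Non-normative illustration: the two TABLE type modes differ in table length.
The signed 8 mode uses a 256-entry table, one entry per possible i8 input
value, while the signed 16 mode uses a 513-entry table and produces an i32
result. A hedged Python sketch of the 8-bit case only, assuming the table is
indexed by offsetting the signed input into the range 0..255:

    def table_i8(value, table):
        assert len(table) == 256
        return table[value + 128]   # map the i8 range [-128, 127] onto [0, 255]
-->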
</operatorgroup>
<operatorgroup name="elementwise-unary">
<operator>
<name>ABS</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Input tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor of same type, size as the input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="signed 32" in_out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>BITWISE_NOT</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Input tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor of same type, size as the input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="signed 8" in_out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16" in_out_t="i16_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 32" in_out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
</operator>
<operator>
<name>CEIL</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Input tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor of same type, size as the input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>CLZ</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Input tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor of same type, size as the input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="signed 32" in_out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
</operator>
<operator>
<name>COS</name>
<arguments>
<argument category="input" name="input" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Input tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor of same type and shape as input</description>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>EXP</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Input tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor of same type, size as the input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>FLOOR</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Input tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor of same type, size as the input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>LOG</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Input tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor of same type, size as the input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>LOGICAL_NOT</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Input tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor of same type, size as the input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="Boolean" in_out_t="bool_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>NEGATE</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Input tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="attribute" name="input1_zp" type="tensor_t" shape="-" tensor-element-type="in_out_t">
<description>Input 1 zero point. Must be zero for non-int8 types.</description>
<rank min="0" max="0"/>
</argument>
<argument category="attribute" name="output_zp" type="tensor_t" shape="-" tensor-element-type="in_out_t">
<description>Output zero point. Must be zero for non-int8 types.</description>
<rank min="0" max="0"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor of same type, size as the input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
<type name='acc_t'/>
</types>
<typesupport mode="signed 8" in_out_t="i8_t" acc_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16" in_out_t="i16_t" acc_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 32" in_out_t="i32_t" acc_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="fp16" in_out_t="fp16_t" acc_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" acc_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" acc_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
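<!--
Non-normative illustration: input1_zp and output_zp are only non-zero for the
signed 8 mode, and the negation is carried out in the wider acc_t. A minimal
sketch, assuming the conventional zero-point formulation (the specification
pseudocode is normative):

    def negate_quantized(x, input1_zp, output_zp):
        # Negate the de-quantized value, then re-centre on the output zero point.
        return output_zp - (x - input1_zp)
-->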
<operator>
<name>RECIPROCAL</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Input tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor of same type, size as the input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>RSQRT</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Input tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor of same type, size as the input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
</operatorgroup>
<operatorgroup name="elementwise-ternary">
<operator>
<name>SELECT</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape1" tensor-element-type="bool_t">
<description>Input selector tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="input" name="input2" type="tensor_t" shape="shape2" tensor-element-type="in_out_t">
<description>Input value tensor if input1 is True</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="input" name="input3" type="tensor_t" shape="shape3" tensor-element-type="in_out_t">
<description>Input value tensor if input1 is False</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor of same type as input2 and input3</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="Boolean" in_out_t="bool_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
<typesupport mode="signed 8" in_out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16" in_out_t="i16_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 32" in_out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>SIN</name>
<arguments>
<argument category="input" name="input" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Input tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor of same type and shape as input</description>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
</operatorgroup>
<operatorgroup name="comparison">
<operator>
<name>EQUAL</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape1" tensor-element-type="in_t">
<description>Input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="input" name="input2" type="tensor_t" shape="shape2" tensor-element-type="in_t">
<description>Input tensor with the same rank as input1</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="out_t">
<description>Output tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_t'/>
<type name='out_t'/>
</types>
<typesupport mode="signed 32" in_t="i32_t" out_t="bool_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="fp16" in_t="fp16_t" out_t="bool_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_t="bf16_t" out_t="bool_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_t="fp32_t" out_t="bool_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>GREATER</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape1" tensor-element-type="in_t">
<description>Input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="input" name="input2" type="tensor_t" shape="shape2" tensor-element-type="in_t">
<description>Input tensor with the same rank as input1</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="out_t">
<description>Output tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_t'/>
<type name='out_t'/>
</types>
<typesupport mode="signed 32" in_t="i32_t" out_t="bool_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="fp16" in_t="fp16_t" out_t="bool_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_t="bf16_t" out_t="bool_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_t="fp32_t" out_t="bool_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>GREATER_EQUAL</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape1" tensor-element-type="in_t">
<description>Input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="input" name="input2" type="tensor_t" shape="shape2" tensor-element-type="in_t">
<description>Input tensor with the same rank as input1</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="out_t">
<description>Output tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_t'/>
<type name='out_t'/>
</types>
<typesupport mode="signed 32" in_t="i32_t" out_t="bool_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="fp16" in_t="fp16_t" out_t="bool_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_t="bf16_t" out_t="bool_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_t="fp32_t" out_t="bool_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
</operatorgroup>
<operatorgroup name="reduction">
<operator>
<name>REDUCE_ALL</name>
<arguments>
<argument category="input" name="input" type="tensor_t" shape="shape1" tensor-element-type="in_out_t">
<description>Input tensor</description>
<rank min="1" max="MAX_RANK"/>
</argument>
<argument category="attribute" name="axis" type="tensor_t" shape="-" tensor-element-type="i32_t">
<description>Axis to reduce, in range from 0 to rank(shape1)-1</description>
<rank min="0" max="0"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor. Same rank as the input tensor.</description>
<rank min="1" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="boolean" in_out_t="bool_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>REDUCE_ANY</name>
<arguments>
<argument category="input" name="input" type="tensor_t" shape="shape1" tensor-element-type="in_out_t">
<description>Input tensor</description>
<rank min="1" max="MAX_RANK"/>
</argument>
<argument category="attribute" name="axis" type="tensor_t" shape="-" tensor-element-type="i32_t">
<description>Axis to reduce, in range from 0 to rank(shape1)-1</description>
<rank min="0" max="0"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor. Same rank as the input tensor.</description>
<rank min="1" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="boolean" in_out_t="bool_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>REDUCE_MAX</name>
<arguments>
<argument category="input" name="input" type="tensor_t" shape="shape1" tensor-element-type="in_out_t">
<description>Input tensor</description>
<rank min="1" max="MAX_RANK"/>
</argument>
<argument category="attribute" name="axis" type="tensor_t" shape="-" tensor-element-type="i32_t">
<description>Axis to reduce, in range from 0 to rank(shape1)-1</description>
<rank min="0" max="0"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor. Same rank as the input tensor.</description>
<rank min="1" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="signed 8" in_out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16" in_out_t="i16_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 32" in_out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>REDUCE_MIN</name>
<arguments>
<argument category="input" name="input" type="tensor_t" shape="shape1" tensor-element-type="in_out_t">
<description>Input tensor</description>
<rank min="1" max="MAX_RANK"/>
</argument>
<argument category="attribute" name="axis" type="tensor_t" shape="-" tensor-element-type="i32_t">
<description>Axis to reduce, in range from 0 to rank(shape1)-1</description>
<rank min="0" max="0"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor. Same rank as the input tensor.</description>
<rank min="1" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="signed 8" in_out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16" in_out_t="i16_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 32" in_out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>REDUCE_PRODUCT</name>
<arguments>
<argument category="input" name="input" type="tensor_t" shape="shape1" tensor-element-type="in_out_t">
<description>Input tensor</description>
<rank min="1" max="MAX_RANK"/>
</argument>
<argument category="attribute" name="axis" type="tensor_t" shape="-" tensor-element-type="i32_t">
<description>Axis to reduce, in range from 0 to rank(shape1)-1</description>
<rank min="0" max="0"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor. Same rank as the input tensor.</description>
<rank min="1" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>REDUCE_SUM</name>
<arguments>
<argument category="input" name="input" type="tensor_t" shape="shape1" tensor-element-type="in_out_t">
<description>Input tensor</description>
<rank min="1" max="MAX_RANK"/>
</argument>
<argument category="attribute" name="axis" type="tensor_t" shape="-" tensor-element-type="i32_t">
<description>Axis to reduce, in range from 0 to rank(shape1)-1</description>
<rank min="0" max="0"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor. Same rank as the input tensor.</description>
<rank min="1" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
<type name='acc_t'/>
</types>
<typesupport mode="signed 32" in_out_t="i32_t" acc_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="fp16" in_out_t="fp16_t" acc_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" acc_t="fp32_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" acc_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
</operatorgroup>
<operatorgroup name="data-layout">
<operator>
<name>CONCAT</name>
<arguments>
<argument category="input" name="input1" type="tensor_list_t" shape="shapes1" tensor-element-type="in_out_t">
<description>List of input tensors. All inputs must have the same rank and data type</description>
<levellimit value="tensor_list_shape(input1)" limit="MAX_TENSOR_LIST_SIZE"/>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="attribute" name="axis" type="tensor_t" shape="-" tensor-element-type="i32_t">
<description>Axis along which concatenation is to occur, in range from 0 to rank(shape)-1</description>
<rank min="0" max="0"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="1" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="boolean" in_out_t="bool_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
<typesupport mode="signed 8" in_out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16" in_out_t="i16_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 32" in_out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="fp8e4m3" in_out_t="fp8e4m3_t" version_added="1.0">
<op_profile name="EXT-FP8E4M3"/>
</typesupport>
<typesupport mode="fp8e5m2" in_out_t="fp8e5m2_t" version_added="1.0">
<op_profile name="EXT-FP8E5M2"/>
</typesupport>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
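<!--
Non-normative illustration: all CONCAT inputs share rank and element type, and
the output shape matches the inputs in every dimension except the
concatenation axis, where it is the sum of the input sizes. A small Python
sketch of that shape rule:

    def concat_output_shape(shapes, axis):
        out = list(shapes[0])
        out[axis] = sum(s[axis] for s in shapes)
        return out
-->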
<operator>
<name>PAD</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape1" tensor-element-type="in_out_t">
<description>Input tensor</description>
<rank min="1" max="MAX_RANK"/>
</argument>
<argument category="input" name="padding" type="shape_t" shape="-" tensor-element-type="-">
<description>Number of pad elements at the start and end of each dimension.
The values in padding are interpreted as start, end of each dimension.
As an example for rank 2, the values would be interpreted as [start_dim0, end_dim0, start_dim1, end_dim1].</description>
</argument>
<argument category="attribute" name="pad_const" type="tensor_t" shape="-" tensor-element-type="in_out_t">
<description>Constant value to be used as padding</description>
<rank min="0" max="0"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor of same type as the input tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="1" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="boolean" in_out_t="bool_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
<typesupport mode="signed 8" in_out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16" in_out_t="i16_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 32" in_out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="fp8e4m3" in_out_t="fp8e4m3_t" version_added="1.0">
<op_profile name="EXT-FP8E4M3"/>
</typesupport>
<typesupport mode="fp8e5m2" in_out_t="fp8e5m2_t" version_added="1.0">
<op_profile name="EXT-FP8E5M2"/>
</typesupport>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
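<!--
Non-normative illustration: the padding shape_t holds start/end pairs per
dimension, so each output dimension is start + input + end. Using the rank-2
layout from the description with a hypothetical input of shape [3, 4] and
padding [1, 1, 2, 2]:

    dim0: 1 + 3 + 1 = 5
    dim1: 2 + 4 + 2 = 8
    output shape = [5, 8]
-->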
<operator>
<name>RESHAPE</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape1" tensor-element-type="in_out_t">
<description>Input tensor</description>
<levellimit value="rank(shape1)" limit="MAX_RANK"/>
<rank min="1" max="MAX_RANK"/>
</argument>
<argument category="input" name="shape" type="shape_t" shape="-" tensor-element-type="-">
<description>shape_t giving the new shape.</description>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor of same type, size as the input tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="1" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="boolean" in_out_t="bool_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
<typesupport mode="signed 8" in_out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16" in_out_t="i16_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 32" in_out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="fp8e4m3" in_out_t="fp8e4m3_t" version_added="1.0">
<op_profile name="EXT-FP8E4M3"/>
</typesupport>
<typesupport mode="fp8e5m2" in_out_t="fp8e5m2_t" version_added="1.0">
<op_profile name="EXT-FP8E5M2"/>
</typesupport>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>REVERSE</name>
<arguments>
<argument category="input" name="input" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Input tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="1" max="MAX_RANK"/>
</argument>
<argument category="attribute" name="axis" type="tensor_t" shape="-" tensor-element-type="i32_t">
<description>Axis to reverse, in range from 0 to rank(shape)-1</description>
<rank min="0" max="0"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor. Same shape as input tensor</description>
<rank min="1" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="boolean" in_out_t="bool_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
<typesupport mode="signed 8" in_out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16" in_out_t="i16_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 32" in_out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="fp8e4m3" in_out_t="fp8e4m3_t" version_added="1.0">
<op_profile name="EXT-FP8E4M3"/>
</typesupport>
<typesupport mode="fp8e5m2" in_out_t="fp8e5m2_t" version_added="1.0">
<op_profile name="EXT-FP8E5M2"/>
</typesupport>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>SLICE</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape1" tensor-element-type="in_out_t">
<description>Input tensor</description>
<rank min="1" max="MAX_RANK"/>
</argument>
<argument category="input" name="start" type="shape_t" shape="[rank(shape1)]" tensor-element-type="size_t">
<description>List of integer coordinates, of length equal to the rank of input1. Start coordinate for slicing.</description>
<rank min="1" max="1"/>
</argument>
<argument category="input" name="size" type="shape_t" shape="[rank(shape1)]" tensor-element-type="size_t">
<description>List of integer size values, of length equal to the rank of input1. Size of the input to be used.</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="1" max="1"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor of same type as the input tensor</description>
<rank min="1" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="boolean" in_out_t="bool_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
<typesupport mode="signed 8" in_out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16" in_out_t="i16_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 32" in_out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="fp8e4m3" in_out_t="fp8e4m3_t" version_added="1.0">
<op_profile name="EXT-FP8E4M3"/>
</typesupport>
<typesupport mode="fp8e5m2" in_out_t="fp8e5m2_t" version_added="1.0">
<op_profile name="EXT-FP8E5M2"/>
</typesupport>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
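<!--
Non-normative illustration: start and size each carry one entry per input
dimension, so output dimension i covers input coordinates start[i] up to
start[i] + size[i] - 1 and the output shape equals size. A minimal Python
check of that relationship:

    def slice_output_shape(start, size, input_shape):
        for s, z, d in zip(start, size, input_shape):
            assert 0 <= s and s + z <= d   # the slice must stay inside input1
        return list(size)                  # output shape equals size
-->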
<operator>
<name>TILE</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape1" tensor-element-type="in_out_t">
<description>Input tensor</description>
<rank min="1" max="MAX_RANK"/>
</argument>
<argument category="input" name="multiples" type="shape_t" shape="[rank(shape1)]" tensor-element-type="-">
<description>Number of times to replicate input1 in each dimension</description>
<rank min="1" max="1"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor of same type, rank as the input tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="1" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="boolean" in_out_t="bool_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
<typesupport mode="signed 8" in_out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16" in_out_t="i16_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 32" in_out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="fp8e4m3" in_out_t="fp8e4m3_t" version_added="1.0">
<op_profile name="EXT-FP8E4M3"/>
</typesupport>
<typesupport mode="fp8e5m2" in_out_t="fp8e5m2_t" version_added="1.0">
<op_profile name="EXT-FP8E5M2"/>
</typesupport>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
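<!--
Non-normative illustration: multiples has one entry per input dimension and
the output shape is the element-wise product of the input shape and multiples:

    def tile_output_shape(input_shape, multiples):
        return [d * m for d, m in zip(input_shape, multiples)]
-->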
<operator>
<name>TRANSPOSE</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape1" tensor-element-type="in_out_t">
<description>Input tensor</description>
<rank min="1" max="MAX_RANK"/>
</argument>
<argument category="attribute" name="perms" type="tensor_t" shape="[rank(shape1)]" tensor-element-type="i32_t">
<description>List of integers of length equal to the rank of input1. Values must be valid dimensions within shape1, and may not be repeated.</description>
<rank min="1" max="1"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor of same type, rank as the input tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="1" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="boolean" in_out_t="bool_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
<typesupport mode="signed 8" in_out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16" in_out_t="i16_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 32" in_out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="fp8e4m3" in_out_t="fp8e4m3_t" version_added="1.0">
<op_profile name="EXT-FP8E4M3"/>
</typesupport>
<typesupport mode="fp8e5m2" in_out_t="fp8e5m2_t" version_added="1.0">
<op_profile name="EXT-FP8E5M2"/>
</typesupport>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
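<!--
Non-normative illustration: perms is a permutation of the input dimensions,
so output dimension i takes its size from input dimension perms[i]:

    def transpose_output_shape(input_shape, perms):
        assert sorted(perms) == list(range(len(input_shape)))  # no repeats
        return [input_shape[p] for p in perms]
-->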
</operatorgroup>
<operatorgroup name="scatter-gather">
<operator>
<name>GATHER</name>
<arguments>
<argument category="input" name="values" type="tensor_t" shape="[N,K,C]" tensor-element-type="in_out_t">
<description>3D value tensor</description>
<rank min="3" max="3"/>
</argument>
<argument category="input" name="indices" type="tensor_t" shape="[N,W]" tensor-element-type="size_t">
<description>2D index tensor</description>
<rank min="2" max="2"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="[N,W,C]" tensor-element-type="in_out_t">
<description>3D output tensor</description>
<rank min="3" max="3"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="signed 8" in_out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16" in_out_t="i16_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 32" in_out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="fp8e4m3" in_out_t="fp8e4m3_t" version_added="1.0">
<op_profile name="EXT-FP8E4M3"/>
</typesupport>
<typesupport mode="fp8e5m2" in_out_t="fp8e5m2_t" version_added="1.0">
<op_profile name="EXT-FP8E5M2"/>
</typesupport>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
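<!--
Non-normative illustration: with values of shape [N, K, C] and indices of
shape [N, W], each output element selects a length-C row along the K axis.
A hedged Python sketch of the indexing implied by the argument shapes:

    def gather(values, indices):
        # output[n][w][c] = values[n][indices[n][w]][c]
        return [[values[n][indices[n][w]] for w in range(len(indices[n]))]
                for n in range(len(indices))]
-->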
<operator>
<name>SCATTER</name>
<arguments>
<argument category="input" name="values_in" type="tensor_t" shape="[N,K,C]" tensor-element-type="in_out_t">
<description>3D values in tensor</description>
<rank min="3" max="3"/>
</argument>
<argument category="input" name="indices" type="tensor_t" shape="[N,W]" tensor-element-type="size_t">
<description>2D index tensor</description>
<rank min="2" max="2"/>
</argument>
<argument category="input" name="input" type="tensor_t" shape="[N,W,C]" tensor-element-type="in_out_t">
<description>3D input tensor</description>
<rank min="3" max="3"/>
</argument>
<argument category="output" name="values_out" type="tensor_t" shape="[N,K,C]" tensor-element-type="in_out_t">
<description>3D output tensor</description>
<rank min="3" max="3"/>
</argument>
</arguments>
<types>
<type name='in_out_t'/>
</types>
<typesupport mode="signed 8" in_out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16" in_out_t="i16_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 32" in_out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="fp8e4m3" in_out_t="fp8e4m3_t" version_added="1.0">
<op_profile name="EXT-FP8E4M3"/>
</typesupport>
<typesupport mode="fp8e5m2" in_out_t="fp8e5m2_t" version_added="1.0">
<op_profile name="EXT-FP8E5M2"/>
</typesupport>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
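<!--
Non-normative illustration: SCATTER starts from values_in of shape [N, K, C]
and writes each [N, W, C] input row into the K position named by indices. A
hedged sketch of the indexing implied by the argument shapes (behaviour for
duplicate indices within a batch is not addressed here):

    def scatter(values_in, indices, input):
        values_out = [[list(row) for row in batch] for batch in values_in]
        for n in range(len(indices)):
            for w in range(len(indices[n])):
                values_out[n][indices[n][w]] = list(input[n][w])
        return values_out
-->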
</operatorgroup>
<operatorgroup name="image">
<operator>
<name>RESIZE</name>
<arguments>
<argument category="input" name="input" type="tensor_t" shape="[N,IH,IW,C]" tensor-element-type="in_t">
<description>Input tensor</description>
<rank min="4" max="4"/>
</argument>
<argument category="input" name="scale" type="shape_t" shape="[4]" tensor-element-type="-">
<description>[scale_y_n, scale_y_d, scale_x_n, scale_x_d]</description>
<levellimit value="scale_y_n/scale_y_d" limit="MAX_SCALE"/>
<levellimit value="scale_x_n/scale_x_d" limit="MAX_SCALE"/>
<rank min="1" max="1"/>
</argument>
<argument category="input" name="offset" type="shape_t" shape="[2]" tensor-element-type="-">
<description>[offset_y, offset_x]</description>
<rank min="1" max="1"/>
</argument>
<argument category="input" name="border" type="shape_t" shape="[2]" tensor-element-type="-">
<description>[border_y, border_x]</description>
<rank min="1" max="1"/>
</argument>
<argument category="attribute" name="mode" type="tensor_t" shape="-" tensor-element-type="resize_mode_t">
<description>BILINEAR or NEAREST</description>
<rank min="0" max="0"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="[N,OH,OW,C]" tensor-element-type="out_t">
<description>Output tensor</description>
<rank min="4" max="4"/>
</argument>
</arguments>
<types>
<type name='resize_t'/>
<type name='in_t'/>
<type name='out_t'/>
</types>
<typesupport mode="signed 8, bilinear" resize_t="i16_t" in_t="i8_t" out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 8, nearest" resize_t="i16_t" in_t="i8_t" out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16, bilinear" resize_t="i16_t" in_t="i16_t" out_t="i48_t" version_added="1.0">
<op_profile name="EXT-INT16"/>
</typesupport>
<typesupport mode="signed 16, nearest" resize_t="i16_t" in_t="i16_t" out_t="i16_t" version_added="1.0">
<op_profile name="EXT-INT16"/>
</typesupport>
<typesupport mode="fp16" resize_t="fp16_t" in_t="fp16_t" out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" resize_t="bf16_t" in_t="bf16_t" out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" resize_t="fp32_t" in_t="fp32_t" out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
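<!--
Non-normative illustration: scale is given as two rationals, scale_y_n/scale_y_d
and scale_x_n/scale_x_d, and each ratio is bounded by the MAX_SCALE level
limit. A minimal Python check of that limit only; the output-size and
coordinate arithmetic is defined by the operator pseudocode and is not
reproduced here:

    def check_resize_scale(scale, max_scale):
        scale_y_n, scale_y_d, scale_x_n, scale_x_d = scale
        assert scale_y_n / scale_y_d <= max_scale
        assert scale_x_n / scale_x_d <= max_scale
-->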
</operatorgroup>
<operatorgroup name="type-conversion">
<operator>
<name>CAST</name>
<arguments>
<argument category="input" name="input" type="tensor_t" shape="shape" tensor-element-type="in_t">
<description>Input tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="out_t">
<description>Output tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_t'/>
<type name='out_t'/>
</types>
<typesupport mode="bool to signed 8" in_t="bool_t" out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="bool to signed 16" in_t="bool_t" out_t="i16_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="bool to signed 32" in_t="bool_t" out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 8 to bool" in_t="i8_t" out_t="bool_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 8 to signed 16" in_t="i8_t" out_t="i16_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 8 to signed 32" in_t="i8_t" out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 8 to fp16" in_t="i8_t" out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="signed 8 to bf16" in_t="i8_t" out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="signed 8 to fp32" in_t="i8_t" out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="signed 16 to bool" in_t="i16_t" out_t="bool_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16 to signed 8" in_t="i16_t" out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16 to signed 32" in_t="i16_t" out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16 to fp16" in_t="i16_t" out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="signed 16 to bf16" in_t="i16_t" out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="signed 16 to fp32" in_t="i16_t" out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="signed 32 to bool" in_t="i32_t" out_t="bool_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 32 to signed 8" in_t="i32_t" out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 32 to signed 16" in_t="i32_t" out_t="i16_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 32 to fp16" in_t="i32_t" out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="signed 32 to bf16" in_t="i32_t" out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="signed 32 to fp32" in_t="i32_t" out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16 to signed 8" in_t="bf16_t" out_t="i8_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="bf16 to signed 16" in_t="bf16_t" out_t="i16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="bf16 to signed 32" in_t="bf16_t" out_t="i32_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="bf16 to fp8e4m3" in_t="bf16_t" out_t="fp8e4m3_t" version_added="1.0">
<op_profile name="EXT-BF16" and_name="EXT-FP8E4M3"/>
</typesupport>
<typesupport mode="bf16 to fp8e5m2" in_t="bf16_t" out_t="fp8e5m2_t" version_added="1.0">
<op_profile name="EXT-BF16" and_name="EXT-FP8E5M2"/>
</typesupport>
<typesupport mode="bf16 to fp32" in_t="bf16_t" out_t="fp32_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp8e4m3 to fp16" in_t="fp8e4m3_t" out_t="fp16_t" version_added="1.0">
<op_profile name="EXT-FP8E4M3"/>
</typesupport>
<typesupport mode="fp8e4m3 to bf16" in_t="fp8e4m3_t" out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-FP8E4M3" and_name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp8e4m3 to fp32" in_t="fp8e4m3_t" out_t="fp32_t" version_added="1.0">
<op_profile name="EXT-FP8E4M3"/>
</typesupport>
<typesupport mode="fp8e5m2 to fp16" in_t="fp8e5m2_t" out_t="fp16_t" version_added="1.0">
<op_profile name="EXT-FP8E5M2"/>
</typesupport>
<typesupport mode="fp8e5m2 to bf16" in_t="fp8e5m2_t" out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-FP8E5M2" and_name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp8e5m2 to fp32" in_t="fp8e5m2_t" out_t="fp32_t" version_added="1.0">
<op_profile name="EXT-FP8E5M2"/>
</typesupport>
<typesupport mode="fp16 to signed 8" in_t="fp16_t" out_t="i8_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="fp16 to signed 16" in_t="fp16_t" out_t="i16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="fp16 to signed 32" in_t="fp16_t" out_t="i32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="fp16 to fp8e4m3" in_t="fp16_t" out_t="fp8e4m3_t" version_added="1.0">
<op_profile name="EXT-FP8E4M3"/>
</typesupport>
<typesupport mode="fp16 to fp8e5m2" in_t="fp16_t" out_t="fp8e5m2_t" version_added="1.0">
<op_profile name="EXT-FP8E5M2"/>
</typesupport>
<typesupport mode="fp16 to fp32" in_t="fp16_t" out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="fp32 to signed 8" in_t="fp32_t" out_t="i8_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="fp32 to signed 16" in_t="fp32_t" out_t="i16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="fp32 to signed 32" in_t="fp32_t" out_t="i32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="fp32 to fp8e4m3" in_t="fp32_t" out_t="fp8e4m3_t" version_added="1.0">
<op_profile name="EXT-FP8E4M3"/>
</typesupport>
<typesupport mode="fp32 to fp8e5m2" in_t="fp32_t" out_t="fp8e5m2_t" version_added="1.0">
<op_profile name="EXT-FP8E5M2"/>
</typesupport>
<typesupport mode="fp32 to bf16" in_t="fp32_t" out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32 to fp16" in_t="fp32_t" out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>RESCALE</name>
<arguments>
<argument category="input" name="input" type="tensor_t" shape="shape" tensor-element-type="in_t">
<description>Input tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="out_t">
<description>Output tensor with the same shape as input</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="attribute" name="input_zp" type="tensor_t" shape="-" tensor-element-type="in_t">
<description>Input tensor zero point. int8/uint8 can have zero point within their valid range. uint16 zero point must be either 0 or 32768. All other types must have zero point equal to 0.</description>
<rank min="0" max="0"/>
</argument>
<argument category="attribute" name="output_zp" type="tensor_t" shape="-" tensor-element-type="out_t">
<description>Output tensor zero point. int8/uint8 can have zero point within their valid range. uint16 zero point must be either 0 or 32768. All other types must have zero point equal to 0.</description>
<rank min="0" max="0"/>
</argument>
<argument category="input" name="multiplier" type="tensor_t" shape="[NC]" tensor-element-type="mul_t">
<description>Scaling multiplier array</description>
<rank min="1" max="1"/>
</argument>
<argument category="input" name="shift" type="tensor_t" shape="[NC]" tensor-element-type="i8_t">
<description>Scaling shift array</description>
<rank min="1" max="1"/>
</argument>
<argument category="attribute" name="scale32" type="tensor_t" shape="-" tensor-element-type="bool_t">
<description>if (scale32) mul_t=i32_t else mul_t=i16_t</description>
<rank min="0" max="0"/>
</argument>
<argument category="attribute" name="double_round" type="tensor_t" shape="-" tensor-element-type="bool_t">
<description>Select double round mode</description>
<rank min="0" max="0"/>
</argument>
<argument category="attribute" name="per_channel" type="tensor_t" shape="-" tensor-element-type="bool_t">
<description>if (per_channel) NC=shape[rank(shape)-1] else NC=1</description>
<rank min="0" max="0"/>
</argument>
<argument category="attribute" name="input_unsigned" type="tensor_t" shape="-" tensor-element-type="bool_t">
<description>If True, treat the input values as unsigned.</description>
<rank min="0" max="0"/>
</argument>
<argument category="attribute" name="output_unsigned" type="tensor_t" shape="-" tensor-element-type="bool_t">
<description>If True, treat the output values as unsigned.</description>
<rank min="0" max="0"/>
</argument>
</arguments>
<types>
<type name='in_t'/>
<type name='out_t'/>
</types>
<typesupport mode="8-bit to 8-bit" in_t="i8_t" out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="8-bit to 16-bit" in_t="i8_t" out_t="i16_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="8-bit to 32-bit" in_t="i8_t" out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="16-bit to 8-bit" in_t="i16_t" out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="16-bit to 16-bit" in_t="i16_t" out_t="i16_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="16-bit to 32-bit" in_t="i16_t" out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="32-bit to 8-bit" in_t="i32_t" out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="32-bit to 16-bit" in_t="i32_t" out_t="i16_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="32-bit to 32-bit" in_t="i32_t" out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="48-bit to 8-bit" in_t="i48_t" out_t="i8_t" version_added="1.0">
<op_profile name="EXT-INT16"/>
</typesupport>
<typesupport mode="48-bit to 16-bit" in_t="i48_t" out_t="i16_t" version_added="1.0">
<op_profile name="EXT-INT16"/>
</typesupport>
<typesupport mode="48-bit to 32-bit" in_t="i48_t" out_t="i32_t" version_added="1.0">
<op_profile name="EXT-INT16"/>
</typesupport>
</operator>
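<!--
Non-normative illustration: multiplier and shift are arrays of length NC,
where NC is the size of the innermost dimension when per_channel is true and
1 otherwise, and scale32 selects i32_t or i16_t multipliers. A hedged Python
sketch of the per-channel selection only, assuming a simple multiply and
shift; the normative rounding, clamping and zero-point handling are in the
operator pseudocode:

    def rescale_element(value, channel, multiplier, shift, per_channel):
        c = channel if per_channel else 0
        return (int(value) * multiplier[c]) >> shift[c]
-->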
</operatorgroup>
<operatorgroup name="data-node">
<operator>
<name>CONST</name>
<arguments>
<argument category="attribute" name="values" type="tensor_t" shape="shape" tensor-element-type="out_t">
<description>Constant values</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="out_t">
<description>Output tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='out_t' />
</types>
<typesupport mode="Boolean" out_t="bool_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="4-bit" out_t="i4_t" version_added="1.0">
<op_profile name="EXT-INT4"/>
</typesupport>
<typesupport mode="8-bit" out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="16-bit" out_t="i16_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="32-bit" out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="48-bit" out_t="i48_t" version_added="1.0">
<op_profile name="EXT-INT16"/>
</typesupport>
<typesupport mode="fp8e4m3" out_t="fp8e4m3_t" version_added="1.0">
<op_profile name="EXT-FP8E4M3"/>
</typesupport>
<typesupport mode="fp8e5m2" out_t="fp8e5m2_t" version_added="1.0">
<op_profile name="EXT-FP8E5M2"/>
</typesupport>
<typesupport mode="fp16" out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>IDENTITY</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
<argument category="output" name="output" type="tensor_t" shape="shape" tensor-element-type="in_out_t">
<description>Output tensor of the same type and size as the input tensor</description>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='in_out_t' />
</types>
<typesupport mode="Boolean" in_out_t="bool_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="4-bit" in_out_t="i4_t" version_added="1.0">
<op_profile name="EXT-INT4"/>
</typesupport>
<typesupport mode="8-bit" in_out_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="16-bit" in_out_t="i16_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="32-bit" in_out_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="48-bit" in_out_t="i48_t" version_added="1.0">
<op_profile name="EXT-INT16"/>
</typesupport>
<typesupport mode="fp8e4m3" in_out_t="fp8e4m3_t" version_added="1.0">
<op_profile name="EXT-FP8E4M3"/>
</typesupport>
<typesupport mode="fp8e5m2" in_out_t="fp8e5m2_t" version_added="1.0">
<op_profile name="EXT-FP8E5M2"/>
</typesupport>
<typesupport mode="fp16" in_out_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_out_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_out_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
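<!-- Non-normative sketch: both data-node operators are element-for-element
copies. CONST materializes its compile-time values attribute as the output
tensor; IDENTITY copies its input tensor unchanged. The byte-copy below is an
illustration, assuming a densely packed tensor representation.

#include <string.h>

/* CONST: output = values (shape and element type match by construction). */
static void const_op(void *output, const void *values, size_t num_bytes) {
    memcpy(output, values, num_bytes);
}

/* IDENTITY: output = input1, same shape and element type. */
static void identity_op(void *output, const void *input1, size_t num_bytes) {
    memcpy(output, input1, num_bytes);
}
-->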
</operatorgroup>
<operatorgroup name="custom">
<operator>
<name>CUSTOM</name>
<arguments>
<argument category="input" name="input_list" type="tensor_list_t" shape="-" tensor-element-type="-">
<description>List of input tensors</description>
<levellimit value="tensor_list_shape(input_list)" limit="MAX_TENSOR_LIST_SIZE"/>
</argument>
<argument category="attribute" name="operator_name" type="String" shape="-" tensor-element-type="-">
<description>String which tells the backend which custom operator is being called</description>
</argument>
<argument category="attribute" name="domain_name" type="String" shape="-" tensor-element-type="-">
<description>String identifier which can help avoid name collisions on the operator_name field.
Different implementations of a given operator would be in different domains.
Implementations can choose which domains they want to support.</description>
</argument>
<argument category="attribute" name="implementation_attrs" type="String" shape="-" tensor-element-type="-">
<description>String value containing implementation-specific attributes which apply to the operation</description>
</argument>
<argument category="output" name="output_list" type="tensor_list_t" shape="-" tensor-element-type="-">
<description>List of output tensors</description>
<levellimit value="tensor_list_shape(output_list)" limit="MAX_TENSOR_LIST_SIZE"/>
</argument>
</arguments>
<types>
<type name='tensor_list_t' />
</types>
<typesupport mode="All" tensor_list_t="-" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
</operator>
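<!-- Non-normative sketch: a hypothetical way an implementation might dispatch
CUSTOM operators. The registry type and lookup function below are assumptions
for illustration; only the (domain_name, operator_name, implementation_attrs,
input_list, output_list) contract comes from the specification.

#include <stddef.h>
#include <string.h>

typedef int (*custom_kernel_fn)(const char *implementation_attrs,
                                void *const *inputs, size_t num_inputs,
                                void *const *outputs, size_t num_outputs);

typedef struct {
    const char *domain_name;    /* groups implementations to avoid collisions */
    const char *operator_name;  /* which custom operator is being called */
    custom_kernel_fn fn;
} custom_entry_t;

/* Linear lookup over a backend-provided table of registered kernels. */
static custom_kernel_fn find_custom(const custom_entry_t *table, size_t n,
                                    const char *domain, const char *op) {
    for (size_t i = 0; i < n; i++) {
        if (strcmp(table[i].domain_name, domain) == 0 &&
            strcmp(table[i].operator_name, op) == 0) {
            return table[i].fn;
        }
    }
    return NULL;  /* unsupported domain/operator pair */
}
-->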
</operatorgroup>
<operatorgroup name="control-flow">
<operator>
<name>COND_IF</name>
<arguments>
<argument category="input" name="condition" type="tensor_t" shape="shape" tensor-element-type="bool_t">
<description>Input condition as a size 1 tensor</description>
<rank min="1" max="MAX_RANK"/>
</argument>
<argument category="input" name="input_list" type="tensor_list_t" shape="-" tensor-element-type="-">
<description>List of input tensors</description>
<levellimit value="tensor_list_shape(input_list)" limit="MAX_TENSOR_LIST_SIZE"/>
</argument>
<argument category="attribute" name="then_graph" type="tosa_graph_t" shape="-" tensor-element-type="-">
<description>TOSA graph to execute if condition is true</description>
</argument>
<argument category="attribute" name="else_graph" type="tosa_graph_t" shape="-" tensor-element-type="-">
<description>TOSA graph to execute if condition is false</description>
</argument>
<argument category="output" name="output_list" type="tensor_list_t" shape="-" tensor-element-type="-">
<description>List of output tensors</description>
<levellimit value="tensor_list_shape(output_list)" limit="MAX_TENSOR_LIST_SIZE"/>
</argument>
</arguments>
<types>
<type name='cond_t' />
</types>
<typesupport mode="Boolean" cond_t="bool_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
</operator>
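<!-- Non-normative sketch: COND_IF selects one of two sub-graphs based on the
condition value. tosa_graph_t, tensor_list_t and tosa_run_graph below are
hypothetical placeholders for however an implementation represents and
executes graphs; reading the condition as a single int is also an assumption.

typedef struct tosa_graph tosa_graph_t;
typedef struct tensor_list tensor_list_t;

/* Assumed helper: run a graph on an input list, producing an output list. */
extern void tosa_run_graph(const tosa_graph_t *graph,
                           const tensor_list_t *inputs,
                           tensor_list_t *outputs);

static void cond_if(int condition_value,            /* element of the size-1 condition tensor */
                    const tensor_list_t *input_list,
                    const tosa_graph_t *then_graph,
                    const tosa_graph_t *else_graph,
                    tensor_list_t *output_list) {
    const tosa_graph_t *graph = condition_value ? then_graph : else_graph;
    tosa_run_graph(graph, input_list, output_list);
}
-->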
<operator>
<name>WHILE_LOOP</name>
<arguments>
<argument category="input" name="input_list" type="tensor_list_t" shape="-" tensor-element-type="-">
<description>List of input tensors</description>
<levellimit value="tensor_list_shape(input_list)" limit="MAX_TENSOR_LIST_SIZE"/>
</argument>
<argument category="attribute" name="cond_graph" type="tosa_graph_t" shape="-" tensor-element-type="-">
<description>TOSA graph to evaluate the condition</description>
</argument>
<argument category="attribute" name="body_graph" type="tosa_graph_t" shape="-" tensor-element-type="-">
<description>TOSA graph to execute the loop body</description>
</argument>
<argument category="output" name="output_list" type="tensor_list_t" shape="-" tensor-element-type="-">
<description>List of output tensors</description>
<levellimit value="tensor_list_shape(output_list)" limit="MAX_TENSOR_LIST_SIZE"/>
</argument>
</arguments>
<types>
<type name='cond_t' />
</types>
<typesupport mode="Boolean" cond_t="bool_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
</operator>
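<!-- Non-normative sketch: WHILE_LOOP repeatedly evaluates cond_graph on the
current values and, while it yields true, runs body_graph to produce the next
values. tosa_run_graph, read_bool_scalar and tensor_list_copy are hypothetical
helpers, assumed for illustration only.

typedef struct tosa_graph tosa_graph_t;
typedef struct tensor_list tensor_list_t;

extern void tosa_run_graph(const tosa_graph_t *graph,
                           const tensor_list_t *inputs,
                           tensor_list_t *outputs);
extern int  read_bool_scalar(const tensor_list_t *list);   /* condition result */
extern void tensor_list_copy(tensor_list_t *dst, const tensor_list_t *src);

static void while_loop(const tensor_list_t *input_list,
                       const tosa_graph_t *cond_graph,
                       const tosa_graph_t *body_graph,
                       tensor_list_t *output_list,
                       tensor_list_t *cond_result,  /* scratch for cond_graph output */
                       tensor_list_t *scratch) {    /* scratch for body_graph output */
    tensor_list_copy(output_list, input_list);      /* loop-carried values */
    tosa_run_graph(cond_graph, output_list, cond_result);
    while (read_bool_scalar(cond_result)) {
        tosa_run_graph(body_graph, output_list, scratch);
        tensor_list_copy(output_list, scratch);
        tosa_run_graph(cond_graph, output_list, cond_result);
    }
}
-->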
</operatorgroup>
<operatorgroup name="variable">
<operator>
<name>VARIABLE</name>
<arguments>
<argument category="attribute" name="uid" type="tensor_t" shape="-" tensor-element-type="i32_t">
<description>Globally unique identifier for the declared variable tensor.</description>
<rank min="0" max="0"/>
</argument>
<argument category="attribute" name="var_shape" type="tensor_t" shape="var_shape" tensor-element-type="size_t">
<description>The variable tensor shape</description>
<rank min="1" max="1"/>
</argument>
<argument category="attribute" name="type" type="tensor_t" shape="-" tensor-element-type="var_t">
<description>Type of the tensor variable elements.</description>
<rank min="0" max="0"/>
</argument>
<argument category="attribute" name="initial_value" type="tensor_t" shape="shape" tensor-element-type="in_t" optional="true">
<description>Initial value of the variable tensor. This argument is optional with default value NULL.</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='var_t' />
</types>
<typesupport mode="signed 8" var_t="i8_t" version_added="1.0">
<op_profile name="BI" and_name="EXT-VARIABLE"/>
</typesupport>
<typesupport mode="fp16" var_t="fp16_t" version_added="1.0">
<op_profile name="MI" and_name="EXT-VARIABLE"/>
</typesupport>
<typesupport mode="fp32" var_t="fp32_t" version_added="1.0">
<op_profile name="MI" and_name="EXT-VARIABLE"/>
</typesupport>
</operator>
<operator>
<name>VARIABLE_WRITE</name>
<arguments>
<argument category="attribute" name="uid" type="tensor_t" shape="-" tensor-element-type="i32_t">
<description>Globally unique identifier of the variable tensor being written to</description>
<rank min="0" max="0"/>
</argument>
<argument category="input" name="input1" type="tensor_t" shape="shape" tensor-element-type="in_t">
<description>Input tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='var_t' />
</types>
<typesupport mode="signed 8" var_t="i8_t" version_added="1.0">
<op_profile name="BI" and_name="EXT-VARIABLE"/>
</typesupport>
<typesupport mode="fp16" var_t="fp16_t" version_added="1.0">
<op_profile name="MI" and_name="EXT-VARIABLE"/>
</typesupport>
<typesupport mode="fp32" var_t="fp32_t" version_added="1.0">
<op_profile name="MI" and_name="EXT-VARIABLE"/>
</typesupport>
</operator>
<operator>
<name>VARIABLE_READ</name>
<arguments>
<argument category="attribute" name="uid" type="tensor_t" shape="-" tensor-element-type="i32_t">
<description>Globally unique identifier of the variable tensor being read from</description>
<rank min="0" max="0"/>
</argument>
<argument category="output" name="output1" type="tensor_t" shape="shape" tensor-element-type="out_t">
<description>Output tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="0" max="MAX_RANK"/>
</argument>
</arguments>
<types>
<type name='var_t' />
</types>
<typesupport mode="signed 8" var_t="i8_t" version_added="1.0">
<op_profile name="BI" and_name="EXT-VARIABLE"/>
</typesupport>
<typesupport mode="fp16" var_t="fp16_t" version_added="1.0">
<op_profile name="MI" and_name="EXT-VARIABLE"/>
</typesupport>
<typesupport mode="fp32" var_t="fp32_t" version_added="1.0">
<op_profile name="MI" and_name="EXT-VARIABLE"/>
</typesupport>
</operator>
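<!-- Non-normative sketch: the three variable operators act on a persistent
store keyed by uid. The slot structure below is a hypothetical illustration;
only the uid/var_shape/type/initial_value contract comes from the specification.

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
    int32_t uid;        /* globally unique identifier */
    size_t  num_bytes;  /* size of the variable tensor payload */
    void   *data;       /* current value of the variable tensor */
} variable_slot_t;

/* VARIABLE: declare a slot, optionally seeding it with initial_value. */
static void variable_declare(variable_slot_t *slot, int32_t uid,
                             size_t num_bytes, const void *initial_value) {
    slot->uid = uid;
    slot->num_bytes = num_bytes;
    slot->data = malloc(num_bytes);
    if (initial_value != NULL) {
        memcpy(slot->data, initial_value, num_bytes);
    }
}

/* VARIABLE_WRITE: the variable identified by uid takes the value of input1. */
static void variable_write(variable_slot_t *slot, const void *input1) {
    memcpy(slot->data, input1, slot->num_bytes);
}

/* VARIABLE_READ: output1 takes the current value of the variable. */
static void variable_read(const variable_slot_t *slot, void *output1) {
    memcpy(output1, slot->data, slot->num_bytes);
}
-->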
</operatorgroup>
<operatorgroup name="shape">
<operator>
<name>ADD_SHAPE</name>
<arguments>
<argument category="input" name="input1" type="shape_t" shape="-" tensor-element-type="-">
<description>Input 1</description>
</argument>
<argument category="input" name="input2" type="shape_t" shape="-" tensor-element-type="-">
<description>Input 2</description>
</argument>
<argument category="output" name="output" type="shape_t" shape="-" tensor-element-type="-">
<description>Output shape</description>
</argument>
</arguments>
<types>
<type name='shape_t' />
</types>
<typesupport mode="shape" shape_t="size_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
</operator>
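<!-- Non-normative sketch: ADD_SHAPE, SUB_SHAPE, MUL_SHAPE and DIV_SHAPE all
combine two shape values element by element. The loop below is an
illustration assuming both inputs hold the same number of elements; the
64-bit signed element type and the non-zero divisor are assumptions.

#include <stddef.h>
#include <stdint.h>

typedef int64_t size_value_t;   /* stand-in for the spec's size_t shape element */

static void binary_shape_op(char op, const size_value_t *input1,
                            const size_value_t *input2, size_value_t *output,
                            size_t n) {
    for (size_t i = 0; i < n; i++) {
        switch (op) {
            case '+': output[i] = input1[i] + input2[i]; break;
            case '-': output[i] = input1[i] - input2[i]; break;
            case '*': output[i] = input1[i] * input2[i]; break;
            case '/': output[i] = input1[i] / input2[i]; break;  /* assumes input2[i] != 0 */
        }
    }
}
-->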
<operator>
<name>CONCAT_SHAPE</name>
<arguments>
<argument category="input" name="input1" type="shape_list_t" shape="-" tensor-element-type="-">
<description>List of input shape values</description>
</argument>
<argument category="output" name="output" type="shape_t" shape="-" tensor-element-type="-">
<description>Output shape</description>
</argument>
</arguments>
<types>
<type name='shape_t' />
</types>
<typesupport mode="shape" shape_t="size_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
</operator>
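<!-- Non-normative sketch: CONCAT_SHAPE flattens a list of shape values into a
single shape value in order. The array-of-arrays representation below is an
illustrative assumption.

#include <stddef.h>
#include <stdint.h>

typedef int64_t size_value_t;   /* assumed 64-bit signed shape element */

/* Copy each input shape into the output, one after another. */
static void concat_shape(const size_value_t *const *inputs,
                         const size_t *input_sizes, size_t num_inputs,
                         size_value_t *output) {
    size_t pos = 0;
    for (size_t i = 0; i < num_inputs; i++) {
        for (size_t j = 0; j < input_sizes[i]; j++) {
            output[pos] = inputs[i][j];
            pos++;
        }
    }
}
-->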
<operator>
<name>CONST_SHAPE</name>
<arguments>
<argument category="attribute" name="values" type="shape_t" shape="-" tensor-element-type="-">
<description>Constant shape</description>
</argument>
<argument category="output" name="output" type="shape_t" shape="-" tensor-element-type="-">
<description>Output shape</description>
</argument>
</arguments>
<types>
<type name='shape_t' />
</types>
<typesupport mode="shape" shape_t="size_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>DIM</name>
<arguments>
<argument category="input" name="input1" type="tensor_t" shape="shape" tensor-element-type="in_t">
<description>Input tensor</description>
<levellimit value="rank(shape)" limit="MAX_RANK"/>
<rank min="1" max="MAX_RANK"/>
</argument>
<argument category="attribute" name="axis" type="tensor_t" shape="-" tensor-element-type="i32_t">
<description>Axis in range from 0 to rank(shape) - 1</description>
<rank min="0" max="0"/>
</argument>
<argument category="output" name="output" type="shape_t" shape="-" tensor-element-type="-" >
<description>Output shape of size 1 giving the size of the input shape along the given axis</description>
</argument>
</arguments>
<types>
<type name='in_t'/>
</types>
<typesupport mode="boolean" in_t="bool_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
<typesupport mode="signed 8" in_t="i8_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 16" in_t="i16_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="signed 32" in_t="i32_t" version_added="1.0">
<op_profile name="BI"/>
</typesupport>
<typesupport mode="fp8e4m3" in_t="fp8e4m3_t" version_added="1.0">
<op_profile name="EXT-FP8E4M3"/>
</typesupport>
<typesupport mode="fp8e5m2" in_t="fp8e5m2_t" version_added="1.0">
<op_profile name="EXT-FP8E5M2"/>
</typesupport>
<typesupport mode="fp16" in_t="fp16_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
<typesupport mode="bf16" in_t="bf16_t" version_added="1.0">
<op_profile name="EXT-BF16"/>
</typesupport>
<typesupport mode="fp32" in_t="fp32_t" version_added="1.0">
<op_profile name="MI"/>
</typesupport>
</operator>
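<!-- Non-normative sketch: DIM reads one dimension of the input tensor's shape
and returns it as a shape value of size 1. Representing the input shape as a
plain array is an assumption for illustration.

#include <stdint.h>

typedef int64_t size_value_t;   /* assumed 64-bit signed shape element */

/* output is a size-1 shape holding shape[axis]; assumes 0 <= axis < rank. */
static void dim_op(const size_value_t *shape, int32_t axis,
                   size_value_t *output) {
    output[0] = shape[axis];
}
-->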
<operator>
<name>DIV_SHAPE</name>
<arguments>
<argument category="input" name="input1" type="shape_t" shape="-" tensor-element-type="-">
<description>Input 1</description>
</argument>
<argument category="input" name="input2" type="shape_t" shape="-" tensor-element-type="-">
<description>Input 2</description>
</argument>
<argument category="output" name="output" type="shape_t" shape="-" tensor-element-type="-">
<description>Output shape</description>
</argument>
</arguments>
<types>
<type name='shape_t' />
</types>
<typesupport mode="shape" shape_t="size_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>MUL_SHAPE</name>
<arguments>
<argument category="input" name="input1" type="shape_t" shape="-" tensor-element-type="-">
<description>Input 1</description>
</argument>
<argument category="input" name="input2" type="shape_t" shape="-" tensor-element-type="-">
<description>Input 2</description>
</argument>
<argument category="output" name="output" type="shape_t" shape="-" tensor-element-type="-">
<description>Output shape</description>
</argument>
</arguments>
<types>
<type name='shape_t' />
</types>
<typesupport mode="shape" shape_t="size_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
</operator>
<operator>
<name>SUB_SHAPE</name>
<arguments>
<argument category="input" name="input1" type="shape_t" shape="-" tensor-element-type="-">
<description>Input 1</description>
</argument>
<argument category="input" name="input2" type="shape_t" shape="-" tensor-element-type="-">
<description>Input 2</description>
</argument>
<argument category="output" name="output" type="shape_t" shape="-" tensor-element-type="-">
<description>Output shape</description>
</argument>
</arguments>
<types>
<type name='shape_t' />
</types>
<typesupport mode="shape" shape_t="size_t" version_added="1.0">
<op_profile name="BI"/>
<op_profile name="MI"/>
</typesupport>
</operator>
</operatorgroup>
</operators>
<enum name="resize_mode_t" description="Valid resize types">
<enumval value="0" name="NEAREST_NEIGHBOR" description="Nearest neighbor resize"/>
<enumval value="1" name="BILINEAR" description="Bilinear resize"/>
</enum>
<enum name="acc_type_t" description="Allowed accumulator types">
<enumval value="0" name="INT32" description="32-bit integer"/>
<enumval value="1" name="FP16" description="16-bit floating-point"/>
<enumval value="2" name="FP32" description="32-bit floating-point"/>
<enumval value="3" name="INT48" description="48-bit integer"/>
</enum>
<enum name="var_t" description="Variable tensor data type">
<enumval value="0" name="BOOLEAN" description="Boolean"/>
<enumval value="1" name="INT8" description="8-bit integer"/>
<enumval value="2" name="INT16" description="16-bit integer"/>
<enumval value="3" name="INT32" description="32-bit integer"/>
<enumval value="4" name="FP16" description="16-bit floating-point"/>
<enumval value="5" name="BF16" description="16-bit brain floating-point"/>
<enumval value="6" name="FP32" description="32-bit floating-point"/>
</enum>
</tosa>