//
// This confidential and proprietary software may be used only as
// authorised by a licensing agreement from ARM Limited
// (C) COPYRIGHT 2020-2022 ARM Limited
// ALL RIGHTS RESERVED
// The entire notice above must be reproduced on all authorised
// copies and copies may only be made to the extent permitted
// by a licensing agreement from ARM Limited.
=== Elementwise Unary Operators
==== ABS
Elementwise absolute value operation.
*Arguments:*
|===
|Argument|Type|Name|Shape|Description
|Input|in_out_t*|input1|shape|Input tensor
|Output|in_out_t*|output|shape|Output tensor of same type, size as the input tensor
|===
*Floating-point behavior:*
|===
|Input|-infinity|+infinity|-0|+0|NaN
|Output|+infinity|+infinity|+0|+0|NaN
|===
*Operation Function:*
[source,c++]
----
for_each(index in shape) {
    in_out_t value1 = tensor_read<in_out_t>(input1, shape, index);
    if (is_floating_point(in_out_t) && value1 == -0.0) {
        value1 = 0.0;
    }
    if (value1 < 0.0)
        value1 = apply_sub<in_out_t>(0, value1);
    tensor_write<in_out_t>(output, shape, index, value1);
}
----
*Supported Data Types:*
|===
|Profile|Mode|in_out_t
|Any|signed 32|int32_t
|MI, MT|fp16|fp16_t
|MI, MT|bf16|bf16_t
|MI, MT|fp32|fp32_t
|===
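NOTE: As a non-normative illustration, the following self-contained C++ sketch models the scalar behavior above for a single float element; the helper name `abs_ref` and the use of standard `<cmath>` facilities are assumptions of this sketch, not part of the specification.
[source,c++]
----
// Illustrative scalar model of ABS: -0.0 is first normalized to +0.0,
// then values below zero are subtracted from zero, mirroring the
// pseudocode above.
#include <cassert>
#include <cmath>

float abs_ref(float value1) {
    if (value1 == -0.0f) {   // also true for +0.0f, which is harmless
        value1 = 0.0f;
    }
    if (value1 < 0.0f) {
        value1 = 0.0f - value1;
    }
    return value1;
}

int main() {
    assert(!std::signbit(abs_ref(-0.0f)));   // -0 maps to +0
    assert(abs_ref(-1.5f) == 1.5f);
    assert(abs_ref(-INFINITY) == INFINITY);  // -infinity maps to +infinity
    assert(std::isnan(abs_ref(NAN)));        // NaN propagates
    return 0;
}
----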
==== BITWISE_NOT
Elementwise bitwise NOT of input tensor.
*Arguments:*
|===
|Argument|Type|Name|Shape|Description
|Input|in_out_t*|input1|shape|Input tensor
|Output|in_out_t*|output|shape|Output tensor of same type, size as the input tensor
|===
*Operation Function:*
[source,c++]
----
for_each(index in shape) {
    in_out_t value1 = tensor_read<in_out_t>(input1, shape, index);
    in_out_t result = ~value1;
    tensor_write<in_out_t>(output, shape, index, result);
}
----
*Supported Data Types:*
|===
|Profile|Mode|in_out_t
|Any|signed 8|int8_t
|Any|signed 16|int16_t
|Any|signed 32|int32_t
|===
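NOTE: A non-normative C++ sketch of the scalar behavior for int8_t follows; the cast back to `int8_t` reflects that C++ promotes the operand of `~` to `int`, whereas the specification's result stays at the element width. The helper name `bitwise_not_ref` is illustrative only.
[source,c++]
----
// Illustrative scalar model of BITWISE_NOT on int8_t (two's complement).
#include <cassert>
#include <cstdint>

int8_t bitwise_not_ref(int8_t value1) {
    return static_cast<int8_t>(~value1);   // ~ promotes to int; truncate back
}

int main() {
    assert(bitwise_not_ref(0) == -1);      // ~0b00000000 == 0b11111111
    assert(bitwise_not_ref(-1) == 0);
    assert(bitwise_not_ref(0x55) == static_cast<int8_t>(0xAA));
    return 0;
}
----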
==== CEIL
Elementwise ceiling operation.
*Arguments:*
|===
|Argument|Type|Name|Shape|Description
|Input|in_out_t*|input1|shape|Input tensor
|Output|in_out_t*|output|shape|Output tensor of same type, size as the input tensor
|===
*Floating-point behavior:*
|===
|Input|-infinity|+infinity|-0|+0|NaN
|Output|-infinity|+infinity|-0|+0|NaN
|===
*Operation Function:*
[source,c++]
----
for_each(index in shape) {
    in_out_t value1 = tensor_read<in_out_t>(input1, shape, index);
    in_out_t result = apply_ceil<in_out_t>(value1);
    tensor_write<in_out_t>(output, shape, index, result);
}
----
*Supported Data Types:*
|===
|Profile|Mode|in_out_t
|MI, MT|fp16|fp16_t
|MI, MT|bf16|bf16_t
|MI, MT|fp32|fp32_t
|===
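NOTE: `apply_ceil` corresponds to rounding toward positive infinity. As a non-normative check that assumes IEEE-754 float semantics, `std::ceil` exhibits the special-value behavior listed in the table above.
[source,c++]
----
// Illustrative check of the CEIL special-value table with std::ceil.
#include <cassert>
#include <cmath>

int main() {
    assert(std::ceil(1.2f) == 2.0f);
    assert(std::ceil(-1.2f) == -1.0f);
    assert(std::signbit(std::ceil(-0.0f)));      // -0 stays -0
    assert(std::ceil(INFINITY) == INFINITY);
    assert(std::isnan(std::ceil(NAN)));
    return 0;
}
----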
==== CLZ
Elementwise count leading zeros operation.
*Arguments:*
|===
|Argument|Type|Name|Shape|Description
|Input|in_out_t*|input1|shape|Input tensor
|Output|in_out_t*|output|shape|Output tensor of same type, size as the input tensor
|===
*Operation Function:*
[source,c++]
----
for_each(index in shape) {
    in_out_t value1 = tensor_read<in_out_t>(input1, shape, index);
    in_out_t result = count_leading_zeros(value1);
    tensor_write<in_out_t>(output, shape, index, result);
}
----
*Supported Data Types:*
|===
|Profile|Mode|in_out_t
|Any|signed 32|int32_t
|===
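NOTE: A non-normative scalar model of `count_leading_zeros` for a 32-bit element follows; an implementation would typically use a hardware instruction or `std::countl_zero` (C++20) instead. The helper name `clz_ref` is illustrative only. An input of zero yields a count of 32.
[source,c++]
----
// Illustrative scalar model of CLZ: count zero bits from the most
// significant bit downwards; all 32 bits zero gives a count of 32.
#include <cassert>
#include <cstdint>

int32_t clz_ref(int32_t value1) {
    uint32_t bits = static_cast<uint32_t>(value1);
    int32_t count = 0;
    for (int i = 31; i >= 0 && ((bits >> i) & 1u) == 0; --i) {
        ++count;
    }
    return count;
}

int main() {
    assert(clz_ref(0) == 32);
    assert(clz_ref(1) == 31);
    assert(clz_ref(-1) == 0);           // sign bit set, no leading zeros
    assert(clz_ref(0x00FF0000) == 8);
    return 0;
}
----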
==== EXP
Elementwise exponential (e to the x) operation.
*Arguments:*
|===
|Argument|Type|Name|Shape|Description
|Input|in_out_t*|input1|shape|Input tensor
|Output|in_out_t*|output|shape|Output tensor of same type, size as the input tensor
|===
*Floating-point behavior:*
|===
|Input|-infinity|+infinity|-0|+0|NaN
|Output|+0|+infinity|1|1|NaN
|===
*Operation Function:*
[source,c++]
----
for_each(index in shape) {
    in_out_t value1 = tensor_read<in_out_t>(input1, shape, index);
    in_out_t result = apply_exp<in_out_t>(value1);
    tensor_write<in_out_t>(output, shape, index, result);
}
----
*Supported Data Types:*
|===
|Profile|Mode|in_out_t
|MI, MT|fp16|fp16_t
|MI, MT|bf16|bf16_t
|MI, MT|fp32|fp32_t
|===
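NOTE: As a non-normative check that assumes IEEE-754 semantics, `std::exp` follows the special-value table above: e raised to -infinity underflows to +0 and e raised to a signed zero is exactly 1.
[source,c++]
----
// Illustrative check of the EXP special-value table with std::exp.
#include <cassert>
#include <cmath>

int main() {
    assert(std::exp(-INFINITY) == 0.0 && !std::signbit(std::exp(-INFINITY)));
    assert(std::isinf(std::exp(INFINITY)) && std::exp(INFINITY) > 0);
    assert(std::exp(-0.0) == 1.0 && std::exp(0.0) == 1.0);
    assert(std::isnan(std::exp(NAN)));
    return 0;
}
----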
==== FLOOR
Elementwise floor operation.
*Arguments:*
|===
|Argument|Type|Name|Shape|Description
|Input|in_out_t*|input1|shape|Input tensor
|Output|in_out_t*|output|shape|Output tensor of same type, size as the input tensor
|===
*Floating-point behavior:*
|===
|Input|-infinity|+infinity|-0|+0|NaN
|Output|-infinity|+infinity|-0|+0|NaN
|===
*Operation Function:*
[source,c++]
----
for_each(index in shape) {
    in_out_t value1 = tensor_read<in_out_t>(input1, shape, index);
    in_out_t result = apply_floor<in_out_t>(value1);
    tensor_write<in_out_t>(output, shape, index, result);
}
----
*Supported Data Types:*
|===
|Profile|Mode|in_out_t
|MI, MT|fp16|fp16_t
|MI, MT|bf16|bf16_t
|MI, MT|fp32|fp32_t
|===
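NOTE: A non-normative check of the table above follows, together with the identity floor(x) == -ceil(-x), which relates FLOOR to CEIL (including for signed zeros and infinities). This sketch assumes IEEE-754 float semantics.
[source,c++]
----
// Illustrative check of the FLOOR special-value table and its
// relationship to CEIL.
#include <cassert>
#include <cmath>

int main() {
    assert(std::floor(1.8f) == 1.0f);
    assert(std::floor(-1.2f) == -2.0f);
    assert(std::signbit(std::floor(-0.0f)));        // -0 stays -0
    assert(std::floor(-INFINITY) == -INFINITY);
    assert(std::isnan(std::floor(NAN)));
    for (float x : {-2.5f, -0.0f, 0.0f, 1.25f, INFINITY}) {
        assert(std::floor(x) == -std::ceil(-x));
    }
    return 0;
}
----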
==== LOG
Elementwise natural logarithm operation.
*Arguments:*
|===
|Argument|Type|Name|Shape|Description
|Input|in_out_t*|input1|shape|Input tensor
|Output|in_out_t*|output|shape|Output tensor of same type, size as the input tensor
|===
*Floating-point behavior:*
|===
|Input|-infinity|+infinity|-0|+0|NaN
|Output|NaN|+infinity|-infinity|-infinity|NaN
|===
*Operation Function:*
[source,c++]
----
for_each(index in shape) {
    in_out_t value1 = tensor_read<in_out_t>(input1, shape, index);
    in_out_t result = apply_log<in_out_t>(value1);
    tensor_write<in_out_t>(output, shape, index, result);
}
----
*Supported Data Types:*
|===
|Profile|Mode|in_out_t
|MI, MT|fp16|fp16_t
|MI, MT|bf16|bf16_t
|MI, MT|fp32|fp32_t
|===
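NOTE: As a non-normative check that assumes IEEE-754 semantics, `std::log` follows the special-value table above: the logarithm of any negative value, including -infinity, is NaN, and the logarithm of a signed zero is -infinity.
[source,c++]
----
// Illustrative check of the LOG special-value table with std::log.
#include <cassert>
#include <cmath>

int main() {
    assert(std::isnan(std::log(-1.0)));
    assert(std::isnan(std::log(-INFINITY)));
    assert(std::log(0.0) == -INFINITY && std::log(-0.0) == -INFINITY);
    assert(std::log(INFINITY) == INFINITY);
    assert(std::isnan(std::log(NAN)));
    return 0;
}
----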
==== LOGICAL_NOT
Elementwise logical NOT of the input tensor.
*Arguments:*
|===
|Argument|Type|Name|Shape|Description
|Input|in_out_t*|input1|shape|Input tensor
|Output|in_out_t*|output|shape|Output tensor of same type, size as the input tensor
|===
*Operation Function:*
[source,c++]
----
for_each(index in shape) {
    in_out_t value1 = tensor_read<in_out_t>(input1, shape, index);
    in_out_t result = !value1;
    tensor_write<in_out_t>(output, shape, index, result);
}
----
*Supported Data Types:*
|===
|Profile|Mode|in_out_t
|Any|bool|bool_t
|===
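NOTE: A minimal, non-normative scalar model for bool_t follows; the helper name `logical_not_ref` is illustrative only.
[source,c++]
----
// Illustrative scalar model of LOGICAL_NOT on a boolean element.
#include <cassert>

bool logical_not_ref(bool value1) {
    return !value1;
}

int main() {
    assert(logical_not_ref(true) == false);
    assert(logical_not_ref(false) == true);
    return 0;
}
----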
==== NEGATE
Elementwise negation operation.
*Arguments:*
|===
|Argument|Type|Name|Shape|Description
|Input|in_out_t*|input1|shape|Input tensor
|Attribute|in_out_t|input1_zp|-|Input 1 zero point. Must be zero for non-int8 types.
|Attribute|in_out_t|output_zp|-|Output zero point. Must be zero for non-int8 types.
|Output|in_out_t*|output|shape|Output tensor of same type, size as the input tensor
|===
*Floating-point behavior:*
|===
|Input|-infinity|+infinity|-0|+0|NaN
|Output|+infinity|-infinity|+0|-0|NaN
|===
*Operation Function:*
[source,c++]
----
ERROR_IF(in_out_t != int8_t && input1_zp != 0) // Zero point only for int8_t
ERROR_IF(in_out_t != int8_t && output_zp != 0) // Zero point only for int8_t
for_each(index in shape) {
    in_out_t value1 = tensor_read<in_out_t>(input1, shape, index);
    acc_t value = (acc_t)value1 - input1_zp;
    value = apply_sub<acc_t>(0, value);
    in_out_t result = (in_out_t)apply_clip<acc_t>(value + output_zp, minimum<in_out_t>, maximum<in_out_t>);
    tensor_write<in_out_t>(output, shape, index, result);
}
----
*Supported Data Types:*
|===
|Profile|Mode|in_out_t|acc_t
|Any|signed 8|int8_t|int32_t
|Any|signed 16|int16_t|int32_t
|Any|signed 32|int32_t|int32_t
|MI, MT|fp16|fp16_t|fp16_t
|MI, MT|bf16|bf16_t|bf16_t
|MI, MT|fp32|fp32_t|fp32_t
|===
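NOTE: The following non-normative C++ sketch models the int8_t case with zero points; `negate_ref_i8` is an illustrative helper, and the int32_t intermediate corresponds to acc_t in the table above. Because the result is clipped to the int8_t range, negating -128 saturates to 127 rather than overflowing.
[source,c++]
----
// Illustrative scalar model of NEGATE for int8_t with zero points.
#include <algorithm>
#include <cassert>
#include <cstdint>

int8_t negate_ref_i8(int8_t value1, int32_t input1_zp, int32_t output_zp) {
    int32_t value = static_cast<int32_t>(value1) - input1_zp;  // remove input zero point
    value = 0 - value;                                         // negate in the accumulator type
    value = std::clamp(value + output_zp, -128, 127);          // re-apply zero point and clip
    return static_cast<int8_t>(value);
}

int main() {
    assert(negate_ref_i8(5, 0, 0) == -5);
    assert(negate_ref_i8(-128, 0, 0) == 127);   // +128 is not representable; clipped
    assert(negate_ref_i8(10, 3, -3) == -10);    // (10 - 3) = 7, negate, then add -3
    return 0;
}
----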
==== RECIPROCAL
Elementwise reciprocal operation. For integer data types, the TABLE operator should be used with the appropriate ranges.
*Arguments:*
|===
|Argument|Type|Name|Shape|Description
|Input|in_out_t*|input1|shape|Input tensor
|Output|in_out_t*|output|shape|Output tensor of same type, size as the input tensor
|===
*Floating-point behavior:*
|===
|Input|-infinity|+infinity|-0|+0|NaN
|Output|-0|+0|-infinity|+infinity|NaN
|===
*Operation Function:*
[source,c++]
----
for_each(index in shape) {
    in_out_t value1 = tensor_read<in_out_t>(input1, shape, index);
    in_out_t result = 1.0 / value1;
    tensor_write<in_out_t>(output, shape, index, result);
}
----
*Supported Data Types:*
|===
|Profile|Mode|in_out_t
|MI, MT|fp16|fp16_t
|MI, MT|bf16|bf16_t
|MI, MT|fp32|fp32_t
|===
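NOTE: As a non-normative check that assumes IEEE-754 division, 1.0 / x reproduces the special-value table above: dividing by a signed zero gives the correspondingly signed infinity, and the reciprocal of an infinity is a signed zero.
[source,c++]
----
// Illustrative check of the RECIPROCAL special-value table.
#include <cassert>
#include <cmath>

int main() {
    assert(1.0 / -INFINITY == 0.0 && std::signbit(1.0 / -INFINITY));   // -0
    assert(1.0 / INFINITY == 0.0 && !std::signbit(1.0 / INFINITY));    // +0
    assert(1.0 / -0.0 == -INFINITY);
    assert(1.0 / 0.0 == INFINITY);
    assert(std::isnan(1.0 / NAN));
    return 0;
}
----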
==== RSQRT
Elementwise reciprocal square root operation. For integer data types, the TABLE operator should be used with the appropriate ranges.
*Arguments:*
|===
|Argument|Type|Name|Shape|Description
|Input|in_out_t*|input1|shape|Input tensor
|Output|in_out_t*|output|shape|Output tensor of same type, size as the input tensor
|===
*Floating-point behavior:*
|===
|Input|-infinity|+infinity|-0|+0|NaN
|Output|NaN|+0|-infinity|+infinity|NaN
|===
*Operation Function:*
[source,c++]
----
for_each(index in shape) {
    in_out_t value1 = tensor_read<in_out_t>(input1, shape, index);
    in_out_t result;
    if (value1 < 0) {
        result = NaN;
    }
    else {
        result = 1.0 / apply_sqrt<in_out_t>(value1);
    }
    tensor_write<in_out_t>(output, shape, index, result);
}
----
*Supported Data Types:*
|===
|Profile|Mode|in_out_t
|MI, MT|fp16|fp16_t
|MI, MT|bf16|bf16_t
|MI, MT|fp32|fp32_t
|===
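NOTE: A non-normative scalar model follows; the helper name `rsqrt_ref` is illustrative only. Any negative input, including -infinity, yields NaN, while the signed zeros map to the correspondingly signed infinity via 1 / sqrt of a signed zero.
[source,c++]
----
// Illustrative scalar model of RSQRT: negative inputs produce NaN,
// otherwise the result is 1 / sqrt(x).
#include <cassert>
#include <cmath>

float rsqrt_ref(float value1) {
    if (value1 < 0.0f) {
        return NAN;
    }
    return 1.0f / std::sqrt(value1);
}

int main() {
    assert(rsqrt_ref(4.0f) == 0.5f);
    assert(std::isnan(rsqrt_ref(-1.0f)));
    assert(std::isnan(rsqrt_ref(-INFINITY)));
    assert(rsqrt_ref(-0.0f) == -INFINITY);   // sqrt(-0) = -0, and 1 / -0 = -infinity
    assert(rsqrt_ref(0.0f) == INFINITY);
    assert(rsqrt_ref(INFINITY) == 0.0f);
    assert(std::isnan(rsqrt_ref(NAN)));
    return 0;
}
----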