# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Description:
# Unit tests for support_operators
Michael McGeagh | 37ded34 | 2020-10-01 15:37:44 +0100 | [diff] [blame^] | 19 | import numpy as np |
| 20 | |
Louis Verhaard | fa2f92a | 2020-09-21 11:56:18 +0200 | [diff] [blame] | 21 | from ethosu.vela.data_type import DataType |
| 22 | from ethosu.vela.supported_operators import SupportedOperators |
| 23 | from ethosu.vela.tensor import create_const_tensor |
Michael McGeagh | 37ded34 | 2020-10-01 15:37:44 +0100 | [diff] [blame^] | 24 | from ethosu.vela.tensor import QuantizationParameters |
Louis Verhaard | fa2f92a | 2020-09-21 11:56:18 +0200 | [diff] [blame] | 25 | from ethosu.vela.tensor import Tensor |
| 26 | from ethosu.vela.test import testutil |
| 27 | |
# Single SupportedOperators instance shared by every test in this module.
support = SupportedOperators()
| 29 | |
| 30 | |
def create_strided_slice_op(in_shape, out_shape, start_offsets, end_offsets):
    """Build a StridedSlice op with unit strides and every mask attribute cleared."""
    num_dims = len(end_offsets)
    ifm = Tensor(in_shape, DataType.uint8, "in")
    begin = create_const_tensor("begin", [len(start_offsets)], DataType.uint8, start_offsets)
    end = create_const_tensor("end", [num_dims], DataType.uint8, end_offsets)
    strides = create_const_tensor("strides", [num_dims], DataType.uint8, [1] * num_dims)
    ofm = Tensor(out_shape, DataType.uint8, "out")
    attrs = {"ellipsis_mask": 0, "new_axis_mask": 0, "shrink_axis_mask": 0, "begin_mask": 0, "end_mask": 0}
    return testutil.create_op("StridedSlice", [ifm, begin, end, strides], ofm, attrs=attrs)
| 39 | |
| 40 | |
def create_strided_slice():
    """Return a StridedSlice op the checker accepts, for tests to mutate into invalid forms."""
    sslice = create_strided_slice_op([1, 10, 10, 10], [1, 5, 5, 10], [127, 2, 2, 0], [0, 7, -3, 0])
    # Mask bits chosen so the otherwise out-of-range begin/end offsets are ignored
    sslice.attrs["begin_mask"] = 1
    sslice.attrs["end_mask"] = 9
    # Sanity check: the baseline op must be supported before any mutation
    assert support.is_operator_supported(sslice)
    return sslice
| 48 | |
| 49 | |
def test_strided_slice():
    """Exercise the StridedSlice support constraints, one mutation at a time."""
    # A non-zero new_axis_mask on its own is accepted
    sslice = create_strided_slice()
    sslice.attrs["new_axis_mask"] = 2
    assert support.is_operator_supported(sslice)
    # Likewise a non-zero shrink_axis_mask on its own
    sslice = create_strided_slice()
    sslice.attrs["shrink_axis_mask"] = 3
    assert support.is_operator_supported(sslice)
    # Having both masks non-zero at the same time is rejected
    sslice.attrs["new_axis_mask"] = 2
    assert not support.is_operator_supported(sslice)
    # The begin tensor must carry concrete values
    sslice.inputs[1].values = None
    assert not support.is_operator_supported(sslice)
    # A stride other than 1 is rejected
    sslice = create_strided_slice()
    sslice.inputs[3].values = [1, 1, 2, 1]
    assert not support.is_operator_supported(sslice)
    # An extra (fifth) input tensor makes the input count wrong
    sslice = create_strided_slice()
    sslice.add_input_tensor(sslice.inputs[0].clone())
    assert not support.is_operator_supported(sslice)
    # A non-zero ellipsis_mask is rejected
    sslice = create_strided_slice()
    sslice.attrs["ellipsis_mask"] = 1
    assert not support.is_operator_supported(sslice)
    # Several ways of producing end offset <= begin offset, all rejected:
    # 1) raise the begin values above the ends
    sslice = create_strided_slice()
    sslice.inputs[1].values = [0, 7, 2, 0]
    assert not support.is_operator_supported(sslice)
    # 2) lower the end values below the begins
    sslice = create_strided_slice()
    sslice.inputs[2].values = [0, 7, 2, 0]
    assert not support.is_operator_supported(sslice)
    # 3) clear begin_mask so the oversized begin offset takes effect
    sslice = create_strided_slice()
    sslice.attrs["begin_mask"] = 0
    assert not support.is_operator_supported(sslice)
    # 4) clear end_mask so the negative end offset takes effect
    sslice = create_strided_slice()
    sslice.attrs["end_mask"] = 0
    assert not support.is_operator_supported(sslice)
Michael McGeagh | 37ded34 | 2020-10-01 15:37:44 +0100 | [diff] [blame^] | 90 | |
| 91 | |
def test_constraint_tens_defined_shape():
    """A tensor shape containing None must be rejected."""
    ifm = Tensor([1, 8, None, 8], DataType.uint8, "in")
    ofm = Tensor([1, 8, 8, 8], DataType.uint8, "out")
    relu = testutil.create_op("Relu", [ifm], ofm)
    assert not support.is_operator_supported(relu)
| 98 | |
| 99 | |
def test_constraint_tens_shapeless():
    """Shapeless ([]) tensors are only valid as inputs to certain op types."""
    # Mul accepts a shapeless (scalar) input
    mul = testutil.create_elemwise_op("Mul", "scalar_mul", [1, 8, 8, 8], [], [1, 8, 8, 8])
    assert support.is_operator_supported(mul)
    # No op may produce a shapeless output
    mul = testutil.create_elemwise_op("Mul", "scalar_mul", [1, 8, 8, 8], [1, 8, 8, 8], [])
    assert not support.is_operator_supported(mul)
    # Relu is not among the op types that accept a shapeless input
    ifm = Tensor([], DataType.uint8, "in")
    ofm = Tensor([1, 8, 8, 8], DataType.uint8, "out")
    relu = testutil.create_op("Relu", [ifm], ofm)
    assert not support.is_operator_supported(relu)
| 112 | |
| 113 | |
def test_constraint_tens_shape_size():
    """Tensors with more than 4 dimensions must be rejected."""
    shape_5d = [1, 1, 8, 8, 8]
    relu = testutil.create_op("Relu", [Tensor(shape_5d, DataType.uint8, "in")], Tensor(shape_5d, DataType.uint8, "out"))
    assert not support.is_operator_supported(relu)
| 120 | |
| 121 | |
def test_constraint_tens_dtype():
    """Only uint8/int8/int16 are generally allowed; int32 only for select op types."""
    # float32 tensors are never supported
    ifm = Tensor([1, 8, 8, 8], DataType.float32, "in")
    ofm = Tensor([1, 8, 8, 8], DataType.float32, "out")
    relu = testutil.create_op("Relu", [ifm], ofm)
    assert not support.is_operator_supported(relu)
    # int32 is accepted for Mul...
    mul = testutil.create_elemwise_op("Mul", "scalar_mul", [1, 8, 8, 8], [], [1, 8, 8, 8], DataType.int32)
    assert support.is_operator_supported(mul)
    # ...but not for Relu
    ifm = Tensor([1, 8, 8, 8], DataType.int32, "in")
    ofm = Tensor([1, 8, 8, 8], DataType.int32, "out")
    relu = testutil.create_op("Relu", [ifm], ofm)
    assert not support.is_operator_supported(relu)
| 135 | |
| 136 | |
def test_constraint_tens_dimension():
    """Every tensor dimension must lie in the inclusive range 1-65535."""
    for bad_dim in (0, 65536):  # one below and one above the allowed range
        shape = [1, 8, 8, bad_dim]
        relu = testutil.create_op("Relu", [Tensor(shape, DataType.uint8, "in")], Tensor(shape, DataType.uint8, "out"))
        assert not support.is_operator_supported(relu)
| 147 | |
| 148 | |
def test_constraint_faf():
    """A fused activation function, if set, must be a valid activation op type."""
    ifm = Tensor([1, 8, 8, 8], DataType.uint8, "in")
    ofm = Tensor([1, 8, 8, 8], DataType.uint8, "out")
    # Conv2D is not an activation function, so this must be rejected
    relu = testutil.create_op("Relu", [ifm], ofm, attrs={"fused_activation_function": "Conv2D"})
    assert not support.is_operator_supported(relu)
| 155 | |
| 156 | |
def test_constraint_tens_quant_scale():
    """An infinite quantization scale must be rejected."""
    mul = testutil.create_elemwise_op("Mul", "scalar_mul", [1, 8, 8, 8], [], [1, 8, 8, 8])
    quant = QuantizationParameters()
    quant.scale_f32 = np.inf
    mul.inputs[0].quantization = quant
    assert not support.is_operator_supported(mul)