# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Description:
# Unit tests for graph_optimiser
import numpy as np
import pytest

from ethosu.vela.data_type import DataType
from ethosu.vela.graph_optimiser import calc_explicit_padding
from ethosu.vela.graph_optimiser import convert_batched_fc_shape
from ethosu.vela.graph_optimiser import optimise_graph_a
from ethosu.vela.graph_optimiser import optimise_pad
from ethosu.vela.graph_optimiser import rewrite_fully_connected_input
from ethosu.vela.nn_graph import Graph
from ethosu.vela.operation import Op
from ethosu.vela.operation import Padding
from ethosu.vela.rewrite_graph import verify_graph_health
from ethosu.vela.tensor import create_const_tensor
from ethosu.vela.tensor import Shape4D
from ethosu.vela.tensor import Tensor
from ethosu.vela.test import testutil


def test_convert_batched_fc():
    """Tests shape conversion of a batched fully connected operation"""
    ifm_shape = [4, 8]
    ifm = create_const_tensor("test_in", ifm_shape, np.uint8, np.zeros(ifm_shape))
    w_shape = [8, 4]
    weights = create_const_tensor("weight_in", w_shape, np.uint8, np.zeros(w_shape))
    ofm = Tensor(ifm.shape, np.uint8, "test_out")
    op = testutil.create_op(Op.FullyConnected, [ifm, weights], ofm)

    ifm.consumer_list.append(op)

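    # Clone the op so the original tensors and shapes can be compared after the rewrite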
    prev_op = op.clone()
    prev_op.ifm_shapes = op.ifm_shapes.copy()
    prev_op.ofm_shapes = op.ofm_shapes.copy()

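    # The rewrite is expected to map batch 4 onto height and width, giving 4D
    # ifm/ofm shapes of 1x2x2x8, while leaving the original 2D tensors untouched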
    rewrite_fully_connected_input(op, None, None)
    conv_op = convert_batched_fc_shape(op, None, None)
    assert conv_op.ifm == prev_op.ifm
    assert conv_op.ofm == prev_op.ofm
    assert op.ifm_shapes[0] == Shape4D([1, 2, 2, 8])
    assert op.ofm_shapes[0] == Shape4D([1, 2, 2, 8])
    assert conv_op.type == Op.FullyConnected
    assert len(conv_op.ifm.shape) == 2
    assert len(conv_op.ofm.shape) == 2
    assert conv_op.ifm.shape == conv_op.ofm.shape

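    # Repeat with batch size 1; the shapes should then be left unchanged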
    ifm.shape = [1, 8]
    weights.shape = [8, 1]
    ofm.shape = [1, 8]
    op = testutil.create_op(Op.FullyConnected, [ifm, weights], ofm)
    ifm.consumer_list.append(op)

    prev_op = op.clone()
    prev_op.ifm_shapes = op.ifm_shapes.copy()
    prev_op.ofm_shapes = op.ofm_shapes.copy()

    rewrite_fully_connected_input(op, None, None)
    conv_op = convert_batched_fc_shape(op, None, None)

    assert conv_op.ifm == prev_op.ifm
    assert conv_op.ofm == prev_op.ofm
    assert op.ifm_shapes[0] == prev_op.ifm_shapes[0]
    assert op.ofm_shapes[0] == prev_op.ofm_shapes[0]
    assert conv_op.type == Op.FullyConnected
    assert len(conv_op.ifm.shape) == 2
    assert len(conv_op.ofm.shape) == 2
    assert conv_op.ifm.shape == conv_op.ofm.shape


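# Each case is [(input_size, stride, filter_size, explicit_pad_before, explicit_pad_after), expected],
# where expected is the (before, after) padding that calc_explicit_padding should return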
explicit_padding_test_data = [
    # Kernel size 2
    [(17, 1, 2, 1, 1), (1, 1)],
    [(18, 1, 2, 0, 1), (0, 1)],
    [(18, 1, 2, 1, 0), (1, 0)],
    # Kernel size 3
    [(18, 2, 3, 1, 1), (1, 0)],
    [(25, 2, 3, 1, 1), (1, 1)],
    # Kernel size 4
    [(18, 1, 4, 1, 2), (1, 2)],
    [(18, 1, 4, 2, 1), (2, 1)],
    [(19, 1, 4, 2, 2), (2, 2)],
    # Kernel size 5
    [(19, 1, 5, 1, 2), (1, 2)],
    [(19, 1, 5, 0, 2), (0, 2)],
    [(19, 1, 5, 1, 0), (1, 0)],
    # Kernel size 21
    [(41, 2, 21, 8, 10), (8, 10)],
    [(41, 3, 21, 10, 10), (10, 9)],
    [(42, 3, 21, 10, 10), (10, 8)],
    [(42, 3, 21, 9, 10), (9, 9)],
    [(41, 3, 21, 10, 6), (10, 6)],
]


@pytest.mark.parametrize("test_input, expected_result", explicit_padding_test_data)
def test_calc_explicit_padding(test_input, expected_result):
    input_size, stride, filter_size, explicit_pad_before, explicit_pad_after = test_input
    before, after = calc_explicit_padding(input_size, stride, filter_size, explicit_pad_before, explicit_pad_after)
    assert (before, after) == expected_result


def test_optimise_pad():
    """
    Tests that the PAD operator is bypassed when followed by a convolution operator,
    and that the padding of the convolution operation is correctly updated
    """
    # Create Pad operation followed by Conv2D
    quant = testutil.default_quant_params()
    in_tens = Tensor([1, 76, 75, 64], DataType.uint8, "input")
    in_tens.quantization = quant
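    # Pad 2 rows at the top, 1 at the bottom and 1 column on each side (NHWC),
    # growing the 76x75 input to the 79x77 pad output below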
    pad_input = create_const_tensor("pad_input", [4, 2], DataType.int32, [[0, 0], [2, 1], [1, 1], [0, 0]])
    temp_tens = Tensor([1, 79, 77, 64], DataType.uint8, "pad_out")
    temp_tens.quantization = quant.clone()
    out_tens = Tensor([1, 76, 75, 64], DataType.uint8, "output")
    out_tens.quantization = quant.clone()
    weight_tens = Tensor([5, 3, 64, 64], DataType.uint8, "weights")
    weight_tens.values = np.zeros(weight_tens.shape)
    weight_tens.quant_values = np.zeros(weight_tens.shape, np.uint8)
    weight_tens.quantization = quant.clone()

    bias_tens = Tensor([64], DataType.int32, "biases")
    pad_op = testutil.create_op(Op.Pad, [in_tens, pad_input], temp_tens)
    attrs = {"padding": Padding.VALID, "stride_w": 2, "stride_h": 2, "dilation_w_factor": 1, "dilation_h_factor": 1}
    attrs["strides"] = (1, attrs["stride_h"], attrs["stride_w"], 1)
    pad_op.run_on_npu = True
    conv2d_op = testutil.create_op(Op.Conv2D, [temp_tens, weight_tens, bias_tens], out_tens, attrs)
    conv2d_op.run_on_npu = True
    nng = Graph()
    sg = testutil.create_subgraph([pad_op, conv2d_op])
    nng.subgraphs.append(sg)
    arch = testutil.create_arch()

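    # The optimisation should fold the PAD operator into the convolution, which then
    # uses explicit padding and reads the original input tensor directly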
    optimise_pad(conv2d_op, nng, arch)

    op = sg.output_tensors[0].ops[0]
    assert op.type == Op.Conv2D
    assert op.attrs["padding"] == Padding.EXPLICIT
    assert op.attrs["explicit_padding"] == (2, 1, 1, 1)
    assert op.ifm.shape == [1, 76, 75, 64]
    assert pad_op not in op.ifm.ops


def test_remove_reshape():
    """
    Tests that the expected Reshape ops are removed during graph optimisation
    """

    def setup_network():
        quant = testutil.default_quant_params()
        # create reshape1 op
        ifm_shape = [64, 16]
        reshape1_ofm_shape = [1, 4, 16, 16]
        reshape1_ifm = create_const_tensor("reshape1_in", ifm_shape, DataType.uint8, np.zeros(ifm_shape))
        reshape1_ifm.quantization = quant
        reshape1_ofm = create_const_tensor(
            "reshape1_out", reshape1_ofm_shape, DataType.uint8, np.zeros(reshape1_ofm_shape)
        )
        reshape1_ofm.quantization = quant
        shape_tens = create_const_tensor("reshape1_shape", [1], DataType.int32, reshape1_ofm_shape)
        reshape1_op = testutil.create_op(Op.Reshape, [reshape1_ifm, shape_tens], reshape1_ofm, set_ifm_ofm_shapes=False)
        reshape1_op.attrs["new_shape"] = reshape1_ofm_shape
        reshape1_op.run_on_npu = True

        # create reshape2 op
        reshape2_ofm_shape = [1, 8, 8, 16]
        reshape2_ofm = create_const_tensor(
            "reshape2_out", reshape2_ofm_shape, DataType.uint8, np.zeros(reshape2_ofm_shape)
        )
        reshape2_ofm.quantization = quant
        shape_tens = create_const_tensor("reshape2_shape", [1], DataType.int32, reshape2_ofm_shape)
        reshape2_op = testutil.create_op(Op.Reshape, [reshape1_ofm, shape_tens], reshape2_ofm, set_ifm_ofm_shapes=False)
        reshape2_op.attrs["new_shape"] = reshape2_ofm_shape
        reshape2_op.run_on_npu = True

        # create conv op
        conv_ofm = Tensor([1, 8, 8, 16], DataType.uint8, "output")
        conv_ofm.quantization = quant.clone()
        weight_tens = Tensor([1, 1, 16, 16], DataType.uint8, "weights")
        weight_tens.values = np.zeros(weight_tens.shape)
        weight_tens.quant_values = np.zeros(weight_tens.shape, np.uint8)
        weight_tens.quantization = quant.clone()
        bias_tens = Tensor([16], DataType.int32, "biases")

        attrs = {"padding": Padding.SAME, "stride_w": 1, "stride_h": 1, "dilation_w_factor": 1, "dilation_h_factor": 1}
        attrs["strides"] = (1, attrs["stride_h"], attrs["stride_w"], 1)

        conv2d_op = testutil.create_op(
            Op.Conv2D, [reshape1_ofm, weight_tens, bias_tens], conv_ofm, attrs=attrs, set_ifm_ofm_shapes=False
        )
        conv2d_op.run_on_npu = True

        # create reshape3 op
        ofm_shape = [8, 8, 16]
        reshape3_ofm = create_const_tensor("reshape3_out", ofm_shape, DataType.uint8, np.zeros(ofm_shape))
        reshape3_ofm.quantization = quant
        shape_tens = create_const_tensor("reshape3_shape", [1], DataType.int32, ofm_shape)
        reshape3_op = testutil.create_op(Op.Reshape, [conv_ofm, shape_tens], reshape3_ofm, set_ifm_ofm_shapes=False)
        reshape3_op.attrs["new_shape"] = ofm_shape
        reshape3_op.run_on_npu = True
        nng = Graph()
        sg = testutil.create_subgraph([reshape1_op, reshape2_op, conv2d_op, reshape3_op])
        nng.subgraphs.append(sg)

        return nng, reshape1_op, reshape2_op, conv2d_op, reshape3_op

    # Test 1: no Reshape op is expected to remain in the NPU subgraph,
    # but the first one will be placed on the CPU
    # Network is Reshape-Reshape-Conv-Reshape
    # Result is cpu_Reshape-Conv
    nng, reshape1_op, reshape2_op, conv2d_op, reshape3_op = setup_network()
    arch = testutil.create_arch()
    assert verify_graph_health(nng)
    nng = optimise_graph_a(nng, arch)
    assert verify_graph_health(nng)
    assert conv2d_op.ifm == reshape1_op.ofm
    assert conv2d_op.ofm == reshape3_op.ofm

    # Test 2: reshape2 has different quantisation, so this Reshape op is expected to remain
    # Network is Reshape-Reshape-Conv-Reshape
    # Expected result is cpu_Reshape-Reshape-Conv
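    # Removing a Reshape whose output zero point differs from its input would change
    # tensor values, so the optimiser must keep this one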
    nng, reshape1_op, reshape2_op, conv2d_op, reshape3_op = setup_network()
    quant_zp32 = testutil.default_quant_params()
    quant_zp32.zero_point = 32
    reshape2_op.ofm.quantization = quant_zp32
    assert verify_graph_health(nng)
    nng = optimise_graph_a(nng, arch)
    assert verify_graph_health(nng)
    assert conv2d_op.ofm == reshape3_op.ofm