# Copyright (C) 2021 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Common functions and definitions used during the graph optimization.
from typing import Tuple

import numpy as np

from . import lut
from .architecture_features import Accelerator
from .data_type import DataType
from .debug_database import DebugDatabase
from .errors import UnsupportedFeatureError
from .errors import VelaError
from .operation import Op
from .operation_util import create_avgpool_nop
from .shape4d import Shape4D
from .tensor import create_const_tensor
from .tensor import QuantizationParameters

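# Operators that only rearrange data in memory; the passes below bypass or remove them from the graph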
memory_only_ops = (
    Op.Reshape,
    Op.QuantizedReshape,
    Op.Squeeze,
    Op.ExpandDims,
    Op.Identity,
)


def _avoid_nhcwb16_for_concat(tens):
    # If axis corresponds to C-dimension, NHCWB16 can only be used in the output if all the concat_start's are a
    # multiple of 16. This is because only then will the address offset for the ofm, for all operations, be 16 byte
    # aligned. For other values of axis the address offsets will be 16 byte aligned, as they are all based on c = 0
    # and those addresses are always 16 byte aligned due to the NHCWB16 format.
    return any(op.write_offset.depth % 16 != 0 for op in tens.ops if op.write_offset is not None)


def _avoid_nhcwb16_for_split(tens):
    # If the read offset is not a multiple of 16 in the C-dimension, NHCWB16 needs to be avoided in the input

    # Return True if NHCWB16 needs to be avoided
    def offset_not_aligned(read_offset):
        return read_offset is not None and (read_offset.depth % 16) != 0

    for cons_op in tens.consumer_list:
        if cons_op.ifm == tens:
            if offset_not_aligned(cons_op.read_offsets[0]):
                return True
        if cons_op.ifm2 is not None and cons_op.ifm2 == tens:
            if offset_not_aligned(cons_op.read_offsets[1]):
                return True
    return False


def _avoid_nhcwb16_for_shapes(tens):
    # Check all producers/consumers to see if any op shape is preventing NHCWB16
    for cons_op in tens.consumer_list:
        if cons_op.ifm == tens:
            cons_op_shape = cons_op.ifm_shapes[0]
        elif cons_op.type.is_binary_elementwise_op() and cons_op.ifm2 == tens:
            cons_op_shape = cons_op.ifm_shapes[1]
        else:
            assert False
        if Shape4D(tens.shape) != cons_op_shape:
            return True

    for prod_op in tens.ops:
        if Shape4D(tens.shape) != prod_op.ofm_shapes[0]:
            return True

    return False


# Check if non linear format can be used
def check_format_restrictions(tens, arch):
    if len(tens.ops) < 1:
        return
    if tens.ops[0].type in (Op.Placeholder, Op.SubgraphInput, Op.Const) or any(
        cons is None for cons in tens.consumer_list
    ):
        return

    # Check if any of the producers/consumers is run on CPU
    if not all(cons.run_on_npu for cons in tens.consumer_list):
        return
    if not all(prod.run_on_npu for prod in tens.ops):
        return

    # "Concat" ofm exception:
    if _avoid_nhcwb16_for_concat(tens):
        return

    # "Split" ifm exception:
    if _avoid_nhcwb16_for_split(tens):
        return

    # Shapes checking: check all producers/consumers are NHCWB16 compatible with tens.shape
    if _avoid_nhcwb16_for_shapes(tens):
        return

    for op in tens.consumer_list:
        if op.type == Op.ReduceSum and (
            tens.dtype == DataType.int32 or arch.accelerator_config == Accelerator.Ethos_U65_512
        ):
            # ReduceSum requires NHWC input
            return
        if op.type == Op.Reshape:
            # Using NHCWB16 format for a no-op reshape is only an option if subsequent
            # consumers do not also need to perform a reshape or if the OFM is going to
            # be processed by CPU operations. No-op reshape consumers with empty lists
            # (those that have no consumers, or null-consumers used as list terminators)
            # must use normal NHWC output.

            def incompatible_consumers(oper):
                if oper and oper.type == Op.Reshape:
                    for consumer in oper.outputs[0].consumer_list:
                        yield from incompatible_consumers(consumer)
                yield not oper or not oper.run_on_npu

            if not any(incompatible_consumers(op)):

                def get_rewrites(oper):
                    if oper and oper.type == Op.Reshape:
                        for consumer in oper.outputs[0].consumer_list:
                            yield from get_rewrites(consumer)
                        yield oper

                # Detect no-op reshapes by comparing their full input and output tensor shapes.
                inshape = op.ifm_shapes[0]
                compatible_shape = [(inshape == oper.ofm_shapes[0]) for oper in get_rewrites(op)]
                if not (compatible_shape and all(compatible_shape)):
                    return
            else:
                return

    tens.needs_linear_format = False


def calc_explicit_padding(input_size, stride, filter_size, pad_before, pad_after) -> Tuple[int, int]:
    """
    Based on explicit padding provided in a PAD operation, returns the corresponding hardware padding
    that provides equivalent results.
    """
    total_padding = needed_total_padding(input_size, stride, filter_size)

    # The bottom/right padding might need downward adjustment depending on stride/input size
    total_minus_before = total_padding - pad_before
    output_pad_after = pad_after
    while output_pad_after > 0 and output_pad_after % stride != total_minus_before % stride:
        output_pad_after -= 1
    return pad_before, output_pad_after


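# Total padding needed so that ceil(input_size / stride) output elements can be produced with the given filter size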
def needed_total_padding(input_size, stride, filter_size):
    out_size = (input_size + stride - 1) // stride
    needed_input = (out_size - 1) * stride + filter_size
    total_padding = max(0, needed_input - input_size)
    return total_padding


# Set input/output tensor equivalence to the same id for memory operations
def set_tensor_equivalence(op, arch, nng):
    if op.type in memory_only_ops:
        eid = op.outputs[0].equivalence_id
        for inp in op.inputs:
            inp.equivalence_id = eid
    return op


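# Set the ifm/ofm shapes for NPU ops that need shapes, unless they have already been set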
def set_ifm_ofm_op_shapes(op, arch, nng):
    if op.run_on_npu and op.type.needs_shapes():
        if op.ifm_shapes or op.ofm_shapes:
            # Shapes already set
            return op
        op.set_ifm_ofm_shapes()
    return op


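# Bypass a memory-only op by rewiring the graph: if the ofm must persist (subgraph output or consumed
# by the CPU), the ifm is replaced by the ofm; otherwise the ofm consumers are redirected to the ifm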
def bypass_memory_only_ops(op):
    assert op.type in memory_only_ops
    ofm = op.ofm
    ifm = op.ifm

    # Check if ifm/ofm are network ifm/ofm
    ifm_is_sg_ifm = ifm.ops[0].type in (Op.Placeholder, Op.SubgraphInput, Op.Const)
    ifm_is_sg_ofm = any(ifm_cons is None for ifm_cons in ifm.consumer_list)
    ofm_is_sg_ofm = any(ofm_cons is None for ofm_cons in ofm.consumer_list)
    # Check if ifm/ofm is produced/consumed by the CPU, respectively
    ifm_is_cpu_produced = any(ifm_prod is not None and not ifm_prod.run_on_npu for ifm_prod in op.ifm.ops)
    ofm_is_cpu_consumed = any(ofm_cons is not None and not ofm_cons.run_on_npu for ofm_cons in op.ofm.consumer_list)

    # This case should be handled prior to this function
    assert not ((ifm_is_sg_ifm or ifm_is_sg_ofm or ifm_is_cpu_produced) and (ofm_is_sg_ofm or ofm_is_cpu_consumed))

    if ofm_is_sg_ofm or ofm_is_cpu_consumed:
        # Bypassed by replacing ifm with ofm
        ofm.ops = []
        for prev_op in ifm.ops:
            prev_op.outputs = [ofm]
            ofm.ops.append(prev_op)

        # All ifm consumers need to use ofm as input
        for ifm_cons in ifm.consumer_list:
            for ifm_idx, cons_ifm in enumerate(ifm_cons.inputs):
                if cons_ifm == ifm:
                    ifm_cons.set_input_tensor(ofm, ifm_idx)
    else:
        # Bypassed by replacing ofm with ifm
        for cons in ofm.consumer_list:
            for ifm_idx, cons_ifm in enumerate(cons.inputs):
                if cons_ifm == ofm:
                    cons.set_input_tensor(ifm, ifm_idx)


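# Fold a SplitSliceRead into its consumer: the consumer takes over the read offset/shape and reads
# directly from the SplitSliceRead's ifm, and the SplitSliceRead op is disconnected from the graph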
def move_splitsliceread_to_consumer(op, cons_op):
    assert op.type == Op.SplitSliceRead

    if cons_op.ifm == op.ofm:
        cons_op.read_offsets[0] = op.read_offsets[0]
        cons_op.read_shapes[0] = op.read_shapes[0]
        cons_op.set_input_tensor(op.ifm, cons_op.type.info.indices.ifms[0])
        cons_op.ifm_shapes[0] = op.ifm_shapes[0]
    elif cons_op.type.is_binary_elementwise_op() and cons_op.ifm2 == op.ofm:
        cons_op.read_offsets[1] = op.read_offsets[0]
        cons_op.read_shapes[1] = op.read_shapes[0]
        cons_op.set_input_tensor(op.ifm, cons_op.type.info.indices.ifms[1])
        cons_op.ifm_shapes[1] = op.ifm_shapes[0]

    op.ofm.consumer_list.remove(cons_op)
    op.ofm.ops = []
    op.ifm.consumer_list.remove(op)


def check_memory_only_removed(op, arch):
    if op.run_on_npu and op.type in memory_only_ops:
        # Memory only operators should have been removed
        raise VelaError(f"Memory only {op.type} op {op} expected to have been removed, still remains")


def record_optimised(op, arch):
    if op.type != Op.Const:
        DebugDatabase.add_optimised(op, op)


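# Keep the original tensor alive by inserting an average-pool no-op copy after it; all previous
# consumers of tens are redirected to the copied tensor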
def insert_copy_op_after_tens(tens):
    tens_cons_list_copy = tens.consumer_list.copy()

    # Create an avg_pool nop op with ifm as input
    copy_tens = tens.clone()
    copy_op = create_avgpool_nop(tens.name + "_avgpool")
    copy_op.add_input_tensor(tens)
    copy_op.set_output_tensor(copy_tens)
    copy_op.set_ifm_ofm_shapes()
    copy_op.run_on_npu = True

    # Set copy_ifm consumers
    for tens_cons in tens_cons_list_copy:
        if tens_cons is not None:
            for ifm_idx, cons_inp in enumerate(tens_cons.inputs):
                if cons_inp == tens:
                    tens_cons.set_input_tensor(copy_tens, ifm_idx)

    DebugDatabase.add_optimised(tens.ops[0], copy_op)


def fix_sg_input_output(op, arch, nng):
    if not op.run_on_npu or op.type not in memory_only_ops:
        return op

    # For the memory-only operators we want to remove, their tensors are removed as well.
    # But in order to do this, the tensors cannot be outputs of the sg; this needs to be
    # fixed prior to the removal. The solution is to add an avgpool NOP to maintain the
    # original tensor. This is also valid when the reshape ifm/ofm is produced or
    # consumed by the CPU, respectively.

    # Check if operator ifm/ofm are sg ifm/ofm
    ifm_is_sg_ifm = op.ifm.ops[0].type in (Op.Placeholder, Op.SubgraphInput, Op.Const)
    ifm_is_sg_ofm = any(ifm_cons is None for ifm_cons in op.ifm.consumer_list)
    ofm_is_sg_ofm = any(ofm_cons is None for ofm_cons in op.ofm.consumer_list)
    # Check if ifm/ofm is produced/consumed by the CPU, respectively
    ifm_is_cpu_produced = any(ifm_prod is not None and not ifm_prod.run_on_npu for ifm_prod in op.ifm.ops)
    ofm_is_cpu_consumed = any(ofm_cons is not None and not ofm_cons.run_on_npu for ofm_cons in op.ofm.consumer_list)

    if (ifm_is_sg_ofm or ifm_is_sg_ifm or ifm_is_cpu_produced) and (ofm_is_sg_ofm or ofm_is_cpu_consumed):
        # Both ifm and ofm need to persist, but only the ifm needs a copy in order to remove the memory-only operator
        insert_copy_op_after_tens(op.ifm)

    return op


def convert_depthwise_to_conv(op, arch, nng):
    # Depthwise is equivalent to a single conv2d if the ifm depth is 1 and
    # the ofm depth equals the depth multiplier.
    # If those conditions are true, then we can perform a simple
    # switch of the operator type (and weight order)

    if op.type == Op.DepthwiseConv2DBias and (op.attrs["depth_multiplier"] != 1):
        ifm_shape = op.ifm_shapes[0]
        weight_tensor = op.inputs[1]
        ofm_shape = op.ofm_shapes[0]
        if (ifm_shape.depth == 1) and (ofm_shape.depth == op.attrs["depth_multiplier"]):
            # Change op type to Conv2d
            op.type = Op.Conv2DBias
            del op.attrs["channel_multiplier"]
            del op.attrs["depth_multiplier"]

            weight_tensor.values = np.transpose(weight_tensor.values, (0, 1, 3, 2))
            weight_tensor.set_all_shapes(list(weight_tensor.values.shape))
        else:
            raise UnsupportedFeatureError(
                f"Unsupported 'DEPTHWISE_CONV_2D' with depth_multiplier = {op.attrs['depth_multiplier']},",
                f" ifm channels = {ifm_shape.depth}, ofm channels = {ofm_shape.depth}",
            )
        DebugDatabase.add_optimised(op, op)
    return op


def convert_to_lut(op, lut_values, lut_name):
    # Rewrite the operation as an Add with scalar 0 + LUT activation
    ifm = op.inputs[0]
    if ifm is None:
        return op
    assert ifm.dtype.size_in_bytes() == 1
    op.type = Op.Add
    op.name = op.name + "_lut_" + lut_name
    # Mark as no-op to enable potential fusing optimizations
    op.attrs["is_nop"] = True
    # Create an input tensor containing scalar zero
    quantization = QuantizationParameters(0.0, 255.0)
    quantization.scale_f32 = ifm.quantization.scale_f32
    quantization.zero_point = 0
    tens = create_const_tensor(op.inputs[0].name + "_scalar0", [], ifm.dtype, [0], np.uint8, quantization=quantization)
    op.add_input_tensor(tens)
    op.ifm_shapes.append(Shape4D(tens.shape))  # TODO no shape?

    # The LUT must be applied without any preceding rescaling (the LUT itself performs the rescale),
    # so even if the OFM has a different scale than the IFM, the generated OFM scale instructions
    # should be the same as the IFM
    op.forced_output_quantization = ifm.quantization
    lut_tensor = lut.create_lut_tensor(op.name + "_values", lut_values, DataType.int8)
    op.set_activation_lut(lut_tensor)
    op.set_ifm_ofm_shapes()
    return op