# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# NPU performance estimation functions to estimate performance of a Pass and CascadedPass. Uses a model that takes the
# maximum of the 'cycles required for bandwidth' and 'cycles required for computing'.
#
# Called during scheduling to evaluate different proposals, as well as post-scheduling to provide a final performance
# estimate.
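#
# Illustration of the max() model with hypothetical numbers (not tied to any real configuration):
# a pass needing 1,000,000 MACs on an engine doing 256 MACs/cycle costs ~3,906 compute cycles,
# while moving 2 MiB of data through a memory sustaining 4 bytes/cycle costs 524,288 access
# cycles, so the pass estimate is max(3906, 524288) = 524288 cycles, i.e. bandwidth-bound.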
import enum

import numpy as np

from . import numeric_util
from .architecture_features import Block
from .nn_graph import PassPlacement
from .nn_graph import SchedulerRewrite
from .operation import NpuBlockType
from .register_command_stream_generator import get_op_kernel
from .tensor import MemArea
from .tensor import shape_num_elements
from .tensor import TensorBlockTraversal
from .tensor import TensorPurpose


def rolling_buffer_dims_from_passes(arch, ps1, block_config_ps1, ps2, block_config_ps2):
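    """Work out the [height, width] of the rolling buffer needed between two cascaded passes:
    the IFM block that ps2 consumes, with the height rounded up to a worst-case whole number
    of ps1 output-block heights."""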
    ofm_block = Block(block_config_ps2[-3], block_config_ps2[-4], block_config_ps2[-1])
    kernel = get_op_kernel(ps2)

    if ps2.npu_block_type in set((NpuBlockType.ConvolutionMxN, NpuBlockType.VectorProduct)):
        op = ps2.primary_op
        ifm_block_depth = arch.calc_ifm_block_depth(op.ifm.shape[-1], op.ifm.dtype.size_in_bits())
    else:
        ifm_block_depth = block_config_ps2[-1]

    ifm_block = arch.get_ifm_block_size(ifm_block_depth, ofm_block, kernel, arch.ofm_block_max)

    # the height calculation performed here is for the worst case
    height = numeric_util.round_up(ifm_block.height + block_config_ps1[0], block_config_ps1[0])
    width = ifm_block.width
    return [height, width]


class PassCycles(enum.IntEnum):
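    """Indices into the per-pass cycles array. Size is the array length; TotalPerPass and
    Total are derived entries, each set to the max() of the entries that precede them."""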
    Dpu = 0
    ElementWise = 1
    Cpu = 2
    SramAccess = 3
    TotalPerPass = 4
    DramAccess = 5
    OnChipFlashAccess = 6
    OffChipFlashAccess = 7
    Total = 8
    Size = 9

    def display_name(self):
        return (
            "DPU",
            "Element wise",
            "CPU",
            "SRAM Access",
            "Total per Pass",
            "DRAM Access",
            "On-chip Flash Access",
            "Off-chip Flash Access",
            "Total",
            "Size",
        )[self.value]

    def identifier_name(self):
        return (
            "dpu",
            "element_wise",
            "cpu",
            "sram_access",
            "total_per_pass",
            "dram_access",
            "on_chip_flash_access",
            "off_chip_flash_access",
            "total",
            "size",
        )[self.value]

    @staticmethod
    def all():
        return (
            PassCycles.Dpu,
            PassCycles.ElementWise,
            PassCycles.Cpu,
            PassCycles.SramAccess,
            PassCycles.DramAccess,
            PassCycles.OnChipFlashAccess,
            PassCycles.OffChipFlashAccess,
            PassCycles.Total,
        )


class MacCount(enum.IntEnum):
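    """Indices into the MAC count array. NeuralNetworkMacs counts the MACs the network
    nominally requires; HardwareMacs counts the MACs the hardware actually issues after
    shapes are rounded up to block sizes."""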
    NeuralNetworkMacs = 0
    HardwareMacs = 1
    Size = 2

    def display_name(self):
        return ("Neural Network Macs", "Hardware Macs", "Size")[self.value]

    def identifier_name(self):
        return ("nn_macs", "hardware_macs", "size")[self.value]

    @staticmethod
    def all():
        return (MacCount.NeuralNetworkMacs, MacCount.HardwareMacs)


class BandwidthDirection(enum.IntEnum):
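    """Indices into the innermost axis of the array returned by make_bandwidth_array()."""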
    Read = 0
    Write = 1
    Size = 2

    def display_name(self):
        return self.name

    def identifier_name(self):
        return self.name.lower()

    @staticmethod
    def all():
        return (BandwidthDirection.Read, BandwidthDirection.Write)


def make_bandwidth_array():
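    """Bandwidth totals in bytes, indexed as [mem_area][tensor_purpose][direction]."""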
    return np.zeros((MemArea.Size, TensorPurpose.Size, BandwidthDirection.Size))


def make_macs_array():
    return np.zeros(MacCount.Size, int)


def make_cycles_array():
    return np.zeros(PassCycles.Size)


def make_metrics_arrays():
    return (make_bandwidth_array(), make_macs_array(), make_cycles_array())


def get_n_blocks_and_area(
    ifm_brick_size, ifm_height_width, orig_skirt, clamped_skirt, block_config, min_block_size, strides
):
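    """Count how many IFM blocks a pass reads and the total 2D area those reads cover,
    including reads that extend into the skirt/edge padding (depth and batch are applied
    by the caller). Returns (total_blocks, total_area, block_setup)."""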
    ifm_block_config = (block_config[0] * strides[1], block_config[1] * strides[2])

    n_normal_blocks = []
    remainder_size = []
    for i in range(2):
        non_skirt_dim = ifm_height_width[i] - orig_skirt[i] - orig_skirt[2 + i]
        n_blocks = non_skirt_dim // ifm_block_config[i]
        n_normal_blocks.append(n_blocks)
        remainder_dim = numeric_util.round_up(
            ((non_skirt_dim - n_blocks * ifm_block_config[i] - 1) // strides[i + 1]) + 1, min_block_size[i]
        )
        remainder_size.append(remainder_dim)

    # note: this deliberately includes reads into the edge padding

    # there are four block-size cases in total, handling the edges that do not fill a complete block:
    # 0 = full blocks, 1 = right-edge remainder, 2 = bottom-edge remainder, 3 = corner remainder

    # 0000000001
    # 0000000001
    # 0000000001
    # 0000000001
    # 0000000001
    # 0000000001
    # 2222222223
    total_blocks = 0
    total_area = 0

    block_setup = (
        (n_normal_blocks[0] * n_normal_blocks[1], block_config),
        (1 * n_normal_blocks[1], (remainder_size[0], block_config[1])),
        (n_normal_blocks[0] * 1, (block_config[0], remainder_size[1])),
        (1 * 1, remainder_size),
    )

    for n_blocks, block_size in block_setup:
        if block_size[0] == 0 or block_size[1] == 0:
            continue
        read_dims = [0, 0]
        for i in range(2):
            read_dims[i] = (
                numeric_util.round_up(clamped_skirt[i], ifm_brick_size[i + 1])
                + block_size[i] * strides[i + 1]
                + numeric_util.round_up(clamped_skirt[2 + i], ifm_brick_size[i + 1])
            )
        assert n_blocks >= 0
        total_blocks += n_blocks
        total_area += n_blocks * read_dims[0] * read_dims[1]
    assert total_blocks >= 1
    return total_blocks, total_area, block_setup


def performance_metrics_for_pass(arch, ps, block_config=None, rewrite_list=None, force_outputs_to_fast_storage=False):
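    """Estimate bandwidth, MAC and cycle counts for a single Pass.

    Returns (bws, macs, cycles, blocks, ifm_read_multiple, weight_read_multiple); the two
    read multiples express how many times the IFM and the weights are (re)fetched relative
    to reading them exactly once."""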
    if block_config is None:
        block_config = ps.block_config
    if rewrite_list is None:
        rewrite_list = []
    bws = make_bandwidth_array()
    macs = make_macs_array()
    cycles = make_cycles_array()
    blocks = 0
    ifm_read_multiple = 1
    weight_read_multiple = 0

    if ps.placement in set((PassPlacement.MemoryOnly, PassPlacement.StartupInit)):
        return bws, macs, cycles, blocks, ifm_read_multiple, weight_read_multiple  # nothing real happening in this pass

    min_block_size = arch.min_block_sizes[ps.npu_block_type]

    skirt = (0, 0, 0, 0)
    explicit_padding = (0, 0, 0, 0)
    primary_op = ps.primary_op
    replacement_read_bws = {}
    if ps.placement == PassPlacement.Cpu:
        cycles[PassCycles.Cpu] = arch.cpu_cycle_estimate(ps.ops[0])
    elif primary_op:
        skirt = primary_op.attrs.get("skirt", skirt)
        explicit_padding = primary_op.attrs.get("explicit_padding", explicit_padding)
        assert primary_op.type.npu_block_type == ps.npu_block_type
        npu_block_type = primary_op.type.npu_block_type

        ifm_tensor, _, weight_tensor, ofm_tensor = ps.get_primary_op_ifm_ifm2_weights_ofm()

        if npu_block_type in set(
            (NpuBlockType.ConvolutionMxN, NpuBlockType.ConvolutionDepthWise, NpuBlockType.Pooling)
        ):
            # extend the ifm to full dimensions
            ifm_tensor_brick_size = tuple(numeric_util.full_shape(4, list(ifm_tensor.brick_size), 1))
            ifm_tensor_shape = numeric_util.full_shape(4, ifm_tensor.shape, 1)
            ifm_tensor_bandwidth_shape = numeric_util.full_shape(4, ifm_tensor.bandwidth_shape, 1)

            batch_size = ifm_tensor.shape[0]
            ifm_depth = ifm_tensor_bandwidth_shape[3]

            # add in padding
            ifm_tensor_shape[1] += explicit_padding[0] + explicit_padding[2]  # height += top and bottom
            ifm_tensor_shape[2] += explicit_padding[1] + explicit_padding[3]  # width += left and right

            strides = primary_op.attrs["strides"]
            if npu_block_type != NpuBlockType.Pooling:
                weight_tensor_shape = weight_tensor.shape
                weight_tensor_bandwidth_shape = weight_tensor.bandwidth_shape
                weight_tensor_element_size = weight_tensor.element_size()
                weight_tensor_bandwidth_compression_scale = weight_tensor.bandwidth_compression_scale
                nn_ops = (
                    int(ofm_tensor.shape[0])
                    * int(ofm_tensor.shape[1])
                    * int(ofm_tensor.shape[2])
                    * int(weight_tensor_shape[0])
                    * int(weight_tensor_shape[1])
                    * int(weight_tensor_shape[2])
                    * int(weight_tensor_shape[3])
                )
            else:
                weight_tensor_shape = [
                    primary_op.attrs["ksize"][1],
                    primary_op.attrs["ksize"][2],
                    1,
                    ifm_tensor_shape[3],
                ]
                weight_tensor_bandwidth_shape = weight_tensor_shape
                weight_tensor_element_size = 0
                weight_tensor_bandwidth_compression_scale = 0.0
                nn_ops = 0  # pooling doesn't count as NN ops

            kernel_dims = weight_tensor_shape[:2]

            sub_kernel_limits = arch.sub_kernel_limits[npu_block_type]
            # count the sub kernels; the IFM block needs to be refetched for each of them
            n_sub_kernels_y = numeric_util.round_up_divide(kernel_dims[0], sub_kernel_limits[0])
            n_sub_kernels_x = numeric_util.round_up_divide(kernel_dims[1], sub_kernel_limits[1])
            n_sub_kernels = n_sub_kernels_y * n_sub_kernels_x

            clamped_skirt = list(skirt)
            clamped_skirt[2] = min(clamped_skirt[2], sub_kernel_limits[0] - 1 - clamped_skirt[0])
            clamped_skirt[3] = min(clamped_skirt[3], sub_kernel_limits[1] - 1 - clamped_skirt[1])
            n_blocks, area, block_setup = get_n_blocks_and_area(
                ifm_tensor_brick_size,
                ifm_tensor_shape[1:3],
                skirt,
                clamped_skirt,
                block_config,
                min_block_size,
                strides,
            )

            blocks = n_blocks * numeric_util.round_up_divide(weight_tensor_shape[3], block_config[3])

            n_weight_stages = numeric_util.round_up_divide(weight_tensor_bandwidth_shape[3], block_config[3])
            if npu_block_type == NpuBlockType.ConvolutionDepthWise or npu_block_type == NpuBlockType.Pooling:
                n_weight_stages = 1  # force to no reread

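            # IFM read traffic as modelled here: the block area is fetched once per sub-kernel
            # and once per weight stage (OFM depth slice), since each depth slice re-reads the IFM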
            ifm_tensor_bw = (
                n_sub_kernels
                * batch_size
                * area
                * ifm_depth
                * n_weight_stages
                * ifm_tensor.element_size()
                * ifm_tensor.bandwidth_compression_scale
            )
            replacement_read_bws[ifm_tensor] = ifm_tensor_bw
            ifm_read_multiple = n_weight_stages

            replacement_read_bws[weight_tensor] = (
                batch_size
                * shape_num_elements(weight_tensor_bandwidth_shape)
                * weight_tensor_element_size
                * weight_tensor_bandwidth_compression_scale
                * n_blocks
            )  # read once per block and batch
            weight_read_multiple = n_blocks

            n_kernel_xy = kernel_dims[0] * kernel_dims[1]
            n_input_channels_at_a_time = block_config[2]

            if npu_block_type == NpuBlockType.Pooling or weight_tensor.block_traversal in set(
                (TensorBlockTraversal.PartKernelFirst, TensorBlockTraversal.DepthWise)
            ):
                n_input_channels_at_a_time = numeric_util.round_up_divide(n_input_channels_at_a_time, 4)
                n_kernel_xy = max(
                    n_kernel_xy, 4
                )  # need at least 4, as this is the minimum duty cycle for secondary accumulator writes
                if weight_tensor is not None:
                    n_kernel_xy = numeric_util.round_up(n_kernel_xy, 4)  # weights need to be read in blocks of 4

            num_mac_ops = 0
            for n_blocks_for_size, block_size in block_setup:
                num_mac_ops += (
                    batch_size
                    * n_blocks_for_size
                    * block_size[0]
                    * block_size[1]
                    * numeric_util.round_up(weight_tensor_shape[2], n_input_channels_at_a_time)
                    * numeric_util.round_up(weight_tensor_shape[3], block_config[3])
                    * n_kernel_xy
                )

            if npu_block_type == NpuBlockType.Pooling:
                # TODO: improve pooling estimation
                cycles[PassCycles.Dpu] = num_mac_ops / arch.num_macs_per_cycle / 2
            else:
                cycles[PassCycles.Dpu] = num_mac_ops / arch.num_macs_per_cycle
            macs[MacCount.NeuralNetworkMacs] += nn_ops
            macs[MacCount.HardwareMacs] += num_mac_ops

        elif npu_block_type == NpuBlockType.VectorProduct:
            nn_macs = (
                ifm_tensor.shape[0]
                * numeric_util.round_up(weight_tensor.shape[-2], block_config[2])
                * numeric_util.round_up(weight_tensor.shape[-1], block_config[3])
            )
            num_mac_ops = nn_macs

            cycles[PassCycles.Dpu] = num_mac_ops / arch.num_macs_per_cycle
            macs[MacCount.NeuralNetworkMacs] += nn_macs
            macs[MacCount.HardwareMacs] += num_mac_ops

            blocks = 1 * numeric_util.round_up_divide(weight_tensor.shape[-1], block_config[3])

            non_zero_fraction = 1.0
            if ifm_tensor.values is not None:
                nz_vector = np.amax(ifm_tensor.values != 0, axis=0)  # max across batch axis
                non_zero_fraction = np.average(nz_vector)

            replacement_read_bws[ifm_tensor] = ifm_tensor.bandwidth()
            replacement_read_bws[weight_tensor] = weight_tensor.bandwidth() * non_zero_fraction
            ifm_read_multiple = 1
            weight_read_multiple = non_zero_fraction
    else:
        if ps.placement == PassPlacement.Npu and len(ps.outputs):
            # Assume element-wise operation going through the element pipelines.
            # Work out how many elements we have and calculate performance.
            out = ps.outputs[0]
            elms = out.elements()

            cycles[PassCycles.ElementWise] = numeric_util.round_up_divide(elms, arch.num_elem_wise_units)

    # apply the desired rewrites
    for rewrite_op, tens, _, _, _, ps_to_rewrite in rewrite_list:
        if ps != ps_to_rewrite:
            continue
        if rewrite_op == SchedulerRewrite.Nop:
            pass  # these are fine, no bandwidth changes
        elif rewrite_op in (SchedulerRewrite.ChangeTensorSubPurpose,):
            bws[arch.fast_storage_mem_area][tens.purpose][BandwidthDirection.Read] += replacement_read_bws[tens]
            replacement_read_bws[tens] = 0

    for tens in ps.outputs:
        if force_outputs_to_fast_storage:
            bws[arch.fast_storage_mem_area][tens.purpose][BandwidthDirection.Write] += tens.bandwidth()
        else:
            bws[tens.mem_area][tens.purpose][BandwidthDirection.Write] += tens.bandwidth()

    for tens in ps.intermediates:
        bws[tens.mem_area][tens.purpose][BandwidthDirection.Write] += tens.bandwidth()

        if tens in replacement_read_bws:
            bw = replacement_read_bws[tens]
        else:
            bw = tens.bandwidth()

        bws[tens.mem_area][tens.purpose][BandwidthDirection.Read] += bw

    for tens in ps.inputs:
        if tens in replacement_read_bws:
            bw = replacement_read_bws[tens]
        else:
            bw = tens.bandwidth()

        bws[tens.mem_area][tens.purpose][BandwidthDirection.Read] += bw

    cycles[PassCycles.SramAccess] = np.sum(bws[MemArea.Sram]) / arch.memory_bandwidths_per_cycle[MemArea.Sram]
    cycles[PassCycles.TotalPerPass] = np.max(cycles[: PassCycles.TotalPerPass])

    # quickly build the access counts for the current pass only, even though these aren't the final numbers
    update_summary_cycles(arch, bws, macs, cycles)

    return bws, macs, cycles, blocks, ifm_read_multiple, weight_read_multiple


def update_summary_cycles(arch, bws, macs, cycles):
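    """Derive the memory-access cycle entries from the bandwidth totals, then set Total
    to the max() of all contributing entries (the bandwidth-vs-compute model)."""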
    cycles[PassCycles.DramAccess] = np.sum(bws[MemArea.Dram]) / arch.memory_bandwidths_per_cycle[MemArea.Dram]
    cycles[PassCycles.OnChipFlashAccess] = (
        np.sum(bws[MemArea.OnChipFlash]) / arch.memory_bandwidths_per_cycle[MemArea.OnChipFlash]
    )
    cycles[PassCycles.OffChipFlashAccess] = (
        np.sum(bws[MemArea.OffChipFlash]) / arch.memory_bandwidths_per_cycle[MemArea.OffChipFlash]
    )

    cycles[PassCycles.Total] = np.max(cycles[: PassCycles.Total])
    return cycles


def collate_stats_for_cascaded_pass(arch, bws, macs, cycles):
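    """Aggregation hook for cascaded-pass statistics; currently a straight pass-through."""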
    return bws, macs, cycles


def performance_for_cascaded_pass(arch, cps):
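    """Estimate each Pass in the CascadedPass, annotate the passes with their metrics and
    store the accumulated totals on the CascadedPass. Returns (bws, macs, cycles)."""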
    total_bws = make_bandwidth_array()
    total_macs = make_macs_array()
    total_cycles = make_cycles_array()

    for ps in cps.passes:
        bws, macs, cycles, blocks, _, _ = performance_metrics_for_pass(arch, ps)
        ps.bandwidths = bws
        ps.macs = macs
        ps.cycles = cycles
        ps.n_blocks = blocks
        total_bws += bws
        total_macs += macs
        total_cycles += cycles

    bws, macs, cycles = collate_stats_for_cascaded_pass(arch, total_bws, total_macs, total_cycles)
    cps.bandwidths = bws
    cps.macs = macs
    cps.cycles = cycles
    return bws, macs, cycles


def calc_performance_for_network(nng, arch):
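    """Accumulate the cascaded-pass estimates over all subgraphs and store the network
    totals on the graph as nng.bandwidths, nng.macs and nng.cycles."""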
    total_bws = make_bandwidth_array()
    total_macs = np.zeros(MacCount.Size)
    total_cycles = np.zeros(PassCycles.Size)

    for sg in nng.subgraphs:
        for cps in sg.cascaded_passes:
            bws, macs, cycles = performance_for_cascaded_pass(arch, cps)
            total_bws += bws
            total_macs += macs
            total_cycles += cycles

    nng.bandwidths = total_bws
    nng.macs = total_macs
    nng.cycles = total_cycles
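

# Typical usage (a sketch; assumes a scheduled graph `nng` and an ArchitectureFeatures
# instance `arch` as produced elsewhere in the compiler):
#
#     calc_performance_for_network(nng, arch)
#     total_cycles = nng.cycles[PassCycles.Total]
#     nn_macs = nng.macs[MacCount.NeuralNetworkMacs]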