blob: 1489860755dba95d8b8bfde362f862e31ef981eb [file] [log] [blame]
Tim Hall79d07d22020-04-27 18:20:16 +01001# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
2#
3# SPDX-License-Identifier: Apache-2.0
4#
5# Licensed under the Apache License, Version 2.0 (the License); you may
6# not use this file except in compliance with the License.
7# You may obtain a copy of the License at
8#
9# www.apache.org/licenses/LICENSE-2.0
10#
11# Unless required by applicable law or agreed to in writing, software
12# distributed under the License is distributed on an AS IS BASIS, WITHOUT
13# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14# See the License for the specific language governing permissions and
15# limitations under the License.
Tim Hall79d07d22020-04-27 18:20:16 +010016# Description:
17# Register level (low-level) command stream generation for Ethos-U55. Takes a high-level command stream and generates
18# all the register settings. Calculates dependencies between commands and inserts wait operations. And generates a bit
19# stream suitable for interpretation by the Ethos-U55 processor.
Tim Hall79d07d22020-04-27 18:20:16 +010020from collections import defaultdict
Diego Russoe8a10452020-04-21 17:39:10 +010021from enum import Enum
22from enum import IntEnum
Diego Russoea6111a2020-04-14 18:41:58 +010023
24import numpy as np
25
26from . import scaling
Diego Russoe8a10452020-04-21 17:39:10 +010027from .architecture_features import ArchitectureFeatures
28from .architecture_features import Block
29from .architecture_features import Kernel
30from .architecture_features import Rect
31from .architecture_features import SharedBufferArea
32from .architecture_features import SHRAMElements
33from .data_type import BaseType
34from .data_type import DataType
35from .ethos_u55_regs.ethos_u55_regs import acc_format
36from .ethos_u55_regs.ethos_u55_regs import activation
37from .ethos_u55_regs.ethos_u55_regs import cmd0
38from .ethos_u55_regs.ethos_u55_regs import cmd1
39from .ethos_u55_regs.ethos_u55_regs import elementwise_mode
40from .ethos_u55_regs.ethos_u55_regs import ifm_precision
41from .ethos_u55_regs.ethos_u55_regs import rounding
Tim Hall79d07d22020-04-27 18:20:16 +010042from .high_level_command_stream import CommandType
Diego Russoe8a10452020-04-21 17:39:10 +010043from .numeric_util import clamp_sigmoid
44from .numeric_util import clamp_tanh
45from .numeric_util import quantise_float32
46from .numeric_util import round_away_zero
47from .numeric_util import round_up
48from .numeric_util import round_up_to_int
Tim Hall79d07d22020-04-27 18:20:16 +010049from .operation import NpuBlockType
Tim Hall79d07d22020-04-27 18:20:16 +010050from .shared_buffer_allocation import SharedBufferAllocation
Diego Russoe8a10452020-04-21 17:39:10 +010051from .tensor import MemArea
52from .tensor import TensorBlockTraversal
53from .tensor import TensorFormat
Tim Hall79d07d22020-04-27 18:20:16 +010054
55
56class RegisterMachine:
57 def __init__(self):
58 self.n_banks = 1
59 self.registers = [defaultdict(lambda: None) for _ in range(self.n_banks)]
60 self.bank_idx = 0
61
62 def set_register(self, reg, value):
63 is_changed = self.registers[self.bank_idx][reg] != value
64 self.registers[self.bank_idx][reg] = value
65 # is_changed = True # force command
66 return is_changed
67
68 def switch_bank(self):
69 self.bank_idx = (self.bank_idx + 1) % self.n_banks
70
71
72class CmdMode(IntEnum):
73 NoPayload = 0x0000
74 Payload32 = 0x4000
75 Mask = 0xC000
76 CmdOpMask = 0x03FF
77
78
79class BasePointerIndex(IntEnum):
80 ReadOnly = 0 # base address slot index for weights and scaling
81 Scratch = 1 # base address slot index for scratch memory area
82
83
84# TODO: Replace with definitions from ethos_u55_regs
85class IFM2Broadcast(IntEnum):
86 BroadcastHdim = 1 << 0
87 BroadcastWdim = 1 << 1
88 BroadcastCdim = 1 << 2
89 ReverseOperandOrder = 1 << 6
90 UseIFM2Scalar = 1 << 7
91
92
93class CommandStreamEmitter:
94 def __init__(self):
95 self.cmd_stream = []
96 self.reg_machine = [RegisterMachine(), RegisterMachine()]
97 self.last_absolute_wait = defaultdict(int)
98
99 def get_reg_machine(self, cmd):
100 if "DMA" in cmd.name:
101 return self.reg_machine[1]
102 else:
103 return self.reg_machine[0]
104
105 def size_in_bytes(self):
106 sz = 0
107 for cmd in self.cmd_stream:
108 sz += len(cmd) * 4
109 return sz
110
111 def to_list(self):
112 return [elem for cmd in self.cmd_stream for elem in cmd]
113
114 def print_cmds(self):
115 print("Code: Command: Param: Payload:")
116 for words_for_one_command in self.cmd_stream:
117 code = words_for_one_command[0] & 0x0000FFFF # lower 16 bits
118 param = words_for_one_command[0] >> 16 # higher 16 bits
119
120 payload_mode = CmdMode(code & CmdMode.Mask)
121
122 # code and command
123 s = " 0x%04x " % code
124 if payload_mode == CmdMode.NoPayload:
125 s += str(cmd0(code & CmdMode.CmdOpMask))
126 else:
127 s += str(cmd1(code & CmdMode.CmdOpMask))
128
129 s = s.ljust(40)
130 s += "%5d" % param
131
132 # payload
133 if payload_mode == CmdMode.Payload32:
134 s += " 0x%08x (%d)" % (words_for_one_command[1], words_for_one_command[1])
135 else:
136 s += " -"
137
138 print(s)
139
140 def cmd0_with_param(self, cmd, param):
141 if isinstance(param, Enum):
142 param = int(param.value)
143 else:
144 param = int(param)
145 param = param & 0xFFFF
146 command = cmd.value | (param << 16)
147 if not self.get_reg_machine(cmd).set_register(cmd, (command, param)):
148 return
149
150 # This is not a redundant command, actually write it
151 self.cmd_stream.append((command,))
152
153 def cmd1_with_offset(self, cmd, offset, param=0x0):
154 offset = int(offset) & 0xFFFFFFFFF
155 command = cmd.value | CmdMode.Payload32.value | (param << 16)
156
157 if not self.get_reg_machine(cmd).set_register(cmd, (command, offset)):
158 return
159
160 # This is not a redundant command, actually write it
161 self.cmd_stream.append((command, offset))
162
163 def cmd_wait(self, cmd, param, absolute_wait_time):
164 if absolute_wait_time <= self.last_absolute_wait[cmd]:
165 return
166
167 self.last_absolute_wait[cmd] = absolute_wait_time
168 param = int(param)
169 command = ((param & 0xFFFF) << 16) | cmd.value
170 self.cmd_stream.append((command,))
171
172 def cmd_do_operation(self, cmd, param=0):
173 param = int(param)
174 command = ((param & 0xFFFF) << 16) | cmd.value
175
176 self.cmd_stream.append((command,))
177 self.get_reg_machine(cmd).switch_bank()
178
179
180def calc_command_dependencies(cmd_stream, arch):
181 cmd_starts = {}
182 cmd_ends = {}
183 memory_accesses = {}
184
185 # Keep track of accumulated number of commands in command stream.
186 # First element kernel ops: (# of blocks, # of commands)
187 # Second element DMA ops: (# of commands)
188 pos = np.array((np.array((0, 0)), np.array([0])))
189
190 dependencies = {}
191
192 for cmd in cmd_stream:
193 cmd_starts[cmd] = pos
194 op_count = cmd.get_operation_count()
195 # Keep track of both num blocks and commands
196 cmd_add = 0 if (op_count[0] == 0) else 1
197 pos = np.array((pos[0] + np.array((op_count[0], cmd_add)), pos[1] + np.array([op_count[1]])))
198 cmd_ends[cmd] = np.array((pos[0], pos[1]))
199 memory_accesses[cmd] = cmd.get_memory_accesses()
200
201 for idx, cmd in enumerate(cmd_stream):
202 curr_accesses = memory_accesses[cmd]
203 # Keep track of command dependency.
204 # First element kernel ops: (# of blocks, # of commands)
205 # Second element DMA ops: (# of commands)
206 dep_offsets = np.array((np.array((-1, -1)), np.array([-1])))
207 dep_cmds = [None] * CommandType.Size.value
208 if idx > 0:
209 # Look at the previous commands in backwards order
210 for prev_cmd in cmd_stream[idx - 1 :: -1]:
211 assert prev_cmd is not cmd
212 if dep_cmds[prev_cmd.cmdtype] is None:
213 is_dependency = False
214 if cmd.cmdtype == CommandType.NpuStripe and prev_cmd.cmdtype == CommandType.NpuStripe:
215 # Special handling here, as dpu -> dpu operations require additional care
216 if not SharedBufferAllocation.is_compatible(prev_cmd.ps.shared_buffer, cmd.ps.shared_buffer):
217 is_dependency = True
218 elif memory_accesses[prev_cmd].conflicts(curr_accesses):
219 is_dependency = True
220 else:
221 if memory_accesses[prev_cmd].conflicts(curr_accesses):
222 is_dependency = True
223
224 if is_dependency:
225 new_offset = cmd_ends[prev_cmd][prev_cmd.cmdtype]
226 if new_offset[0] > dep_offsets[prev_cmd.cmdtype][0]:
227 dep_cmds[prev_cmd.cmdtype] = prev_cmd
228 dep_offsets[prev_cmd.cmdtype] = new_offset
229
230 # Check if we've got dependencies for all commands, in which case we can early out
231 for dep in dep_cmds:
232 if dep is None:
233 break
234 else:
235 break # all handled
236
237 # Convert absolute to relative dependencies, using None to signal the special case of no
238 # dependency of this kind
239 res = [None] * CommandType.Size.value
240 for i in range(CommandType.Size.value):
241 if dep_cmds[i] is not None:
242 res[i] = cmd_starts[cmd][i] - dep_offsets[i]
243
244 dependencies[cmd] = cmd_starts[cmd], res
245
246 return dependencies
247
248
249def get_op_kernel(ps):
250 if ps.primary_op is None:
251 return None
252
253 strides = ps.primary_op.attrs.get("strides", (1, 1, 1, 1))
254 dilation = ps.primary_op.attrs.get("dilation", (1, 1, 1, 1))
255 if ps.weight_tensor:
256 if ps.npu_block_type in set((NpuBlockType.VectorProduct, NpuBlockType.ElementWise)):
257 k_h = 1
258 k_w = 1
259 else:
260 k_h = ps.weight_tensor.shape[0]
261 k_w = ps.weight_tensor.shape[1]
262 else:
263 k_h = ps.primary_op.attrs.get("filter_height", 1)
264 k_w = ps.primary_op.attrs.get("filter_width", 1)
265
266 return Kernel(k_w, k_h, strides[2], strides[1], dilation[2], dilation[1])
267
268
269def full_shape(shape, fill):
270 return ([fill] * (4 - len(shape))) + shape
271
272
273def has_prev_op_dependency(prev_cmd, cmd):
274 if prev_cmd is None:
275 return False
276 if (prev_cmd.cmdtype == cmd.cmdtype == CommandType.NpuStripe) and (prev_cmd.ps != cmd.ps):
277 if prev_cmd.ofm_tensor == cmd.ifm_tensor:
278 return True
279 else:
280 return prev_cmd.ofm_tensor.equivalence_id == cmd.ifm_tensor.equivalence_id
281 return False
282
283
284def get_op_ofm_rect(cmd):
285 start = full_shape(cmd.ofm_box.start_coord, 0)
286 end = full_shape(cmd.ofm_box.end_coord, 1)
287 return Rect(start[-2], start[-3], start[-1], end[-2] - 1, end[-3] - 1, end[-1] - 1)
288
289
290def get_op_ifm_rect(cmd):
291 start = full_shape(cmd.ifm_box.start_coord, 0)
292 end = full_shape(cmd.ifm_box.end_coord, 1)
293 return Rect(start[-2], start[-3], start[-1], end[-2] - 1, end[-3] - 1, end[-1] - 1)
294
295
296def get_op_ifmofm_block_depth(arch, cmd):
297 # Note: NOT equivalent to the normal ifm block depth calculation since
298 # it takes into account 'depthless' block operations by returning full
299 # depth
300 if cmd.ps.npu_block_type in (NpuBlockType.ConvolutionDepthWise, NpuBlockType.Pooling, NpuBlockType.ElementWise):
301 return cmd.ofm_box.get_size_shape()[-1]
302
303 return arch.calc_ifm_block_depth(cmd.ifm_box.get_size_shape()[-1], cmd.ifm_tensor.dtype.bits)
304
305
306def get_op_padding_lt(cmd):
307 if cmd.ps.npu_block_type not in (
308 NpuBlockType.ConvolutionDepthWise,
309 NpuBlockType.Pooling,
310 NpuBlockType.ConvolutionMxN,
311 ):
312 return (0, 0)
313
314 explicit_padding = list(cmd.ps.primary_op.attrs["explicit_padding"]) # (top, left, bottom, right)
315
316 # Check if this is for horizontal ifm streaming
317 if not (cmd.is_first_h_stripe and cmd.is_last_h_stripe):
318 explicit_padding[0] = cmd.pad_top
319 explicit_padding[2] = cmd.pad_bottom
320
321 return (explicit_padding[1], explicit_padding[0])
322
323
324def generate_register_command_stream(nng, sg, arch, verbose=False):
325 emit = CommandStreamEmitter()
326
327 base_ptr_idx_map = {
328 MemArea.Sram: BasePointerIndex.Scratch,
329 MemArea.OnChipFlash: BasePointerIndex.ReadOnly,
330 MemArea.OffChipFlash: BasePointerIndex.ReadOnly,
331 MemArea.Dram: BasePointerIndex.ReadOnly,
332 }
333
334 # Maps an AccumulatorType enum to the corresponding acc_format value
335 acc_format_map = {
336 SHRAMElements.Acc16: acc_format.FP_S5_10.value,
337 SHRAMElements.Acc32: acc_format.INT_32BIT.value,
338 SHRAMElements.Acc40: acc_format.INT_40BIT.value,
339 }
340
341 # Maps an elementwise op type to an elementwise_mode enum value used by NPU_OP_ELEMENTWISE
342 elementwise_mode_map = {
343 "MulAct": elementwise_mode.MUL.value,
344 "AddAct": elementwise_mode.ADD.value,
345 "SubAct": elementwise_mode.SUB.value,
346 "Minimum": elementwise_mode.MIN.value,
347 "Maximum": elementwise_mode.MAX.value,
348 "LeakyRelu": elementwise_mode.LRELU.value,
349 "Abs": elementwise_mode.ABS.value,
350 }
351
352 cmd_stream = []
353 for cmd in sg.high_level_command_stream:
354 if cmd.cmdtype == CommandType.NpuStripe and cmd.ps.npu_block_type == NpuBlockType.Default:
355 print("Warning: Skipping register command stream generation for", cmd.ps)
356 else:
357 cmd_stream.append(cmd)
358
359 dependencies = calc_command_dependencies(cmd_stream, arch)
360
361 # Initialise operator dependency state
362 prev_ifm_rect = cur_ifm_rect = None
363 prev_ifm_block_depth = cur_ifm_block_depth = None
364 prev_ofm_rect = cur_ofm_rect = None
365 prev_ofm_block = cur_ofm_block = None
366 prev_kernel = cur_kernel = None
367 prev_cmd = None
368
369 def emit_wait_commands(cmd):
370 # The command is fully set up, emit whatever wait commands we need
371 absolute_dep, relative_dep = dependencies[cmd]
372 if relative_dep[CommandType.NpuStripe] is not None:
373 if cmd.cmdtype == CommandType.DMA:
374 param = relative_dep[CommandType.NpuStripe][1]
375 if param <= 3:
376 emit.cmd_wait(cmd0.NPU_OP_KERNEL_WAIT, param, absolute_dep[CommandType.NpuStripe][1])
377 else:
378 param = relative_dep[CommandType.NpuStripe][0]
379 param = min(param, 0xFFFF) # Clamp to allowable wait amount
380
381 if relative_dep[CommandType.DMA] is not None:
382 param = relative_dep[CommandType.DMA][0]
383 param = min(param, 0xF) # Clamp to allowable wait amount
384 emit.cmd_wait(cmd0.NPU_OP_DMA_WAIT, param, absolute_dep[CommandType.DMA][0])
Tim Hall79d07d22020-04-27 18:20:16 +0100385
Tim Hall79d07d22020-04-27 18:20:16 +0100386 for cmd in cmd_stream:
387 if cmd.cmdtype == CommandType.DMA:
388 start_coord = cmd.box.start_coord
389
390 src_addr = cmd.in_tensor.address_for_coordinate(start_coord)
391 dst_addr = cmd.out_tensor.address_for_coordinate(start_coord)
392
393 if cmd.in_tensor.compressed_values is not None:
394 stream_index = cmd.in_tensor.compressed_stream_index_from_coord(start_coord)
395 sz = cmd.in_tensor.size_of_compressed_stream(stream_index)
396 else:
397 sz = cmd.in_tensor.address_for_coordinate(cmd.box.end_coord, is_top_box=True) - src_addr
398
399 # TODO: Yoda support needs to use feature_maps_not_in_fast_storage and force_outputs_to_fast_storage
400 emit.cmd0_with_param(cmd0.NPU_SET_DMA0_SRC_REGION, base_ptr_idx_map[cmd.in_tensor.mem_area])
401 emit.cmd1_with_offset(cmd1.NPU_SET_DMA0_SRC, src_addr)
402 emit.cmd0_with_param(cmd0.NPU_SET_DMA0_DST_REGION, base_ptr_idx_map[cmd.out_tensor.mem_area])
403 emit.cmd1_with_offset(cmd1.NPU_SET_DMA0_DST, dst_addr)
404 emit.cmd1_with_offset(cmd1.NPU_SET_DMA0_LEN, sz)
405 dma_channel = 0
406 mode = 0 # From external to external
407
408 emit_wait_commands(cmd)
409 emit.cmd_do_operation(cmd0.NPU_OP_DMA_START, dma_channel * 16 + mode)
410
411 elif cmd.cmdtype == CommandType.NpuStripe:
412
413 ps = cmd.ps
414 primary_op = ps.primary_op
415 npu_block_type = ps.npu_block_type
416 # Specifies if global scale from the NPU_SET_OFM_SCALE register should be used instead of per-channel scale
417 use_global_scale = False
418 # Specifies type of rounding to be used.
419 rounding_mode = rounding.TFL
Dwight Lidman3ec04ac2020-04-30 11:54:48 +0200420 if primary_op.type == 'ResizeBilinear':
421 rounding_mode = rounding.TRUNCATE
Tim Hall79d07d22020-04-27 18:20:16 +0100422 fmf = primary_op.attrs.get("fused_memory_function", None)
423 faf = primary_op.attrs.get("fused_activation_function", None)
424
425 # Specifies which operand to apply scaling to in bitexact elementwise ADD/SUB
426 op_to_scale = 0
427
428 # Update state history
429 prev_ifm_rect = cur_ifm_rect
430 prev_ifm_block_depth = cur_ifm_block_depth
431 prev_ofm_rect = cur_ofm_rect
432 prev_ofm_block = cur_ofm_block
433 prev_kernel = cur_kernel
434
435 block_config = ps.block_config
436 emit.cmd0_with_param(cmd0.NPU_SET_OFM_BLK_HEIGHT_M1, block_config[0] - 1)
437 emit.cmd0_with_param(cmd0.NPU_SET_OFM_BLK_WIDTH_M1, block_config[1] - 1)
438 emit.cmd0_with_param(cmd0.NPU_SET_OFM_BLK_DEPTH_M1, block_config[3] - 1)
439
440 shared_buffer = ps.shared_buffer
441
442 if npu_block_type == NpuBlockType.ElementWise:
443 ifm2_broadcast = 0
444
445 if cmd.ifm_tensor.shape == []:
446 # The scalar has to be the ifm2 tensor so switch the ifms
447 cmd.ifm_tensor, cmd.ifm2_tensor = cmd.ifm2_tensor, cmd.ifm_tensor
448 cmd.ifm_box, cmd.ifm2_box = cmd.ifm2_box, cmd.ifm_box
449
450 # Set ReverseOperandOrder bit to IFM2_BROADCAST
451 ifm2_broadcast |= IFM2Broadcast.ReverseOperandOrder
452
453 # Calculate scales needed for arithmetic elementwise operators
454 if primary_op.type in set(("AddAct", "MulAct", "SubAct",)):
455 input_scale = cmd.ifm_tensor.quantization.scale_f32
456 input2_scale = cmd.ifm2_tensor.quantization.scale_f32
457 output_scale = cmd.ofm_tensor.quantization.scale_f32
458 use_global_scale = True
459
460 if primary_op.type == "MulAct":
461 if (faf == "Sigmoid") or (faf == "Tanh"):
462 output_scale = 1 / 0x3000
463
464 ofm_scale, shift = scaling.elementwise_mul_scale(input_scale, input2_scale, output_scale)
465 emit.cmd1_with_offset(cmd1.NPU_SET_OFM_SCALE, ofm_scale, shift)
466 else: # AddAct/SubAct
467 if (faf == "Sigmoid") or (faf == "Tanh"):
468 output_scale = 1 / 0x3000
469
470 if input_scale == input2_scale:
471 opa_scale, opb_scale, ofm_scale, shift = scaling.simplified_elementwise_add_sub_scale(
472 input_scale, input2_scale, output_scale
473 )
474 opa_shift = 0 # Unused for this case
475 else:
476 # Use advanced implementation only when input scales differ
477 bitdepth = cmd.ifm_tensor.dtype.bits
478 (
479 opa_scale,
480 opa_shift,
481 ofm_scale,
482 shift,
483 op_to_scale,
484 ) = scaling.advanced_elementwise_add_sub_scale(
485 input_scale, input2_scale, output_scale, bitdepth
486 )
487 opb_scale = 0 # Unused for this case
488 if ifm2_broadcast & IFM2Broadcast.ReverseOperandOrder:
489 # If the operand order is reversed we also have to swap which operand is scaled
490 if op_to_scale == scaling.OperandToScale.OPa:
491 op_to_scale = scaling.OperandToScale.OPb
492 else:
493 op_to_scale = scaling.OperandToScale.OPa
494
495 emit.cmd1_with_offset(cmd1.NPU_SET_OPA_SCALE, opa_scale, opa_shift)
496 emit.cmd1_with_offset(cmd1.NPU_SET_OPB_SCALE, opb_scale)
497 emit.cmd1_with_offset(cmd1.NPU_SET_OFM_SCALE, ofm_scale, shift)
498
499 if primary_op.type in set(("LeakyRelu", "Abs",)):
500 output_scale = cmd.ofm_tensor.quantization.scale_f32
501 use_global_scale = True
502
503 if primary_op.type == "LeakyRelu":
504 output_scale *= primary_op.attrs["alpha"]
505
506 ofm_scale, shift = scaling.quantise_scale(output_scale)
507 emit.cmd1_with_offset(cmd1.NPU_SET_OFM_SCALE, ofm_scale, shift)
508
509 # For elementwise set the required SHRAM to be equal to the total size of SHRAM
510 shram_required = arch.shram_total_banks
511 emit.cmd0_with_param(cmd0.NPU_SET_IFM_IB_END, shram_required)
512
513 # Acc buffers not needed so set AB_START to size of SHRAM
514 emit.cmd0_with_param(cmd0.NPU_SET_AB_START, arch.shram_total_banks)
515
516 # Is not a unary operator
517 if cmd.ifm2_tensor is not None:
518 if cmd.ifm2_tensor.shape == []:
519 # IFM2 is a constant, set UseIFM2Scalar bit to IFM2_BROADCAST
520 ifm2_broadcast |= IFM2Broadcast.UseIFM2Scalar
521 else:
522 ifm_box_shape = cmd.ifm_box.get_size_shape()
523 ifm2_box_shape = cmd.ifm2_box.get_size_shape()
524
525 if len(cmd.ifm_tensor.shape) > 1 and ifm_box_shape[1] != ifm2_box_shape[1]:
526 # Broadcast in 'H' dimension
527 assert cmd.ifm2_tensor.shape[1] == 1
528 ifm2_broadcast |= IFM2Broadcast.BroadcastHdim
529
530 if len(cmd.ifm_tensor.shape) > 2 and ifm_box_shape[2] != ifm2_box_shape[2]:
531 # Broadcast in 'W' dimension
532 assert cmd.ifm2_tensor.shape[2] == 1
533 ifm2_broadcast |= IFM2Broadcast.BroadcastWdim
534
535 if len(cmd.ifm_tensor.shape) > 3 and ifm_box_shape[3] != ifm2_box_shape[3]:
536 # Broadcast in 'C' dimension
537 assert cmd.ifm2_tensor.shape[3] == 1
538 ifm2_broadcast |= IFM2Broadcast.BroadcastCdim
539
540 # Set IFM2_IB_START to the latter half of the IB space
541 ifm_ib_start = shared_buffer.bank_locations[SharedBufferArea.IFM]
542 emit.cmd0_with_param(
543 cmd0.NPU_SET_IFM2_IB_START, (shram_required - ifm_ib_start) / 2 + ifm_ib_start
544 )
545
546 emit.cmd0_with_param(cmd0.NPU_SET_IFM2_BROADCAST, ifm2_broadcast)
547
548 else:
549 emit.cmd0_with_param(
550 cmd0.NPU_SET_IFM_IB_END,
551 shared_buffer.bank_locations[SharedBufferArea.IFM]
552 + shared_buffer.banks_required[SharedBufferArea.IFM],
553 )
554 emit.cmd0_with_param(cmd0.NPU_SET_AB_START, shared_buffer.bank_locations[SharedBufferArea.Accumulators])
555
556 emit.cmd0_with_param(cmd0.NPU_SET_ACC_FORMAT, acc_format_map[shared_buffer.use_accumulator_element])
557
Dwight Lidman3ec04ac2020-04-30 11:54:48 +0200558 if primary_op.type == 'ResizeBilinear':
559 # perform nearest neighbor upscale
560 emit.cmd0_with_param(cmd0.NPU_SET_IFM_UPSCALE, 1)
561 else:
562 emit.cmd0_with_param(cmd0.NPU_SET_IFM_UPSCALE, 0)
Tim Hall79d07d22020-04-27 18:20:16 +0100563
564 if npu_block_type in set(
565 (NpuBlockType.ConvolutionMxN, NpuBlockType.ConvolutionDepthWise, NpuBlockType.Pooling)
566 ):
567 # Set up padding
568 explicit_padding = list(primary_op.attrs["explicit_padding"]) # (top, left, bottom, right)
569
570 # Check if this is for horizontal ifm streaming
571 if not (cmd.is_first_h_stripe and cmd.is_last_h_stripe):
572 explicit_padding[0] = cmd.pad_top
573 explicit_padding[2] = cmd.pad_bottom
574
575 # Indexing from end since a 1x1 Avgpool might have been added with non 4-dimensional input/output,
576 # because of activation function needed to be fused.
577 if cmd.ifm_box.start_coord[-2] > 0:
578 explicit_padding[1] = 0
579 if cmd.ifm_box.end_coord[-2] < cmd.ifm_tensor.shape[-2]:
580 explicit_padding[3] = 0
581
582 emit.cmd0_with_param(cmd0.NPU_SET_IFM_PAD_TOP, explicit_padding[0])
583 emit.cmd0_with_param(cmd0.NPU_SET_IFM_PAD_LEFT, explicit_padding[1])
584 emit.cmd0_with_param(cmd0.NPU_SET_IFM_PAD_BOTTOM, explicit_padding[2])
585 emit.cmd0_with_param(cmd0.NPU_SET_IFM_PAD_RIGHT, explicit_padding[3])
586
Dwight Lidman0538a772020-05-06 14:09:17 +0200587 # set kernel x stride low bit
588 stride = primary_op.attrs["strides"][2] - 1 & 1
589 # set kernel y stride low bit
590 stride |= (primary_op.attrs["strides"][1] - 1 & 1) << 1
591 # set kernel x stride extension bits
592 stride |= (primary_op.attrs["strides"][2] - 1 >> 1) << 6
593 # set kernel y stride extension bits
594 stride |= (primary_op.attrs["strides"][1] - 1 >> 1) << 9
595
Tim Hall79d07d22020-04-27 18:20:16 +0100596
597 if npu_block_type == NpuBlockType.Pooling:
598 k_height, k_width = primary_op.attrs["ksize"][1:3]
599 emit.cmd0_with_param(cmd0.NPU_SET_KERNEL_HEIGHT_M1, k_height - 1)
600 emit.cmd0_with_param(cmd0.NPU_SET_KERNEL_WIDTH_M1, k_width - 1)
601
602 valid_padding = sum(explicit_padding) == 0
603
Dwight Lidman3ec04ac2020-04-30 11:54:48 +0200604 if primary_op.type in set(("AvgPool", "AvgPoolAct", "ResizeBilinear")) and valid_padding:
Tim Hall79d07d22020-04-27 18:20:16 +0100605 # For valid padding vela has to output scaling values
606 if faf == "Sigmoid" or faf == "Tanh":
607 rescale = 0x3000 * cmd.ifm_tensor.quantization.scale_f32
608 rescale_bits = len(bin(round_up_to_int(rescale))) - 2 + 1
609
610 scale, shift = scaling.quantise_pooling_scale(k_height * k_width, rescale_bits)
611 scale = int(round_away_zero(scale * rescale))
612 else:
613 # In case avg pool fused with concat or other memory operation, rescaling might be needed.
614 # k_height == k_width == 1 is allways true in this case
615 # Normally the scale is maximised, to get maximum precision, which means that
616 # if rescale != 1, scale need to consider the number of bits needed for rescaling
617 rescale = cmd.ifm_tensor.quantization.scale_f32 / cmd.ofm_tensor.quantization.scale_f32
618 rescale_bits = 0
619 if k_height == k_width == 1:
620 if fmf == "ConcatSliceWrite":
621 rounding_mode = rounding.NATURAL
622 if rescale > 1:
623 rescale_bits = len(bin(round_up_to_int(rescale))) - 2 + 1
624 elif rescale < 1:
625 rescale_bits = -(len(bin(round_up_to_int(1 / rescale))) - 2 - 1)
626 scale, shift = scaling.quantise_pooling_scale(k_height * k_width, rescale_bits)
627 scale = int(round_away_zero(scale * rescale))
628
629 emit.cmd1_with_offset(cmd1.NPU_SET_OFM_SCALE, scale, shift)
630 # Valid-padded average pool should use the global scale from
631 # NPU_SET_OFM_SCALE register, which is set above.
632 use_global_scale = True
633
634 else: # Convolution
635 assert cmd.weight_tensor.block_traversal != TensorBlockTraversal.Default
Fredrik Svedbergd67c0aa2020-03-30 13:15:28 +0200636 # Reduced precision quantization and natural rounding used for int16
637 if cmd.ifm_tensor.dtype == DataType.int16:
638 rounding_mode = rounding.NATURAL
Tim Hall79d07d22020-04-27 18:20:16 +0100639 emit.cmd0_with_param(cmd0.NPU_SET_KERNEL_HEIGHT_M1, cmd.weight_tensor.shape[0] - 1)
640 emit.cmd0_with_param(cmd0.NPU_SET_KERNEL_WIDTH_M1, cmd.weight_tensor.shape[1] - 1)
641 if cmd.weight_tensor.block_traversal == TensorBlockTraversal.PartKernelFirst:
642 # Part-kernel-first weight ordering
643 assert npu_block_type == NpuBlockType.ConvolutionMxN
644 stride |= 1 << 2
645
646 emit.cmd0_with_param(cmd0.NPU_SET_KERNEL_STRIDE, stride)
647
648 elif npu_block_type in set((NpuBlockType.VectorProduct,)):
649 # Vector product is implemented using a 1x1 convolution so need
650 # to setup the appropriate padding and kernel info
651 emit.cmd0_with_param(cmd0.NPU_SET_IFM_PAD_TOP, 0)
652 emit.cmd0_with_param(cmd0.NPU_SET_IFM_PAD_LEFT, 0)
653 emit.cmd0_with_param(cmd0.NPU_SET_IFM_PAD_BOTTOM, 0)
654 emit.cmd0_with_param(cmd0.NPU_SET_IFM_PAD_RIGHT, 0)
655
656 # kernel stride reg = 0 means stride(1,1) + depth first weight
657 # order + dilation(0,0) + kernel_split_size=8
658 emit.cmd0_with_param(cmd0.NPU_SET_KERNEL_STRIDE, 0)
659
660 emit.cmd0_with_param(cmd0.NPU_SET_KERNEL_HEIGHT_M1, 0)
661 emit.cmd0_with_param(cmd0.NPU_SET_KERNEL_WIDTH_M1, 0)
662
663 if npu_block_type in set(
664 (NpuBlockType.ConvolutionMxN, NpuBlockType.ConvolutionDepthWise, NpuBlockType.VectorProduct)
665 ):
666 # Emit Weight base address commands, only maps the area required for
667 # this command's weights from the larger tensor.
668 stream_index = cmd.weight_tensor.compressed_stream_index_from_coord(cmd.weight_box.start_coord)
669 weight_addr = cmd.weight_tensor.address_for_coordinate(cmd.weight_box.start_coord)
670 weight_len = cmd.weight_tensor.size_of_compressed_stream(stream_index)
671 # Select weight/scale region depending on where permanent storage was defined
672 weight_region = base_ptr_idx_map[cmd.weight_tensor.mem_area]
673 if arch.permanent_storage_mem_area == MemArea.Sram:
674 weight_region = BasePointerIndex.ReadOnly
675 emit.cmd0_with_param(cmd0.NPU_SET_WEIGHT_REGION, weight_region)
676 emit.cmd1_with_offset(cmd1.NPU_SET_WEIGHT_BASE, weight_addr)
677 emit.cmd1_with_offset(cmd1.NPU_SET_WEIGHT_LENGTH, weight_len)
678
679 # Emit Scale & Bias base address commands, with length matching the amount required by
680 # the weight tensors.
681 if cmd.scale_tensor is not None:
682 # Get address and size of the scale/bias data area
683 scale_addr = cmd.scale_tensor.address_for_coordinate(cmd.weight_box.start_coord[-1:])
684 scale_len = (
685 cmd.scale_tensor.address_for_coordinate(cmd.weight_box.end_coord[-1:], True) - scale_addr
686 )
687 # Emit base address for NPU to access scale & bias data
688 scale_region = base_ptr_idx_map[cmd.scale_tensor.mem_area]
689 if arch.permanent_storage_mem_area == MemArea.Sram:
690 scale_region = BasePointerIndex.ReadOnly
691 emit.cmd0_with_param(cmd0.NPU_SET_SCALE_REGION, scale_region)
692 emit.cmd1_with_offset(cmd1.NPU_SET_SCALE_BASE, scale_addr)
693 emit.cmd1_with_offset(cmd1.NPU_SET_SCALE_LENGTH, round_up(scale_len, 16))
694
695 ofm_quant = cmd.ofm_tensor.quantization
696 ofm_quant_qmin = cmd.ofm_tensor.quantization.quant_min
697 ofm_quant_qmax = cmd.ofm_tensor.quantization.quant_max
698 ifm_min = cmd.ifm_tensor.quantization.min
699 ifm_max = cmd.ifm_tensor.quantization.max
700
701 # Emit commands for any fused activation function
Diego Russoea6111a2020-04-14 18:41:58 +0100702 if faf is None:
Tim Hall79d07d22020-04-27 18:20:16 +0100703 emit.cmd0_with_param(cmd0.NPU_SET_ACTIVATION, activation.NONE)
704 # Even if no activation function, values need to be set to override previous values
705 faf_min = ofm_quant_qmin
706 faf_max = ofm_quant_qmax
707 elif faf == "Relu":
708 emit.cmd0_with_param(cmd0.NPU_SET_ACTIVATION, activation.NONE)
709 faf_min = quantise_float32(0.0, ofm_quant.scale_f32, ofm_quant.zero_point)
710 faf_max = ofm_quant_qmax
711 elif faf == "Relu6":
712 emit.cmd0_with_param(cmd0.NPU_SET_ACTIVATION, activation.NONE)
713 faf_min = quantise_float32(0.0, ofm_quant.scale_f32, ofm_quant.zero_point)
714 faf_max = quantise_float32(6.0, ofm_quant.scale_f32, ofm_quant.zero_point)
715 elif faf == "ReluN1To1":
716 emit.cmd0_with_param(cmd0.NPU_SET_ACTIVATION, activation.NONE)
717 faf_min = quantise_float32(-1.0, ofm_quant.scale_f32, ofm_quant.zero_point)
718 faf_max = quantise_float32(1.0, ofm_quant.scale_f32, ofm_quant.zero_point)
719 elif faf == "Tanh":
720 emit.cmd0_with_param(cmd0.NPU_SET_ACTIVATION, activation.TANH)
721 faf_min = quantise_float32(clamp_tanh(ifm_min), ofm_quant.scale_f32, ofm_quant.zero_point)
722 faf_max = quantise_float32(clamp_tanh(ifm_max), ofm_quant.scale_f32, ofm_quant.zero_point)
723 elif faf == "Sigmoid":
724 emit.cmd0_with_param(cmd0.NPU_SET_ACTIVATION, activation.SIGMOID)
725 faf_min = quantise_float32(clamp_sigmoid(ifm_min), ofm_quant.scale_f32, ofm_quant.zero_point)
726 faf_max = quantise_float32(clamp_sigmoid(ifm_max), ofm_quant.scale_f32, ofm_quant.zero_point)
727 else:
728 raise Exception("Unsupported fused_activation_function = " + faf)
729
730 # Activation range needs to be set based upon the quantisation range and the fused activation range
731 emit.cmd0_with_param(cmd0.NPU_SET_ACTIVATION_MIN, max(ofm_quant_qmin, faf_min))
732 emit.cmd0_with_param(cmd0.NPU_SET_ACTIVATION_MAX, min(ofm_quant_qmax, faf_max))
733
734 out_shape = cmd.ofm_box.get_size_shape()
735 if len(out_shape) >= 4:
736 emit.cmd0_with_param(cmd0.NPU_SET_OFM_HEIGHT_M1, out_shape[-3] - 1)
737 else:
738 emit.cmd0_with_param(cmd0.NPU_SET_OFM_HEIGHT_M1, 0)
739 if len(out_shape) >= 2:
740 emit.cmd0_with_param(cmd0.NPU_SET_OFM_WIDTH_M1, out_shape[-2] - 1)
741 else:
742 emit.cmd0_with_param(cmd0.NPU_SET_OFM_WIDTH_M1, 0)
743 emit.cmd0_with_param(cmd0.NPU_SET_OFM_DEPTH_M1, out_shape[-1] - 1)
744
745 if npu_block_type in set((NpuBlockType.ConvolutionMxN, NpuBlockType.VectorProduct)):
746 in_shape = cmd.ifm_box.get_size_shape()
747 emit.cmd0_with_param(cmd0.NPU_SET_IFM_DEPTH_M1, in_shape[-1] - 1)
748 else:
749 emit.cmd0_with_param(cmd0.NPU_SET_IFM_DEPTH_M1, out_shape[-1] - 1)
750
Jacob Bohlin3c678292020-04-27 10:27:25 +0200751 for tens, box, region_op, ptr_ops, stride_ops, zero_point_op in (
Tim Hall79d07d22020-04-27 18:20:16 +0100752 (
753 cmd.ifm_tensor,
754 cmd.ifm_box,
Jacob Bohlin3c678292020-04-27 10:27:25 +0200755 cmd0.NPU_SET_IFM_REGION,
Tim Hall79d07d22020-04-27 18:20:16 +0100756 (cmd1.NPU_SET_IFM_BASE0, cmd1.NPU_SET_IFM_BASE1, cmd1.NPU_SET_IFM_BASE2, cmd1.NPU_SET_IFM_BASE3),
757 (cmd1.NPU_SET_IFM_STRIDE_C, cmd1.NPU_SET_IFM_STRIDE_Y, cmd1.NPU_SET_IFM_STRIDE_X),
758 cmd0.NPU_SET_IFM_ZERO_POINT,
759 ),
760 (
761 cmd.ifm2_tensor,
762 cmd.ifm2_box,
Jacob Bohlin3c678292020-04-27 10:27:25 +0200763 cmd0.NPU_SET_IFM2_REGION,
Tim Hall79d07d22020-04-27 18:20:16 +0100764 (
765 cmd1.NPU_SET_IFM2_BASE0,
766 cmd1.NPU_SET_IFM2_BASE1,
767 cmd1.NPU_SET_IFM2_BASE2,
768 cmd1.NPU_SET_IFM2_BASE3,
769 ),
770 (cmd1.NPU_SET_IFM2_STRIDE_C, cmd1.NPU_SET_IFM2_STRIDE_Y, cmd1.NPU_SET_IFM2_STRIDE_X),
771 cmd0.NPU_SET_IFM2_ZERO_POINT,
772 ),
773 (
774 cmd.ofm_tensor,
775 cmd.ofm_box,
Jacob Bohlin3c678292020-04-27 10:27:25 +0200776 cmd0.NPU_SET_OFM_REGION,
Tim Hall79d07d22020-04-27 18:20:16 +0100777 (cmd1.NPU_SET_OFM_BASE0, cmd1.NPU_SET_OFM_BASE1, cmd1.NPU_SET_OFM_BASE2, cmd1.NPU_SET_OFM_BASE3),
778 (cmd1.NPU_SET_OFM_STRIDE_C, cmd1.NPU_SET_OFM_STRIDE_Y, cmd1.NPU_SET_OFM_STRIDE_X),
779 cmd0.NPU_SET_OFM_ZERO_POINT,
780 ),
781 ):
782
Diego Russoea6111a2020-04-14 18:41:58 +0100783 if tens is None:
Tim Hall79d07d22020-04-27 18:20:16 +0100784 continue
785
Diego Russoea6111a2020-04-14 18:41:58 +0100786 need_zero_point = (faf is not None) or (fmf == "ConcatSliceWrite")
Tim Hall79d07d22020-04-27 18:20:16 +0100787 if (
788 primary_op.type in set(("AvgPool", "AvgPoolAct")) and not need_zero_point
Diego Russoea6111a2020-04-14 18:41:58 +0100789 ) or tens.quantization is None:
Tim Hall79d07d22020-04-27 18:20:16 +0100790 # Actual integer operation, just set scale to 1 and zero point to 0
791 emit.cmd0_with_param(zero_point_op, 0)
792 else:
793 assert tens.quantization.zero_point is not None, "need an actual zero point set"
794 emit.cmd0_with_param(zero_point_op, int(tens.quantization.zero_point))
795
796 if tens.shape == []:
797 # Empty shape, elementwise constant
798 ifm2_scalar = tens.quant_values.astype(np.uint8)
799 assert ifm2_scalar.size == 1
800 emit.cmd0_with_param(cmd0.NPU_SET_IFM2_SCALAR, ifm2_scalar.item(0))
801 continue
802
803 height_0, height_1, width_0, addresses = tens.addresses_for_rolling_buffer(
804 box.start_coord, box.end_coord
805 )
806 if npu_block_type != NpuBlockType.VectorProduct:
807 if tens == cmd.ifm_tensor:
808 emit.cmd0_with_param(cmd0.NPU_SET_IFM_HEIGHT0_M1, height_0 - 1)
809 emit.cmd0_with_param(cmd0.NPU_SET_IFM_HEIGHT1_M1, height_1 - 1)
810 emit.cmd0_with_param(cmd0.NPU_SET_IFM_WIDTH0_M1, width_0 - 1)
811 elif tens == cmd.ofm_tensor:
812 emit.cmd0_with_param(cmd0.NPU_SET_OFM_HEIGHT0_M1, height_0 - 1)
813 emit.cmd0_with_param(cmd0.NPU_SET_OFM_HEIGHT1_M1, height_1 - 1)
814 emit.cmd0_with_param(cmd0.NPU_SET_OFM_WIDTH0_M1, width_0 - 1)
815 elif tens == cmd.ifm2_tensor:
816 emit.cmd0_with_param(cmd0.NPU_SET_IFM2_HEIGHT0_M1, height_0 - 1)
817 emit.cmd0_with_param(cmd0.NPU_SET_IFM2_HEIGHT1_M1, height_1 - 1)
818 emit.cmd0_with_param(cmd0.NPU_SET_IFM2_WIDTH0_M1, width_0 - 1)
819 else:
820 if len(out_shape) == 2:
821 # TODO: N is put in W-dimension for now
822 # Should be spread over H and W, but then block size selectetion,
823 # and stride calculation should be changed
824 if tens == cmd.ifm_tensor:
825 emit.cmd0_with_param(cmd0.NPU_SET_IFM_WIDTH0_M1, out_shape[-2] - 1)
826 elif tens == cmd.ofm_tensor:
827 emit.cmd0_with_param(cmd0.NPU_SET_OFM_WIDTH0_M1, out_shape[-2] - 1)
828 else:
829 assert False
830
Jacob Bohlin3c678292020-04-27 10:27:25 +0200831 if tens.mem_area == MemArea.Sram:
832 emit.cmd0_with_param(region_op, BasePointerIndex.Scratch)
833 else:
834 emit.cmd0_with_param(region_op, BasePointerIndex.ReadOnly)
835
Tim Hall79d07d22020-04-27 18:20:16 +0100836 for idx, addr in enumerate(addresses):
837 if addr is None:
838 addresses[idx] = 0
839
840 emit.cmd1_with_offset(ptr_ops[0], addresses[0])
841 emit.cmd1_with_offset(ptr_ops[1], addresses[1])
842 emit.cmd1_with_offset(ptr_ops[2], addresses[2])
843 emit.cmd1_with_offset(ptr_ops[3], addresses[3])
844
845 strides = tens.get_strides()
846 emit.cmd1_with_offset(stride_ops[0], strides[1]) # stride between 16-byte channel blocks (C)
847 emit.cmd1_with_offset(stride_ops[2], strides[3]) # stride between horisontal values (W)
848 emit.cmd1_with_offset(stride_ops[1], strides[2]) # stride between vertical values (H)
849
850 if tens.format == TensorFormat.NHCWB16:
851 # Check that all BasePointer addresses are aligned to 16 bytes
852 assert (int(addresses[0]) % 16) == 0
853 assert (int(addresses[1]) % 16) == 0
854 assert (int(addresses[2]) % 16) == 0
855 assert (int(addresses[3]) % 16) == 0
856
857 ofm_dtype = cmd.ofm_tensor.dtype
858 assert ofm_dtype.type & BaseType.Int
859 prec = 0
860 if ofm_dtype.size_in_bits() == 8:
861 prec = 0
862 elif ofm_dtype.size_in_bits() == 16:
863 prec = 2
864 else:
865 assert 0
866
867 if ofm_dtype.type & BaseType.Signed:
868 prec += 1
869
870 if use_global_scale:
871 # Set global scale bit, as opposed to using per channel scale
872 prec |= 1 << 8
873
874 if cmd.ofm_tensor.format == TensorFormat.NHCWB16:
875 prec |= 1 << 6
876
877 prec |= rounding_mode.value << 14
878
879 emit.cmd0_with_param(cmd0.NPU_SET_OFM_PRECISION, prec)
880
881 prec = None
882 weight_bits = 8
883 if cmd.weight_tensor is not None:
884 weight_bits = cmd.weight_tensor.dtype.size_in_bits()
885
886 ifm_dtype = cmd.ifm_tensor.dtype
887
888 assert weight_bits == 8, "Unsupported weight bit depth"
889 assert ifm_dtype.size_in_bits() in {8, 16}
890
891 if ifm_dtype.size_in_bits() == 8:
892 if ifm_dtype.type & BaseType.Signed:
Diqing Zhongfed918b2020-04-27 10:27:34 +0200893 prec = ifm_precision.S8
Tim Hall79d07d22020-04-27 18:20:16 +0100894 else:
Diqing Zhongfed918b2020-04-27 10:27:34 +0200895 prec = ifm_precision.U8
Tim Hall79d07d22020-04-27 18:20:16 +0100896 elif ifm_dtype.size_in_bits() == 16:
897 if ifm_dtype.type & BaseType.Signed:
Diqing Zhongfed918b2020-04-27 10:27:34 +0200898 prec = ifm_precision.S16
Tim Hall79d07d22020-04-27 18:20:16 +0100899 else:
Diqing Zhongfed918b2020-04-27 10:27:34 +0200900 prec = ifm_precision.U16
Tim Hall79d07d22020-04-27 18:20:16 +0100901
902 ifm_prec = prec.value
903 ifm2_prec = ifm_prec
904
905 if cmd.ifm_tensor.format == TensorFormat.NHCWB16:
906 ifm_prec |= 1 << 6
907
908 ifm_prec |= op_to_scale << 8
909
910 emit.cmd0_with_param(cmd0.NPU_SET_IFM_PRECISION, ifm_prec)
911
912 if cmd.ifm2_tensor is not None:
913 if cmd.ifm2_tensor.format == TensorFormat.NHCWB16:
914 ifm2_prec |= 1 << 6
915 emit.cmd0_with_param(cmd0.NPU_SET_IFM2_PRECISION, ifm2_prec)
916
917 emit_wait_commands(cmd)
918
919 # Get op parameters
920 cur_ifm_block_depth = get_op_ifmofm_block_depth(arch, cmd)
921 cur_ofm_block = Block(ps.block_config[1], ps.block_config[0], ps.block_config[3])
922 cur_ofm_rect = get_op_ofm_rect(cmd)
923 cur_ifm_rect = get_op_ifm_rect(cmd)
924 cur_kernel = get_op_kernel(cmd.ps)
925 cur_padLT = get_op_padding_lt(cmd)
926 if (prev_kernel is not None) and (cur_kernel is not None) and has_prev_op_dependency(prev_cmd, cmd):
927 if cmd.ifm_tensor.shape == prev_cmd.ofm_tensor.shape:
928 blockdep = arch.calc_block_dep(
929 prev_ifm_rect,
930 prev_ofm_rect,
931 prev_ifm_block_depth,
932 prev_ofm_block,
933 prev_kernel,
934 cur_ifm_rect,
935 cur_ofm_rect,
936 cur_ifm_block_depth,
937 cur_ofm_block,
938 cur_kernel,
939 cur_padLT,
940 )
941 else:
942 blockdep = 0
943 else:
944 blockdep = ArchitectureFeatures.MAX_BLOCKDEP
945
946 # Set between every op (dependent or not)
947 blockdep = min(blockdep, arch.max_blockdep)
948 emit.cmd0_with_param(cmd0.NPU_SET_BLOCKDEP, blockdep)
949 prev_cmd = cmd
950
951 if npu_block_type == NpuBlockType.ConvolutionMxN:
952 emit.cmd_do_operation(cmd0.NPU_OP_CONV)
953 elif npu_block_type == NpuBlockType.ConvolutionDepthWise:
954 emit.cmd_do_operation(cmd0.NPU_OP_DEPTHWISE)
955 elif npu_block_type == NpuBlockType.VectorProduct:
956 # Vector product is implemented using a 1x1 convolution
957 emit.cmd_do_operation(cmd0.NPU_OP_CONV)
958 elif npu_block_type == NpuBlockType.Pooling:
959 param = "Max" not in primary_op.type
960 emit.cmd_do_operation(cmd0.NPU_OP_POOL, param=param)
961 elif npu_block_type == NpuBlockType.ElementWise:
962 param = elementwise_mode_map[primary_op.type]
963 emit.cmd_do_operation(cmd0.NPU_OP_ELEMENTWISE, param)
964 else:
965 print("Warning: Skipping register command stream generation for", ps)
966
967 # Fill in final part of command stream:
968 emit.cmd_do_operation(cmd0.NPU_OP_STOP, param=0xFFFF)
969
970 sg.register_command_stream = emit.to_list()
971 if verbose:
972 emit.print_cmds()
973 print("number of commands", len(emit.cmd_stream))
974 print("command stream length in words", len(sg.register_command_stream))