# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Internal representation of a Neural Network Tensor.
import enum
import uuid

import numpy as np

from . import numeric_util
from .data_type import DataType
from .ethos_u55_regs.ethos_u55_regs import resampling_mode
from .operation import Operation
from .range_set import MemoryRangeSet


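# Memory type of a tensor: permanent data (e.g. weights placed by the compiler) versus scratch
# data that is only live while the network executes. Scratch_fast is used for the fast storage area.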
class MemType(enum.IntFlag):
    Unknown = 0
    Permanent_NPU = 1
    Permanent_CPU = 2
    Scratch = 3
    Scratch_fast = 4
    Size = Scratch_fast + 1

    def display_name(self):
        return ("Unknown", "Permanent_NPU", "Permanent_CPU", "Scratch", "Scratch_fast", "Size")[self.value]

    def identifier_name(self):
        return ("unknown", "permanent_npu", "permanent_cpu", "scratch", "scratch_fast", "size")[self.value]

    def all():
        return (MemType.Permanent_NPU, MemType.Permanent_CPU, MemType.Scratch, MemType.Scratch_fast)

    def __str__(self):
        return self.name


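# Physical memory area where a tensor is stored (SRAM, DRAM, on-chip or off-chip flash).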
class MemArea(enum.IntFlag):
    Unknown = 0
    Sram = 1
    Dram = 2
    OnChipFlash = 3
    OffChipFlash = 4
    Size = OffChipFlash + 1

    def display_name(self):
        return ("Unknown", "SRAM", "DRAM", "On-chip Flash", "Off-chip Flash", "Size")[self.value]

    def identifier_name(self):
        return ("unknown", "sram", "dram", "on_chip_flash", "off_chip_flash", "size")[self.value]

    def all():
        return (MemArea.Sram, MemArea.Dram, MemArea.OnChipFlash, MemArea.OffChipFlash)

    def __str__(self):
        return self.name


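# What a tensor is used for: weights, feature maps, scratch storage or look-up tables (LUT).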
class TensorPurpose(enum.IntFlag):
    Unknown = 0
    Weights = 1
    FeatureMap = 2
    Scratch = 3
    LUT = 4
    Size = 5

    def display_name(self):
        return ("Unknown", "Weights", "FeatureMap", "Scratch", "LUT", "Size")[self.value]

    def identifier_name(self):
        return ("unknown", "weights", "feature_map", "scratch", "lut", "size")[self.value]

    def all():
        return (TensorPurpose.Weights, TensorPurpose.FeatureMap)


class TensorSubPurpose(enum.Enum):
    Standard = 0
    DoubleBuffer = 1
    RollingBufferX = 2
    RollingBufferY = 3
    RollingBufferXY = 4

    def display_name(self):
        return ("Standard", "Double Buffer", "Rolling Buffer X", "Rolling Buffer Y", "Rolling Buffer XY")[self.value]

    def identifier_name(self):
        return ("standard", "double_buffer", "rolling_buffer_x", "rolling_buffer_y", "rolling_buffer_xy")[self.value]

    def all():
        return (
            TensorSubPurpose.Standard,
            TensorSubPurpose.DoubleBuffer,
            TensorSubPurpose.RollingBufferX,
            TensorSubPurpose.RollingBufferY,
            TensorSubPurpose.RollingBufferXY,
        )


class TensorFormat(enum.Flag):
    Unknown = 0
    WeightsCompressed = 1
    NHWC = 2
    NHCWB16 = 3

    def __str__(self):
        return self.name


class TensorBlockTraversal(enum.Enum):
    Default = 0
    DepthWise = 1
    DepthFirst = 2
    PartKernelFirst = 3


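# Shape helpers. Shapes are plain Python lists that may contain None for dimensions that are not yet known.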
def shape_num_elements(shp):
    elems = 1
    if shp is None:
        return None
    for d in shp:
        if d is None:
            return None
        elems *= d
    return elems


def shape_fully_defined(shp):
    if shp is None:
        return False
    for d in shp:
        if d is None:
            return False
    return True


def shape_round_to_quantum(shp, quantum):
    new_shp = list(shp)

    # Traverse backwards using length of shape since there may be more rounding quantums than shape elements
    for i in range(-1, -len(shp) - 1, -1):
        if new_shp[i] is not None:
            new_shp[i] = numeric_util.round_up(new_shp[i], quantum[i])
    return new_shp


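# Per-tensor (or per-channel) quantization parameters. Dequantization follows
# value = (quantized_value - zero_point) * scale_f32, see dequantize() below.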
class QuantizationParameters:
    __slots__ = "min", "max", "num_bits", "narrow_range", "scale_f32", "zero_point", "quant_min", "quant_max"

    def __init__(self, min=None, max=None, num_bits=None, narrow_range=None):
        self.min = min
        self.max = max

        self.num_bits = num_bits
        self.narrow_range = narrow_range

        self.scale_f32 = None
        self.zero_point = None
        self.quant_min = None
        self.quant_max = None

    def __str__(self):
        return "<nng.QuantizationParameters min=%s max=%s, num_bits=%s, scale=%s, zero_point=%s>" % (
            self.min,
            self.max,
            self.num_bits,
            self.scale_f32,
            self.zero_point,
        )

    __repr__ = __str__

    def __eq__(self, other):
        if other is None:
            return False
        if not isinstance(other, QuantizationParameters):
            return False

        pairs = ((getattr(self, s), getattr(other, s)) for s in QuantizationParameters.__slots__)

        return all(np.array_equal(a, b) for a, b in pairs)

    def __ne__(self, other):
        return not self == other

    def clone(self):
        res = QuantizationParameters()
        res.min = self.min
        res.max = self.max

        res.num_bits = self.num_bits
        res.narrow_range = self.narrow_range

        res.scale_f32 = self.scale_f32
        res.zero_point = self.zero_point
        res.quant_min = self.quant_min
        res.quant_max = self.quant_max
        return res

    def dequantize(self, values):
        if self.zero_point.size == 1 and self.scale_f32.size == 1:
            # same scale is used for all values
            res = (values.astype(np.float64) - self.zero_point) * self.scale_f32
        else:
            # a different scale is used for different sets of values
            values_as_float = values.astype(np.float64)

            # this is not compatible with the format of depthwise weights,
            # where input is at index 3 (Output, Kh, Kw, Input)
            # return the quantized values
            return np.ndarray((values_as_float.shape))

            shape = values_as_float.shape[0]
            assert self.zero_point.size == self.scale_f32.size == shape
            res = np.ndarray(values_as_float.shape)
            for i in range(shape):
                res[i] = (values_as_float[i] - self.zero_point[i]) * self.scale_f32[i]

        return res


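# Creates a constant tensor and wires it up as the output of a "Const" operation.
# Illustrative use: create_const_tensor("bias", [1], DataType.int32, [0])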
def create_const_tensor(name, shape, dtype, values, value_dtype=None, purpose=TensorPurpose.Unknown, quantization=None):
    # Tensor
    const_tensor = Tensor(shape, dtype, name + "_0")
    const_tensor.purpose = purpose
    const_tensor.quantization = quantization
    const_tensor.values = np.array(values, dtype=value_dtype)
    const_tensor.quant_values = np.frombuffer(const_tensor.values.tobytes(), dtype=np.uint8)
    # Operator
    const_op = Operation("Const", name)
    const_op.set_output_tensor(const_tensor)
    return const_tensor


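# Inserts a Reshape operation: with ifm_reshape=True the given tensor feeds the Reshape and the new,
# reshaped output tensor is returned; with ifm_reshape=False a new input tensor is created and
# returned, and the given tensor becomes the Reshape output.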
def create_reshape_tensor(tens, shape, ifm_reshape=True):
    if shape == tens.shape:
        return tens
    # Tensors
    name = tens.name + "_reshape"
    reshape_ifm = tens
    reshape_ofm = tens.clone("_reshaped")
    reshape_ofm.set_all_shapes(shape)
    if not ifm_reshape:
        reshape_ifm, reshape_ofm = reshape_ofm, reshape_ifm
    # Operator
    reshape_op = Operation("Reshape", name)
    reshape_op.attrs["new_shape"] = shape
    reshape_op.add_input_tensor(reshape_ifm)
    reshape_op.add_input_tensor(create_const_tensor(name + "_shape", [1], DataType.int32, shape))
    reshape_op.set_output_tensor(reshape_ofm)
    return reshape_ofm if ifm_reshape else reshape_ifm


class Tensor:
    __slots__ = (
        "shape",
        "storage_shape",
        "bandwidth_shape",
        "dtype",
        "name",
        "ops",
        "consumer_list",
        "values",
        "quant_values",
        "compressed_values",
        "compressed_values_substream_offsets",
        "mem_area",
        "mem_type",
        "format",
        "purpose",
        "sub_purpose",
        "alignment",
        "weight_transpose_depthwise",
        "storage_compression_scale",
        "bandwidth_compression_scale",
        "compression_scale_for_worst_weight_stream",
        "weight_compression_scales",
        "weight_compression_config",
        "storage_rounding_quantum",
        "brick_size",
        "address",
        "quantization",
        "weight_compressed_offsets",
        "element_size_bytes",
        "block_traversal",
        "cpu_tensor",
        "npu_tensor",
        "equivalence_id",
        "resampling_mode",
        "avoid_NHCWB16",
    )
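    # Default alignment, in bytes, used when rounding up tensor storage sizes.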
    AllocationQuantum = 16

    def __init__(self, shape, dtype, name):
        self.shape = shape
        self.storage_shape = shape
        self.bandwidth_shape = shape
        self.dtype = dtype
        self.name = name
        self.equivalence_id = uuid.uuid4()

        self.ops = []
        self.consumer_list = []
        # Below attributes are only set if a tensor has been cloned,
        # either from Cpu -> Npu or vice versa. Needed for offline allocation
        self.cpu_tensor = None  # reference to the corresponding Cpu tensor
        self.npu_tensor = None  # reference to the corresponding Npu tensor

        self.values = None
        self.quant_values = None
        self.compressed_values = None
        self.compressed_values_substream_offsets = None
        self.mem_area = MemArea.Unknown
        self.mem_type = MemType.Unknown
        self.format = TensorFormat.Unknown
        self.purpose = TensorPurpose.Unknown
        self.sub_purpose = TensorSubPurpose.Standard
        self.alignment = Tensor.AllocationQuantum
        self.weight_transpose_depthwise = False

        self.storage_compression_scale = 1.0
        self.bandwidth_compression_scale = 1.0
        self.compression_scale_for_worst_weight_stream = 1.0
        self.weight_compression_scales = None
        self.weight_compression_config = None
        self.weight_compressed_offsets = []
        self.storage_rounding_quantum = (1, 1, 1, 1)
        self.brick_size = (1, 1, 1, 1)
        self.address = None  # start address of tensor. will be filled in by tensor allocator
        self.element_size_bytes = 0

        # quantization parameters
        self.quantization = None
        self.block_traversal = TensorBlockTraversal.Default
        self.resampling_mode = resampling_mode.NONE

        self.avoid_NHCWB16 = False

    def element_size(self):
        if self.element_size_bytes == 0:
            return self.dtype.size_in_bits() / 8
        return self.element_size_bytes

    def clone(self, suffix="_clone"):
        res = Tensor(self.shape, self.dtype, self.name + suffix)
        res.storage_shape = list(self.storage_shape)
        res.bandwidth_shape = list(self.bandwidth_shape)

        res.ops = []
        res.consumer_list = []
        res.equivalence_id = self.equivalence_id

        res.values = self.values
        res.quant_values = self.quant_values
        res.mem_area = self.mem_area
        res.mem_type = self.mem_type
        res.format = self.format
        res.purpose = self.purpose
        res.sub_purpose = self.sub_purpose
        res.alignment = self.alignment
        res.bandwidth_compression_scale = self.bandwidth_compression_scale
        res.storage_rounding_quantum = self.storage_rounding_quantum
        res.address = None

        if self.quantization is not None:
            res.quantization = self.quantization.clone()
        else:
            res.quantization = None

        res.resampling_mode = self.resampling_mode

        res.copy_compressed_weight_info(self)
        res.avoid_NHCWB16 = self.avoid_NHCWB16
        return res

    def clone_into_fast_storage(self, arch):
        res = self.clone(suffix="_fast_storage")
        res.mem_area = arch.fast_storage_mem_area
        res.mem_type = MemType.Scratch_fast
        return res

    def copy_compressed_weight_info(self, src_tens):
        # Copies compressed values + all related weight compression info from the given tensor
        self.compressed_values = src_tens.compressed_values
        self.compressed_values_substream_offsets = src_tens.compressed_values_substream_offsets
        self.storage_shape = src_tens.storage_shape
        self.brick_size = src_tens.brick_size
        self.weight_compression_scales = src_tens.weight_compression_scales
        self.weight_compressed_offsets = src_tens.weight_compressed_offsets
        self.weight_transpose_depthwise = src_tens.weight_transpose_depthwise
        self.compression_scale_for_worst_weight_stream = src_tens.compression_scale_for_worst_weight_stream
        self.storage_compression_scale = src_tens.storage_compression_scale
        self.block_traversal = src_tens.block_traversal
        self.weight_compression_config = src_tens.weight_compression_config

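    # Sets the data layout of the tensor and rounds the storage and bandwidth shapes up to the
    # format's rounding quantum and brick size (for NHCWB16 the channel dimension is stored in bricks of 16).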
    def set_format(self, fmt, arch):
        self.format = fmt
        shape_len = 0
        try:
            shape_len = len(self.shape)
        except TypeError:
            pass

        self.storage_rounding_quantum = arch.storage_rounding_quantums[self.format]
        self.storage_rounding_quantum = self.storage_rounding_quantum[-shape_len:]
        self.brick_size = arch.brick_sizes[self.format]
        self.brick_size = self.brick_size[-shape_len:]
        if self.shape is None:
            return

        self.bandwidth_shape = shape_round_to_quantum(self.shape, self.brick_size)
        self.storage_shape = shape_round_to_quantum(self.shape, self.storage_rounding_quantum)

        if fmt == TensorFormat.WeightsCompressed:
            compression_ratio = 5 / 8
            self.storage_compression_scale = compression_ratio
            self.bandwidth_compression_scale = compression_ratio
            self.compression_scale_for_worst_weight_stream = compression_ratio

    def storage_elements(self):
        elems = shape_num_elements(self.storage_shape)
        if elems is None:
            return 0
        return elems

    def elements(self):
        elems = shape_num_elements(self.shape)
        if elems is None:
            return 0
        return elems

    def has_fully_defined_shape(self):
        return shape_fully_defined(self.shape)

    def storage_size(self):
        raw_size = self.storage_elements() * self.element_size()
        if raw_size == 0:
            raw_size = 1  # force it to take up space
        rounded_size = numeric_util.round_up(numeric_util.round_up_to_int(raw_size), self.alignment)
        return rounded_size

    def storage_size_for_sub_purpose(self, sub_purpose, param_a=None, param_b=None):
        alt_shape = self.storage_shape_for_sub_purpose(sub_purpose, param_a, param_b)
        elems = shape_num_elements(alt_shape)
        if elems is None:
            return 0
        if sub_purpose == TensorSubPurpose.DoubleBuffer:
            raw_size = elems * self.element_size() * self.compression_scale_for_worst_weight_stream
        else:
            raw_size = elems * self.element_size() * self.storage_compression_scale
        rounded_size = numeric_util.round_up(numeric_util.round_up_to_int(raw_size), self.alignment)
        return rounded_size

    def storage_shape_for_sub_purpose(self, sub_purpose, param_a, param_b):
        if sub_purpose == TensorSubPurpose.DoubleBuffer:
            shp = list(self.shape)
            assert len(shp) >= 2
            shp[-1] = min(shp[-1], param_a * 2)
        else:
            shp = list(self.storage_shape)
            if sub_purpose == TensorSubPurpose.RollingBufferX:
                assert len(shp) == 4
                shp[0] = 1
                shp[2] = min(shp[2], param_a)
            elif sub_purpose == TensorSubPurpose.RollingBufferY:
                assert len(shp) == 4
                shp[0] = 1
                shp[1] = min(shp[1], param_a)
            elif sub_purpose == TensorSubPurpose.RollingBufferXY:
                assert len(shp) == 4
                shp[0] = 1
                shp[2] = min(shp[2], param_a)
                shp[1] = min(shp[1], param_b)
            elif sub_purpose == TensorSubPurpose.Standard:
                pass
            else:
                assert 0, "did not expect new sub purpose %s" % (sub_purpose,)

        return shp

    def set_new_sub_purpose(self, sub_purpose, param_a=None, param_b=None):
        self.storage_shape = self.storage_shape_for_sub_purpose(sub_purpose, param_a, param_b)
        self.sub_purpose = sub_purpose
        if sub_purpose == TensorSubPurpose.DoubleBuffer:
            self.storage_compression_scale = self.compression_scale_for_worst_weight_stream

    def bandwidth(self):
        elems = shape_num_elements(self.bandwidth_shape)
        if elems is None:
            return 0
        return elems * self.element_size() * self.bandwidth_compression_scale

    def consumers(self):
        return self.consumer_list

    def get_address_ranges_for_coordinates(self, start_coord, end_coord):
        if self.sub_purpose in set(
            (TensorSubPurpose.RollingBufferX, TensorSubPurpose.RollingBufferY, TensorSubPurpose.RollingBufferXY)
        ):
            # build dummy coordinates that cover the entire buffer
            start_coord = [0] * len(start_coord)
            end_coord = [min(self.storage_shape[i], self.shape[i]) for i in range(len(end_coord))]

        start = self.address_for_coordinate(start_coord, is_top_box=False)
        end = self.address_for_coordinate(end_coord, is_top_box=True)
        return MemoryRangeSet(self.mem_area, start, end)

    def addresses_for_rolling_buffer(self, start_coord, end_coord):
        # returns ( box_height0, box_height1, box_width, [address_tl, address_tr, address_bl, address_br] )

        if len(start_coord) < 4:
            box_height0 = 1
            box_width = 1

            if len(start_coord) >= 2:
                box_width = end_coord[-2] - start_coord[-2]

            return box_height0, box_height0, box_width, [self.address_for_coordinate(start_coord), None, None, None]

        crossing_y = numeric_util.round_up(start_coord[1] + 1, self.storage_shape[1])
        crossing_x = numeric_util.round_up(start_coord[2] + 1, self.storage_shape[2])

        crossing_y = min(crossing_y, end_coord[1])
        crossing_x = min(crossing_x, end_coord[2])

        box_height0 = crossing_y - start_coord[1]
        box_width = crossing_x - start_coord[2]

        addresses = [None] * 4
        addresses[0] = self.address_for_coordinate(start_coord)

        if end_coord[2] > crossing_x:
            addresses[1] = self.address_for_coordinate([start_coord[0], start_coord[1], crossing_x, start_coord[3]])
            raise Exception("Striping in vertical direction is not supported")
        if end_coord[1] > crossing_y:
            addresses[2] = self.address_for_coordinate([start_coord[0], crossing_y, start_coord[2], start_coord[3]])
        if end_coord[1] > crossing_y and end_coord[2] > crossing_x:
            addresses[3] = self.address_for_coordinate([start_coord[0], crossing_y, crossing_x, start_coord[3]])

        return box_height0, box_height0, box_width, addresses

    def address_for_coordinate(self, coord, is_top_box=False):
        return self.address + self.address_offset_for_coordinate(coord, is_top_box)

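    # Computes per-dimension strides (and the matching augmented coordinate) for the 5-D layout
    # used by the NHWC and NHCWB16 formats. Returns (None, None) for formats without a defined
    # in-memory layout (Unknown, WeightsCompressed).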
    def get_strides_and_coord(self, coord=None):
        if coord is None:
            coord = [0] * len(self.storage_shape)

        augmented_coord = coord
        augmented_shape = self.storage_shape
        while len(augmented_shape) < 4:
            augmented_shape = [1] + augmented_shape

        while len(augmented_coord) < 4:
            augmented_coord = [0] + augmented_coord

        assert len(augmented_coord) == len(augmented_shape)

        if self.format == TensorFormat.NHWC:
            augmented_shape = [augmented_shape[0], augmented_shape[3]] + augmented_shape[1:3] + [1]
            augmented_coord = [augmented_coord[0], augmented_coord[3]] + augmented_coord[1:3] + [0]
            stride_order = [4, 1, 3, 2, 0]

        elif self.format == TensorFormat.NHCWB16:
            channel_divisor = 16
            augmented_shape = augmented_shape[0:4] + [1]
            augmented_coord = (
                [augmented_coord[0], augmented_coord[3] // channel_divisor]
                + augmented_coord[1:3]
                + [augmented_coord[3] % channel_divisor]
            )

            if augmented_shape[1] == 0:
                augmented_shape[1] = 1

        else:
            assert self.format in set((TensorFormat.Unknown, TensorFormat.WeightsCompressed))
            return None, None

        strides = [0] * len(augmented_shape)
        stride = self.element_size() * self.storage_compression_scale

        if self.format != TensorFormat.NHCWB16:
            for i in stride_order:
                strides[i] = stride
                stride *= augmented_shape[i]
        else:
            assert len(strides) == 5
            strides[4] = stride
            strides[3] = 16 * stride  # STRIDE_X
            strides[1] = strides[3] * augmented_shape[2]  # STRIDE_C
            strides[2] = augmented_shape[2] * augmented_shape[3] * stride  # STRIDE_Y
            strides[0] = strides[2] * augmented_shape[1]  # STRIDE_N

        return strides, augmented_coord

    def get_strides(self):
        strides, _ = self.get_strides_and_coord()

        return strides

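    # A weight tensor needs a DMA transfer when its only producer is a "DMA" operation; in that
    # case the source tensor (typically resident in Flash) is available via get_dma_src_tensor().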
    def needs_dma(self):
        return len(self.ops) == 1 and self.ops[0].type == "DMA"

    def get_dma_src_tensor(self):
        # For weight tensors that need DMA: returns the source tensor in Flash, else None
        # Note: for DMA ops, Pass.weight_tensor is referring to the SRAM weight tensor
        return self.ops[0].inputs[0] if self.needs_dma() else None

    def find_npu_op(self):
        # Returns the NPU operator that uses this tensor, excluding DMA operators.
        for op in self.consumers():
            if op.type == "DMA":
                return op.outputs[0].find_npu_op()
            if "npu_block_type" in op.attrs:
                return op
        return None

    def compressed_stream_index_from_coord(self, coord):
        assert self.format == TensorFormat.WeightsCompressed
        assert len(self.compressed_values) > 0
        assert len(self.compressed_values) + 1 == len(self.weight_compressed_offsets)

        depth = coord[-1]
        brick_depth = self.brick_size[-1]
        # Clamp position at final element index
        if depth > self.shape[-1]:
            depth = self.shape[-1]

        # Always round up to next boundary
        index = numeric_util.round_up_divide(depth, brick_depth)

        # Check boundaries on all but last weight set (which may be shorter
        # than the brick we divided it up into)
        if index < len(self.weight_compressed_offsets) - 1:
            # There are no half-way points in the weights
            if (depth % brick_depth) != 0:
                raise Exception("Offset into weights must be aligned to a brick")

        return index

    def size_of_compressed_stream(self, index):
        assert 0 <= index < len(self.compressed_values)
        return len(self.compressed_values[index])

    def is_last_index_in_compressed_stream(self, index):
        assert 0 <= index < len(self.compressed_values)
        return index == len(self.compressed_values) - 1

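    # Byte offset of the given coordinate within the tensor's allocation. With is_top_box=True the
    # coordinate is interpreted as the exclusive upper corner of a box, so the offset just past the
    # box's last element is returned.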
    def address_offset_for_coordinate(self, orig_coord, is_top_box=False):
        address_offset = 0
        coord = orig_coord

        coord = coord[-len(self.storage_shape) :]

        if self.sub_purpose == TensorSubPurpose.Standard:
            for idx, c in enumerate(coord):
                if is_top_box:
                    assert c > 0 and c <= self.shape[idx]
                else:
                    assert c >= 0 and c < self.shape[idx]

        if self.format == TensorFormat.WeightsCompressed:
            if len(self.weight_compressed_offsets) == 0:
                return 0

            if self.needs_dma() and self.sub_purpose == TensorSubPurpose.DoubleBuffer:
                depth = orig_coord[-1]
                brick_depth = self.brick_size[-1]
                # Clamp position at final element index
                if depth > self.shape[-1]:
                    depth = self.shape[-1]

                # Always round up to next boundary
                index = numeric_util.round_up_divide(depth, brick_depth)
                index = index % 2

                if len(self.compressed_values) <= 2:
                    if is_top_box and index == 0:
                        for cv in self.compressed_values:
                            address_offset += len(cv)
                    else:
                        address_offset = index * len(self.compressed_values[0])
                else:
                    if is_top_box and index == 0:
                        address_offset = self.storage_shape[-1]
                    else:
                        address_offset = index * (self.storage_shape[-1] // 2)
            else:
                index = self.compressed_stream_index_from_coord(orig_coord)
                assert index < len(self.weight_compressed_offsets)
                address_offset = self.weight_compressed_offsets[index]
        else:
            if is_top_box:
                coord = [c - 1 for c in coord]

            # handle wraparound for partial buffers. make sure to do this after subtracting top box:
            coord = [c % self.storage_shape[idx] for idx, c in enumerate(coord)]

            strides, augmented_coord = self.get_strides_and_coord(coord)
            if strides is None:
                return None

            if is_top_box:
                address_offset += 1 * strides[-1]  # one element

            address_offset += np.dot(augmented_coord, strides)

        assert address_offset >= 0
        assert address_offset <= self.storage_size()
        return address_offset

    def is_allocated_in_tensor_arena(self, scratch_tensor_mem_area):
        if self.mem_area == scratch_tensor_mem_area and (self.mem_type in set((MemType.Scratch, MemType.Scratch_fast))):
            return True
        return False

    def set_all_shapes(self, shape):
        self.shape = shape
        self.storage_shape = shape
        self.bandwidth_shape = shape

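    # Returns the shape expanded to four dimensions: 2-D shapes become [N, 1, 1, C], 1-D and 3-D
    # shapes are left-padded with 1s, and shapes that are already 4-D (or larger) are returned unchanged.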
    def get_full_shape(self):
        d = len(self.shape)
        if d in (1, 3):
            return numeric_util.full_shape(4, self.shape, 1)
        elif d == 2:
            return [self.shape[0], 1, 1, self.shape[1]]
        else:
            return self.shape

    def __str__(self):
        return "<nng.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.shape, self.dtype)

    __repr__ = __str__