# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# Description:
# Internal representation of a Neural Network Tensor.

import enum
import uuid

import numpy as np

from . import data_type
from . import numeric_util
from .numeric_util import round_up_divide
from .range_set import MemoryRangeSet


class MemArea(enum.IntFlag):
    Unknown = 0
    Sram = 1
    Dram = 2
    OnChipFlash = 3
    OffChipFlash = 4
    Size = OffChipFlash + 1

    def display_name(self):
        return ("Unknown", "SRAM", "DRAM", "On-chip Flash", "Off-chip Flash", "Size")[self.value]

    def identifier_name(self):
        return ("unknown", "sram", "dram", "on_chip_flash", "off_chip_flash", "size")[self.value]

    @staticmethod
    def all():
        return (MemArea.Sram, MemArea.Dram, MemArea.OnChipFlash, MemArea.OffChipFlash)

    def __str__(self):
        return self.name


class TensorPurpose(enum.IntFlag):
    Unknown = 0
    Weights = 1
    FeatureMap = 2
    Scratch = 3
    Size = 4

    def display_name(self):
        return ("Unknown", "Weights", "FeatureMap", "Scratch", "Size")[self.value]

    def identifier_name(self):
        return ("unknown", "weights", "feature_map", "scratch", "size")[self.value]

    @staticmethod
    def all():
        return (TensorPurpose.Weights, TensorPurpose.FeatureMap)


class TensorSubPurpose(enum.Enum):
    Standard = 0
    DoubleBuffer = 1
    RollingBufferX = 2
    RollingBufferY = 3
    RollingBufferXY = 4

    def display_name(self):
        return ("Standard", "Double Buffer", "Rolling Buffer X", "Rolling Buffer Y", "Rolling Buffer XY")[self.value]

    def identifier_name(self):
        return ("standard", "double_buffer", "rolling_buffer_x", "rolling_buffer_y", "rolling_buffer_xy")[self.value]

    @staticmethod
    def all():
        return (
            TensorSubPurpose.Standard,
            TensorSubPurpose.DoubleBuffer,
            TensorSubPurpose.RollingBufferX,
            TensorSubPurpose.RollingBufferY,
            TensorSubPurpose.RollingBufferXY,
        )


class TensorFormat(enum.Flag):
    Unknown = 0
    WeightsCompressed = 1
    NHWC = 2
    # flag members need distinct bit values; a value of 3 would alias WeightsCompressed | NHWC
    NHCWB16 = 4

    def __str__(self):
        return self.name


class TensorBlockTraversal(enum.Enum):
    Default = 0
    DepthWise = 1
    DepthFirst = 2
    PartKernelFirst = 3


def shape_num_elements(shp):
    elems = 1
    if shp is None:
        return None
    for d in shp:
        if d is None:
            return None
        elems *= d
    return elems
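
# Example (illustrative, not part of the original source): shapes may contain
# None for dimensions that are not yet known, in which case no element count
# can be computed:
#
#   shape_num_elements([1, 8, 8, 16]) -> 1024
#   shape_num_elements([1, None, 8])  -> None (shape not fully defined)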


def shape_fully_defined(shp):
    if shp is None:
        return False
    for d in shp:
        if d is None:
            return False
    return True


def shape_round_to_quantum(shp, quantum):
    new_shp = list(shp)

    # traverse backwards using the length of the shape, since there may be
    # more rounding quantums than shape elements
    for i in range(-1, -len(shp) - 1, -1):
        if new_shp[i] is not None:
            new_shp[i] = numeric_util.round_up(new_shp[i], quantum[i])
    return new_shp
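
# Example (illustrative, not part of the original source): rounding the last
# two axes of an NHWC shape up to an (8, 16) quantum, assuming
# numeric_util.round_up rounds up to the nearest multiple:
#
#   shape_round_to_quantum([1, 7, 7, 20], (1, 1, 8, 16)) -> [1, 7, 8, 32]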


class QuantizationParameters:
    __slots__ = "min", "max", "num_bits", "narrow_range", "scale_f32", "zero_point", "quant_min", "quant_max"

    def __init__(self, min=None, max=None, num_bits=None, narrow_range=None):
        self.min = min
        self.max = max

        self.num_bits = num_bits
        self.narrow_range = narrow_range

        self.scale_f32 = None
        self.zero_point = None
        self.quant_min = None
        self.quant_max = None

    def __str__(self):
        return "<nng.QuantizationParameters min=%s, max=%s, num_bits=%s, scale=%s, zero_point=%s>" % (
            self.min,
            self.max,
            self.num_bits,
            self.scale_f32,
            self.zero_point,
        )

    __repr__ = __str__

    def clone(self):
        res = QuantizationParameters()
        res.min = self.min
        res.max = self.max

        res.num_bits = self.num_bits
        res.narrow_range = self.narrow_range

        res.scale_f32 = self.scale_f32
        res.zero_point = self.zero_point
        res.quant_min = self.quant_min
        res.quant_max = self.quant_max
        return res

    def dequantize(self, values):
        if self.zero_point.size == 1 and self.scale_f32.size == 1:
            # the same scale is used for all values
            return (values.astype(np.float64) - self.zero_point) * self.scale_f32

        # a different scale is used for different sets of values
        values_as_float = values.astype(np.float64)

        # per-axis scales are not compatible with the format of depthwise
        # weights, where the input channel is at index 3 (Output, Kh, Kw, Input);
        # when the scales do not line up with the leading dimension, return the
        # values cast to float without rescaling
        depth = values_as_float.shape[0]
        if self.zero_point.size != depth or self.scale_f32.size != depth:
            return values_as_float

        res = np.empty(values_as_float.shape)
        for i in range(depth):
            res[i] = (values_as_float[i] - self.zero_point[i]) * self.scale_f32[i]
        return res
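
# Example (illustrative, not part of the original source): with NumPy scalar
# parameters scale_f32 = np.float32(0.5) and zero_point = np.int32(128), as the
# method's use of .size assumes, dequantize(np.array([128, 130, 132])) yields
# array([0., 1., 2.]), i.e. the affine mapping real = (quantized - zero_point) * scale.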


class Tensor:
    __slots__ = (
        "shape",
        "storage_shape",
        "bandwidth_shape",
        "dtype",
        "name",
        "ops",
        "consumer_list",
        "values",
        "quant_values",
        "compressed_values",
        "mem_area",
        "format",
        "purpose",
        "sub_purpose",
        "alignment",
        "weight_transpose_depthwise",
        "storage_compression_scale",
        "bandwidth_compression_scale",
        "compression_scale_for_worst_weight_stream",
        "weight_compression_scales",
        "weight_compression_config",
        "storage_rounding_quantum",
        "brick_size",
        "address",
        "quantization",
        "weight_compressed_offsets",
        "element_size_bytes",
        "reshaped",
        "block_traversal",
        "offset",
        "cpu_tensor",
        "npu_tensor",
        "equivalence_id",
    )
    AllocationQuantum = 16

    def __init__(self, shape, dtype, name):
        self.shape = shape
        self.storage_shape = shape
        self.bandwidth_shape = shape
        self.dtype = dtype
        self.name = name
        self.equivalence_id = uuid.uuid4()

        self.ops = []
        self.consumer_list = []
        # the following attributes are only set when a tensor has been cloned,
        # either from Cpu -> Npu or vice versa; needed for offline allocation
        self.cpu_tensor = None  # reference to the corresponding Cpu tensor
        self.npu_tensor = None  # reference to the corresponding Npu tensor

        self.values = None
        self.quant_values = None
        self.compressed_values = None
        self.mem_area = MemArea.Unknown
        self.format = TensorFormat.Unknown
        self.purpose = TensorPurpose.Unknown
        self.sub_purpose = TensorSubPurpose.Standard
        self.alignment = Tensor.AllocationQuantum
        self.weight_transpose_depthwise = False

        self.storage_compression_scale = 1.0
        self.bandwidth_compression_scale = 1.0
        self.compression_scale_for_worst_weight_stream = 1.0
        self.weight_compression_scales = None
        self.weight_compression_config = None
        self.weight_compressed_offsets = []
        self.storage_rounding_quantum = (1, 1, 1, 1)
        self.brick_size = (1, 1, 1, 1)
        self.address = 0  # start address of the tensor; will be filled in by the tensor allocator
        self.element_size_bytes = 0

        # quantization parameters
        self.quantization = None

        self.reshaped = False
        self.block_traversal = TensorBlockTraversal.Default

    def element_size(self):
        if self.element_size_bytes == 0:
            return self.dtype.size_in_bits() / 8
        return self.element_size_bytes

    def clone(self, suffix="_clone"):
        res = Tensor(self.shape, self.dtype, self.name + suffix)
        res.storage_shape = list(self.storage_shape)
        res.bandwidth_shape = list(self.bandwidth_shape)

        res.ops = []
        res.consumer_list = []
        res.equivalence_id = self.equivalence_id

        res.values = self.values
        res.quant_values = self.quant_values
        res.compressed_values = self.compressed_values
        res.mem_area = self.mem_area
        res.format = self.format
        res.purpose = self.purpose
        res.sub_purpose = self.sub_purpose
        res.alignment = self.alignment
        res.weight_transpose_depthwise = self.weight_transpose_depthwise

        res.storage_compression_scale = self.storage_compression_scale
        res.bandwidth_compression_scale = self.bandwidth_compression_scale
        res.compression_scale_for_worst_weight_stream = self.compression_scale_for_worst_weight_stream
        res.weight_compression_scales = self.weight_compression_scales
        res.storage_rounding_quantum = self.storage_rounding_quantum
        res.brick_size = self.brick_size
        res.address = 0

        if self.quantization is not None:
            res.quantization = self.quantization.clone()
        else:
            res.quantization = None

        return res

    def clone_into_fast_storage(self, arch):
        res = self.clone(suffix="_fast_storage")
        res.mem_area = arch.fast_storage_mem_area
        return res

    def set_format(self, fmt, arch):
        self.format = fmt
        shape_len = 0
        try:
            shape_len = len(self.shape)
        except TypeError:
            pass

        self.storage_rounding_quantum = arch.storage_rounding_quantums[self.format]
        self.storage_rounding_quantum = self.storage_rounding_quantum[-shape_len:]
        if self.format == TensorFormat.NHCWB16:
            self.storage_rounding_quantum = self.storage_rounding_quantum[:-1] + (
                int(self.storage_rounding_quantum[-1] / self.dtype.size_in_bytes()),
            )
        self.brick_size = arch.brick_sizes[self.format]
        self.brick_size = self.brick_size[-shape_len:]
        if self.shape is None:
            return

        self.bandwidth_shape = shape_round_to_quantum(self.shape, self.brick_size)
        self.storage_shape = shape_round_to_quantum(self.shape, self.storage_rounding_quantum)

        if fmt == TensorFormat.WeightsCompressed:
            compression_ratio = 5 / 8
            self.storage_compression_scale = compression_ratio
            self.bandwidth_compression_scale = compression_ratio
            self.compression_scale_for_worst_weight_stream = compression_ratio
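
    # Example (illustrative, not part of the original source): assuming the
    # architecture's storage rounding quantum for NHCWB16 is (1, 1, 1, 16)
    # bytes, an int8 tensor of shape [1, 7, 7, 20] has its channel axis rounded
    # up to a multiple of 16 elements after set_format(TensorFormat.NHCWB16, arch):
    #   storage_shape -> [1, 7, 7, 32]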

    def storage_elements(self):
        elems = shape_num_elements(self.storage_shape)
        if elems is None:
            return 0
        return elems

    def elements(self):
        elems = shape_num_elements(self.shape)
        if elems is None:
            return 0
        return elems

    def has_fully_defined_shape(self):
        return shape_fully_defined(self.shape)

    def storage_size(self):
        raw_size = self.storage_elements() * self.element_size()
        if raw_size == 0:
            raw_size = 1  # force it to take up space
        rounded_size = numeric_util.round_up(numeric_util.round_up_to_int(raw_size), self.alignment)
        return rounded_size
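
    # Worked example (illustrative, not part of the original source): an int8
    # feature map with storage_shape [1, 8, 8, 20] has 1280 storage elements of
    # 1 byte each; with the default alignment of Tensor.AllocationQuantum (16),
    # storage_size() returns round_up(1280, 16) = 1280 bytes.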

    def storage_size_for_sub_purpose(self, sub_purpose, param_a=None, param_b=None):
        alt_shape = self.storage_shape_for_sub_purpose(sub_purpose, param_a, param_b)
        elems = shape_num_elements(alt_shape)
        if elems is None:
            return 0
        if sub_purpose == TensorSubPurpose.DoubleBuffer:
            raw_size = elems * self.element_size() * self.compression_scale_for_worst_weight_stream
        else:
            raw_size = elems * self.element_size() * self.storage_compression_scale
        rounded_size = numeric_util.round_up(numeric_util.round_up_to_int(raw_size), self.alignment)
        return rounded_size

    def storage_shape_for_sub_purpose(self, sub_purpose, param_a, param_b):
        shp = list(self.storage_shape)
        if sub_purpose == TensorSubPurpose.DoubleBuffer:
            assert len(shp) >= 2
            shp[-1] = min(shp[-1], param_a * 2)
        elif sub_purpose == TensorSubPurpose.RollingBufferX:
            assert len(shp) == 4
            shp[0] = 1
            shp[2] = min(shp[2], param_a)
        elif sub_purpose == TensorSubPurpose.RollingBufferY:
            assert len(shp) == 4
            shp[0] = 1
            shp[1] = min(shp[1], param_a)
        elif sub_purpose == TensorSubPurpose.RollingBufferXY:
            assert len(shp) == 4
            shp[0] = 1
            shp[2] = min(shp[2], param_a)
            shp[1] = min(shp[1], param_b)
        elif sub_purpose == TensorSubPurpose.Standard:
            pass
        else:
            assert 0, "did not expect new sub purpose %s" % (sub_purpose,)
        return shp

    def set_new_sub_purpose(self, sub_purpose, param_a=None, param_b=None):
        self.storage_shape = self.storage_shape_for_sub_purpose(sub_purpose, param_a, param_b)
        self.sub_purpose = sub_purpose
        if sub_purpose == TensorSubPurpose.DoubleBuffer:
            self.storage_compression_scale = self.compression_scale_for_worst_weight_stream
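
    # Example (illustrative, not part of the original source): for a tensor
    # with storage_shape [1, 32, 32, 16] (NHWC), calling set_new_sub_purpose
    # with TensorSubPurpose.RollingBufferY and param_a=4 shrinks the buffer to
    # a 4-row rolling window:
    #   storage_shape -> [1, 4, 32, 16]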

    def bandwidth(self):
        elems = shape_num_elements(self.bandwidth_shape)
        if elems is None:
            return 0
        return elems * self.element_size() * self.bandwidth_compression_scale

    def consumers(self):
        return self.consumer_list

    def get_address_ranges_for_coordinates(self, start_coord, end_coord):
        if self.sub_purpose in set(
            (TensorSubPurpose.RollingBufferX, TensorSubPurpose.RollingBufferY, TensorSubPurpose.RollingBufferXY)
        ):
            # build dummy coordinates that cover the entire buffer
            start_coord = [0] * len(start_coord)
            end_coord = [min(self.storage_shape[i], self.shape[i]) for i in range(len(end_coord))]

        start = self.address_for_coordinate(start_coord, is_top_box=False)
        end = self.address_for_coordinate(end_coord, is_top_box=True)
        return MemoryRangeSet(self.mem_area, start, end)

    def addresses_for_rolling_buffer(self, start_coord, end_coord):
        # returns (box_height0, box_height1, box_width, [address_tl, address_tr, address_bl, address_br])

        if len(start_coord) < 4:
            box_height0 = 1
            box_width = 1

            if len(start_coord) >= 2:
                box_width = end_coord[-2] - start_coord[-2]

            return box_height0, box_height0, box_width, [self.address_for_coordinate(start_coord), None, None, None]

        crossing_y = numeric_util.round_up(start_coord[1] + 1, self.storage_shape[1])
        crossing_x = numeric_util.round_up(start_coord[2] + 1, self.storage_shape[2])

        crossing_y = min(crossing_y, end_coord[1])
        crossing_x = min(crossing_x, end_coord[2])

        box_height0 = crossing_y - start_coord[1]
        box_width = crossing_x - start_coord[2]

        addresses = [None] * 4
        addresses[0] = self.address_for_coordinate(start_coord)

        if end_coord[2] > crossing_x:
            addresses[1] = self.address_for_coordinate([start_coord[0], start_coord[1], crossing_x, start_coord[3]])
            raise Exception("Striping in vertical direction is not supported")
        if end_coord[1] > crossing_y:
            addresses[2] = self.address_for_coordinate([start_coord[0], crossing_y, start_coord[2], start_coord[3]])
        if end_coord[1] > crossing_y and end_coord[2] > crossing_x:
            addresses[3] = self.address_for_coordinate([start_coord[0], crossing_y, crossing_x, start_coord[3]])

        return box_height0, box_height0, box_width, addresses
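
    # Worked example (illustrative, not part of the original source): with
    # storage_shape [1, 4, 8, 16] and a box from start_coord [0, 2, 0, 0] to
    # end_coord [0, 6, 8, 16], the rolling buffer wraps in y at
    # crossing_y = min(round_up(3, 4), 6) = 4, so box_height0 = 4 - 2 = 2,
    # box_width = 8, and addresses[2] holds the address of the wrapped
    # bottom-left part of the box.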

    def address_for_coordinate(self, coord, is_top_box=False):
        return self.address + self.address_offset_for_coordinate(coord, is_top_box)

    def get_strides_and_coord(self, coord=None):
        if coord is None:
            coord = [0] * len(self.storage_shape)

        augmented_coord = coord
        augmented_shape = self.storage_shape
        while len(augmented_shape) < 4:
            augmented_shape = [1] + augmented_shape

        while len(augmented_coord) < 4:
            augmented_coord = [0] + augmented_coord

        assert len(augmented_coord) == len(augmented_shape)

        if self.format == TensorFormat.NHWC:
            augmented_shape = [augmented_shape[0], augmented_shape[3]] + augmented_shape[1:3] + [1]
            augmented_coord = [augmented_coord[0], augmented_coord[3]] + augmented_coord[1:3] + [0]
            stride_order = [4, 1, 3, 2, 0]

        elif self.format == TensorFormat.NHCWB16:
            channel_divisor = int(16 / self.element_size())
            augmented_shape = augmented_shape[0:4] + [1]
            augmented_coord = (
                [augmented_coord[0], augmented_coord[3] // channel_divisor]
                + augmented_coord[1:3]
                + [augmented_coord[3] % channel_divisor]
            )

            if augmented_shape[1] == 0:
                augmented_shape[1] = 1

        else:
            assert self.format in set((TensorFormat.Unknown, TensorFormat.WeightsCompressed))
            return None, None

        strides = [0] * len(augmented_shape)
        stride = self.element_size() * self.storage_compression_scale

        if self.format != TensorFormat.NHCWB16:
            for i in stride_order:
                strides[i] = stride
                stride *= augmented_shape[i]
        else:
            assert len(strides) == 5
            channel_divisor = int(16 / self.element_size())
            strides[4] = stride
            strides[3] = channel_divisor  # STRIDE_X
            strides[1] = strides[3] * augmented_shape[2]  # STRIDE_C
            strides[2] = augmented_shape[2] * augmented_shape[3]  # STRIDE_Y
            strides[0] = strides[2] * augmented_shape[1]  # STRIDE_N

        return strides, augmented_coord
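
    # Worked example (illustrative, not part of the original source): for an
    # int8 NHWC tensor with storage_shape [1, 8, 8, 16], the strides over the
    # augmented [N, C, H, W, x] order come out as [1024, 1, 128, 16, 1] bytes:
    # one channel step is 1 byte, one x step is 16 bytes, one y step is
    # 128 bytes, and one batch is 1024 bytes.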

    def get_strides(self):
        strides, _ = self.get_strides_and_coord()
        return strides

    def compressed_stream_index_from_coord(self, coord):
        assert self.format == TensorFormat.WeightsCompressed
        assert len(self.compressed_values) > 0
        assert len(self.compressed_values) + 1 == len(self.weight_compressed_offsets)

        depth = coord[-1]
        brick_depth = self.brick_size[-1]
        # clamp position at the final element index
        if depth > self.shape[-1]:
            depth = self.shape[-1]

        # always round up to the next boundary
        index = round_up_divide(depth, brick_depth)

        # check boundaries on all but the last weight set (which may be
        # shorter than the brick it was divided up into)
        if index < len(self.weight_compressed_offsets) - 1:
            # there are no half-way points in the weights
            if (depth % brick_depth) != 0:
                raise Exception("Offset into weights must be aligned to a brick")

        return index
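
    # Example (illustrative, not part of the original source): with a brick
    # depth of 16 (brick_size[-1]), the stream index is the depth coordinate in
    # whole bricks, rounded up: a depth coordinate of 32 maps to stream index
    # round_up_divide(32, 16) = 2, while an unaligned depth such as 24 raises
    # unless it falls in the final, possibly short, weight set.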

    def size_of_compressed_stream(self, index):
        assert 0 <= index < len(self.compressed_values)
        return len(self.compressed_values[index])

    def is_last_index_in_compressed_stream(self, index):
        assert 0 <= index < len(self.compressed_values)
        return index == len(self.compressed_values) - 1

    def address_offset_for_coordinate(self, orig_coord, is_top_box=False):
        address_offset = 0
        coord = orig_coord

        coord = coord[-len(self.storage_shape) :]

        if self.sub_purpose == TensorSubPurpose.Standard:
            for idx, c in enumerate(coord):
                if is_top_box:
                    assert c > 0 and c <= self.shape[idx]
                else:
                    assert c >= 0 and c < self.shape[idx]

        if self.format == TensorFormat.WeightsCompressed:
            if len(self.weight_compressed_offsets) == 0:
                return 0

            if len(self.ops) == 1 and self.ops[0].type == "DMA" and self.sub_purpose == TensorSubPurpose.DoubleBuffer:
                depth = orig_coord[-1]
                brick_depth = self.brick_size[-1]
                # clamp position at the final element index
                if depth > self.shape[-1]:
                    depth = self.shape[-1]

                # always round up to the next boundary
                index = round_up_divide(depth, brick_depth)
                index = index % 2

                if len(self.compressed_values) <= 2:
                    if is_top_box and index == 0:
                        for cv in self.compressed_values:
                            address_offset += len(cv)
                    else:
                        address_offset = index * len(self.compressed_values[0])
                else:
                    if is_top_box and index == 0:
                        address_offset = self.storage_shape[-1]
                    else:
                        address_offset = index * (self.storage_shape[-1] // 2)
            else:
                index = self.compressed_stream_index_from_coord(orig_coord)
                assert index < len(self.weight_compressed_offsets)
                address_offset = self.weight_compressed_offsets[index]
        else:
            if is_top_box:
                coord = [c - 1 for c in coord]

            # handle wraparound for partial buffers; make sure to do this
            # after subtracting the top box:
            coord = [c % self.storage_shape[idx] for idx, c in enumerate(coord)]

            strides, augmented_coord = self.get_strides_and_coord(coord)
            if strides is None:
                return None

            if is_top_box:
                address_offset += 1 * strides[-1]  # one element

            address_offset += np.dot(augmented_coord, strides)

        assert address_offset >= 0
        assert address_offset <= self.storage_size()
        return address_offset
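
    # Worked example (illustrative, not part of the original source): for the
    # int8 NHWC tensor above with strides [1024, 1, 128, 16, 1], the coordinate
    # [0, 1, 2, 3] is augmented to [0, 3, 1, 2, 0], so the offset is the dot
    # product 3 * 1 + 1 * 128 + 2 * 16 = 163 bytes from the start of the tensor.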

    def __str__(self):
        return "<nng.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.shape, self.dtype)

    __repr__ = __str__