Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 1 | # Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved. |
| 2 | # |
| 3 | # SPDX-License-Identifier: Apache-2.0 |
| 4 | # |
| 5 | # Licensed under the Apache License, Version 2.0 (the License); you may |
| 6 | # not use this file except in compliance with the License. |
| 7 | # You may obtain a copy of the License at |
| 8 | # |
| 9 | # www.apache.org/licenses/LICENSE-2.0 |
| 10 | # |
| 11 | # Unless required by applicable law or agreed to in writing, software |
| 12 | # distributed under the License is distributed on an AS IS BASIS, WITHOUT |
| 13 | # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. |
| 14 | # See the License for the specific language governing permissions and |
| 15 | # limitations under the License. |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 16 | # Description: |
| 17 | # Internal representation of a Neural Network Tensor. |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 18 | import enum |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 19 | import uuid |
Jacob Bohlin | 1a66697 | 2020-09-11 10:04:15 +0200 | [diff] [blame] | 20 | from collections import defaultdict |
Louis Verhaard | 9db529a | 2020-09-23 10:27:11 +0200 | [diff] [blame] | 21 | from functools import lru_cache |
Diego Russo | ea6111a | 2020-04-14 18:41:58 +0100 | [diff] [blame] | 22 | |
| 23 | import numpy as np |
| 24 | |
| 25 | from . import numeric_util |
Tim Hall | 9358296 | 2020-09-09 21:58:15 +0100 | [diff] [blame] | 26 | from .data_type import BaseType |
Michael McGeagh | 5778ffd | 2020-08-06 17:31:02 +0100 | [diff] [blame] | 27 | from .data_type import DataType |
Dwight Lidman | a9390f7 | 2020-05-13 12:00:08 +0200 | [diff] [blame] | 28 | from .ethos_u55_regs.ethos_u55_regs import resampling_mode |
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 29 | from .operation import Op |
Michael McGeagh | 5778ffd | 2020-08-06 17:31:02 +0100 | [diff] [blame] | 30 | from .operation import Operation |
Diego Russo | e8a1045 | 2020-04-21 17:39:10 +0100 | [diff] [blame] | 31 | from .range_set import MemoryRangeSet |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 32 | |
| 33 | |
class MemType(enum.IntFlag):
    """Memory type a tensor is allocated in (NPU/CPU permanent, scratch)."""

    Unknown = 0
    Permanent_NPU = 1
    Permanent_CPU = 2
    Scratch = 3
    Scratch_fast = 4
    Size = Scratch_fast + 1

    def display_name(self):
        """Human-readable name, indexed by the member's value."""
        names = ("Unknown", "Permanent_NPU", "Permanent_CPU", "Scratch", "Scratch_fast", "Size")
        return names[self.value]

    def identifier_name(self):
        """Lower-case identifier-style name, indexed by the member's value."""
        names = ("unknown", "permanent_npu", "permanent_cpu", "scratch", "scratch_fast", "size")
        return names[self.value]

    def all():
        # no self/cls on purpose - invoked as MemType.all()
        return (MemType.Permanent_NPU, MemType.Permanent_CPU, MemType.Scratch, MemType.Scratch_fast)

    def __str__(self):
        return self.name
| 53 | |
| 54 | |
class MemArea(enum.IntFlag):
    """Physical memory area a tensor lives in."""

    Unknown = 0
    Sram = 1
    Dram = 2
    OnChipFlash = 3
    OffChipFlash = 4
    Shram = 5  # for LUT
    Size = Shram + 1

    def display_name(self):
        """Human-readable name, indexed by the member's value."""
        names = ("Unknown", "SRAM", "DRAM", "On-chip Flash", "Off-chip Flash", "SHRAM", "Size")
        return names[self.value]

    def identifier_name(self):
        """Lower-case identifier-style name, indexed by the member's value."""
        names = ("unknown", "sram", "dram", "on_chip_flash", "off_chip_flash", "shram", "size")
        return names[self.value]

    def all():
        # no self/cls on purpose - invoked as MemArea.all()
        return (MemArea.Sram, MemArea.Dram, MemArea.OnChipFlash, MemArea.OffChipFlash, MemArea.Shram)

    def __str__(self):
        return self.name
| 75 | |
| 76 | |
class TensorPurpose(enum.IntFlag):
    """Role the tensor plays in the network (weights, feature map, ...)."""

    Unknown = 0
    Weights = 1
    FeatureMap = 2
    Scratch = 3
    LUT = 4
    Size = 5

    def display_name(self):
        """Human-readable name, indexed by the member's value."""
        names = ("Unknown", "Weights", "FeatureMap", "Scratch", "LUT", "Size")
        return names[self.value]

    def identifier_name(self):
        """Lower-case identifier-style name, indexed by the member's value."""
        names = ("unknown", "weights", "feature_map", "scratch", "lut", "size")
        return names[self.value]

    def all():
        # no self/cls on purpose - invoked as TensorPurpose.all()
        # only Weights and FeatureMap are listed here
        return (TensorPurpose.Weights, TensorPurpose.FeatureMap)
| 93 | |
| 94 | |
class TensorSubPurpose(enum.Enum):
    """Refinement of a tensor's purpose, mainly for buffering schemes."""

    Standard = 0
    DoubleBuffer = 1
    RollingBufferX = 2
    RollingBufferY = 3
    RollingBufferXY = 4

    def display_name(self):
        """Human-readable name, indexed by the member's value."""
        names = ("Standard", "Double Buffer", "Rolling Buffer X", "Rolling Buffer Y", "Rolling Buffer XY")
        return names[self.value]

    def identifier_name(self):
        """Lower-case identifier-style name, indexed by the member's value."""
        names = ("standard", "double_buffer", "rolling_buffer_x", "rolling_buffer_y", "rolling_buffer_xy")
        return names[self.value]

    def all():
        # no self/cls on purpose - invoked as TensorSubPurpose.all()
        return (
            TensorSubPurpose.Standard,
            TensorSubPurpose.DoubleBuffer,
            TensorSubPurpose.RollingBufferX,
            TensorSubPurpose.RollingBufferY,
            TensorSubPurpose.RollingBufferXY,
        )
| 116 | |
| 117 | |
class TensorFormat(enum.Flag):
    """In-memory data layout of a tensor."""

    Unknown = 0
    WeightsCompressed = 1
    NHWC = 2
    NHCWB16 = 3  # channel-bricked layout (16 channels per brick); see get_strides_and_coord

    def __str__(self):
        return self.name
| 126 | |
| 127 | |
class TensorBlockTraversal(enum.Enum):
    """Traversal order for weight blocks (used for compressed weights)."""

    Default = 0
    DepthWise = 1
    DepthFirst = 2
    PartKernelFirst = 3
| 133 | |
| 134 | |
def shape_num_elements(shp):
    """Product of all dimensions of shp; None if the shape or any dim is undefined."""
    if shp is None or any(dim is None for dim in shp):
        return None
    total = 1
    for dim in shp:
        total *= dim
    return total
| 144 | |
| 145 | |
def shape_fully_defined(shp):
    """True if shp is a shape with no undefined (None) dimensions."""
    return shp is not None and all(dim is not None for dim in shp)
| 153 | |
| 154 | |
def shape_round_to_quantum(shp, quantum):
    """Round every defined dimension of shp up to the matching quantum entry.

    Dimensions are matched from the back, because quantum may contain more
    entries than the shape has dimensions.
    """
    rounded = list(shp)
    for i in range(1, len(rounded) + 1):
        if rounded[-i] is not None:
            rounded[-i] = numeric_util.round_up(rounded[-i], quantum[-i])
    return rounded
| 163 | |
| 164 | |
@lru_cache(maxsize=None)
def create_equivalence_id(key):
    # Generates equivalence_id based on the given key.
    # The unbounded lru_cache makes this deterministic per process: the same
    # key always yields the same uuid, while distinct keys get distinct uuids.
    return uuid.uuid4()
| 169 | |
| 170 | |
class QuantizationParameters:
    """Quantization parameters (scale/zero point and original float range) for a tensor."""

    __slots__ = "min", "max", "num_bits", "narrow_range", "scale_f32", "zero_point", "quant_min", "quant_max"

    def __init__(self, min=None, max=None, num_bits=None, narrow_range=None):
        # original floating point range
        self.min = min
        self.max = max

        self.num_bits = num_bits
        self.narrow_range = narrow_range

        # scale/zero point and quantized range are filled in later
        self.scale_f32 = None
        self.zero_point = None
        self.quant_min = None
        self.quant_max = None

    def __str__(self):
        return "<nng.QuantizationParameters min=%s max=%s, num_bits=%s, scale=%s, zero_point=%s>" % (
            self.min,
            self.max,
            self.num_bits,
            self.scale_f32,
            self.zero_point,
        )

    __repr__ = __str__

    def clone(self):
        """Return a field-by-field copy of these quantization parameters."""
        res = QuantizationParameters()
        res.min = self.min
        res.max = self.max

        res.num_bits = self.num_bits
        res.narrow_range = self.narrow_range

        res.scale_f32 = self.scale_f32
        res.zero_point = self.zero_point
        res.quant_min = self.quant_min
        res.quant_max = self.quant_max
        return res

    def dequantize(self, values):
        """Dequantize values using these parameters.

        Only per-tensor (scalar scale/zero point) dequantization is
        implemented; for per-channel scaling an uninitialized array of the
        correct shape is returned (see comment below).
        """
        if self.zero_point.size == 1 and self.scale_f32.size == 1:
            # same scale is used for all values
            return (values.astype(np.float64) - self.zero_point) * self.scale_f32

        # a different scale is used for different sets of values; per-axis
        # dequantization is not compatible with the format of depthwise
        # weights, where input is at index 3 (Output, Kh, Kw, Input), so
        # return an (uninitialized) array of the quantized values' shape.
        # (Unreachable per-axis dequantization code previously followed this
        # return and has been removed.)
        return np.ndarray(values.shape)

    def is_scaling_equal(self, other):
        # quantisation parameter scaling is not equal if 'other' is None because
        # it implies that the tensor it belongs to is not quantised. otherwise,
        # it depends upon whether the scale and zero point are equal

        if other is None:
            return False

        assert isinstance(other, QuantizationParameters)

        return self.scale_f32 == other.scale_f32 and self.zero_point == other.zero_point

    def is_valid(self):
        # quantisation parameters are considered valid if they have both a scale and a zero point
        return None not in (self.scale_f32, self.zero_point)
| 248 | |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 249 | |
def create_const_tensor(name, shape, dtype, values, value_dtype=None, purpose=TensorPurpose.Unknown, quantization=None):
    """Create a constant tensor, fed by a freshly created Const operation."""
    # the tensor itself
    tens = Tensor(shape, dtype, name + "_0")
    tens.purpose = purpose
    tens.quantization = quantization
    tens.values = np.array(values, dtype=value_dtype)
    tens.quant_values = np.frombuffer(tens.values.tobytes(), dtype=np.uint8)
    # the producing Const operation
    producer = Operation(Op.Const, name)
    producer.set_output_tensor(tens)
    return tens
| 261 | |
| 262 | |
def create_reshape_tensor(tens, shape, ifm_reshape=True):
    """Insert a Reshape between tens and a clone of it with the new shape.

    With ifm_reshape=True the clone is the reshape's output; otherwise the
    clone feeds the reshape and tens is its output. Returns the tensor on the
    far side of the reshape. If shape already matches, tens is returned as-is.
    """
    if shape == tens.shape:
        return tens
    name = tens.name + "_reshape"
    ifm = tens
    ofm = tens.clone("_reshaped")
    ofm.set_all_shapes(shape)
    if not ifm_reshape:
        ifm, ofm = ofm, ifm
    # wire up the Reshape operation and its constant shape input
    reshape_op = Operation(Op.Reshape, name)
    reshape_op.attrs["new_shape"] = shape
    reshape_op.add_input_tensor(ifm)
    reshape_op.add_input_tensor(create_const_tensor(name + "_shape", [1], DataType.int32, shape))
    reshape_op.set_output_tensor(ofm)
    return ofm if ifm_reshape else ifm
| 280 | |
| 281 | |
# class that keeps track of all tensor addresses in the different memory types
class TensorAddressMap:
    # dict (tens.equivalence_id -> dict (mem_type -> address))
    address_map = defaultdict(dict)

    @classmethod
    def get_address_for_tens(cls, tens_id, mem_type):
        """Return the recorded address for (tens_id, mem_type), or None."""
        return cls.address_map[tens_id].get(mem_type)

    @classmethod
    def set_address_for_tens(cls, tens_id, mem_type, address):
        """Record an address; a tensor may never get two different addresses."""
        per_mem_type = cls.address_map[tens_id]
        previous_address = per_mem_type.get(mem_type)
        if address is not None and previous_address is not None:
            assert previous_address == address, "Two different addresses cannot be assigned to the same tensor."
        per_mem_type[mem_type] = address
| 299 | |
| 300 | |
class Tensor:
    """Internal representation of a Neural Network Tensor."""

    __slots__ = (
        "shape",
        "storage_shape",  # shape rounded to the storage rounding quantum (see set_format)
        "bandwidth_shape",  # shape rounded to the brick size (see set_format)
        "dtype",
        "name",
        "ops",  # operations that produce this tensor
        "consumer_list",  # operations that consume this tensor
        "values",
        "quant_values",
        "compressed_values",
        "compressed_values_substream_offsets",
        "mem_area",
        "mem_type",
        "format",
        "purpose",
        "sub_purpose",
        "alignment",
        "weight_transpose_depthwise",
        "storage_compression_scale",
        "bandwidth_compression_scale",
        "compression_scale_for_worst_weight_stream",
        "weight_compression_scales",
        "weight_compression_config",  # tensors with the same config share compressed values
        "value_id",  # tensors with the same value_id have the same values
        "storage_rounding_quantum",
        "brick_size",
        "quantization",
        "weight_compressed_offsets",
        "element_size_bytes",
        "block_traversal",
        "equivalence_id",  # equivalent tensors share an address (see the address property)
        "resampling_mode",
        "avoid_NHCWB16",
    )
    # default address alignment, in bytes
    AllocationQuantum = 16
| 338 | |
    def __init__(self, shape, dtype, name):
        """Create a tensor; it starts disconnected (no producers/consumers)."""
        self.shape = shape
        self.storage_shape = shape
        self.bandwidth_shape = shape
        self.dtype = dtype
        self.name = name
        # fresh id; may later be shared with equivalent tensors
        self.equivalence_id = uuid.uuid4()

        self.ops = []
        self.consumer_list = []

        self.values = None
        self.quant_values = None
        self.compressed_values = None
        self.compressed_values_substream_offsets = None
        self.mem_area = MemArea.Unknown
        self.mem_type = MemType.Unknown
        self.format = TensorFormat.Unknown
        self.purpose = TensorPurpose.Unknown
        self.sub_purpose = TensorSubPurpose.Standard
        self.alignment = Tensor.AllocationQuantum
        self.weight_transpose_depthwise = False

        self.storage_compression_scale = 1.0
        self.bandwidth_compression_scale = 1.0
        self.compression_scale_for_worst_weight_stream = 1.0
        self.weight_compression_scales = None
        # if two tensors have the same weight_compression_config, then they have the same compressed values
        self.weight_compression_config = None
        # if two tensors have the same value_id, then they have the same values
        self.value_id = uuid.uuid4()
        self.weight_compressed_offsets = []
        self.storage_rounding_quantum = (1, 1, 1, 1)
        self.brick_size = (1, 1, 1, 1)
        self.element_size_bytes = 0  # 0 means: derive element size from dtype

        # quantization parameters
        self.quantization = None
        self.block_traversal = TensorBlockTraversal.Default
        self.resampling_mode = resampling_mode.NONE

        self.avoid_NHCWB16 = False
| 381 | |
    @property
    def address(self):
        # Addresses live centrally in TensorAddressMap, keyed by
        # (equivalence_id, mem_type), so equivalent tensors share an address.
        return TensorAddressMap.get_address_for_tens(self.equivalence_id, self.mem_type)

    @address.setter
    def address(self, address):
        TensorAddressMap.set_address_for_tens(self.equivalence_id, self.mem_type, address)
| 389 | |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 390 | def element_size(self): |
| 391 | if self.element_size_bytes == 0: |
| 392 | return self.dtype.size_in_bits() / 8 |
| 393 | return self.element_size_bytes |
| 394 | |
| 395 | def clone(self, suffix="_clone"): |
| 396 | res = Tensor(self.shape, self.dtype, self.name + suffix) |
| 397 | res.storage_shape = list(self.storage_shape) |
| 398 | res.bandwidth_shape = list(self.bandwidth_shape) |
| 399 | |
| 400 | res.ops = [] |
| 401 | res.consumer_list = [] |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 402 | |
| 403 | res.values = self.values |
| 404 | res.quant_values = self.quant_values |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 405 | res.mem_area = self.mem_area |
Patrik Gustavsson | eca2e95 | 2020-05-27 09:15:11 +0200 | [diff] [blame] | 406 | res.mem_type = self.mem_type |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 407 | res.format = self.format |
| 408 | res.purpose = self.purpose |
| 409 | res.sub_purpose = self.sub_purpose |
| 410 | res.alignment = self.alignment |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 411 | res.bandwidth_compression_scale = self.bandwidth_compression_scale |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 412 | res.storage_rounding_quantum = self.storage_rounding_quantum |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 413 | |
| 414 | if self.quantization is not None: |
| 415 | res.quantization = self.quantization.clone() |
| 416 | else: |
| 417 | res.quantization = None |
| 418 | |
Dwight Lidman | a9390f7 | 2020-05-13 12:00:08 +0200 | [diff] [blame] | 419 | res.resampling_mode = self.resampling_mode |
| 420 | |
Louis Verhaard | 3c07c97 | 2020-05-07 08:12:58 +0200 | [diff] [blame] | 421 | res.copy_compressed_weight_info(self) |
Patrik Gustavsson | 458a208 | 2020-08-13 13:41:05 +0200 | [diff] [blame] | 422 | res.avoid_NHCWB16 = self.avoid_NHCWB16 |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 423 | return res |
| 424 | |
| 425 | def clone_into_fast_storage(self, arch): |
| 426 | res = self.clone(suffix="_fast_storage") |
| 427 | res.mem_area = arch.fast_storage_mem_area |
Patrik Gustavsson | eca2e95 | 2020-05-27 09:15:11 +0200 | [diff] [blame] | 428 | res.mem_type = MemType.Scratch_fast |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 429 | return res |
| 430 | |
Louis Verhaard | 3c07c97 | 2020-05-07 08:12:58 +0200 | [diff] [blame] | 431 | def copy_compressed_weight_info(self, src_tens): |
| 432 | # Copies compressed values + all related weight compression info from the given tensor |
Louis Verhaard | 9db529a | 2020-09-23 10:27:11 +0200 | [diff] [blame] | 433 | self.equivalence_id = src_tens.equivalence_id |
Louis Verhaard | 3c07c97 | 2020-05-07 08:12:58 +0200 | [diff] [blame] | 434 | self.compressed_values = src_tens.compressed_values |
Tim Hall | f7e810a | 2020-06-25 15:04:31 +0100 | [diff] [blame] | 435 | self.compressed_values_substream_offsets = src_tens.compressed_values_substream_offsets |
Louis Verhaard | 3c07c97 | 2020-05-07 08:12:58 +0200 | [diff] [blame] | 436 | self.storage_shape = src_tens.storage_shape |
| 437 | self.brick_size = src_tens.brick_size |
| 438 | self.weight_compression_scales = src_tens.weight_compression_scales |
| 439 | self.weight_compressed_offsets = src_tens.weight_compressed_offsets |
| 440 | self.weight_transpose_depthwise = src_tens.weight_transpose_depthwise |
| 441 | self.compression_scale_for_worst_weight_stream = src_tens.compression_scale_for_worst_weight_stream |
| 442 | self.storage_compression_scale = src_tens.storage_compression_scale |
Diqing Zhong | 7e1d1d1 | 2020-10-30 15:10:46 +0100 | [diff] [blame] | 443 | self.bandwidth_compression_scale = src_tens.bandwidth_compression_scale |
Louis Verhaard | 3c07c97 | 2020-05-07 08:12:58 +0200 | [diff] [blame] | 444 | self.block_traversal = src_tens.block_traversal |
| 445 | self.weight_compression_config = src_tens.weight_compression_config |
Louis Verhaard | 9db529a | 2020-09-23 10:27:11 +0200 | [diff] [blame] | 446 | self.value_id = src_tens.value_id |
Louis Verhaard | 3c07c97 | 2020-05-07 08:12:58 +0200 | [diff] [blame] | 447 | |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 448 | def set_format(self, fmt, arch): |
| 449 | self.format = fmt |
| 450 | shape_len = 0 |
| 451 | try: |
| 452 | shape_len = len(self.shape) |
| 453 | except TypeError: |
| 454 | pass |
| 455 | |
| 456 | self.storage_rounding_quantum = arch.storage_rounding_quantums[self.format] |
| 457 | self.storage_rounding_quantum = self.storage_rounding_quantum[-shape_len:] |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 458 | self.brick_size = arch.brick_sizes[self.format] |
| 459 | self.brick_size = self.brick_size[-shape_len:] |
| 460 | if self.shape is None: |
| 461 | return |
| 462 | |
| 463 | self.bandwidth_shape = shape_round_to_quantum(self.shape, self.brick_size) |
| 464 | self.storage_shape = shape_round_to_quantum(self.shape, self.storage_rounding_quantum) |
| 465 | |
| 466 | if fmt == TensorFormat.WeightsCompressed: |
| 467 | compression_ratio = 5 / 8 |
| 468 | self.storage_compression_scale = compression_ratio |
| 469 | self.bandwidth_compression_scale = compression_ratio |
| 470 | self.compression_scale_for_worst_weight_stream = compression_ratio |
| 471 | |
| 472 | def storage_elements(self): |
| 473 | elems = shape_num_elements(self.storage_shape) |
| 474 | if elems is None: |
| 475 | return 0 |
| 476 | return elems |
| 477 | |
| 478 | def elements(self): |
| 479 | elems = shape_num_elements(self.shape) |
| 480 | if elems is None: |
| 481 | return 0 |
| 482 | return elems |
| 483 | |
    def has_fully_defined_shape(self):
        # True if the shape is known and contains no None dimensions.
        return shape_fully_defined(self.shape)
| 486 | |
Patrik Gustavsson | 90831bc | 2020-08-24 16:26:11 +0200 | [diff] [blame] | 487 | def storage_size(self, scale=1.0): |
| 488 | raw_size = self.storage_elements() * self.element_size() * scale |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 489 | if raw_size == 0: |
| 490 | raw_size = 1 # force it to take up space |
| 491 | rounded_size = numeric_util.round_up(numeric_util.round_up_to_int(raw_size), self.alignment) |
| 492 | return rounded_size |
| 493 | |
    def storage_size_for_sub_purpose(self, arch, sub_purpose, param_a=None, param_b=None):
        """Storage size in bytes this tensor would need under the given sub purpose.

        param_a/param_b size the buffer (see storage_shape_for_sub_purpose).
        Returns 0 if the resulting shape is not fully defined.
        """
        alt_shape = self.storage_shape_for_sub_purpose(sub_purpose, param_a, param_b)
        elems = shape_num_elements(alt_shape)
        if elems is None:
            return 0
        if sub_purpose == TensorSubPurpose.DoubleBuffer:
            # double-buffered weights: scale by the worst-stream compression
            # estimate and the architecture's weight estimation factor
            raw_size = (
                elems
                * self.element_size()
                * self.compression_scale_for_worst_weight_stream
                * arch.weight_estimation_scaling
            )
        else:
            # Rolling buffers are used for intermediate data in ifm streaming
            # These will all use the NHCWB16 format, and need to be aligned to 16 in the C-dimension
            if alt_shape[-1] % 16 != 0:
                nhcwb16_shape = alt_shape[0:-1] + [numeric_util.round_up(alt_shape[-1], 16)]
                elems = shape_num_elements(nhcwb16_shape)

            raw_size = elems * self.element_size() * self.storage_compression_scale
        rounded_size = numeric_util.round_up(numeric_util.round_up_to_int(raw_size), self.alignment)
        return rounded_size
| 516 | |
| 517 | def storage_shape_for_sub_purpose(self, sub_purpose, param_a, param_b): |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 518 | if sub_purpose == TensorSubPurpose.DoubleBuffer: |
Jacob Bohlin | e843d33 | 2020-06-23 12:12:56 +0200 | [diff] [blame] | 519 | shp = list(self.shape) |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 520 | assert len(shp) >= 2 |
| 521 | shp[-1] = min(shp[-1], param_a * 2) |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 522 | else: |
Jacob Bohlin | e843d33 | 2020-06-23 12:12:56 +0200 | [diff] [blame] | 523 | shp = list(self.storage_shape) |
| 524 | if sub_purpose == TensorSubPurpose.RollingBufferX: |
| 525 | assert len(shp) == 4 |
| 526 | shp[0] = 1 |
| 527 | shp[2] = min(shp[2], param_a) |
| 528 | elif sub_purpose == TensorSubPurpose.RollingBufferY: |
| 529 | assert len(shp) == 4 |
| 530 | shp[0] = 1 |
| 531 | shp[1] = min(shp[1], param_a) |
| 532 | elif sub_purpose == TensorSubPurpose.RollingBufferXY: |
| 533 | assert len(shp) == 4 |
| 534 | shp[0] = 1 |
| 535 | shp[2] = min(shp[2], param_a) |
| 536 | shp[1] = min(shp[1], param_b) |
| 537 | elif sub_purpose == TensorSubPurpose.Standard: |
| 538 | pass |
| 539 | else: |
| 540 | assert 0, "did not expect new sub purpose %s" % (sub_purpose,) |
| 541 | |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 542 | return shp |
| 543 | |
| 544 | def set_new_sub_purpose(self, sub_purpose, param_a=None, param_b=None): |
| 545 | self.storage_shape = self.storage_shape_for_sub_purpose(sub_purpose, param_a, param_b) |
| 546 | self.sub_purpose = sub_purpose |
| 547 | if sub_purpose == TensorSubPurpose.DoubleBuffer: |
| 548 | self.storage_compression_scale = self.compression_scale_for_worst_weight_stream |
| 549 | |
| 550 | def bandwidth(self): |
| 551 | elems = shape_num_elements(self.bandwidth_shape) |
| 552 | if elems is None: |
| 553 | return 0 |
| 554 | return elems * self.element_size() * self.bandwidth_compression_scale |
| 555 | |
    def consumers(self):
        # Operations that take this tensor as an input.
        return self.consumer_list
| 558 | |
| 559 | def get_address_ranges_for_coordinates(self, start_coord, end_coord): |
| 560 | if self.sub_purpose in set( |
| 561 | (TensorSubPurpose.RollingBufferX, TensorSubPurpose.RollingBufferY, TensorSubPurpose.RollingBufferXY) |
| 562 | ): |
| 563 | # build dummy coordinates that cover the entire buffer |
| 564 | start_coord = [0] * len(start_coord) |
| 565 | end_coord = [min(self.storage_shape[i], self.shape[i]) for i in range(len(end_coord))] |
| 566 | |
| 567 | start = self.address_for_coordinate(start_coord, is_top_box=False) |
| 568 | end = self.address_for_coordinate(end_coord, is_top_box=True) |
| 569 | return MemoryRangeSet(self.mem_area, start, end) |
| 570 | |
    def addresses_for_rolling_buffer(self, start_coord, end_coord):
        # returns ( box_height0, box_height1, box_width, [address_tl, address_tr, address_bl, address_br] )
        # NOTE(review): box_height0 is returned for both height fields even
        # though the comment names a box_height1 - confirm this is intended.

        if len(start_coord) < 4:
            # low-rank tensors cannot wrap; a single address suffices
            box_height0 = 1
            box_width = 1

            if len(start_coord) >= 2:
                box_width = end_coord[-2] - start_coord[-2]

            return box_height0, box_height0, box_width, [self.address_for_coordinate(start_coord), None, None, None]

        # first Y/X coordinates at which the box wraps around the buffer
        crossing_y = numeric_util.round_up(start_coord[1] + 1, self.storage_shape[1])
        crossing_x = numeric_util.round_up(start_coord[2] + 1, self.storage_shape[2])

        crossing_y = min(crossing_y, end_coord[1])
        crossing_x = min(crossing_x, end_coord[2])

        box_height0 = crossing_y - start_coord[1]
        box_width = crossing_x - start_coord[2]

        # addresses of the up-to-four sub-boxes: [top-left, top-right, bottom-left, bottom-right]
        addresses = [None] * 4
        addresses[0] = self.address_for_coordinate(start_coord)

        if end_coord[2] > crossing_x:
            addresses[1] = self.address_for_coordinate([start_coord[0], start_coord[1], crossing_x, start_coord[3]])
            raise Exception("Striping in vertical direction is not supported")
        if end_coord[1] > crossing_y:
            addresses[2] = self.address_for_coordinate([start_coord[0], crossing_y, start_coord[2], start_coord[3]])
        if end_coord[1] > crossing_y and end_coord[2] > crossing_x:
            addresses[3] = self.address_for_coordinate([start_coord[0], crossing_y, crossing_x, start_coord[3]])

        return box_height0, box_height0, box_width, addresses
| 604 | |
    def address_for_coordinate(self, coord, is_top_box=False):
        # Absolute address of the element at coord: the tensor's base address
        # plus the offset computed by address_offset_for_coordinate.
        return self.address + self.address_offset_for_coordinate(coord, is_top_box)
| 607 | |
    def get_strides_and_coord(self, coord=None):
        """Return (strides, augmented_coord) for this tensor's storage layout.

        `strides` holds the per-axis stride in bytes of the augmented
        (rank 4/5) storage layout, and `augmented_coord` is `coord`
        (default: the origin) rewritten into that augmented axis order, so
        that dot(augmented_coord, strides) yields the element's byte offset.
        Returns (None, None) for formats with no elementwise layout
        (Unknown, WeightsCompressed).
        """
        if coord is None:
            coord = [0] * len(self.storage_shape)

        augmented_coord = coord
        augmented_shape = self.storage_shape
        # Left-pad shape and coordinate with singleton axes up to rank 4
        while len(augmented_shape) < 4:
            augmented_shape = [1] + augmented_shape

        while len(augmented_coord) < 4:
            augmented_coord = [0] + augmented_coord

        assert len(augmented_coord) == len(augmented_shape)

        if self.format == TensorFormat.NHWC:
            # Reorder to [N, C, H, W, 1]; stride_order lists the axes from
            # innermost to outermost so that memory order is N, H, W, C
            augmented_shape = [augmented_shape[0], augmented_shape[3]] + augmented_shape[1:3] + [1]
            augmented_coord = [augmented_coord[0], augmented_coord[3]] + augmented_coord[1:3] + [0]
            stride_order = [4, 1, 3, 2, 0]

        elif self.format == TensorFormat.NHCWB16:
            # Brick format: channels are split into bricks of 16, giving an
            # augmented coordinate of [N, C // 16, H, W, C % 16]
            channel_divisor = 16
            augmented_shape = augmented_shape[0:4] + [1]
            augmented_coord = (
                [augmented_coord[0], augmented_coord[3] // channel_divisor]
                + augmented_coord[1:3]
                + [augmented_coord[3] % channel_divisor]
            )

            # Avoid a zero stride contribution when the height axis is 0
            if augmented_shape[1] == 0:
                augmented_shape[1] = 1

        else:
            assert self.format in set((TensorFormat.Unknown, TensorFormat.WeightsCompressed))
            return None, None

        strides = [0] * len(augmented_shape)
        # Base stride: one element, scaled by any storage compression
        stride = self.element_size() * self.storage_compression_scale

        if self.format != TensorFormat.NHCWB16:
            # Accumulate strides from the innermost axis outwards
            for i in stride_order:
                strides[i] = stride
                stride *= augmented_shape[i]
        else:
            # NHCWB16 strides are laid out explicitly; note that
            # augmented_shape here is still [N, H, W, C, 1] while
            # augmented_coord is in brick order
            assert len(strides) == 5
            strides[4] = stride
            strides[3] = 16 * stride  # STRIDE_X
            strides[1] = strides[3] * augmented_shape[2]  # STRIDE_C
            strides[2] = augmented_shape[2] * augmented_shape[3] * stride  # STRIDE_Y
            strides[0] = strides[2] * augmented_shape[1]  # STRIDE_N

        return strides, augmented_coord
| 659 | |
| 660 | def get_strides(self): |
| 661 | strides, _ = self.get_strides_and_coord() |
| 662 | |
| 663 | return strides |
| 664 | |
Louis Verhaard | 3c07c97 | 2020-05-07 08:12:58 +0200 | [diff] [blame] | 665 | def needs_dma(self): |
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 666 | return len(self.ops) == 1 and self.ops[0].type == Op.DMA |
Louis Verhaard | 3c07c97 | 2020-05-07 08:12:58 +0200 | [diff] [blame] | 667 | |
| 668 | def get_dma_src_tensor(self): |
| 669 | # For weight tensors that need DMA: returns the source tensor in Flash, else None |
| 670 | # Note: for DMA ops, Pass.weight_tensor is referring to the SRAM weight tensor |
| 671 | return self.ops[0].inputs[0] if self.needs_dma() else None |
| 672 | |
Louis Verhaard | b2fb212 | 2020-06-04 15:51:24 +0200 | [diff] [blame] | 673 | def find_npu_op(self): |
| 674 | # Returns the NPU operator that uses this tensor, excluding DMA operators. |
| 675 | for op in self.consumers(): |
Louis Verhaard | aee5d75 | 2020-09-30 09:01:52 +0200 | [diff] [blame] | 676 | if op.type == Op.DMA: |
Louis Verhaard | b2fb212 | 2020-06-04 15:51:24 +0200 | [diff] [blame] | 677 | return op.outputs[0].find_npu_op() |
Dwight Lidman | 940fdee | 2020-08-13 13:11:48 +0200 | [diff] [blame] | 678 | if op.run_on_npu: |
Louis Verhaard | b2fb212 | 2020-06-04 15:51:24 +0200 | [diff] [blame] | 679 | return op |
| 680 | return None |
| 681 | |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 682 | def compressed_stream_index_from_coord(self, coord): |
| 683 | assert self.format == TensorFormat.WeightsCompressed |
| 684 | assert len(self.compressed_values) > 0 |
| 685 | assert len(self.compressed_values) + 1 == len(self.weight_compressed_offsets) |
| 686 | |
| 687 | depth = coord[-1] |
| 688 | brick_depth = self.brick_size[-1] |
| 689 | # Clamp position at final element index |
| 690 | if depth > self.shape[-1]: |
| 691 | depth = self.shape[-1] |
| 692 | |
| 693 | # Always round up to next boundary |
Michael McGeagh | 8d3216f | 2020-08-10 11:35:57 +0100 | [diff] [blame] | 694 | index = numeric_util.round_up_divide(depth, brick_depth) |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 695 | |
| 696 | # Check boundaries on all but last weight set (which may be shorter |
| 697 | # than the brick we divided it up into) |
| 698 | if index < len(self.weight_compressed_offsets) - 1: |
| 699 | # There are no half-way points in the weights |
| 700 | if (depth % brick_depth) != 0: |
| 701 | raise Exception("Offset into weights must be aligned to a brick") |
| 702 | |
| 703 | return index |
| 704 | |
| 705 | def size_of_compressed_stream(self, index): |
| 706 | assert 0 <= index < len(self.compressed_values) |
| 707 | return len(self.compressed_values[index]) |
| 708 | |
| 709 | def is_last_index_in_compressed_stream(self, index): |
| 710 | assert 0 <= index < len(self.compressed_values) |
| 711 | return index == len(self.compressed_values) - 1 |
| 712 | |
    def address_offset_for_coordinate(self, orig_coord, is_top_box=False):
        """Return the byte offset from the tensor's base address for `orig_coord`.

        With `is_top_box` the coordinate is treated as an exclusive upper
        bound (one past the last element on every axis): the offset of the
        element just inside it, plus one element, is returned. Returns None
        for formats without a defined elementwise layout.
        """
        address_offset = 0
        coord = orig_coord

        # Only the trailing, storage-rank part of the coordinate is used
        coord = coord[-len(self.storage_shape) :]

        if self.sub_purpose == TensorSubPurpose.Standard:
            # Bounds check against the logical shape; top-box coordinates
            # are exclusive, normal coordinates inclusive
            for idx, c in enumerate(coord):
                if is_top_box:
                    assert c > 0 and c <= self.shape[idx]
                else:
                    assert c >= 0 and c < self.shape[idx]

        if self.format == TensorFormat.WeightsCompressed:
            if len(self.weight_compressed_offsets) == 0:
                return 0

            if self.needs_dma() and self.sub_purpose == TensorSubPurpose.DoubleBuffer:
                # Double-buffered DMA weights: the offset alternates between
                # the two buffer halves, chosen by the stream index's parity
                depth = orig_coord[-1]
                brick_depth = self.brick_size[-1]
                # Clamp position at final element index
                if depth > self.shape[-1]:
                    depth = self.shape[-1]

                # Always round up to next boundary
                index = numeric_util.round_up_divide(depth, brick_depth)
                index = index % 2  # buffer slot: 0 or 1

                if len(self.compressed_values) <= 2:
                    # With at most two streams, use the actual stream lengths
                    if is_top_box and index == 0:
                        for cv in self.compressed_values:
                            address_offset += len(cv)
                    else:
                        address_offset = index * len(self.compressed_values[0])
                else:
                    # More than two streams: each slot is half the storage
                    # depth (storage_shape[-1] appears to hold the
                    # double-buffer size here — TODO confirm)
                    if is_top_box and index == 0:
                        address_offset = self.storage_shape[-1]
                    else:
                        address_offset = index * (self.storage_shape[-1] // 2)
            else:
                # Plain compressed weights: offset of the stream containing
                # the coordinate
                index = self.compressed_stream_index_from_coord(orig_coord)
                assert index < len(self.weight_compressed_offsets)
                address_offset = self.weight_compressed_offsets[index]
        else:
            if is_top_box:
                # Convert the exclusive upper-bound coordinate to the last
                # contained element
                coord = [c - 1 for c in coord]

            # handle wraparound for partial buffers. make sure to do this after subtracting top box:
            coord = [c % self.storage_shape[idx] for idx, c in enumerate(coord)]

            strides, augmented_coord = self.get_strides_and_coord(coord)
            if strides is None:
                return None

            if is_top_box:
                address_offset += 1 * strides[-1]  # one element

            address_offset += np.dot(augmented_coord, strides)

        assert address_offset >= 0
        assert address_offset <= self.storage_size()
        return address_offset
| 775 | |
Patrik Gustavsson | eca2e95 | 2020-05-27 09:15:11 +0200 | [diff] [blame] | 776 | def is_allocated_in_tensor_arena(self, scratch_tensor_mem_area): |
| 777 | if self.mem_area == scratch_tensor_mem_area and (self.mem_type in set((MemType.Scratch, MemType.Scratch_fast))): |
| 778 | return True |
| 779 | return False |
| 780 | |
Louis Verhaard | 0b8268a | 2020-08-05 16:11:29 +0200 | [diff] [blame] | 781 | def equivalent(self, tens): |
| 782 | return self.equivalence_id == tens.equivalence_id |
| 783 | |
Michael McGeagh | 6a8d424 | 2020-07-28 12:17:59 +0100 | [diff] [blame] | 784 | def set_all_shapes(self, shape): |
| 785 | self.shape = shape |
| 786 | self.storage_shape = shape |
| 787 | self.bandwidth_shape = shape |
| 788 | |
Michael McGeagh | 5778ffd | 2020-08-06 17:31:02 +0100 | [diff] [blame] | 789 | def get_full_shape(self): |
| 790 | d = len(self.shape) |
| 791 | if d in (1, 3): |
Michael McGeagh | 8d3216f | 2020-08-10 11:35:57 +0100 | [diff] [blame] | 792 | return numeric_util.full_shape(4, self.shape, 1) |
Michael McGeagh | 5778ffd | 2020-08-06 17:31:02 +0100 | [diff] [blame] | 793 | elif d == 2: |
| 794 | return [self.shape[0], 1, 1, self.shape[1]] |
| 795 | else: |
Fredrik Svedberg | 835d8e1 | 2020-09-04 09:46:17 +0200 | [diff] [blame] | 796 | return self.shape.copy() |
Michael McGeagh | 5778ffd | 2020-08-06 17:31:02 +0100 | [diff] [blame] | 797 | |
Tim Hall | 9358296 | 2020-09-09 21:58:15 +0100 | [diff] [blame] | 798 | def is_quantized(self): |
| 799 | # a tensor is quantized if it has an integral type and it contains valid quantization params |
| 800 | |
| 801 | if (self.dtype.type & BaseType.Int) == 0 or self.quantization is None: |
| 802 | return False |
| 803 | |
Tim Hall | 7b1654b | 2020-10-22 14:22:47 +0100 | [diff] [blame] | 804 | assert isinstance(self.quantization, QuantizationParameters) |
Tim Hall | 9358296 | 2020-09-09 21:58:15 +0100 | [diff] [blame] | 805 | assert self.quantization.is_valid() |
| 806 | |
| 807 | return True |
| 808 | |
Patrik Gustavsson | 3435958 | 2020-11-03 10:24:08 +0100 | [diff] [blame] | 809 | def set_random_equivalence_id(self): |
| 810 | self.equivalence_id = uuid.uuid4() |
| 811 | |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 812 | def __str__(self): |
| 813 | return "<nng.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.shape, self.dtype) |
| 814 | |
| 815 | __repr__ = __str__ |
Tim Hall | 9358296 | 2020-09-09 21:58:15 +0100 | [diff] [blame] | 816 | |
| 817 | |
def check_tens_quantized(tens):
    # True when `tens` is a Tensor that reports itself as quantized.
    if not isinstance(tens, Tensor):
        return False
    return tens.is_quantized()
| 822 | |
| 823 | |
def check_quantized_tens_scaling_equal(tens_a, tens_b):
    # Compare the quantization scaling of two tensors; both must be quantized.
    for tens in (tens_a, tens_b):
        assert check_tens_quantized(tens)

    return tens_a.quantization.is_scaling_equal(tens_b.quantization)