# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Internal representation of a Neural Network Tensor.
import enum
import uuid
from collections import defaultdict
from functools import lru_cache

import numpy as np

from . import numeric_util
from .data_type import DataType
from .ethos_u55_regs.ethos_u55_regs import resampling_mode
from .operation import Operation
from .range_set import MemoryRangeSet


class MemType(enum.IntFlag):
    Unknown = 0
    Permanent_NPU = 1
    Permanent_CPU = 2
    Scratch = 3
    Scratch_fast = 4
    Size = Scratch_fast + 1

    def display_name(self):
        return ("Unknown", "Permanent_NPU", "Permanent_CPU", "Scratch", "Scratch_fast", "Size")[self.value]

    def identifier_name(self):
        return ("unknown", "permanent_npu", "permanent_cpu", "scratch", "scratch_fast", "size")[self.value]

    def all():
        return (MemType.Permanent_NPU, MemType.Permanent_CPU, MemType.Scratch, MemType.Scratch_fast)

    def __str__(self):
        return self.name


class MemArea(enum.IntFlag):
    Unknown = 0
    Sram = 1
    Dram = 2
    OnChipFlash = 3
    OffChipFlash = 4
    Shram = 5  # for LUT
    Size = Shram + 1

    def display_name(self):
        return ("Unknown", "SRAM", "DRAM", "On-chip Flash", "Off-chip Flash", "SHRAM", "Size")[self.value]

    def identifier_name(self):
        return ("unknown", "sram", "dram", "on_chip_flash", "off_chip_flash", "shram", "size")[self.value]

    def all():
        return (MemArea.Sram, MemArea.Dram, MemArea.OnChipFlash, MemArea.OffChipFlash, MemArea.Shram)

    def __str__(self):
        return self.name


class TensorPurpose(enum.IntFlag):
    Unknown = 0
    Weights = 1
    FeatureMap = 2
    Scratch = 3
    LUT = 4
    Size = 5

    def display_name(self):
        return ("Unknown", "Weights", "FeatureMap", "Scratch", "LUT", "Size")[self.value]

    def identifier_name(self):
        return ("unknown", "weights", "feature_map", "scratch", "lut", "size")[self.value]

    def all():
        return (TensorPurpose.Weights, TensorPurpose.FeatureMap)


class TensorSubPurpose(enum.Enum):
    Standard = 0
    DoubleBuffer = 1
    RollingBufferX = 2
    RollingBufferY = 3
    RollingBufferXY = 4

    def display_name(self):
        return ("Standard", "Double Buffer", "Rolling Buffer X", "Rolling Buffer Y", "Rolling Buffer XY")[self.value]

    def identifier_name(self):
        return ("standard", "double_buffer", "rolling_buffer_x", "rolling_buffer_y", "rolling_buffer_xy")[self.value]

    def all():
        return (
            TensorSubPurpose.Standard,
            TensorSubPurpose.DoubleBuffer,
            TensorSubPurpose.RollingBufferX,
            TensorSubPurpose.RollingBufferY,
            TensorSubPurpose.RollingBufferXY,
        )


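# Note (editorial): feature maps are laid out either as plain NHWC or as
# NHCWB16, a bricked layout that groups channels in bricks of 16 (the address
# arithmetic lives in Tensor.get_strides_and_coord below, where the channel
# divisor of 16 appears); WeightsCompressed marks tensors whose contents are
# an encoded weight stream rather than raw values.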
class TensorFormat(enum.Flag):
    Unknown = 0
    WeightsCompressed = 1
    NHWC = 2
    NHCWB16 = 3

    def __str__(self):
        return self.name


class TensorBlockTraversal(enum.Enum):
    Default = 0
    DepthWise = 1
    DepthFirst = 2
    PartKernelFirst = 3


def shape_num_elements(shp):
    elems = 1
    if shp is None:
        return None
    for d in shp:
        if d is None:
            return None
        elems *= d
    return elems


def shape_fully_defined(shp):
    if shp is None:
        return False
    for d in shp:
        if d is None:
            return False
    return True


def shape_round_to_quantum(shp, quantum):
    new_shp = list(shp)

    # Traverse backwards using length of shape since there may be more rounding quantums than shape elements
    for i in range(-1, -len(shp) - 1, -1):
        if new_shp[i] is not None:
            new_shp[i] = numeric_util.round_up(new_shp[i], quantum[i])
    return new_shp
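
# For example, assuming numeric_util.round_up rounds up to the next multiple:
#   shape_round_to_quantum([1, 7, 9, 20], (1, 1, 1, 16)) -> [1, 7, 9, 32]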


@lru_cache(maxsize=None)
def create_equivalence_id(key):
    # Generates a unique equivalence_id for the given key; because of the
    # lru_cache decorator, repeated calls with the same key return the same id.
    return uuid.uuid4()
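
# Usage sketch (hypothetical key; any hashable value works):
#   eid = create_equivalence_id(("lut", 42))
#   assert eid == create_equivalence_id(("lut", 42))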


class QuantizationParameters:
    __slots__ = "min", "max", "num_bits", "narrow_range", "scale_f32", "zero_point", "quant_min", "quant_max"

    def __init__(self, min=None, max=None, num_bits=None, narrow_range=None):
        self.min = min
        self.max = max

        self.num_bits = num_bits
        self.narrow_range = narrow_range

        self.scale_f32 = None
        self.zero_point = None
        self.quant_min = None
        self.quant_max = None

    def __str__(self):
        return "<nng.QuantizationParameters min=%s, max=%s, num_bits=%s, scale=%s, zero_point=%s>" % (
            self.min,
            self.max,
            self.num_bits,
            self.scale_f32,
            self.zero_point,
        )

    __repr__ = __str__

    def clone(self):
        res = QuantizationParameters()
        res.min = self.min
        res.max = self.max

        res.num_bits = self.num_bits
        res.narrow_range = self.narrow_range

        res.scale_f32 = self.scale_f32
        res.zero_point = self.zero_point
        res.quant_min = self.quant_min
        res.quant_max = self.quant_max
        return res

    def dequantize(self, values):
        if self.zero_point.size == 1 and self.scale_f32.size == 1:
            # same scale is used for all values
            res = (values.astype(np.float64) - self.zero_point) * self.scale_f32
        else:
            # a different scale is used for different sets of values
            values_as_float = values.astype(np.float64)

            # Per-axis dequantization is not compatible with the format of
            # depthwise weights, where the input axis is at index 3
            # (Output, Kh, Kw, Input), so return an (uninitialized) array of
            # the right shape instead; the per-axis loop below is currently
            # unreachable.
            return np.ndarray(values_as_float.shape)

            shape = values_as_float.shape[0]
            assert self.zero_point.size == self.scale_f32.size == shape
            res = np.ndarray(values_as_float.shape)
            for i in range(shape):
                res[i] = (values_as_float[i] - self.zero_point[i]) * self.scale_f32[i]

        return res

    def is_scaling_equal(self, other):
        if other is None or not isinstance(other, QuantizationParameters):
            return False

        return self.scale_f32 == other.scale_f32 and self.zero_point == other.zero_point
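
# A minimal sketch of the per-tensor dequantize path above (the values are
# illustrative, not taken from a real network):
#   qp = QuantizationParameters(num_bits=8)
#   qp.scale_f32 = np.array(0.5)
#   qp.zero_point = np.array(128)
#   qp.dequantize(np.array([128, 130]))  # -> array([0., 1.])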


def create_const_tensor(name, shape, dtype, values, value_dtype=None, purpose=TensorPurpose.Unknown, quantization=None):
    # Tensor
    const_tensor = Tensor(shape, dtype, name + "_0")
    const_tensor.purpose = purpose
    const_tensor.quantization = quantization
    const_tensor.values = np.array(values, dtype=value_dtype)
    const_tensor.quant_values = np.frombuffer(const_tensor.values.tobytes(), dtype=np.uint8)
    # Operator
    const_op = Operation("Const", name)
    const_op.set_output_tensor(const_tensor)
    return const_tensor
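
# Usage sketch (hypothetical values), assuming set_output_tensor registers the
# op as the tensor's producer:
#   t = create_const_tensor("new_shape", [4], DataType.int32, [1, 16, 16, 8])
#   t.ops[0].type == "Const"  # single Const producer; raw bytes mirrored into
#                             # t.quant_values as uint8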


def create_reshape_tensor(tens, shape, ifm_reshape=True):
    if shape == tens.shape:
        return tens
    # Tensors
    name = tens.name + "_reshape"
    reshape_ifm = tens
    reshape_ofm = tens.clone("_reshaped")
    reshape_ofm.set_all_shapes(shape)
    if not ifm_reshape:
        reshape_ifm, reshape_ofm = reshape_ofm, reshape_ifm
    # Operator
    reshape_op = Operation("Reshape", name)
    reshape_op.attrs["new_shape"] = shape
    reshape_op.add_input_tensor(reshape_ifm)
    reshape_op.add_input_tensor(create_const_tensor(name + "_shape", [1], DataType.int32, shape))
    reshape_op.set_output_tensor(reshape_ofm)
    return reshape_ofm if ifm_reshape else reshape_ifm
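
# Note: in both modes the clone carrying the new shape is what gets returned;
# ifm_reshape only selects whether the original tensor acts as the Reshape's
# input (True, the clone becomes the output) or as its output (False, the
# clone becomes the input).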


# class that keeps track of all tensor addresses in the different memory types
class TensorAddressMap:
    address_map = defaultdict(dict)  # dict (tens.equivalence_id -> dict (mem_type -> address))

    @classmethod
    def get_address_for_tens(cls, tens_id, mem_type):
        return cls.address_map[tens_id].get(mem_type)

    @classmethod
    def set_address_for_tens(cls, tens_id, mem_type, address):
        # Check previous address if there is one
        previous_address = cls.address_map[tens_id].get(mem_type)
        if address is not None and previous_address is not None:
            assert previous_address == address, "Two different addresses cannot be assigned to the same tensor."

        # Set tensor's address for memory type
        cls.address_map[tens_id][mem_type] = address
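
# Addresses are keyed on equivalence_id rather than on the tensor object
# itself, so equivalent tensors (see Tensor.equivalent below) share one
# address per memory type; the Tensor.address property below wraps these
# class methods.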


class Tensor:
    __slots__ = (
        "shape",
        "storage_shape",
        "bandwidth_shape",
        "dtype",
        "name",
        "ops",
        "consumer_list",
        "values",
        "quant_values",
        "compressed_values",
        "compressed_values_substream_offsets",
        "mem_area",
        "mem_type",
        "format",
        "purpose",
        "sub_purpose",
        "alignment",
        "weight_transpose_depthwise",
        "storage_compression_scale",
        "bandwidth_compression_scale",
        "compression_scale_for_worst_weight_stream",
        "weight_compression_scales",
        "weight_compression_config",
        "value_id",
        "storage_rounding_quantum",
        "brick_size",
        "quantization",
        "weight_compressed_offsets",
        "element_size_bytes",
        "block_traversal",
        "equivalence_id",
        "resampling_mode",
        "avoid_NHCWB16",
    )
    AllocationQuantum = 16

    def __init__(self, shape, dtype, name):
        self.shape = shape
        self.storage_shape = shape
        self.bandwidth_shape = shape
        self.dtype = dtype
        self.name = name
        self.equivalence_id = uuid.uuid4()

        self.ops = []
        self.consumer_list = []

        self.values = None
        self.quant_values = None
        self.compressed_values = None
        self.compressed_values_substream_offsets = None
        self.mem_area = MemArea.Unknown
        self.mem_type = MemType.Unknown
        self.format = TensorFormat.Unknown
        self.purpose = TensorPurpose.Unknown
        self.sub_purpose = TensorSubPurpose.Standard
        self.alignment = Tensor.AllocationQuantum
        self.weight_transpose_depthwise = False

        self.storage_compression_scale = 1.0
        self.bandwidth_compression_scale = 1.0
        self.compression_scale_for_worst_weight_stream = 1.0
        self.weight_compression_scales = None
        # if two tensors have the same weight_compression_config, then they have the same compressed values
        self.weight_compression_config = None
        # if two tensors have the same value_id, then they have the same values
        self.value_id = uuid.uuid4()
        self.weight_compressed_offsets = []
        self.storage_rounding_quantum = (1, 1, 1, 1)
        self.brick_size = (1, 1, 1, 1)
        self.element_size_bytes = 0

        # quantization parameters
        self.quantization = None
        self.block_traversal = TensorBlockTraversal.Default
        self.resampling_mode = resampling_mode.NONE

        self.avoid_NHCWB16 = False

    @property
    def address(self):
        return TensorAddressMap.get_address_for_tens(self.equivalence_id, self.mem_type)

    @address.setter
    def address(self, address):
        TensorAddressMap.set_address_for_tens(self.equivalence_id, self.mem_type, address)

    def element_size(self):
        if self.element_size_bytes == 0:
            return self.dtype.size_in_bits() / 8
        return self.element_size_bytes

    def clone(self, suffix="_clone"):
        res = Tensor(self.shape, self.dtype, self.name + suffix)
        res.storage_shape = list(self.storage_shape)
        res.bandwidth_shape = list(self.bandwidth_shape)

        res.ops = []
        res.consumer_list = []

        res.values = self.values
        res.quant_values = self.quant_values
        res.mem_area = self.mem_area
        res.mem_type = self.mem_type
        res.format = self.format
        res.purpose = self.purpose
        res.sub_purpose = self.sub_purpose
        res.alignment = self.alignment
        res.bandwidth_compression_scale = self.bandwidth_compression_scale
        res.storage_rounding_quantum = self.storage_rounding_quantum

        if self.quantization is not None:
            res.quantization = self.quantization.clone()
        else:
            res.quantization = None

        res.resampling_mode = self.resampling_mode

        res.copy_compressed_weight_info(self)
        res.avoid_NHCWB16 = self.avoid_NHCWB16
        return res

    def clone_into_fast_storage(self, arch):
        res = self.clone(suffix="_fast_storage")
        res.mem_area = arch.fast_storage_mem_area
        res.mem_type = MemType.Scratch_fast
        return res

    def copy_compressed_weight_info(self, src_tens):
        # Copies compressed values + all related weight compression info from the given tensor
        self.equivalence_id = src_tens.equivalence_id
        self.compressed_values = src_tens.compressed_values
        self.compressed_values_substream_offsets = src_tens.compressed_values_substream_offsets
        self.storage_shape = src_tens.storage_shape
        self.brick_size = src_tens.brick_size
        self.weight_compression_scales = src_tens.weight_compression_scales
        self.weight_compressed_offsets = src_tens.weight_compressed_offsets
        self.weight_transpose_depthwise = src_tens.weight_transpose_depthwise
        self.compression_scale_for_worst_weight_stream = src_tens.compression_scale_for_worst_weight_stream
        self.storage_compression_scale = src_tens.storage_compression_scale
        self.block_traversal = src_tens.block_traversal
        self.weight_compression_config = src_tens.weight_compression_config
        self.value_id = src_tens.value_id

    def set_format(self, fmt, arch):
        self.format = fmt
        shape_len = 0
        try:
            shape_len = len(self.shape)
        except TypeError:
            pass

        self.storage_rounding_quantum = arch.storage_rounding_quantums[self.format]
        self.storage_rounding_quantum = self.storage_rounding_quantum[-shape_len:]
        self.brick_size = arch.brick_sizes[self.format]
        self.brick_size = self.brick_size[-shape_len:]
        if self.shape is None:
            return

        self.bandwidth_shape = shape_round_to_quantum(self.shape, self.brick_size)
        self.storage_shape = shape_round_to_quantum(self.shape, self.storage_rounding_quantum)

        if fmt == TensorFormat.WeightsCompressed:
            compression_ratio = 5 / 8
            self.storage_compression_scale = compression_ratio
            self.bandwidth_compression_scale = compression_ratio
            self.compression_scale_for_worst_weight_stream = compression_ratio

    def storage_elements(self):
        elems = shape_num_elements(self.storage_shape)
        if elems is None:
            return 0
        return elems

    def elements(self):
        elems = shape_num_elements(self.shape)
        if elems is None:
            return 0
        return elems

    def has_fully_defined_shape(self):
        return shape_fully_defined(self.shape)

    def storage_size(self, scale=1.0):
        raw_size = self.storage_elements() * self.element_size() * scale
        if raw_size == 0:
            raw_size = 1  # force it to take up space
        rounded_size = numeric_util.round_up(numeric_util.round_up_to_int(raw_size), self.alignment)
        return rounded_size
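
    # Rough arithmetic sketch (illustrative values): an int8 tensor with
    # storage_shape [1, 8, 8, 16] has 1024 storage elements of one byte each;
    # 1024 is already a multiple of the default 16-byte AllocationQuantum, so
    # storage_size() returns 1024.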

    def storage_size_for_sub_purpose(self, arch, sub_purpose, param_a=None, param_b=None):
        alt_shape = self.storage_shape_for_sub_purpose(sub_purpose, param_a, param_b)
        elems = shape_num_elements(alt_shape)
        if elems is None:
            return 0
        if sub_purpose == TensorSubPurpose.DoubleBuffer:
            raw_size = (
                elems
                * self.element_size()
                * self.compression_scale_for_worst_weight_stream
                * arch.weight_estimation_scaling
            )
        else:
            # Rolling buffers are used for intermediate data in ifm streaming
            # These will all use the NHCWB16 format, and need to be aligned to 16 in the C-dimension
            if alt_shape[-1] % 16 != 0:
                nhcwb16_shape = alt_shape[0:-1] + [numeric_util.round_up(alt_shape[-1], 16)]
                elems = shape_num_elements(nhcwb16_shape)

            raw_size = elems * self.element_size() * self.storage_compression_scale
        rounded_size = numeric_util.round_up(numeric_util.round_up_to_int(raw_size), self.alignment)
        return rounded_size

    def storage_shape_for_sub_purpose(self, sub_purpose, param_a, param_b):
        if sub_purpose == TensorSubPurpose.DoubleBuffer:
            shp = list(self.shape)
            assert len(shp) >= 2
            shp[-1] = min(shp[-1], param_a * 2)
        else:
            shp = list(self.storage_shape)
            if sub_purpose == TensorSubPurpose.RollingBufferX:
                assert len(shp) == 4
                shp[0] = 1
                shp[2] = min(shp[2], param_a)
            elif sub_purpose == TensorSubPurpose.RollingBufferY:
                assert len(shp) == 4
                shp[0] = 1
                shp[1] = min(shp[1], param_a)
            elif sub_purpose == TensorSubPurpose.RollingBufferXY:
                assert len(shp) == 4
                shp[0] = 1
                shp[2] = min(shp[2], param_a)
                shp[1] = min(shp[1], param_b)
            elif sub_purpose == TensorSubPurpose.Standard:
                pass
            else:
                assert 0, "did not expect new sub purpose %s" % (sub_purpose,)

        return shp

    def set_new_sub_purpose(self, sub_purpose, param_a=None, param_b=None):
        self.storage_shape = self.storage_shape_for_sub_purpose(sub_purpose, param_a, param_b)
        self.sub_purpose = sub_purpose
        if sub_purpose == TensorSubPurpose.DoubleBuffer:
            self.storage_compression_scale = self.compression_scale_for_worst_weight_stream

    def bandwidth(self):
        elems = shape_num_elements(self.bandwidth_shape)
        if elems is None:
            return 0
        return elems * self.element_size() * self.bandwidth_compression_scale

    def consumers(self):
        return self.consumer_list

    def get_address_ranges_for_coordinates(self, start_coord, end_coord):
        if self.sub_purpose in set(
            (TensorSubPurpose.RollingBufferX, TensorSubPurpose.RollingBufferY, TensorSubPurpose.RollingBufferXY)
        ):
            # build dummy coordinates that cover the entire buffer
            start_coord = [0] * len(start_coord)
            end_coord = [min(self.storage_shape[i], self.shape[i]) for i in range(len(end_coord))]

        start = self.address_for_coordinate(start_coord, is_top_box=False)
        end = self.address_for_coordinate(end_coord, is_top_box=True)
        return MemoryRangeSet(self.mem_area, start, end)

    def addresses_for_rolling_buffer(self, start_coord, end_coord):
        # returns ( box_height0, box_height1, box_width, [address_tl, address_tr, address_bl, address_br] )

        if len(start_coord) < 4:
            box_height0 = 1
            box_width = 1

            if len(start_coord) >= 2:
                box_width = end_coord[-2] - start_coord[-2]

            return box_height0, box_height0, box_width, [self.address_for_coordinate(start_coord), None, None, None]

        crossing_y = numeric_util.round_up(start_coord[1] + 1, self.storage_shape[1])
        crossing_x = numeric_util.round_up(start_coord[2] + 1, self.storage_shape[2])

        crossing_y = min(crossing_y, end_coord[1])
        crossing_x = min(crossing_x, end_coord[2])

        box_height0 = crossing_y - start_coord[1]
        box_width = crossing_x - start_coord[2]

        addresses = [None] * 4
        addresses[0] = self.address_for_coordinate(start_coord)

        if end_coord[2] > crossing_x:
            addresses[1] = self.address_for_coordinate([start_coord[0], start_coord[1], crossing_x, start_coord[3]])
            raise Exception("Striping in vertical direction is not supported")
        if end_coord[1] > crossing_y:
            addresses[2] = self.address_for_coordinate([start_coord[0], crossing_y, start_coord[2], start_coord[3]])
        if end_coord[1] > crossing_y and end_coord[2] > crossing_x:
            addresses[3] = self.address_for_coordinate([start_coord[0], crossing_y, crossing_x, start_coord[3]])

        return box_height0, box_height0, box_width, addresses

    def address_for_coordinate(self, coord, is_top_box=False):
        return self.address + self.address_offset_for_coordinate(coord, is_top_box)

    def get_strides_and_coord(self, coord=None):
        if coord is None:
            coord = [0] * len(self.storage_shape)

        augmented_coord = coord
        augmented_shape = self.storage_shape
        while len(augmented_shape) < 4:
            augmented_shape = [1] + augmented_shape

        while len(augmented_coord) < 4:
            augmented_coord = [0] + augmented_coord

        assert len(augmented_coord) == len(augmented_shape)

        if self.format == TensorFormat.NHWC:
            augmented_shape = [augmented_shape[0], augmented_shape[3]] + augmented_shape[1:3] + [1]
            augmented_coord = [augmented_coord[0], augmented_coord[3]] + augmented_coord[1:3] + [0]
            stride_order = [4, 1, 3, 2, 0]

        elif self.format == TensorFormat.NHCWB16:
            channel_divisor = 16
            augmented_shape = augmented_shape[0:4] + [1]
            augmented_coord = (
                [augmented_coord[0], augmented_coord[3] // channel_divisor]
                + augmented_coord[1:3]
                + [augmented_coord[3] % channel_divisor]
            )

            if augmented_shape[1] == 0:
                augmented_shape[1] = 1

        else:
            assert self.format in set((TensorFormat.Unknown, TensorFormat.WeightsCompressed))
            return None, None

        strides = [0] * len(augmented_shape)
        stride = self.element_size() * self.storage_compression_scale

        if self.format != TensorFormat.NHCWB16:
            for i in stride_order:
                strides[i] = stride
                stride *= augmented_shape[i]
        else:
            assert len(strides) == 5
            strides[4] = stride
            strides[3] = 16 * stride  # STRIDE_X
            strides[1] = strides[3] * augmented_shape[2]  # STRIDE_C
            strides[2] = augmented_shape[2] * augmented_shape[3] * stride  # STRIDE_Y
            strides[0] = strides[2] * augmented_shape[1]  # STRIDE_N

        return strides, augmented_coord

    def get_strides(self):
        strides, _ = self.get_strides_and_coord()

        return strides
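
    # Worked example (illustrative numbers): for an int8 NHWC tensor with
    # storage_shape [1, 8, 8, 16], the augmented shape is [N, C, H, W, 1] =
    # [1, 16, 8, 8, 1] and the strides come out as [1024, 1, 128, 16, 1],
    # i.e. element (n, h, w, c) sits at byte offset n*1024 + h*128 + w*16 + c.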

    def needs_dma(self):
        return len(self.ops) == 1 and self.ops[0].type == "DMA"

    def get_dma_src_tensor(self):
        # For weight tensors that need DMA: returns the source tensor in Flash, else None
        # Note: for DMA ops, Pass.weight_tensor is referring to the SRAM weight tensor
        return self.ops[0].inputs[0] if self.needs_dma() else None

    def find_npu_op(self):
        # Returns the NPU operator that uses this tensor, excluding DMA operators.
        for op in self.consumers():
            if op.type == "DMA":
                return op.outputs[0].find_npu_op()
            if op.run_on_npu:
                return op
        return None

    def compressed_stream_index_from_coord(self, coord):
        assert self.format == TensorFormat.WeightsCompressed
        assert len(self.compressed_values) > 0
        assert len(self.compressed_values) + 1 == len(self.weight_compressed_offsets)

        depth = coord[-1]
        brick_depth = self.brick_size[-1]
        # Clamp position at final element index
        if depth > self.shape[-1]:
            depth = self.shape[-1]

        # Always round up to next boundary
        index = numeric_util.round_up_divide(depth, brick_depth)

        # Check boundaries on all but last weight set (which may be shorter
        # than the brick we divided it up into)
        if index < len(self.weight_compressed_offsets) - 1:
            # There are no half-way points in the weights
            if (depth % brick_depth) != 0:
                raise Exception("Offset into weights must be aligned to a brick")

        return index

    def size_of_compressed_stream(self, index):
        assert 0 <= index < len(self.compressed_values)
        return len(self.compressed_values[index])

    def is_last_index_in_compressed_stream(self, index):
        assert 0 <= index < len(self.compressed_values)
        return index == len(self.compressed_values) - 1

    def address_offset_for_coordinate(self, orig_coord, is_top_box=False):
        address_offset = 0
        coord = orig_coord

        coord = coord[-len(self.storage_shape) :]

        if self.sub_purpose == TensorSubPurpose.Standard:
            for idx, c in enumerate(coord):
                if is_top_box:
                    assert c > 0 and c <= self.shape[idx]
                else:
                    assert c >= 0 and c < self.shape[idx]

        if self.format == TensorFormat.WeightsCompressed:
            if len(self.weight_compressed_offsets) == 0:
                return 0

            if self.needs_dma() and self.sub_purpose == TensorSubPurpose.DoubleBuffer:
                depth = orig_coord[-1]
                brick_depth = self.brick_size[-1]
                # Clamp position at final element index
                if depth > self.shape[-1]:
                    depth = self.shape[-1]

                # Always round up to next boundary
                index = numeric_util.round_up_divide(depth, brick_depth)
                index = index % 2

                if len(self.compressed_values) <= 2:
                    if is_top_box and index == 0:
                        for cv in self.compressed_values:
                            address_offset += len(cv)
                    else:
                        address_offset = index * len(self.compressed_values[0])
                else:
                    if is_top_box and index == 0:
                        address_offset = self.storage_shape[-1]
                    else:
                        address_offset = index * (self.storage_shape[-1] // 2)
            else:
                index = self.compressed_stream_index_from_coord(orig_coord)
                assert index < len(self.weight_compressed_offsets)
                address_offset = self.weight_compressed_offsets[index]
        else:
            if is_top_box:
                coord = [c - 1 for c in coord]

            # handle wraparound for partial buffers. make sure to do this after subtracting top box:
            coord = [c % self.storage_shape[idx] for idx, c in enumerate(coord)]

            strides, augmented_coord = self.get_strides_and_coord(coord)
            if strides is None:
                return None

            if is_top_box:
                address_offset += 1 * strides[-1]  # one element

            address_offset += np.dot(augmented_coord, strides)

        assert address_offset >= 0
        assert address_offset <= self.storage_size()
        return address_offset

    def is_allocated_in_tensor_arena(self, scratch_tensor_mem_area):
        if self.mem_area == scratch_tensor_mem_area and (self.mem_type in set((MemType.Scratch, MemType.Scratch_fast))):
            return True
        return False

    def is_scaling_equal(self, tens):
        return self.quantization.is_scaling_equal(tens.quantization)

    def equivalent(self, tens):
        return self.equivalence_id == tens.equivalence_id

    def set_all_shapes(self, shape):
        self.shape = shape
        self.storage_shape = shape
        self.bandwidth_shape = shape

    def get_full_shape(self):
        d = len(self.shape)
        if d in (1, 3):
            return numeric_util.full_shape(4, self.shape, 1)
        elif d == 2:
            return [self.shape[0], 1, 1, self.shape[1]]
        else:
            return self.shape.copy()
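
    # Examples, assuming numeric_util.full_shape left-pads with ones: a [10]
    # shape becomes [1, 1, 1, 10] and a [2, 3] shape becomes [2, 1, 1, 3];
    # 4D shapes are returned as an unchanged copy.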

    def __str__(self):
        return "<nng.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.shape, self.dtype)

    __repr__ = __str__