# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Internal representation of a Neural Network Tensor.
import enum
import uuid

import numpy as np

from . import numeric_util
from .data_type import DataType
from .ethos_u55_regs.ethos_u55_regs import resampling_mode
from .operation import Operation
from .range_set import MemoryRangeSet


class MemType(enum.IntFlag):
    Unknown = 0
    Permanent_NPU = 1
    Permanent_CPU = 2
    Scratch = 3
    Scratch_fast = 4
    Size = Scratch_fast + 1

    def display_name(self):
        return ("Unknown", "Permanent_NPU", "Permanent_CPU", "Scratch", "Scratch_fast", "Size")[self.value]

    def identifier_name(self):
        return ("unknown", "permanent_npu", "permanent_cpu", "scratch", "scratch_fast", "size")[self.value]

    def all():
        return (MemType.Permanent_NPU, MemType.Permanent_CPU, MemType.Scratch, MemType.Scratch_fast)

    def __str__(self):
        return self.name


class MemArea(enum.IntFlag):
    Unknown = 0
    Sram = 1
    Dram = 2
    OnChipFlash = 3
    OffChipFlash = 4
    Shram = 5  # for LUT
    Size = Shram + 1

    def display_name(self):
        return ("Unknown", "SRAM", "DRAM", "On-chip Flash", "Off-chip Flash", "SHRAM", "Size")[self.value]

    def identifier_name(self):
        return ("unknown", "sram", "dram", "on_chip_flash", "off_chip_flash", "shram", "size")[self.value]

    def all():
        return (MemArea.Sram, MemArea.Dram, MemArea.OnChipFlash, MemArea.OffChipFlash, MemArea.Shram)

    def __str__(self):
        return self.name


class TensorPurpose(enum.IntFlag):
    Unknown = 0
    Weights = 1
    FeatureMap = 2
    Scratch = 3
    LUT = 4
    Size = 5

    def display_name(self):
        return ("Unknown", "Weights", "FeatureMap", "Scratch", "LUT", "Size")[self.value]

    def identifier_name(self):
        return ("unknown", "weights", "feature_map", "scratch", "lut", "size")[self.value]

    def all():
        return (TensorPurpose.Weights, TensorPurpose.FeatureMap)


class TensorSubPurpose(enum.Enum):
    Standard = 0
    DoubleBuffer = 1
    RollingBufferX = 2
    RollingBufferY = 3
    RollingBufferXY = 4

    def display_name(self):
        return ("Standard", "Double Buffer", "Rolling Buffer X", "Rolling Buffer Y", "Rolling Buffer XY")[self.value]

    def identifier_name(self):
        return ("standard", "double_buffer", "rolling_buffer_x", "rolling_buffer_y", "rolling_buffer_xy")[self.value]

    def all():
        return (
            TensorSubPurpose.Standard,
            TensorSubPurpose.DoubleBuffer,
            TensorSubPurpose.RollingBufferX,
            TensorSubPurpose.RollingBufferY,
            TensorSubPurpose.RollingBufferXY,
        )


class TensorFormat(enum.Flag):
    Unknown = 0
    WeightsCompressed = 1
    NHWC = 2
    NHCWB16 = 3

    def __str__(self):
        return self.name


class TensorBlockTraversal(enum.Enum):
    Default = 0
    DepthWise = 1
    DepthFirst = 2
    PartKernelFirst = 3


def shape_num_elements(shp):
    elems = 1
    if shp is None:
        return None
    for d in shp:
        if d is None:
            return None
        elems *= d
    return elems


def shape_fully_defined(shp):
    if shp is None:
        return False
    for d in shp:
        if d is None:
            return False
    return True


def shape_round_to_quantum(shp, quantum):
    new_shp = list(shp)

    # Traverse backwards using length of shape since there may be more rounding quantums than shape elements
    for i in range(-1, -len(shp) - 1, -1):
        if new_shp[i] is not None:
            new_shp[i] = numeric_util.round_up(new_shp[i], quantum[i])
    return new_shp

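
# Illustrative examples for the shape helpers above (not part of the original
# module). A None anywhere means the shape is not fully defined:
#   >>> shape_num_elements([1, 7, 9, 20])
#   1260
#   >>> shape_fully_defined([1, None, 9, 20])
#   False
#   >>> shape_round_to_quantum([1, 7, 9, 20], (1, 1, 1, 16))
#   [1, 7, 9, 32]
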
161class QuantizationParameters:
162 __slots__ = "min", "max", "num_bits", "narrow_range", "scale_f32", "zero_point", "quant_min", "quant_max"
163
164 def __init__(self, min=None, max=None, num_bits=None, narrow_range=None):
165 self.min = min
166 self.max = max
167
168 self.num_bits = num_bits
169 self.narrow_range = narrow_range
170
171 self.scale_f32 = None
172 self.zero_point = None
173 self.quant_min = None
174 self.quant_max = None
175
176 def __str__(self):
177 return "<nng.QuantizationParameters min=%s max=%s, num_bits=%s, scale=%s, zero_point=%s>" % (
178 self.min,
179 self.max,
180 self.num_bits,
181 self.scale_f32,
182 self.zero_point,
183 )
184
185 __repr__ = __str__
186
187 def clone(self):
188 res = QuantizationParameters()
189 res.min = self.min
190 res.max = self.max
191
192 res.num_bits = self.num_bits
193 res.narrow_range = self.narrow_range
194
195 res.scale_f32 = self.scale_f32
196 res.zero_point = self.zero_point
197 res.quant_min = self.quant_min
198 res.quant_max = self.quant_max
199 return res
200
201 def dequantize(self, values):
202 if self.zero_point.size == 1 and self.scale_f32.size == 1:
203 # same scale is used for all values
204 res = (values.astype(np.float64) - self.zero_point) * self.scale_f32
205 else:
206 # a different scale is used for different sets of values
207 values_as_float = values.astype(np.float64)
208
209 # this is not compatible with the format of depthwise weights,
210 # where input is at index 3 (Output, Kh, Kw, Input)
211 # return the quantized values
212 return np.ndarray((values_as_float.shape))
213
214 shape = values_as_float.shape[0]
215 assert self.zero_point.size == self.scale_f32.size == shape
216 res = np.ndarray(values_as_float.shape)
217 for i in range(shape):
218 res[i] = (values_as_float[i] - self.zero_point[i]) * self.scale_f32[i]
219
220 return res
221
Tim Halle3786ac2020-07-28 17:40:50 +0100222 def is_scaling_equal(self, other):
223 if other is None or not isinstance(other, QuantizationParameters):
224 return False
225
226 return self.scale_f32 == other.scale_f32 and self.zero_point == other.zero_point
227
Tim Hall79d07d22020-04-27 18:20:16 +0100228
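
# Illustrative example of the scalar dequantize() path above (not part of the
# original module), assuming a scale of 0.1 and a zero point of 3:
#   >>> qp = QuantizationParameters(min=-12.8, max=12.7)
#   >>> qp.scale_f32 = np.array(0.1)
#   >>> qp.zero_point = np.array(3)
#   >>> qp.dequantize(np.array([3, 13, -7]))
#   array([ 0.,  1., -1.])
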

def create_const_tensor(name, shape, dtype, values, value_dtype=None, purpose=TensorPurpose.Unknown, quantization=None):
    # Tensor
    const_tensor = Tensor(shape, dtype, name + "_0")
    const_tensor.purpose = purpose
    const_tensor.quantization = quantization
    const_tensor.values = np.array(values, dtype=value_dtype)
    const_tensor.quant_values = const_tensor.values
    # Operator
    const_op = Operation("Const", name)
    const_op.set_output_tensor(const_tensor)
    return const_tensor

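
# Example usage (illustrative, not part of the original module): the returned
# constant tensor arrives with a producing Const operation already attached.
#   >>> bias = create_const_tensor("bias", [4], DataType.int32, [0, 1, 2, 3])
#   >>> bias.ops[0].type
#   'Const'
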

def create_reshape_tensor(tens, shape, ifm_reshape=True):
    if shape == tens.shape:
        return tens
    # Tensors
    name = tens.name + "_reshape"
    reshape_ifm = tens
    reshape_ofm = tens.clone("_reshaped")
    reshape_ofm.set_all_shapes(shape)
    if not ifm_reshape:
        reshape_ifm, reshape_ofm = reshape_ofm, reshape_ifm
    # Operator
    reshape_op = Operation("Reshape", name)
    reshape_op.attrs["new_shape"] = shape
    reshape_op.add_input_tensor(reshape_ifm)
    reshape_op.add_input_tensor(create_const_tensor(name + "_shape", [1], DataType.int32, shape))
    reshape_op.set_output_tensor(reshape_ofm)
    return reshape_ofm if ifm_reshape else reshape_ifm

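
# Illustrative note (not part of the original module): with ifm_reshape=True the
# given tensor feeds the new Reshape and its reshaped clone is returned; with
# ifm_reshape=False the roles are swapped, so the clone feeds the Reshape and
# the original tensor (now the OFM) is returned.
#   >>> t = Tensor([1, 4, 4, 1], DataType.int8, "t")
#   >>> create_reshape_tensor(t, [1, 16]).shape
#   [1, 16]
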

class Tensor:
    __slots__ = (
        "shape",
        "storage_shape",
        "bandwidth_shape",
        "dtype",
        "name",
        "ops",
        "consumer_list",
        "values",
        "quant_values",
        "compressed_values",
        "compressed_values_substream_offsets",
        "mem_area",
        "mem_type",
        "format",
        "purpose",
        "sub_purpose",
        "alignment",
        "weight_transpose_depthwise",
        "storage_compression_scale",
        "bandwidth_compression_scale",
        "compression_scale_for_worst_weight_stream",
        "weight_compression_scales",
        "weight_compression_config",
        "storage_rounding_quantum",
        "brick_size",
        "address",
        "quantization",
        "weight_compressed_offsets",
        "element_size_bytes",
        "block_traversal",
        "cpu_tensor",
        "npu_tensor",
        "equivalence_id",
        "resampling_mode",
        "avoid_NHCWB16",
    )
    AllocationQuantum = 16

    def __init__(self, shape, dtype, name):
        self.shape = shape
        self.storage_shape = shape
        self.bandwidth_shape = shape
        self.dtype = dtype
        self.name = name
        self.equivalence_id = uuid.uuid4()

        self.ops = []
        self.consumer_list = []
        # Below attributes are only set if a tensor has been cloned,
        # either from Cpu -> Npu or vice versa. Needed for offline allocation
        self.cpu_tensor = None  # reference to the corresponding Cpu tensor
        self.npu_tensor = None  # reference to the corresponding Npu tensor

        self.values = None
        self.quant_values = None
        self.compressed_values = None
        self.compressed_values_substream_offsets = None
        self.mem_area = MemArea.Unknown
        self.mem_type = MemType.Unknown
        self.format = TensorFormat.Unknown
        self.purpose = TensorPurpose.Unknown
        self.sub_purpose = TensorSubPurpose.Standard
        self.alignment = Tensor.AllocationQuantum
        self.weight_transpose_depthwise = False

        self.storage_compression_scale = 1.0
        self.bandwidth_compression_scale = 1.0
        self.compression_scale_for_worst_weight_stream = 1.0
        self.weight_compression_scales = None
        self.weight_compression_config = None
        self.weight_compressed_offsets = []
        self.storage_rounding_quantum = (1, 1, 1, 1)
        self.brick_size = (1, 1, 1, 1)
        self.address = None  # start address of tensor. will be filled in by tensor allocator
        self.element_size_bytes = 0

        # quantization parameters
        self.quantization = None
        self.block_traversal = TensorBlockTraversal.Default
        self.resampling_mode = resampling_mode.NONE

        self.avoid_NHCWB16 = False

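    # Illustrative example (not part of the original module): a freshly
    # constructed tensor starts with shape, storage_shape and bandwidth_shape
    # all equal; set_format() and set_new_sub_purpose() specialise them later.
    #   >>> t = Tensor([1, 8, 8, 16], DataType.uint8, "ifm")
    #   >>> t.shape == t.storage_shape == t.bandwidth_shape
    #   True
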
    def element_size(self):
        if self.element_size_bytes == 0:
            return self.dtype.size_in_bits() / 8
        return self.element_size_bytes

    def clone(self, suffix="_clone"):
        res = Tensor(self.shape, self.dtype, self.name + suffix)
        res.storage_shape = list(self.storage_shape)
        res.bandwidth_shape = list(self.bandwidth_shape)

        res.ops = []
        res.consumer_list = []
        res.equivalence_id = self.equivalence_id

        res.values = self.values
        res.quant_values = self.quant_values
        res.mem_area = self.mem_area
        res.mem_type = self.mem_type
        res.format = self.format
        res.purpose = self.purpose
        res.sub_purpose = self.sub_purpose
        res.alignment = self.alignment
        res.bandwidth_compression_scale = self.bandwidth_compression_scale
        res.storage_rounding_quantum = self.storage_rounding_quantum
        res.address = None

        if self.quantization is not None:
            res.quantization = self.quantization.clone()
        else:
            res.quantization = None

        res.resampling_mode = self.resampling_mode

        res.copy_compressed_weight_info(self)
        res.avoid_NHCWB16 = self.avoid_NHCWB16
        return res

    def clone_into_fast_storage(self, arch):
        res = self.clone(suffix="_fast_storage")
        res.mem_area = arch.fast_storage_mem_area
        res.mem_type = MemType.Scratch_fast
        return res

    def copy_compressed_weight_info(self, src_tens):
        # Copies compressed values + all related weight compression info from the given tensor
        self.compressed_values = src_tens.compressed_values
        self.compressed_values_substream_offsets = src_tens.compressed_values_substream_offsets
        self.storage_shape = src_tens.storage_shape
        self.brick_size = src_tens.brick_size
        self.weight_compression_scales = src_tens.weight_compression_scales
        self.weight_compressed_offsets = src_tens.weight_compressed_offsets
        self.weight_transpose_depthwise = src_tens.weight_transpose_depthwise
        self.compression_scale_for_worst_weight_stream = src_tens.compression_scale_for_worst_weight_stream
        self.storage_compression_scale = src_tens.storage_compression_scale
        self.block_traversal = src_tens.block_traversal
        self.weight_compression_config = src_tens.weight_compression_config

    def set_format(self, fmt, arch):
        self.format = fmt
        shape_len = 0
        try:
            shape_len = len(self.shape)
        except TypeError:
            pass

        self.storage_rounding_quantum = arch.storage_rounding_quantums[self.format]
        self.storage_rounding_quantum = self.storage_rounding_quantum[-shape_len:]
        self.brick_size = arch.brick_sizes[self.format]
        self.brick_size = self.brick_size[-shape_len:]
        if self.shape is None:
            return

        self.bandwidth_shape = shape_round_to_quantum(self.shape, self.brick_size)
        self.storage_shape = shape_round_to_quantum(self.shape, self.storage_rounding_quantum)

        if fmt == TensorFormat.WeightsCompressed:
            compression_ratio = 5 / 8
            self.storage_compression_scale = compression_ratio
            self.bandwidth_compression_scale = compression_ratio
            self.compression_scale_for_worst_weight_stream = compression_ratio

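    # Illustrative sketch (not part of the original module), assuming an
    # architecture table with a (1, 1, 1, 16) storage rounding quantum for
    # NHCWB16: set_format(TensorFormat.NHCWB16, arch) on a tensor of shape
    # [1, 8, 8, 17] would round storage_shape up to [1, 8, 8, 32], while
    # bandwidth_shape is rounded by the (usually finer) brick size instead.
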
    def storage_elements(self):
        elems = shape_num_elements(self.storage_shape)
        if elems is None:
            return 0
        return elems

    def elements(self):
        elems = shape_num_elements(self.shape)
        if elems is None:
            return 0
        return elems

    def has_fully_defined_shape(self):
        return shape_fully_defined(self.shape)

    def storage_size(self, scale=1.0):
        raw_size = self.storage_elements() * self.element_size() * scale
        if raw_size == 0:
            raw_size = 1  # force it to take up space
        rounded_size = numeric_util.round_up(numeric_util.round_up_to_int(raw_size), self.alignment)
        return rounded_size

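    # Illustrative example (not part of the original module): an int8 tensor
    # with storage_shape [1, 8, 8, 10] has 640 one-byte elements; rounding up
    # to the default 16-byte AllocationQuantum leaves storage_size() at 640.
    #   >>> t = Tensor([1, 8, 8, 10], DataType.int8, "t")
    #   >>> t.storage_size()
    #   640
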
    def storage_size_for_sub_purpose(self, arch, sub_purpose, param_a=None, param_b=None):
        alt_shape = self.storage_shape_for_sub_purpose(sub_purpose, param_a, param_b)
        elems = shape_num_elements(alt_shape)
        if elems is None:
            return 0
        if sub_purpose == TensorSubPurpose.DoubleBuffer:
            raw_size = (
                elems
                * self.element_size()
                * self.compression_scale_for_worst_weight_stream
                * arch.weight_estimation_scaling
            )
        else:
            # Rolling buffers are used for intermediate data in ifm streaming
            # These will all use the NHCWB16 format, and need to be aligned to 16 in the C-dimension
            if alt_shape[-1] % 16 != 0:
                nhcwb16_shape = alt_shape[0:-1] + [numeric_util.round_up(alt_shape[-1], 16)]
                elems = shape_num_elements(nhcwb16_shape)

            raw_size = elems * self.element_size() * self.storage_compression_scale
        rounded_size = numeric_util.round_up(numeric_util.round_up_to_int(raw_size), self.alignment)
        return rounded_size

    def storage_shape_for_sub_purpose(self, sub_purpose, param_a, param_b):
        if sub_purpose == TensorSubPurpose.DoubleBuffer:
            shp = list(self.shape)
            assert len(shp) >= 2
            shp[-1] = min(shp[-1], param_a * 2)
        else:
            shp = list(self.storage_shape)
            if sub_purpose == TensorSubPurpose.RollingBufferX:
                assert len(shp) == 4
                shp[0] = 1
                shp[2] = min(shp[2], param_a)
            elif sub_purpose == TensorSubPurpose.RollingBufferY:
                assert len(shp) == 4
                shp[0] = 1
                shp[1] = min(shp[1], param_a)
            elif sub_purpose == TensorSubPurpose.RollingBufferXY:
                assert len(shp) == 4
                shp[0] = 1
                shp[2] = min(shp[2], param_a)
                shp[1] = min(shp[1], param_b)
            elif sub_purpose == TensorSubPurpose.Standard:
                pass
            else:
                assert 0, "did not expect new sub purpose %s" % (sub_purpose,)

        return shp

    def set_new_sub_purpose(self, sub_purpose, param_a=None, param_b=None):
        self.storage_shape = self.storage_shape_for_sub_purpose(sub_purpose, param_a, param_b)
        self.sub_purpose = sub_purpose
        if sub_purpose == TensorSubPurpose.DoubleBuffer:
            self.storage_compression_scale = self.compression_scale_for_worst_weight_stream

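    # Illustrative example (not part of the original module): a rolling buffer
    # in Y keeps only a horizontal stripe of the feature map resident.
    #   >>> t = Tensor([1, 32, 32, 16], DataType.int8, "fm")
    #   >>> t.storage_shape_for_sub_purpose(TensorSubPurpose.RollingBufferY, 4, None)
    #   [1, 4, 32, 16]
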
    def bandwidth(self):
        elems = shape_num_elements(self.bandwidth_shape)
        if elems is None:
            return 0
        return elems * self.element_size() * self.bandwidth_compression_scale

    def consumers(self):
        return self.consumer_list

    def get_address_ranges_for_coordinates(self, start_coord, end_coord):
        if self.sub_purpose in set(
            (TensorSubPurpose.RollingBufferX, TensorSubPurpose.RollingBufferY, TensorSubPurpose.RollingBufferXY)
        ):
            # build dummy coordinates that cover the entire buffer
            start_coord = [0] * len(start_coord)
            end_coord = [min(self.storage_shape[i], self.shape[i]) for i in range(len(end_coord))]

        start = self.address_for_coordinate(start_coord, is_top_box=False)
        end = self.address_for_coordinate(end_coord, is_top_box=True)
        return MemoryRangeSet(self.mem_area, start, end)

    def addresses_for_rolling_buffer(self, start_coord, end_coord):
        # returns ( box_height0, box_height1, box_width, [address_tl, address_tr, address_bl, address_br] )

        if len(start_coord) < 4:
            box_height0 = 1
            box_width = 1

            if len(start_coord) >= 2:
                box_width = end_coord[-2] - start_coord[-2]

            return box_height0, box_height0, box_width, [self.address_for_coordinate(start_coord), None, None, None]

        crossing_y = numeric_util.round_up(start_coord[1] + 1, self.storage_shape[1])
        crossing_x = numeric_util.round_up(start_coord[2] + 1, self.storage_shape[2])

        crossing_y = min(crossing_y, end_coord[1])
        crossing_x = min(crossing_x, end_coord[2])

        box_height0 = crossing_y - start_coord[1]
        box_width = crossing_x - start_coord[2]

        addresses = [None] * 4
        addresses[0] = self.address_for_coordinate(start_coord)

        if end_coord[2] > crossing_x:
            addresses[1] = self.address_for_coordinate([start_coord[0], start_coord[1], crossing_x, start_coord[3]])
            raise Exception("Striping in vertical direction is not supported")
        if end_coord[1] > crossing_y:
            addresses[2] = self.address_for_coordinate([start_coord[0], crossing_y, start_coord[2], start_coord[3]])
        if end_coord[1] > crossing_y and end_coord[2] > crossing_x:
            addresses[3] = self.address_for_coordinate([start_coord[0], crossing_y, crossing_x, start_coord[3]])

        return box_height0, box_height0, box_width, addresses

    def address_for_coordinate(self, coord, is_top_box=False):
        return self.address + self.address_offset_for_coordinate(coord, is_top_box)

    def get_strides_and_coord(self, coord=None):
        if coord is None:
            coord = [0] * len(self.storage_shape)

        augmented_coord = coord
        augmented_shape = self.storage_shape
        while len(augmented_shape) < 4:
            augmented_shape = [1] + augmented_shape

        while len(augmented_coord) < 4:
            augmented_coord = [0] + augmented_coord

        assert len(augmented_coord) == len(augmented_shape)

        if self.format == TensorFormat.NHWC:
            augmented_shape = [augmented_shape[0], augmented_shape[3]] + augmented_shape[1:3] + [1]
            augmented_coord = [augmented_coord[0], augmented_coord[3]] + augmented_coord[1:3] + [0]
            stride_order = [4, 1, 3, 2, 0]

        elif self.format == TensorFormat.NHCWB16:
            channel_divisor = 16
            augmented_shape = augmented_shape[0:4] + [1]
            augmented_coord = (
                [augmented_coord[0], augmented_coord[3] // channel_divisor]
                + augmented_coord[1:3]
                + [augmented_coord[3] % channel_divisor]
            )

            if augmented_shape[1] == 0:
                augmented_shape[1] = 1

        else:
            assert self.format in set((TensorFormat.Unknown, TensorFormat.WeightsCompressed))
            return None, None

        strides = [0] * len(augmented_shape)
        stride = self.element_size() * self.storage_compression_scale

        if self.format != TensorFormat.NHCWB16:
            for i in stride_order:
                strides[i] = stride
                stride *= augmented_shape[i]
        else:
            assert len(strides) == 5
            strides[4] = stride
            strides[3] = 16 * stride  # STRIDE_X
            strides[1] = strides[3] * augmented_shape[2]  # STRIDE_C
            strides[2] = augmented_shape[2] * augmented_shape[3] * stride  # STRIDE_Y
            strides[0] = strides[2] * augmented_shape[1]  # STRIDE_N

        return strides, augmented_coord

    def get_strides(self):
        strides, _ = self.get_strides_and_coord()

        return strides

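    # Worked example (illustrative, not part of the original module): for an
    # int8 NHWC tensor with storage_shape [1, 4, 4, 8] the augmented
    # [N, C, H, W, x] strides come out as [128.0, 1.0, 32.0, 8.0, 1.0]:
    # contiguous in C, then W (8 bytes apart), H (32), and N (128).
    #   >>> t = Tensor([1, 4, 4, 8], DataType.int8, "t")
    #   >>> t.format = TensorFormat.NHWC
    #   >>> t.get_strides()
    #   [128.0, 1.0, 32.0, 8.0, 1.0]
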
    def needs_dma(self):
        return len(self.ops) == 1 and self.ops[0].type == "DMA"

    def get_dma_src_tensor(self):
        # For weight tensors that need DMA: returns the source tensor in Flash, else None
        # Note: for DMA ops, Pass.weight_tensor is referring to the SRAM weight tensor
        return self.ops[0].inputs[0] if self.needs_dma() else None

    def find_npu_op(self):
        # Returns the NPU operator that uses this tensor, excluding DMA operators.
        for op in self.consumers():
            if op.type == "DMA":
                return op.outputs[0].find_npu_op()
            if op.run_on_npu:
                return op
        return None

    def compressed_stream_index_from_coord(self, coord):
        assert self.format == TensorFormat.WeightsCompressed
        assert len(self.compressed_values) > 0
        assert len(self.compressed_values) + 1 == len(self.weight_compressed_offsets)

        depth = coord[-1]
        brick_depth = self.brick_size[-1]
        # Clamp position at final element index
        if depth > self.shape[-1]:
            depth = self.shape[-1]

        # Always round up to next boundary
        index = numeric_util.round_up_divide(depth, brick_depth)

        # Check boundaries on all but last weight set (which may be shorter
        # than the brick we divided it up into)
        if index < len(self.weight_compressed_offsets) - 1:
            # There are no half-way points in the weights
            if (depth % brick_depth) != 0:
                raise Exception("Offset into weights must be aligned to a brick")

        return index

    def size_of_compressed_stream(self, index):
        assert 0 <= index < len(self.compressed_values)
        return len(self.compressed_values[index])

    def is_last_index_in_compressed_stream(self, index):
        assert 0 <= index < len(self.compressed_values)
        return index == len(self.compressed_values) - 1

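    # Illustrative example (not part of the original module): with a brick
    # depth of 16, a depth coordinate of 32 rounds up to index
    # round_up_divide(32, 16) == 2 into weight_compressed_offsets; a depth of
    # 20 would instead raise, since it is not aligned to a brick boundary.
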
    def address_offset_for_coordinate(self, orig_coord, is_top_box=False):
        address_offset = 0
        coord = orig_coord

        coord = coord[-len(self.storage_shape) :]

        if self.sub_purpose == TensorSubPurpose.Standard:
            for idx, c in enumerate(coord):
                if is_top_box:
                    assert c > 0 and c <= self.shape[idx]
                else:
                    assert c >= 0 and c < self.shape[idx]

        if self.format == TensorFormat.WeightsCompressed:
            if len(self.weight_compressed_offsets) == 0:
                return 0

            if self.needs_dma() and self.sub_purpose == TensorSubPurpose.DoubleBuffer:
                depth = orig_coord[-1]
                brick_depth = self.brick_size[-1]
                # Clamp position at final element index
                if depth > self.shape[-1]:
                    depth = self.shape[-1]

                # Always round up to next boundary
                index = numeric_util.round_up_divide(depth, brick_depth)
                index = index % 2

                if len(self.compressed_values) <= 2:
                    if is_top_box and index == 0:
                        for cv in self.compressed_values:
                            address_offset += len(cv)
                    else:
                        address_offset = index * len(self.compressed_values[0])
                else:
                    if is_top_box and index == 0:
                        address_offset = self.storage_shape[-1]
                    else:
                        address_offset = index * (self.storage_shape[-1] // 2)
            else:
                index = self.compressed_stream_index_from_coord(orig_coord)
                assert index < len(self.weight_compressed_offsets)
                address_offset = self.weight_compressed_offsets[index]
        else:
            if is_top_box:
                coord = [c - 1 for c in coord]

            # handle wraparound for partial buffers. make sure to do this after subtracting top box:
            coord = [c % self.storage_shape[idx] for idx, c in enumerate(coord)]

            strides, augmented_coord = self.get_strides_and_coord(coord)
            if strides is None:
                return None

            if is_top_box:
                address_offset += 1 * strides[-1]  # one element

            address_offset += np.dot(augmented_coord, strides)

        assert address_offset >= 0
        assert address_offset <= self.storage_size()
        return address_offset

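    # Illustrative example (not part of the original module), continuing the
    # NHWC stride sketch above: for storage_shape [1, 4, 4, 8] and int8 data,
    # coordinate [0, 1, 0, 0] lies one H-stride into the buffer, so
    # address_offset_for_coordinate([0, 1, 0, 0]) evaluates to 32.0.
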
    def is_allocated_in_tensor_arena(self, scratch_tensor_mem_area):
        if self.mem_area == scratch_tensor_mem_area and (self.mem_type in set((MemType.Scratch, MemType.Scratch_fast))):
            return True
        return False

    def is_scaling_equal(self, tens):
        return self.quantization.is_scaling_equal(tens.quantization)

    def equivalent(self, tens):
        return self.equivalence_id == tens.equivalence_id

    def set_all_shapes(self, shape):
        self.shape = shape
        self.storage_shape = shape
        self.bandwidth_shape = shape

    def get_full_shape(self):
        d = len(self.shape)
        if d in (1, 3):
            return numeric_util.full_shape(4, self.shape, 1)
        elif d == 2:
            return [self.shape[0], 1, 1, self.shape[1]]
        else:
            return self.shape

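    # Illustrative examples for get_full_shape() above (not part of the
    # original module): lower-rank shapes are padded out to 4D, with 2D
    # shapes treated as (batch, channels):
    #   >>> Tensor([10], DataType.int8, "t").get_full_shape()
    #   [1, 1, 1, 10]
    #   >>> Tensor([2, 3], DataType.int8, "t").get_full_shape()
    #   [2, 1, 1, 3]
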
    def __str__(self):
        return "<nng.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.shape, self.dtype)

    __repr__ = __str__