# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Internal representation of a Neural Network Tensor.
import enum
import uuid

import numpy as np

from . import numeric_util
from .data_type import DataType
from .ethos_u55_regs.ethos_u55_regs import resampling_mode
from .operation import Operation
from .range_set import MemoryRangeSet


class MemType(enum.IntFlag):
    Unknown = 0
    Permanent_NPU = 1
    Permanent_CPU = 2
    Scratch = 3
    Scratch_fast = 4
    Size = Scratch_fast + 1

    def display_name(self):
        return ("Unknown", "Permanent_NPU", "Permanent_CPU", "Scratch", "Scratch_fast", "Size")[self.value]

    def identifier_name(self):
        return ("unknown", "permanent_npu", "permanent_cpu", "scratch", "scratch_fast", "size")[self.value]

    def all():
        return (MemType.Permanent_NPU, MemType.Permanent_CPU, MemType.Scratch, MemType.Scratch_fast)

    def __str__(self):
        return self.name


class MemArea(enum.IntFlag):
    Unknown = 0
    Sram = 1
    Dram = 2
    OnChipFlash = 3
    OffChipFlash = 4
    Size = OffChipFlash + 1

    def display_name(self):
        return ("Unknown", "SRAM", "DRAM", "On-chip Flash", "Off-chip Flash", "Size")[self.value]

    def identifier_name(self):
        return ("unknown", "sram", "dram", "on_chip_flash", "off_chip_flash", "size")[self.value]

    def all():
        return (MemArea.Sram, MemArea.Dram, MemArea.OnChipFlash, MemArea.OffChipFlash)

    def __str__(self):
        return self.name


class TensorPurpose(enum.IntFlag):
    Unknown = 0
    Weights = 1
    FeatureMap = 2
    Scratch = 3
    LUT = 4
    Size = 5

    def display_name(self):
        return ("Unknown", "Weights", "FeatureMap", "Scratch", "LUT", "Size")[self.value]

    def identifier_name(self):
        return ("unknown", "weights", "feature_map", "scratch", "lut", "size")[self.value]

    def all():
        return (TensorPurpose.Weights, TensorPurpose.FeatureMap)


class TensorSubPurpose(enum.Enum):
    Standard = 0
    DoubleBuffer = 1
    RollingBufferX = 2
    RollingBufferY = 3
    RollingBufferXY = 4

    def display_name(self):
        return ("Standard", "Double Buffer", "Rolling Buffer X", "Rolling Buffer Y", "Rolling Buffer XY")[self.value]

    def identifier_name(self):
        return ("standard", "double_buffer", "rolling_buffer_x", "rolling_buffer_y", "rolling_buffer_xy")[self.value]

    def all():
        return (
            TensorSubPurpose.Standard,
            TensorSubPurpose.DoubleBuffer,
            TensorSubPurpose.RollingBufferX,
            TensorSubPurpose.RollingBufferY,
            TensorSubPurpose.RollingBufferXY,
        )


class TensorFormat(enum.Flag):
    Unknown = 0
    WeightsCompressed = 1
    NHWC = 2
    NHCWB16 = 3

    def __str__(self):
        return self.name


class TensorBlockTraversal(enum.Enum):
    Default = 0
    DepthWise = 1
    DepthFirst = 2
    PartKernelFirst = 3


def shape_num_elements(shp):
    elems = 1
    if shp is None:
        return None
    for d in shp:
        if d is None:
            return None
        elems *= d
    return elems


def shape_fully_defined(shp):
    if shp is None:
        return False
    for d in shp:
        if d is None:
            return False
    return True


def shape_round_to_quantum(shp, quantum):
    new_shp = list(shp)

    # Traverse backwards using length of shape since there may be more rounding quantums than shape elements
    for i in range(-1, -len(shp) - 1, -1):
        if new_shp[i] is not None:
            new_shp[i] = numeric_util.round_up(new_shp[i], quantum[i])
    return new_shp
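
# Illustrative example (values are assumed, not taken from the compiler flow):
# rounding to the 16-deep brick of the NHCWB16 format pads only the depth axis,
#   shape_round_to_quantum([1, 7, 7, 31], (1, 1, 1, 16)) -> [1, 7, 7, 32]
# while None (unknown) dimensions are left as None.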


class QuantizationParameters:
    __slots__ = "min", "max", "num_bits", "narrow_range", "scale_f32", "zero_point", "quant_min", "quant_max"

    def __init__(self, min=None, max=None, num_bits=None, narrow_range=None):
        self.min = min
        self.max = max

        self.num_bits = num_bits
        self.narrow_range = narrow_range

        self.scale_f32 = None
        self.zero_point = None
        self.quant_min = None
        self.quant_max = None

    def __str__(self):
        return "<nng.QuantizationParameters min=%s, max=%s, num_bits=%s, scale=%s, zero_point=%s>" % (
            self.min,
            self.max,
            self.num_bits,
            self.scale_f32,
            self.zero_point,
        )

    __repr__ = __str__

    def __eq__(self, other):
        if other is None:
            return False
        if not isinstance(other, QuantizationParameters):
            return False

        pairs = ((getattr(self, s), getattr(other, s)) for s in QuantizationParameters.__slots__)

        return all(np.array_equal(a, b) for a, b in pairs)

    def __ne__(self, other):
        return not self == other

    def clone(self):
        res = QuantizationParameters()
        res.min = self.min
        res.max = self.max

        res.num_bits = self.num_bits
        res.narrow_range = self.narrow_range

        res.scale_f32 = self.scale_f32
        res.zero_point = self.zero_point
        res.quant_min = self.quant_min
        res.quant_max = self.quant_max
        return res

    def dequantize(self, values):
        if self.zero_point.size == 1 and self.scale_f32.size == 1:
            # same scale is used for all values
            res = (values.astype(np.float64) - self.zero_point) * self.scale_f32
        else:
            # a different scale is used for different sets of values
            values_as_float = values.astype(np.float64)

            # the per-axis dequantization below is not compatible with the format
            # of depthwise weights, where input is at index 3 (Output, Kh, Kw, Input),
            # so for now return the quantized values unchanged (as floats)
            return values_as_float

            shape = values_as_float.shape[0]
            assert self.zero_point.size == self.scale_f32.size == shape
            res = np.ndarray(values_as_float.shape)
            for i in range(shape):
                res[i] = (values_as_float[i] - self.zero_point[i]) * self.scale_f32[i]

        return res
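
    # Worked example (assumed numbers): with zero_point = np.array(128) and
    # scale_f32 = np.array(0.5), dequantize(np.array([130, 126])) follows the
    # scalar path above and yields (q - 128) * 0.5 == array([1.0, -1.0]).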


def create_const_tensor(name, shape, dtype, values, value_dtype=None, purpose=TensorPurpose.Unknown, quantization=None):
    # Tensor
    const_tensor = Tensor(shape, dtype, name + "_0")
    const_tensor.purpose = purpose
    const_tensor.quantization = quantization
    const_tensor.values = np.array(values, dtype=value_dtype)
    const_tensor.quant_values = np.frombuffer(const_tensor.values.tobytes(), dtype=np.uint8)
    # Operator
    const_op = Operation("Const", name)
    const_op.set_output_tensor(const_tensor)
    return const_tensor
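
# Illustrative usage (assumed values): a constant bias tensor of four int32
# zeros, returned with its producing "Const" operation already attached:
#   bias = create_const_tensor("conv1_bias", [4], DataType.int32, [0, 0, 0, 0])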


def create_reshape_tensor(tens, shape, ifm_reshape=True):
    if shape == tens.shape:
        return tens
    # Tensors
    name = tens.name + "_reshape"
    reshape_ifm = tens
    reshape_ofm = tens.clone("_reshaped")
    reshape_ofm.set_all_shapes(shape)
    if not ifm_reshape:
        reshape_ifm, reshape_ofm = reshape_ofm, reshape_ifm
    # Operator
    reshape_op = Operation("Reshape", name)
    reshape_op.attrs["new_shape"] = shape
    reshape_op.add_input_tensor(reshape_ifm)
    reshape_op.add_input_tensor(create_const_tensor(name + "_shape", [1], DataType.int32, shape))
    reshape_op.set_output_tensor(reshape_ofm)
    return reshape_ofm if ifm_reshape else reshape_ifm
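
# Note on ifm_reshape (describing the code above, not changing it): with
# ifm_reshape=True the Reshape consumes `tens` and the reshaped clone is
# returned as its output; with ifm_reshape=False the roles are swapped, so the
# returned clone (carrying the new shape) feeds the Reshape and `tens` becomes
# its output.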


class Tensor:
    __slots__ = (
        "shape",
        "storage_shape",
        "bandwidth_shape",
        "dtype",
        "name",
        "ops",
        "consumer_list",
        "values",
        "quant_values",
        "compressed_values",
        "compressed_values_substream_offsets",
        "mem_area",
        "mem_type",
        "format",
        "purpose",
        "sub_purpose",
        "alignment",
        "weight_transpose_depthwise",
        "storage_compression_scale",
        "bandwidth_compression_scale",
        "compression_scale_for_worst_weight_stream",
        "weight_compression_scales",
        "weight_compression_config",
        "storage_rounding_quantum",
        "brick_size",
        "address",
        "quantization",
        "weight_compressed_offsets",
        "element_size_bytes",
        "block_traversal",
        "cpu_tensor",
        "npu_tensor",
        "equivalence_id",
        "resampling_mode",
    )
    AllocationQuantum = 16

    def __init__(self, shape, dtype, name):
        self.shape = shape
        self.storage_shape = shape
        self.bandwidth_shape = shape
        self.dtype = dtype
        self.name = name
        self.equivalence_id = uuid.uuid4()

        self.ops = []
        self.consumer_list = []
        # Below attributes are only set if a tensor has been cloned,
        # either from Cpu -> Npu or vice versa. Needed for offline allocation
        self.cpu_tensor = None  # reference to the corresponding Cpu tensor
        self.npu_tensor = None  # reference to the corresponding Npu tensor

        self.values = None
        self.quant_values = None
        self.compressed_values = None
        self.compressed_values_substream_offsets = None
        self.mem_area = MemArea.Unknown
        self.mem_type = MemType.Unknown
        self.format = TensorFormat.Unknown
        self.purpose = TensorPurpose.Unknown
        self.sub_purpose = TensorSubPurpose.Standard
        self.alignment = Tensor.AllocationQuantum
        self.weight_transpose_depthwise = False

        self.storage_compression_scale = 1.0
        self.bandwidth_compression_scale = 1.0
        self.compression_scale_for_worst_weight_stream = 1.0
        self.weight_compression_scales = None
        self.weight_compression_config = None
        self.weight_compressed_offsets = []
        self.storage_rounding_quantum = (1, 1, 1, 1)
        self.brick_size = (1, 1, 1, 1)
        self.address = None  # start address of tensor. will be filled in by tensor allocator
        self.element_size_bytes = 0

        # quantization parameters
        self.quantization = None
        self.block_traversal = TensorBlockTraversal.Default
        self.resampling_mode = resampling_mode.NONE

    def element_size(self):
        if self.element_size_bytes == 0:
            return self.dtype.size_in_bits() / 8
        return self.element_size_bytes

    def clone(self, suffix="_clone"):
        res = Tensor(self.shape, self.dtype, self.name + suffix)
        res.storage_shape = list(self.storage_shape)
        res.bandwidth_shape = list(self.bandwidth_shape)

        res.ops = []
        res.consumer_list = []
        res.equivalence_id = self.equivalence_id

        res.values = self.values
        res.quant_values = self.quant_values
        res.mem_area = self.mem_area
        res.mem_type = self.mem_type
        res.format = self.format
        res.purpose = self.purpose
        res.sub_purpose = self.sub_purpose
        res.alignment = self.alignment
        res.bandwidth_compression_scale = self.bandwidth_compression_scale
        res.storage_rounding_quantum = self.storage_rounding_quantum
        res.address = None

        if self.quantization is not None:
            res.quantization = self.quantization.clone()
        else:
            res.quantization = None

        res.resampling_mode = self.resampling_mode

        res.copy_compressed_weight_info(self)
        return res

    def clone_into_fast_storage(self, arch):
        res = self.clone(suffix="_fast_storage")
        res.mem_area = arch.fast_storage_mem_area
        res.mem_type = MemType.Scratch_fast
        return res

    def copy_compressed_weight_info(self, src_tens):
        # Copies compressed values + all related weight compression info from the given tensor
        self.compressed_values = src_tens.compressed_values
        self.compressed_values_substream_offsets = src_tens.compressed_values_substream_offsets
        self.storage_shape = src_tens.storage_shape
        self.brick_size = src_tens.brick_size
        self.weight_compression_scales = src_tens.weight_compression_scales
        self.weight_compressed_offsets = src_tens.weight_compressed_offsets
        self.weight_transpose_depthwise = src_tens.weight_transpose_depthwise
        self.compression_scale_for_worst_weight_stream = src_tens.compression_scale_for_worst_weight_stream
        self.storage_compression_scale = src_tens.storage_compression_scale
        self.block_traversal = src_tens.block_traversal
        self.weight_compression_config = src_tens.weight_compression_config

    def set_format(self, fmt, arch):
        self.format = fmt
        shape_len = 0
        try:
            shape_len = len(self.shape)
        except TypeError:
            pass

        self.storage_rounding_quantum = arch.storage_rounding_quantums[self.format]
        self.storage_rounding_quantum = self.storage_rounding_quantum[-shape_len:]
        self.brick_size = arch.brick_sizes[self.format]
        self.brick_size = self.brick_size[-shape_len:]
        if self.shape is None:
            return

        self.bandwidth_shape = shape_round_to_quantum(self.shape, self.brick_size)
        self.storage_shape = shape_round_to_quantum(self.shape, self.storage_rounding_quantum)

        if fmt == TensorFormat.WeightsCompressed:
            compression_ratio = 5 / 8
            self.storage_compression_scale = compression_ratio
            self.bandwidth_compression_scale = compression_ratio
            self.compression_scale_for_worst_weight_stream = compression_ratio

    def storage_elements(self):
        elems = shape_num_elements(self.storage_shape)
        if elems is None:
            return 0
        return elems

    def elements(self):
        elems = shape_num_elements(self.shape)
        if elems is None:
            return 0
        return elems

    def has_fully_defined_shape(self):
        return shape_fully_defined(self.shape)

    def storage_size(self):
        raw_size = self.storage_elements() * self.element_size()
        if raw_size == 0:
            raw_size = 1  # force it to take up space
        rounded_size = numeric_util.round_up(numeric_util.round_up_to_int(raw_size), self.alignment)
        return rounded_size
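
    # Worked example (assumed figures): an int8 tensor with storage_shape
    # [1, 5, 5, 3] has 75 one-byte elements; rounding 75 up to the default
    # 16-byte AllocationQuantum gives a storage_size() of 80 bytes.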

    def storage_size_for_sub_purpose(self, sub_purpose, param_a=None, param_b=None):
        alt_shape = self.storage_shape_for_sub_purpose(sub_purpose, param_a, param_b)
        elems = shape_num_elements(alt_shape)
        if elems is None:
            return 0
        if sub_purpose == TensorSubPurpose.DoubleBuffer:
            raw_size = elems * self.element_size() * self.compression_scale_for_worst_weight_stream
        else:
            raw_size = elems * self.element_size() * self.storage_compression_scale
        rounded_size = numeric_util.round_up(numeric_util.round_up_to_int(raw_size), self.alignment)
        return rounded_size

    def storage_shape_for_sub_purpose(self, sub_purpose, param_a, param_b):
        if sub_purpose == TensorSubPurpose.DoubleBuffer:
            shp = list(self.shape)
            assert len(shp) >= 2
            shp[-1] = min(shp[-1], param_a * 2)
        else:
            shp = list(self.storage_shape)
            if sub_purpose == TensorSubPurpose.RollingBufferX:
                assert len(shp) == 4
                shp[0] = 1
                shp[2] = min(shp[2], param_a)
            elif sub_purpose == TensorSubPurpose.RollingBufferY:
                assert len(shp) == 4
                shp[0] = 1
                shp[1] = min(shp[1], param_a)
            elif sub_purpose == TensorSubPurpose.RollingBufferXY:
                assert len(shp) == 4
                shp[0] = 1
                shp[2] = min(shp[2], param_a)
                shp[1] = min(shp[1], param_b)
            elif sub_purpose == TensorSubPurpose.Standard:
                pass
            else:
                assert 0, "did not expect new sub purpose %s" % (sub_purpose,)

        return shp

    def set_new_sub_purpose(self, sub_purpose, param_a=None, param_b=None):
        self.storage_shape = self.storage_shape_for_sub_purpose(sub_purpose, param_a, param_b)
        self.sub_purpose = sub_purpose
        if sub_purpose == TensorSubPurpose.DoubleBuffer:
            self.storage_compression_scale = self.compression_scale_for_worst_weight_stream

    def bandwidth(self):
        elems = shape_num_elements(self.bandwidth_shape)
        if elems is None:
            return 0
        return elems * self.element_size() * self.bandwidth_compression_scale

    def consumers(self):
        return self.consumer_list

    def get_address_ranges_for_coordinates(self, start_coord, end_coord):
        if self.sub_purpose in set(
            (TensorSubPurpose.RollingBufferX, TensorSubPurpose.RollingBufferY, TensorSubPurpose.RollingBufferXY)
        ):
            # build dummy coordinates that cover the entire buffer
            start_coord = [0] * len(start_coord)
            end_coord = [min(self.storage_shape[i], self.shape[i]) for i in range(len(end_coord))]

        start = self.address_for_coordinate(start_coord, is_top_box=False)
        end = self.address_for_coordinate(end_coord, is_top_box=True)
        return MemoryRangeSet(self.mem_area, start, end)

    def addresses_for_rolling_buffer(self, start_coord, end_coord):
        # returns ( box_height0, box_height1, box_width, [address_tl, address_tr, address_bl, address_br] )

        if len(start_coord) < 4:
            box_height0 = 1
            box_width = 1

            if len(start_coord) >= 2:
                box_width = end_coord[-2] - start_coord[-2]

            return box_height0, box_height0, box_width, [self.address_for_coordinate(start_coord), None, None, None]

        crossing_y = numeric_util.round_up(start_coord[1] + 1, self.storage_shape[1])
        crossing_x = numeric_util.round_up(start_coord[2] + 1, self.storage_shape[2])

        crossing_y = min(crossing_y, end_coord[1])
        crossing_x = min(crossing_x, end_coord[2])

        box_height0 = crossing_y - start_coord[1]
        box_width = crossing_x - start_coord[2]

        addresses = [None] * 4
        addresses[0] = self.address_for_coordinate(start_coord)

        if end_coord[2] > crossing_x:
            addresses[1] = self.address_for_coordinate([start_coord[0], start_coord[1], crossing_x, start_coord[3]])
            raise Exception("Striping in vertical direction is not supported")
        if end_coord[1] > crossing_y:
            addresses[2] = self.address_for_coordinate([start_coord[0], crossing_y, start_coord[2], start_coord[3]])
        if end_coord[1] > crossing_y and end_coord[2] > crossing_x:
            addresses[3] = self.address_for_coordinate([start_coord[0], crossing_y, crossing_x, start_coord[3]])

        return box_height0, box_height0, box_width, addresses

    def address_for_coordinate(self, coord, is_top_box=False):
        return self.address + self.address_offset_for_coordinate(coord, is_top_box)

    def get_strides_and_coord(self, coord=None):
        if coord is None:
            coord = [0] * len(self.storage_shape)

        augmented_coord = coord
        augmented_shape = self.storage_shape
        while len(augmented_shape) < 4:
            augmented_shape = [1] + augmented_shape

        while len(augmented_coord) < 4:
            augmented_coord = [0] + augmented_coord

        assert len(augmented_coord) == len(augmented_shape)

        if self.format == TensorFormat.NHWC:
            augmented_shape = [augmented_shape[0], augmented_shape[3]] + augmented_shape[1:3] + [1]
            augmented_coord = [augmented_coord[0], augmented_coord[3]] + augmented_coord[1:3] + [0]
            stride_order = [4, 1, 3, 2, 0]

        elif self.format == TensorFormat.NHCWB16:
            channel_divisor = 16
            augmented_shape = augmented_shape[0:4] + [1]
            augmented_coord = (
                [augmented_coord[0], augmented_coord[3] // channel_divisor]
                + augmented_coord[1:3]
                + [augmented_coord[3] % channel_divisor]
            )

            if augmented_shape[1] == 0:
                augmented_shape[1] = 1

        else:
            assert self.format in set((TensorFormat.Unknown, TensorFormat.WeightsCompressed))
            return None, None

        strides = [0] * len(augmented_shape)
        stride = self.element_size() * self.storage_compression_scale

        if self.format != TensorFormat.NHCWB16:
            for i in stride_order:
                strides[i] = stride
                stride *= augmented_shape[i]
        else:
            assert len(strides) == 5
            strides[4] = stride
            strides[3] = 16 * stride  # STRIDE_X
            strides[1] = strides[3] * augmented_shape[2]  # STRIDE_C
            strides[2] = augmented_shape[2] * augmented_shape[3] * stride  # STRIDE_Y
            strides[0] = strides[2] * augmented_shape[1]  # STRIDE_N

        return strides, augmented_coord
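
    # Worked example (assumed figures): for an int8 NHWC tensor with
    # storage_shape [1, 7, 7, 16], the stride_order loop above yields byte
    # strides of 1 per channel, 16 per x step, 112 per y step and 784 per
    # batch, i.e. depth is the innermost, contiguous axis.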

    def get_strides(self):
        strides, _ = self.get_strides_and_coord()

        return strides

    def needs_dma(self):
        return len(self.ops) == 1 and self.ops[0].type == "DMA"

    def get_dma_src_tensor(self):
        # For weight tensors that need DMA: returns the source tensor in Flash, else None
        # Note: for DMA ops, Pass.weight_tensor is referring to the SRAM weight tensor
        return self.ops[0].inputs[0] if self.needs_dma() else None

    def find_npu_op(self):
        # Returns the NPU operator that uses this tensor, excluding DMA operators.
        for op in self.consumers():
            if op.type == "DMA":
                return op.outputs[0].find_npu_op()
            if "npu_block_type" in op.attrs:
                return op
        return None

    def compressed_stream_index_from_coord(self, coord):
        assert self.format == TensorFormat.WeightsCompressed
        assert len(self.compressed_values) > 0
        assert len(self.compressed_values) + 1 == len(self.weight_compressed_offsets)

        depth = coord[-1]
        brick_depth = self.brick_size[-1]
        # Clamp position at final element index
        if depth > self.shape[-1]:
            depth = self.shape[-1]

        # Always round up to next boundary
        index = numeric_util.round_up_divide(depth, brick_depth)

        # Check boundaries on all but last weight set (which may be shorter
        # than the brick we divided it up into)
        if index < len(self.weight_compressed_offsets) - 1:
            # There are no half-way points in the weights
            if (depth % brick_depth) != 0:
                raise Exception("Offset into weights must be aligned to a brick")

        return index

    def size_of_compressed_stream(self, index):
        assert 0 <= index < len(self.compressed_values)
        return len(self.compressed_values[index])

    def is_last_index_in_compressed_stream(self, index):
        assert 0 <= index < len(self.compressed_values)
        return index == len(self.compressed_values) - 1

    def address_offset_for_coordinate(self, orig_coord, is_top_box=False):
        address_offset = 0
        coord = orig_coord

        coord = coord[-len(self.storage_shape) :]

        if self.sub_purpose == TensorSubPurpose.Standard:
            for idx, c in enumerate(coord):
                if is_top_box:
                    assert c > 0 and c <= self.shape[idx]
                else:
                    assert c >= 0 and c < self.shape[idx]

        if self.format == TensorFormat.WeightsCompressed:
            if len(self.weight_compressed_offsets) == 0:
                return 0

            if self.needs_dma() and self.sub_purpose == TensorSubPurpose.DoubleBuffer:
                depth = orig_coord[-1]
                brick_depth = self.brick_size[-1]
                # Clamp position at final element index
                if depth > self.shape[-1]:
                    depth = self.shape[-1]

                # Always round up to next boundary
                index = numeric_util.round_up_divide(depth, brick_depth)
                index = index % 2

                if len(self.compressed_values) <= 2:
                    if is_top_box and index == 0:
                        for cv in self.compressed_values:
                            address_offset += len(cv)
                    else:
                        address_offset = index * len(self.compressed_values[0])
                else:
                    if is_top_box and index == 0:
                        address_offset = self.storage_shape[-1]
                    else:
                        address_offset = index * (self.storage_shape[-1] // 2)
            else:
                index = self.compressed_stream_index_from_coord(orig_coord)
                assert index < len(self.weight_compressed_offsets)
                address_offset = self.weight_compressed_offsets[index]
        else:
            if is_top_box:
                coord = [c - 1 for c in coord]

            # handle wraparound for partial buffers. make sure to do this after subtracting top box:
            coord = [c % self.storage_shape[idx] for idx, c in enumerate(coord)]

            strides, augmented_coord = self.get_strides_and_coord(coord)
            if strides is None:
                return None

            if is_top_box:
                address_offset += 1 * strides[-1]  # one element

            address_offset += np.dot(augmented_coord, strides)

        assert address_offset >= 0
        assert address_offset <= self.storage_size()
        return address_offset
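
    # Worked example (assumed figures): for a Standard int8 NHWC tensor with
    # storage_shape [1, 7, 7, 16], coordinate [0, 1, 2, 3] maps to
    # ((1 * 7 + 2) * 16 + 3) = 147 bytes from the tensor's base address.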

    def is_allocated_in_tensor_arena(self, scratch_tensor_mem_area):
        if self.mem_area == scratch_tensor_mem_area and (self.mem_type in set((MemType.Scratch, MemType.Scratch_fast))):
            return True
        return False

    def set_all_shapes(self, shape):
        self.shape = shape
        self.storage_shape = shape
        self.bandwidth_shape = shape

    def get_full_shape(self):
        d = len(self.shape)
        if d in (1, 3):
            return numeric_util.full_shape(4, self.shape, 1)
        elif d == 2:
            return [self.shape[0], 1, 1, self.shape[1]]
        else:
            return self.shape
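
    # Illustrative behaviour (assumed inputs): [20] -> [1, 1, 1, 20] and
    # [7, 7, 16] -> [1, 7, 7, 16] (left-padded with ones via full_shape),
    # while a 2D [batch, depth] shape such as [10, 20] becomes [10, 1, 1, 20].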

    def __str__(self):
        return "<nng.Tensor '%s' shape=%s dtype=%s>" % (self.name, self.shape, self.dtype)

    __repr__ = __str__