# Copyright (C) 2020-2021 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Compresses and pads the weights. It also calculates the scales and packs them with the biases.
from collections import namedtuple
from collections import OrderedDict
from typing import Dict
from typing import Optional
from typing import Tuple

import numpy as np

from .api import NpuBlockTraversal
from .architecture_features import Accelerator
from .architecture_features import ArchitectureFeatures
from .data_type import DataType
from .errors import UnsupportedFeatureError
from .numeric_util import round_up
from .operation import NpuBlockType
from .operation import Op
from .scaling import quantise_scale
from .scaling import reduced_quantise_scale
from .tensor import Tensor
from .tensor import TensorFormat
from .tensor import TensorPurpose
from ethosu import mlw_codec


# Contains meta info for a weight compression. If two tensors have identical weight compression config,
# then they also will have identical compressed weights.
WeightCompressionConfig = namedtuple(
    "WeightCompressionConfig",
    ["npu_block_type", "ofm_block_depth", "ofm_depth_step", "dilation", "weight_value_id"],
)

ScaleCompressionConfig = namedtuple("ScaleCompressionConfig", ["scale_value_id", "ifm_scale", "ofm_scale"])

WeightKey = namedtuple("WeightKey", ["core", "depth"])
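
# Illustrative note: encoded_ranges on NpuWeightTensor (below) is keyed by WeightKey(core, depth).
# For example, with arch.ncores == 2 and depth_offsets == [0, 64, 128] (two depth slices), the
# expected keys are WeightKey(0, 0), WeightKey(1, 0), WeightKey(0, 64) and WeightKey(1, 64).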


class WeightRange:
    def __init__(self):
        self.offset = 0
        self.scale_bytes = 0
        self.weight_offset = 0
        self.weight_bytes = 0
        self.index = 0

    @property
    def total_bytes(self):
        return self.scale_bytes + self.weight_bytes


class NpuWeightTensor(Tensor):
    def __init__(self, name):
        Tensor.__init__(self, None, None, name + "_npu_encoded_weights")
        self.buffer = []
        self.double_buffer_sizes = [0, 0]  # Required sizes if double buffering is used
        self.encoded_ranges = OrderedDict()
        self.hw_traversal = NpuBlockTraversal.DEPTH_FIRST
        self.dtype = DataType.uint8
        self.scale_compression_config = None

    def max_range_bytes(self):
        return max(self.double_buffer_sizes)

    def double_buffer_size(self):
        """Return total required size for double buffering"""
        return sum(self.double_buffer_sizes)

class CompressedWeightCache:
    """Global tensor weight compression cache"""

    cache: Dict[WeightCompressionConfig, Tensor] = {}

    @staticmethod
    def get_tensor_with_same_compression(wcc):
        return CompressedWeightCache.cache.get(wcc)

    @staticmethod
    def add(tens):
        # Adds the compressed weights from the tensor to the cache
        wcc = tens.weight_compression_config
        CompressedWeightCache.cache[wcc] = tens

    @staticmethod
    def has_tensor_with_same_compression(wcc):
        return wcc in CompressedWeightCache.cache

    @staticmethod
    def get_unencoded_size_with_same_compression(wcc):
        cache_obj = CompressedWeightCache.cache.get(wcc)
        return cache_obj[1] if cache_obj else None
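
# Illustrative usage sketch (hypothetical tensor, not executed):
#   wcc = create_weight_compression_config(weight_tens, npu_block_type, 32, 64, (1, 1))
#   if CompressedWeightCache.has_tensor_with_same_compression(wcc):
#       tens = CompressedWeightCache.get_tensor_with_same_compression(wcc)  # reuse prior encoding
#   else:
#       ...  # encode, tag the tensor with wcc, then CompressedWeightCache.add(tens)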


def create_weight_compression_config(weight_tens, npu_block_type, ofm_block_depth, ofm_depth_step, dilation):
    # Note: for an OFM block, only its depth is used in weight compression, and a block depth
    # greater than the OFM depth gives the same result as block depth == OFM depth
    block_depth = min(ofm_block_depth, weight_tens.values.shape[-1])
    return WeightCompressionConfig(npu_block_type, block_depth, ofm_depth_step, dilation, weight_tens.value_id)


def encode_weights(
    accelerator: Accelerator,
    weights_volume: np.ndarray,
    dilation_xy: Tuple[int, int],
    ifm_bitdepth: int,
    ofm_block_depth: int,
    is_depthwise: bool,
    block_traversal: NpuBlockTraversal,
):
    """
    Internal implementation of the public-facing API for weight encoding.

    :param accelerator: architecture_features.Accelerator enum to pick the correct Ethos-U accelerator
    :param weights_volume: numpy.ndarray in OHWI layout with four dimensions
    :param dilation_xy: a two-element tuple of dilation attributes in the x,y dimensions
    :param ifm_bitdepth: the bitdepth of the input feature map
    :param ofm_block_depth: the depth of blocks for Ethos-U processing
    :param is_depthwise: a boolean indicating these weights are used for a depthwise traversal
    :param block_traversal: indicates how these weights are traversed on a sub-kernel basis

    :return: a tuple with a bytearray of encoded weights and the size of the unencoded weights
    """
    # Check arg types
    assert isinstance(accelerator, Accelerator)
    assert isinstance(weights_volume, np.ndarray)
    assert isinstance(dilation_xy, tuple)
    assert isinstance(ifm_bitdepth, int)
    assert isinstance(ofm_block_depth, int)
    assert isinstance(is_depthwise, bool)
    assert isinstance(block_traversal, NpuBlockTraversal)

    # Checks for weight layout
    assert len(weights_volume.shape) == 4, "weights ndarray should have a shape of 4"

    # It cannot be both partkernel and depthwise
    assert not (
        is_depthwise and block_traversal == NpuBlockTraversal.PART_KERNEL_FIRST
    ), "encode_weights :: partkernel and depthwise are mutually exclusive"

    # Check valid values for dilation
    assert dilation_xy[0] in (1, 2), "encode_weights :: dilation x should be 1 or 2 not {}".format(dilation_xy[0])
    assert dilation_xy[1] in (1, 2), "encode_weights :: dilation y should be 1 or 2 not {}".format(dilation_xy[1])

    ifm_ublock = ArchitectureFeatures.accelerator_configs[accelerator].ifm_ublock
    ofm_ublock = ArchitectureFeatures.accelerator_configs[accelerator].ofm_ublock
    decomp_h = ArchitectureFeatures.SubKernelMax.height // dilation_xy[1]
    decomp_w = ArchitectureFeatures.SubKernelMax.width // dilation_xy[0]

    return mlw_codec.reorder_encode(
        ifm_ublock.depth,
        ofm_ublock.depth,
        weights_volume,
        ofm_block_depth,
        is_depthwise,
        block_traversal == NpuBlockTraversal.PART_KERNEL_FIRST,
        ifm_bitdepth,
        decomp_h,
        decomp_w,
    )
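
# Illustrative usage sketch (hypothetical values, not executed): encoding a 3x3 kernel with
# 16 IFM channels and 32 OFM channels for a 128 MAC Ethos-U55 configuration:
#   encoded, unencoded_size = encode_weights(
#       accelerator=Accelerator.Ethos_U55_128,
#       weights_volume=np.zeros((32, 3, 3, 16), dtype=np.int16),  # OHWI
#       dilation_xy=(1, 1),
#       ifm_bitdepth=8,
#       ofm_block_depth=16,
#       is_depthwise=False,
#       block_traversal=NpuBlockTraversal.DEPTH_FIRST,
#   )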


def encode_bias(bias: np.int64, scale: int, shift: int):
    """
    Internal implementation of the public-facing API to pack bias and scale values as required by the Ethos-U

    :param bias: 64bit signed number that includes 40bit signed bias
    :param scale: 32bit scale value
    :param shift: 6bit shift value
    :return: packed 80bit [0(2-bits),shift(6-bits),scale(32-bits),bias(40-bits)]
    """
    # Check arg types
    assert isinstance(bias, np.int64)
    assert isinstance(scale, int)
    assert isinstance(shift, int)

    assert -(1 << (40 - 1)) <= bias < (1 << (40 - 1))  # signed 40-bit range
    assert 0 <= scale < (1 << 32)  # unsigned 32-bit range
    assert 0 <= shift < (1 << 6)  # unsigned 6-bit range

    data = bytearray(10)
    data[0] = (bias >> (0 * 8)) & 0xFF
    data[1] = (bias >> (1 * 8)) & 0xFF
    data[2] = (bias >> (2 * 8)) & 0xFF
    data[3] = (bias >> (3 * 8)) & 0xFF
    data[4] = (bias >> (4 * 8)) & 0xFF
    data[5] = (scale >> (0 * 8)) & 0xFF
    data[6] = (scale >> (1 * 8)) & 0xFF
    data[7] = (scale >> (2 * 8)) & 0xFF
    data[8] = (scale >> (3 * 8)) & 0xFF
    data[9] = shift & 0x3F
    return data
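
# Worked example (illustrative): encode_bias(np.int64(1024), 0x12345678, 15) packs as
# little-endian bytes [0x00, 0x04, 0x00, 0x00, 0x00] for the 40-bit bias,
# [0x78, 0x56, 0x34, 0x12] for the 32-bit scale, then 0x0F for the 6-bit shift.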


def core_deinterleave(hwio, core, ncores):
    # Put weights back into OHWI
    ohwi = np.transpose(hwio, (3, 0, 1, 2))
    return ohwi[core : ohwi.shape[0] : ncores]
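
# Illustrative example: with ncores == 2, an HWIO volume with 8 output channels is split so
# that core 0 receives OHWI channels 0, 2, 4, 6 and core 1 receives channels 1, 3, 5, 7.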


def _prepare_scale_and_bias(arch, tens, rescale_for_faf, explicit_scaling):
    assert tens.purpose in [TensorPurpose.FeatureMap, TensorPurpose.FSBias]
    assert tens.format == TensorFormat.NHWC
    # the connected operator should expect a bias input unless it is a FullyConnected
    assert tens.consumer_list[0].type.needs_bias()
    # the input bias tensor is the same as that connected to the operator
    bias_tens = tens.consumer_list[0].bias
    assert tens is bias_tens

    # the operator should only have a single output
    assert len(tens.consumer_list[0].outputs) == 1
    biases = tens.values

    first_consumer_op = tens.consumer_list[0]
    ifm_dtype = first_consumer_op.inputs[0].dtype
    ifm_scale = first_consumer_op.get_input_quantization().scale_f32
    ofm_scale = first_consumer_op.get_output_quantization().scale_f32
    weight_scales = first_consumer_op.inputs[1].quantization.scale_f32

    # Biases can have multiple consumers for RNN cells. If so, check that they are all the same
    for op in tens.consumer_list[1:]:
        assert ifm_scale == op.get_input_quantization().scale_f32
        assert ofm_scale == op.get_output_quantization().scale_f32
        assert weight_scales == op.inputs[1].quantization.scale_f32

    if not hasattr(weight_scales, "__iter__"):
        # If weight_scales is not already an iterable, make it into a list
        weight_scales = [weight_scales]

    # Convert scales to np.double (from np.float32) to conform to TensorFlow Lite which
    # uses double during scaling calculations
    # TensorFlow Lite casts the scales slightly differently for uint8 and int8
    if not rescale_for_faf:
        if ifm_dtype == DataType.uint8:
            # for some cases of the Mean operator, the scale must be calculated differently to match the reference
            if first_consumer_op.low_precision_scaling:
                scales = [
                    np.double(np.single(ifm_scale) / (np.single(weight_scale) * np.single(ofm_scale)))
                    for weight_scale in weight_scales
                ]
            else:
                scales = [np.double(ifm_scale * weight_scale) / np.double(ofm_scale) for weight_scale in weight_scales]
        elif ifm_dtype == DataType.int8 or ifm_dtype == DataType.int16:
            scales = [
                (np.double(ifm_scale) * np.double(weight_scale)) / np.double(ofm_scale)
                for weight_scale in weight_scales
            ]
        else:
            raise UnsupportedFeatureError(f"Compression of {ifm_dtype} is not implemented; Tensor: '{tens.name}'")
    else:
        if ifm_dtype == DataType.uint8:
            scales = [np.double(ifm_scale * weight_scale * 0x3000) for weight_scale in weight_scales]
        elif ifm_dtype == DataType.int8 or ifm_dtype == DataType.int16:
            scales = [(np.double(ifm_scale * 0x3000) * np.double(weight_scale)) for weight_scale in weight_scales]
        else:
            raise UnsupportedFeatureError(f"Compression of {ifm_dtype} is not implemented; Tensor: '{tens.name}'")

    if explicit_scaling:
        assert len(explicit_scaling.shift) == len(explicit_scaling.multiplier)
        quantised_scales = [(int(m), int(s)) for s, m in zip(explicit_scaling.shift, explicit_scaling.multiplier)]
    else:
        # quantise all of the weight scales into (scale_factor, shift)
        if ifm_dtype == DataType.int16:
            quantised_scales = [reduced_quantise_scale(scale) for scale in scales]
        else:
            quantised_scales = [quantise_scale(scale) for scale in scales]

    # If only 1 quantised scale is used, repeat that value for the length of the biases
    if len(quantised_scales) == 1:
        quantised_scales = [quantised_scales[0]] * len(biases)

    return quantised_scales, biases
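
# Worked example (illustrative): for an int8 IFM with ifm_scale = 0.5, weight_scale = 0.25 and
# ofm_scale = 0.125, the rescale factor is 0.5 * 0.25 / 0.125 = 1.0; quantise_scale then returns
# an integer (multiplier, shift) pair with rescale ~= multiplier * 2**-shift, which is what
# encode_bias packs alongside each bias value.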


def encode_weight_and_scale_tensor(
    arch, op, weight_tens, scale_tens, kernel, block_config, depth_offsets, rescale_for_faf=False
) -> Tuple[Optional[NpuWeightTensor], Optional[NpuWeightTensor]]:
    npu_block_type = op.type.npu_block_type

    ifm_scale = scale_tens and scale_tens.consumer_list[0].get_input_quantization().scale_f32
    ofm_scale = scale_tens and scale_tens.consumer_list[0].get_output_quantization().scale_f32

    wcc = create_weight_compression_config(
        weight_tens, npu_block_type, block_config.ofm_block.depth, hash(str(depth_offsets)), kernel.dilation
    )

    scc = ScaleCompressionConfig(scale_tens and scale_tens.value_id, ifm_scale, ofm_scale)

    tens_cached = CompressedWeightCache.get_tensor_with_same_compression(wcc)
    if tens_cached is not None:
        if tens_cached.scale_compression_config == scc:
            return tens_cached, None
        npu_tensor = NpuWeightTensor(scale_tens.name)
        do_weights = False
        do_scales = True
    else:
        npu_tensor = NpuWeightTensor(weight_tens.name)
        do_weights = True
        do_scales = True

    npu_tensor.weight_compression_config = wcc
    npu_tensor.scale_compression_config = scc

    # Ensure depth offsets are terminated at end of OFM shape
    assert len(depth_offsets) > 1, "Require closed depth ranges"

    ifm_bitdepth = op.inputs[0].dtype.size_in_bits()

    # No cache hit, need to perform the encoding
    if do_weights:
        assert weight_tens.quantization is not None
        assert weight_tens.quantization.scale_f32 is not None or op.explicit_scaling
        assert weight_tens.quantization.zero_point is not None

        # Early zero-point correction
        quant_buf = weight_tens.values.astype(np.int16)
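        # (int16 leaves headroom for the subtraction below: e.g. an int8 weight of -128 minus a
        # zero point of 127 is -255, which overflows int8 but fits in int16)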
        # the zero point can be either a native or numpy type
        if isinstance(weight_tens.quantization.zero_point, (int, float)):
            zero_point = np.int16(weight_tens.quantization.zero_point)
        else:
            zero_point = weight_tens.quantization.zero_point.astype(np.int16)
        weights = quant_buf - zero_point

        if len(weights.shape) == 2:
            weights = np.expand_dims(np.expand_dims(weights, axis=0), axis=0)

        # Expect this (undilated) equivalence
        assert kernel.height == weights.shape[0]
        assert kernel.width == weights.shape[1]

        ifm_depth = weights.shape[-2]

        # Default HW traversal
        npu_tensor.hw_traversal = NpuBlockTraversal.DEPTH_FIRST

        if npu_block_type == NpuBlockType.ConvolutionMxN:
            # Determine which block traversal strategy has better DPU utilization
            kernel_size = weights.shape[0] * weights.shape[1]
            depth_utilization = weights.shape[2] / round_up(weights.shape[2], 32 if ifm_bitdepth == 8 else 16)
            part_kernel_utilization = (weights.shape[2] / round_up(weights.shape[2], 8)) * (
                kernel_size / round_up(kernel_size, 4 if ifm_bitdepth == 8 else 2)
            )
            if part_kernel_utilization >= depth_utilization or ifm_depth <= 8:
                # Part-kernel first is always better for ifm depths <= 8
                npu_tensor.hw_traversal = NpuBlockTraversal.PART_KERNEL_FIRST
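            # Worked example (illustrative): a 3x3 kernel with ifm depth 16 at 8-bit gives
            # depth_utilization = 16 / 32 = 0.5 and part_kernel_utilization =
            # (16 / 16) * (9 / 12) = 0.75, so part-kernel-first traversal is chosen.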

        if op.type == Op.Conv2DBackpropInputSwitchedBias:
            # Transpose convolution, reverse weights in H and W axes
            weights = np.flip(weights, axis=(0, 1))

    encoded_stream = bytearray()
    double_buffer_sizes = [0, 0]
    is_depthwise = npu_block_type == NpuBlockType.ConvolutionDepthWise

    # Bias & scale
    if do_scales:
        quantised_scales, biases = _prepare_scale_and_bias(arch, scale_tens, rescale_for_faf, op.explicit_scaling)
        scale_tens.element_size_bytes = 10

    # Slice the weight stream up depth-ways into bricks and compress
    full_ofm_depth = weight_tens.values.shape[-1]
    ofm_block_depth = block_config.ofm_block.depth

    weight_range_index = 0
    for idx, depth_offset in enumerate(depth_offsets[:-1]):
        # Do not generate for offsets outside the OFM
        assert depth_offset >= 0 and depth_offset < full_ofm_depth
        depth_length = depth_offsets[idx + 1] - depth_offset

        # Get the weights necessary for this brick
        if do_weights:
            brick_weights = weights[:, :, :, depth_offset : depth_offset + depth_length]

        buffer_start_offset = len(encoded_stream)

        # For each core, deinterleave weights/scales from the larger volume
        # and generate separate compressed streams.
        for core in range(0, min(arch.ncores, full_ofm_depth)):

            core_block_depth = int((ofm_block_depth + arch.ncores - 1 - core) // arch.ncores)

            if core_block_depth != 0:
                key = WeightKey(core, depth_offset)
                weight_range = WeightRange()
                weight_range.offset = len(encoded_stream)
                weight_range.index = weight_range_index
                weight_range_index += 1

                # Scales & biases
                if do_scales:
                    scale_stream = []
                    core_scales = quantised_scales[
                        depth_offset + core : depth_offset + core + depth_length : arch.ncores
                    ]
                    core_biases = biases[depth_offset + core : depth_offset + core + depth_length : arch.ncores]
                    for j, core_bias in enumerate(core_biases):
                        scale_stream.extend(encode_bias(np.int64(core_bias), *core_scales[j]))

                    weight_range.scale_bytes = len(scale_stream)

                    encoded_stream.extend(scale_stream)

                    # Align to 16 for start of next substream
                    remainder = len(encoded_stream) % 16
                    if remainder > 0:
                        encoded_stream.extend(bytearray(16 - remainder))

                # Weights
                if do_weights:
                    core_weights = core_deinterleave(brick_weights, core, arch.ncores)
                    encoded_substream, _ = encode_weights(
                        accelerator=arch.accelerator_config,
                        weights_volume=core_weights,
                        dilation_xy=kernel.dilation,
                        ifm_bitdepth=ifm_bitdepth,
                        ofm_block_depth=core_block_depth,
                        is_depthwise=is_depthwise,
                        block_traversal=npu_tensor.hw_traversal,
                    )
                    weight_range.weight_offset = len(encoded_stream) - weight_range.offset
                    weight_range.weight_bytes = len(encoded_substream)
                    # Append encoded section
                    encoded_stream.extend(encoded_substream)
                    assert len(encoded_stream) % 16 == 0

                # Record encoded range in tensor
                npu_tensor.encoded_ranges[key] = weight_range

        # Remember maximum encoded length for DoubleBuffering
        double_buffer_sizes[idx % 2] = max(double_buffer_sizes[idx % 2], len(encoded_stream) - buffer_start_offset)
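        # (the idx % 2 split alternates depth slices between the two halves of the double buffer,
        # so slices 0, 2, ... size one half and slices 1, 3, ... size the other)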

    # Attach buffer to tensor
    npu_tensor.buffer = encoded_stream
    npu_tensor.double_buffer_sizes = double_buffer_sizes
    npu_tensor.set_all_shapes([1, 1, 1, len(encoded_stream)])
    npu_tensor.format = TensorFormat.WeightsCompressed

    # Scale only tensor
    if not do_weights:
        npu_tensor.weight_compression_config = None
        npu_tensor.purpose = TensorPurpose.FSBias
        npu_tensor.mem_area = scale_tens.mem_area
        npu_tensor.mem_type = scale_tens.mem_type
        weights_tensor = tens_cached
        scale_tensor = npu_tensor
    else:
        npu_tensor.purpose = TensorPurpose.Weights
        npu_tensor.mem_area = weight_tens.mem_area
        npu_tensor.mem_type = weight_tens.mem_type
        weights_tensor = npu_tensor
        scale_tensor = None
        CompressedWeightCache.add(weights_tensor)

    return weights_tensor, scale_tensor
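
# Illustrative call sketch (hypothetical scheduler context, not executed):
#   weights_tensor, scale_tensor = encode_weight_and_scale_tensor(
#       arch, op, weight_tens, bias_tens, op.kernel, block_config, [0, 64, 128]
#   )
# A later op whose weights and quantisation match an already-encoded tensor gets the cached
# tensor back with scale_tensor == None.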