# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Compresses and pads the weights. It also calculates the scales and packs them with the biases.
import math
from collections import namedtuple

import numpy as np

from .data_type import DataType
from .errors import UnsupportedFeatureError
from .nn_graph import SchedulingStrategy
from .numeric_util import round_up
from .operation import NpuBlockType
from .scaling import quantise_scale
from .scaling import reduced_quantise_scale
from .tensor import TensorBlockTraversal
from .tensor import TensorFormat
from .tensor import TensorPurpose
from .tensor import TensorSubPurpose
from ethosu import mlw_codec


# Contains meta info for a weight compression. If two tensors have identical weight compression config,
# then they also will have identical compressed weights.
WeightCompressionConfig = namedtuple(
    "WeightCompressionConfig", ["npu_block_type", "ofm_block_depth", "ofm_depth_step", "dilation", "equivalence_id"]
)

def create_weight_compression_config(tens, npu_block_type, ofm_block_depth, ofm_depth_step, dilation):
    # Note: for an ofm block only its depth is used in weight compression.
    # And block depth > ofm depth gives same result as block depth == ofm depth
    block_depth = min(ofm_block_depth, tens.quant_values.shape[-1])
    return WeightCompressionConfig(npu_block_type, block_depth, ofm_depth_step, dilation, tens.equivalence_id)


def set_storage_shape(tens):
    # Sets the storage shape depending on the tensor's sub purpose
    if tens.sub_purpose == TensorSubPurpose.DoubleBuffer and len(tens.compressed_values) > 2:
        offset = 2 * np.amax([len(x) for x in tens.compressed_values])
        assert offset % 16 == 0
    else:
        offset = tens.weight_compressed_offsets[-1]
    tens.storage_shape = [1, 1, 1, offset]
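
# Worked example of the double-buffer branch above (stream lengths assumed):
# with compressed streams of 96, 112 and 80 bytes, the allocation becomes
# offset = 2 * 112 = 224 bytes, i.e. room for two buffers, each large enough
# to hold the biggest stream.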


class CompressedWeightCache:
    # Contains weight compressions for all weight tensors in a graph
    def __init__(self):
        self.cache = {}  # maps from WeightCompressionConfig to a tensor clone containing compressed weights

    def get_tensor_with_same_compression(self, wcc):
        return self.cache.get(wcc)

    def add(self, tens):
        # Adds the compressed weights from the tensor to the cache
        wcc = tens.weight_compression_config
        # Clone the tensor to make sure that nothing related to the weight compression is modified
        tens_clone = tens.clone("_weights{}_{}".format(wcc.ofm_block_depth, wcc.ofm_depth_step))
        self.cache[wcc] = tens_clone
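
# Usage sketch for the cache (hypothetical tensors, for illustration only):
# two weight tensors that share an equivalence_id and are compressed with the
# same block depth, depth step and dilation produce equal
# WeightCompressionConfig keys, so the second lookup in
# get_tensor_with_same_compression() hits and the mlw_codec encoding pass is
# skipped for that tensor.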


def encode(weight_stream):
    assert np.amin(weight_stream) >= -255
    assert np.amax(weight_stream) <= 255

    # Encode flattened signed weight stream
    compressed = mlw_codec.encode(weight_stream)

    # pad with 0xFF as needed so the length of the weight stream
    # is a multiple of 16

    while (len(compressed) % 16) != 0:
        compressed.append(0xFF)

    return compressed
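
# Padding example (encoder output length assumed): if mlw_codec.encode()
# returned 42 bytes for a stream, six 0xFF bytes would be appended to reach
# 48, keeping every stream 16-byte aligned as the offset bookkeeping in
# compress_weights() asserts.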


def generate_brick(arch, brick_weights, ofm_block_depth, block_traversal, ifm_bitdepth, dilation):
    is_depthwise = block_traversal == TensorBlockTraversal.DepthWise
    is_partkernel = block_traversal == TensorBlockTraversal.PartKernelFirst
    decomp_h = arch.subkernel_max.height // dilation[0]
    decomp_w = arch.subkernel_max.width // dilation[1]
    ofm_ublock = arch.ofm_ublock
    ifm_ublock = arch.ifm_ublock
    # Expect weights formatted HWIO
    ofm_depth = brick_weights.shape[-1]
    ifm_depth = brick_weights.shape[-2]
    kernel_width = brick_weights.shape[-3]
    kernel_height = brick_weights.shape[-4]
    # IFM block depth
    if is_partkernel or (ifm_bitdepth == 16):
        # IFM block depth is always 16 for part-kernel-first
        ifm_block_depth = 16
    elif ifm_bitdepth == 8:
        ifm_block_depth = 32
    else:
        assert False

    stream = []

    # Top level striping - OFM blocks in the entire brick's depth
    for ofm_block_z in range(0, ofm_depth, ofm_block_depth):
        clipped_ofm_block_depth = min(ofm_block_depth, ofm_depth - ofm_block_z)
        # IFM blocks required for the brick
        for ifm_block_z in range(0, (1 if is_depthwise else ifm_depth), ifm_block_depth):
            if is_depthwise:
                clipped_ifm_block_depth = ifm_ublock.depth
            else:
                clipped_ifm_block_depth = (
                    min(ifm_block_depth, ifm_depth - ifm_block_z) if is_partkernel else ifm_block_depth
                )
            # Weight decomposition
            # Subkernel splitting (H)
            for subkernel_y in range(0, kernel_height, decomp_h):
                sub_height = min(kernel_height - subkernel_y, decomp_h)
                # Subkernel splitting (W)
                for subkernel_x in range(0, kernel_width, decomp_w):
                    sub_width = min(kernel_width - subkernel_x, decomp_w)
                    subkernel_elements = sub_width * sub_height
                    # Part kernel first works across the kernel H/W and needs padding
                    if is_partkernel:
                        if ifm_bitdepth == 16 and subkernel_elements % 2 != 0:
                            subkernel_elements = int(math.ceil(subkernel_elements / 2) * 2)
                        elif ifm_bitdepth == 8 and subkernel_elements % 4 != 0:
                            subkernel_elements = int(math.ceil(subkernel_elements / 4) * 4)

                    # Depthwise Conv requires multiple of 4 kernel elements in its weight block
                    # this is different from normal conv which is considered "weights depth-first"
                    elif is_depthwise:
                        subkernel_elements = int(math.ceil(subkernel_elements / 4.0) * 4)

                    ifm_block_depth_outer = clipped_ifm_block_depth if is_partkernel else 1
                    ifm_block_depth_inner = 1 if is_partkernel else clipped_ifm_block_depth
                    # IFM Ublocks in IFM-block over depth for part-kernel-first mode
                    # For depth-first IFM Ublocks are traversed after subkernel elements so this loop is ignored.
                    for ifm_ublk_outer in range(0, ifm_block_depth_outer, ifm_ublock.depth):
                        # OFM Ublocks in OFM-block over depth
                        for ofm_ublk in range(0, clipped_ofm_block_depth, ofm_ublock.depth):
                            # HW Kernel element traversal - cannot be a H/W loop due to element
                            # padding requirement on depthwise/part-kernel configurations
                            for element in range(subkernel_elements):
                                kx = element % sub_width
                                ky = element // sub_width
                                # IFM Ublocks in IFM-block over depth (only 1 ublock if depthwise)
                                # In case of part-kernel-first, IFM Ublock traversal has already been handled
                                # and this loop is ignored.
                                for ifm_ublk_inner in range(0, ifm_block_depth_inner, ifm_ublock.depth):
                                    # Feed OFM ublock elements
                                    for ofm_ublock_z in range(ofm_ublock.depth):
                                        # Source IFM ublock elements (only 1 element deep if depthwise)
                                        for ifm_ublock_z in range(1 if is_depthwise else ifm_ublock.depth):
                                            # Source position within the current subkernel
                                            wx = subkernel_x + kx
                                            wy = subkernel_y + ky
                                            # Source IFM/OFM slices
                                            ifm_ublk = ifm_ublk_inner + ifm_ublk_outer
                                            ifm_z = ifm_block_z + ifm_ublk + ifm_ublock_z
                                            ofm_z = ofm_block_z + ofm_ublk + ofm_ublock_z
                                            if (ifm_z >= ifm_depth) or (ofm_z >= ofm_depth) or (ky >= sub_height):
                                                stream.append(0)
                                            else:
                                                stream.append(brick_weights[wy][wx][ifm_z][ofm_z])
    return stream
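
# Subkernel decomposition example (architecture values assumed): with
# arch.subkernel_max of 8x8 and dilation (2, 2), decomp_h = decomp_w = 4, so
# a 9x9 kernel is traversed as subkernels of size 4, 4 and 1 along each axis.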


# Compress the weights
def compress_weights(arch, nng, tens, npu_block_type, ofm_block_depth, ofm_depth_step, dilation):
    assert tens.purpose == TensorPurpose.Weights
    assert tens.format == TensorFormat.WeightsCompressed

    # Check the weight cache
    if nng.weight_cache is None:
        nng.weight_cache = CompressedWeightCache()
    wcc = create_weight_compression_config(tens, npu_block_type, ofm_block_depth, ofm_depth_step, dilation)
    tens.weight_compression_config = wcc
    tens_cached = nng.weight_cache.get_tensor_with_same_compression(wcc)
    if tens_cached is not None:
        # Cache hit, copy weights from the cache
        tens.copy_compressed_weight_info(tens_cached)
        set_storage_shape(tens)
        return

    # No cache hit, perform the compression
    assert tens.quantization is not None
    assert tens.quantization.scale_f32 is not None
    assert tens.quantization.zero_point is not None

    zero_point = tens.quantization.zero_point
    quant_buf = tens.quant_values.astype(np.int64)

    # Early zero-point correction
    weights = quant_buf - zero_point

    if len(weights.shape) == 2:
        weights = np.expand_dims(np.expand_dims(weights, axis=0), axis=0)
        weights_shape = (weights.shape[0], 1, 1, weights.shape[1])
    else:
        weights_shape = weights.shape

    compression_scales = []
    compressed_offsets = []
    encoded_streams = []
    offset = 0

    ifm_bitdepth = tens.consumer_list[0].inputs[0].dtype.size_in_bits()
    ifm_depth = weights.shape[-2]
    if npu_block_type == NpuBlockType.ConvolutionDepthWise:
        tens.block_traversal = TensorBlockTraversal.DepthWise
    if npu_block_type == NpuBlockType.ConvolutionMxN:
        # Determine which block traversal strategy has better DPU utilization
        kernel_size = weights_shape[0] * weights_shape[1]
        depth_utilization = weights_shape[2] / round_up(weights_shape[2], 32 if ifm_bitdepth == 8 else 16)
        part_kernel_utilization = (weights_shape[2] / round_up(weights_shape[2], 8)) * (
            kernel_size / round_up(kernel_size, 4 if ifm_bitdepth == 8 else 2)
        )
        if part_kernel_utilization >= depth_utilization or ifm_depth <= 8:
            # Part-kernel first is always better for ifm depths <= 8
            tens.block_traversal = TensorBlockTraversal.PartKernelFirst
        else:
            tens.block_traversal = TensorBlockTraversal.DepthFirst
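        # Illustrative traversal choice (shapes assumed): for an 8-bit 3x3
        # convolution over 16 input channels, kernel_size = 9 and
        #     depth_utilization       = 16 / round_up(16, 32)            = 0.5
        #     part_kernel_utilization = (16 / 16) * (9 / round_up(9, 4)) = 0.75
        # so part-kernel-first traversal is selected.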

    # Slice weight stream up depth-ways into bricks and compress
    full_ofm_depth = quant_buf.shape[-1]
    for idx in range(0, full_ofm_depth, ofm_depth_step):
        # Get the weights necessary for this brick
        count = min(full_ofm_depth - idx, ofm_depth_step)
        brick_weights = weights[:, :, :, idx : idx + count]

        # Encode all weights into one chunk
        raw_stream = generate_brick(arch, brick_weights, ofm_block_depth, tens.block_traversal, ifm_bitdepth, dilation)
        encoded = encode(raw_stream)
        encoded_streams.append(encoded)

        # Remember where we put it for linear addressing
        compressed_offsets.append(offset)
        offset += len(encoded)
        assert offset % 16 == 0

        # Compression scale tracking
        compression_scales.append(len(encoded) / len(raw_stream))
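        # e.g. (numbers assumed) a brick of 1024 padded weight elements that
        # encodes to 384 bytes records a scale of 384 / 1024 = 0.375, roughly
        # the compressed bytes per source weight element.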

    # Also track complete length in the offsets array
    compressed_offsets.append(offset)

    tens.weight_compression_scales = compression_scales
    tens.weight_compressed_offsets = compressed_offsets
    tens.compression_scale_for_worst_weight_stream = np.amax(compression_scales)
    tens.storage_compression_scale = tens.bandwidth_compression_scale = np.average(compression_scales)
    tens.compressed_values = encoded_streams
    tens.brick_size = (weights_shape[0], weights_shape[1], weights_shape[2], min(tens.shape[-1], ofm_depth_step))
    set_storage_shape(tens)
    nng.weight_cache.add(tens)
Tim Hall79d07d22020-04-27 18:20:16 +0100268
269
270def calc_scales_and_pack_biases(tens, arch, oc_quantum, rescale_for_faf=False):
271 assert tens.purpose == TensorPurpose.FeatureMap
272 assert tens.format == TensorFormat.NHWC
273 # the connected operator should expect a bias input unless it is a FullyConnected
274 assert "Bias" in tens.consumer_list[0].type or tens.consumer_list[0].type.startswith("FullyConnected")
275 # the input bias tensor is the same as that connected to the operator
276 assert tens is tens.consumer_list[0].inputs[2]
277 # the operator should only have a single output
278 assert len(tens.consumer_list[0].outputs) == 1
279
280 def pack_bias_and_scale(bias, scale, shift):
281 bias = np.int64(bias)
282 assert -(1 << (40 - 1)) <= bias < (1 << (40 - 1)) # signed 40-bit range
283 assert 0 <= scale < (1 << 32) # unsigned 32-bit range
284 assert 0 <= shift < (1 << 6) # unsigned 6-bit range
285
286 # pack the 80 bit value = [0(2-bits),shift(6-bits),scale(32-bits),bias(40-bits)]
287 data = bytearray(10)
288 data[0] = (bias >> (0 * 8)) & 0xFF
289 data[1] = (bias >> (1 * 8)) & 0xFF
290 data[2] = (bias >> (2 * 8)) & 0xFF
291 data[3] = (bias >> (3 * 8)) & 0xFF
292 data[4] = (bias >> (4 * 8)) & 0xFF
293 data[5] = (scale >> (0 * 8)) & 0xFF
294 data[6] = (scale >> (1 * 8)) & 0xFF
295 data[7] = (scale >> (2 * 8)) & 0xFF
296 data[8] = (scale >> (3 * 8)) & 0xFF
297 data[9] = shift & 0x3F
298 return data
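    # Worked example of the packing above (values assumed): bias = -2,
    # scale = 0x40000000, shift = 30 packs to the little-endian bytes
    #     FE FF FF FF FF | 00 00 00 40 | 1E
    # i.e. a 40-bit two's-complement bias, a 32-bit scale, then the 6-bit
    # shift.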

    biases = tens.quant_values

    first_consumer_op = tens.consumer_list[0]
    ifm_dtype = first_consumer_op.inputs[0].dtype
    ifm_scale = first_consumer_op.inputs[0].quantization.scale_f32
    ofm_scale = first_consumer_op.outputs[0].quantization.scale_f32
    weight_scales = first_consumer_op.inputs[1].quantization.scale_f32

    # biases can have multiple consumers for rnn cells. if so, then check that they are all the same
    for op in tens.consumer_list[1:]:
        assert ifm_scale == op.inputs[0].quantization.scale_f32
        assert ofm_scale == op.outputs[0].quantization.scale_f32
        assert weight_scales == op.inputs[1].quantization.scale_f32

    if not hasattr(weight_scales, "__iter__"):
        # If weight_scales is not already an iterable make it into a list
        weight_scales = [weight_scales]

    # Convert scales to np.double (from np.float32) to conform to TensorFlow Lite which
    # uses double during scaling calculations
    # TensorFlow Lite casts the scales slightly differently for uint8 and int8
    if not rescale_for_faf:
        if ifm_dtype == DataType.uint8:
            scales = [np.double(ifm_scale * weight_scale) / np.double(ofm_scale) for weight_scale in weight_scales]
        elif ifm_dtype == DataType.int8 or ifm_dtype == DataType.int16:
            scales = [
                (np.double(ifm_scale) * np.double(weight_scale)) / np.double(ofm_scale)
                for weight_scale in weight_scales
            ]
        else:
            raise UnsupportedFeatureError(
                "Compression of {} is not implemented; tensor: {}".format(ifm_dtype, tens.name)
            )
    else:
        if ifm_dtype == DataType.uint8:
            scales = [np.double(ifm_scale * weight_scale * 0x3000) for weight_scale in weight_scales]
        elif ifm_dtype == DataType.int8 or ifm_dtype == DataType.int16:
            scales = [(np.double(ifm_scale * 0x3000) * np.double(weight_scale)) for weight_scale in weight_scales]
        else:
            raise UnsupportedFeatureError(
                "Compression of {} is not implemented; tensor: {}".format(ifm_dtype, tens.name)
            )

    # quantise all of the weight scales into (scale_factor, shift)
    if ifm_dtype == DataType.int16:
        quantised_scales = [reduced_quantise_scale(scale) for scale in scales]
    else:
        quantised_scales = [quantise_scale(scale) for scale in scales]
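    # Each quantised scale is a (multiplier, shift) pair chosen so that the
    # hardware's integer rescale (conceptually (acc * multiplier) >> shift)
    # approximates multiplication by the real scale; the exact rounding rules
    # live in quantise_scale()/reduced_quantise_scale().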

    for _, shift in quantised_scales:
        assert shift >= 16

    # pack the biases and scales
    tens.compressed_values = []
    if len(quantised_scales) == 1:
        # If only 1 quantised scale is used, repeat that value for the length of the biases
        quantised_scales = [quantised_scales[0]] * len(biases)

    assert len(quantised_scales) == len(biases)
    for i, bias in enumerate(biases):
        tens.compressed_values.append(pack_bias_and_scale(bias, *quantised_scales[i]))

    tens.element_size_bytes = 10

    # Figure out if we need padded storage (extra whole elements)
    padding = (len(tens.compressed_values) * tens.element_size_bytes) % 16
    if padding != 0:
        padding = 16 - padding

    # This adds enough padding to allow over-reads
    while padding > 0:
        tens.compressed_values.append(pack_bias_and_scale(0, 0, 0))
        padding = padding - tens.element_size_bytes

    tens.storage_shape = [len(tens.compressed_values)]


def update_pass_weight_and_scale_tensors(nng, arch):
    def find_npu_usage_of_tensor(tens):
        # TODO: This function is identical to the one in mark_tensors.py. A common version should be used.
        for op in tens.consumers():
            if op.type == "DMA":
                return find_npu_usage_of_tensor(op.outputs[0])
            if "npu_block_type" in op.attrs:
                return op.attrs["npu_block_type"]
        return NpuBlockType.Default

    for sg in nng.subgraphs:
        for ps in sg.passes:
            tens = ps.weight_tensor
            if tens is not None:
                op = tens.find_npu_op()
                npu_usage_of_tensor = op.attrs["npu_block_type"]
                if npu_usage_of_tensor == NpuBlockType.ConvolutionDepthWise:
                    tens.quant_values = np.transpose(tens.quant_values, (0, 1, 3, 2))
                    tens.shape = tens.storage_shape = tens.bandwidth_shape = list(tens.quant_values.shape)
                    tens.weight_transpose_depthwise = True

                needs_dma = tens.needs_dma()
                if ps.cascade.strategy == SchedulingStrategy.WeightStream and needs_dma:
                    ofm_depth_step = ps.block_config[-1]
                else:
                    ofm_depth_step = tens.shape[-1]
                compress_weights(
                    arch, nng, tens, npu_usage_of_tensor, ps.block_config[-1], ofm_depth_step, op.get_dilation_h_w()
                )
                # Update source tensor
                if needs_dma:
                    src_tens = tens.get_dma_src_tensor()
                    src_tens.shape = tens.shape
                    src_tens.quant_values = tens.quant_values
                    src_tens.copy_compressed_weight_info(tens)
                    set_storage_shape(src_tens)

            if ps.scale_tensor is not None:
                rescale_for_faf = False
                activation_ops = set(("Sigmoid", "Tanh"))
                if (ps.ops[-1].type in activation_ops) and (ps.npu_block_type != NpuBlockType.ElementWise):
                    rescale_for_faf = True
                calc_scales_and_pack_biases(ps.scale_tensor, arch, ps.block_config[3], rescale_for_faf)
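
# Typical invocation (a sketch, not verified against this repo's driver code):
#     update_pass_weight_and_scale_tensors(nng, arch)
# run once per network after scheduling, so that every pass's weight tensor is
# compressed and every bias/scale tensor is packed before command generation.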