# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Compresses and pads the weights. It also calculates the scales and packs them with the biases.
import math
from collections import namedtuple

import numpy as np

from .architecture_features import Accelerator
from .architecture_features import ArchitectureFeatures
from .data_type import DataType
from .errors import typecheck
from .errors import UnsupportedFeatureError
from .nn_graph import SchedulingStrategy
from .numeric_util import round_up
from .numeric_util import round_up_divide
from .operation import NpuBlockType
from .scaling import quantise_scale
from .scaling import reduced_quantise_scale
from .tensor import TensorBlockTraversal
from .tensor import TensorFormat
from .tensor import TensorPurpose
from .tensor import TensorSubPurpose
from ethosu import mlw_codec


# Contains meta info for a weight compression. If two tensors have identical weight compression configs,
# then they will also have identical compressed weights.
WeightCompressionConfig = namedtuple(
    "WeightCompressionConfig", ["npu_block_type", "ofm_block_depth", "ofm_depth_step", "dilation", "equivalence_id"]
)


@typecheck
def encode_weights(
    accelerator: Accelerator,
    weights_volume: np.ndarray,
    dilation_xy: tuple,
    ifm_bitdepth: int,
    ofm_block_depth: int,
    is_depthwise: bool,
    is_partkernel: bool,
):
    """
    Public-facing API for the Ethos-U weight encoding.

    :param accelerator: architecture_features.Accelerator enum to pick the correct Ethos-U accelerator
    :param weights_volume: numpy.ndarray in OHWI layout with a rank of four
    :param dilation_xy: a two-element tuple of dilation attributes in the x,y dimensions
    :param ifm_bitdepth: the bitdepth of the input feature map
    :param ofm_block_depth: the depth of blocks for Ethos-U processing
    :param is_depthwise: a boolean indicating these weights are used for a depthwise traversal
    :param is_partkernel: a boolean indicating these weights are traversed on a sub-kernel basis
    :return: a bytearray of compressed weights
    """

    # Check the weight layout
    assert len(weights_volume.shape) == 4, "weights ndarray should have a rank of 4"

    # It cannot be both partkernel and depthwise
    assert not (is_depthwise and is_partkernel), "encode_weights :: partkernel and depthwise are mutually exclusive"

    # Check valid values for dilation
    assert dilation_xy[0] in (1, 2), "encode_weights :: dilation x should be 1 or 2 not {}".format(dilation_xy[0])
    assert dilation_xy[1] in (1, 2), "encode_weights :: dilation y should be 1 or 2 not {}".format(dilation_xy[1])

    ifm_ublock = ArchitectureFeatures.accelerator_configs[accelerator].ifm_ublock
    ofm_ublock = ArchitectureFeatures.accelerator_configs[accelerator].ofm_ublock
    raw_stream = generate_brick(
        ifm_ublock=ifm_ublock,
        ofm_ublock=ofm_ublock,
        brick_weights=weights_volume,
        ofm_block_depth=ofm_block_depth,
        is_depthwise=is_depthwise,
        is_partkernel=is_partkernel,
        ifm_bitdepth=ifm_bitdepth,
        dilation=dilation_xy,
    )
    encoded_stream = encode(raw_stream)
    return encoded_stream
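

# A minimal usage sketch (illustrative only, not part of the compiler flow). It
# assumes the Accelerator enum provides an Ethos_U55_128 member:
#
#     volume = np.zeros((16, 3, 3, 32), dtype=np.int64)  # OHWI weight volume
#     encoded = encode_weights(
#         accelerator=Accelerator.Ethos_U55_128,
#         weights_volume=volume,
#         dilation_xy=(1, 1),
#         ifm_bitdepth=8,
#         ofm_block_depth=16,
#         is_depthwise=False,
#         is_partkernel=False,
#     )
#     assert len(encoded) % 16 == 0  # encode() pads to a 16-byte multiple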


def create_weight_compression_config(tens, npu_block_type, ofm_block_depth, ofm_depth_step, dilation):
    # Note: for an OFM block, only its depth is used in weight compression;
    # a block depth greater than the OFM depth gives the same result as block depth == OFM depth
    block_depth = min(ofm_block_depth, tens.quant_values.shape[-1])
    return WeightCompressionConfig(npu_block_type, block_depth, ofm_depth_step, dilation, tens.equivalence_id)


def set_storage_shape(tens):
    # Sets the storage shape depending on the tensor's sub purpose
    if tens.sub_purpose == TensorSubPurpose.DoubleBuffer and len(tens.compressed_values) > 2:
        offset = 2 * np.amax([len(x) for x in tens.compressed_values])
        assert offset % 16 == 0
    else:
        offset = tens.weight_compressed_offsets[-1]
    tens.storage_shape = [1, 1, 1, offset]


class CompressedWeightCache:
    # Contains weight compressions for all weight tensors in a graph
    def __init__(self):
        self.cache = {}  # maps from WeightCompressionConfig to a tensor clone containing compressed weights

    def get_tensor_with_same_compression(self, wcc):
        return self.cache.get(wcc)

    def add(self, tens):
        # Adds the compressed weights from the tensor to the cache
        wcc = tens.weight_compression_config
        # Clone the tensor to make sure that nothing related to the weight compression is modified
        tens_clone = tens.clone("_weights{}_{}".format(wcc.ofm_block_depth, wcc.ofm_depth_step))
        self.cache[wcc] = tens_clone


def encode(weight_stream):
    if len(weight_stream) == 0:
        return []
    assert np.amin(weight_stream) >= -255
    assert np.amax(weight_stream) <= 255

    # Encode flattened signed weight stream
    compressed = mlw_codec.encode(weight_stream)

    # Pad with 0xFF as needed so the length of the weight stream
    # is a multiple of 16
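    # (e.g. a 23-byte compressed stream receives nine 0xFF pad bytes, giving 32)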

    while (len(compressed) % 16) != 0:
        compressed.append(0xFF)

    return compressed


def generate_brick(
    ifm_ublock, ofm_ublock, brick_weights, ofm_block_depth, is_depthwise, is_partkernel, ifm_bitdepth, dilation
):

    decomp_h = ArchitectureFeatures.SubKernelMax.height // dilation[0]
    decomp_w = ArchitectureFeatures.SubKernelMax.width // dilation[1]
    # Expect weights formatted OHWI
    ofm_depth = brick_weights.shape[-4]
    ifm_depth = brick_weights.shape[-1]
    kernel_width = brick_weights.shape[-2]
    kernel_height = brick_weights.shape[-3]
    # IFM block depth
    if is_partkernel or (ifm_bitdepth == 16):
        # IFM block depth is always 16 for part-kernel-first
        ifm_block_depth = 16
    elif ifm_bitdepth == 8:
        ifm_block_depth = 32
    else:
        assert False, "Unsupported ifm_bitdepth: {}".format(ifm_bitdepth)

    stream = []

    # Top level striping - OFM blocks in the entire brick's depth
    for ofm_block_z in range(0, ofm_depth, ofm_block_depth):
        clipped_ofm_block_depth = min(ofm_block_depth, ofm_depth - ofm_block_z)
        # IFM blocks required for the brick
        for ifm_block_z in range(0, (1 if is_depthwise else ifm_depth), ifm_block_depth):
            if is_depthwise:
                clipped_ifm_block_depth = ifm_ublock.depth
            else:
                clipped_ifm_block_depth = (
                    min(ifm_block_depth, ifm_depth - ifm_block_z) if is_partkernel else ifm_block_depth
                )
            # Weight decomposition
            # Subkernel splitting (H)
            for subkernel_y in range(0, kernel_height, decomp_h):
                sub_height = min(kernel_height - subkernel_y, decomp_h)
                # Subkernel splitting (W)
                for subkernel_x in range(0, kernel_width, decomp_w):
                    sub_width = min(kernel_width - subkernel_x, decomp_w)
                    subkernel_elements = sub_width * sub_height
                    # Part-kernel-first works across the kernel H/W and needs padding
                    if is_partkernel:
                        if ifm_bitdepth == 16 and subkernel_elements % 2 != 0:
                            subkernel_elements = int(math.ceil(subkernel_elements / 2) * 2)
                        elif ifm_bitdepth == 8 and subkernel_elements % 4 != 0:
                            subkernel_elements = int(math.ceil(subkernel_elements / 4) * 4)

                    # Depthwise conv requires a multiple of 4 kernel elements in its weight block;
                    # this is different from normal conv, which is considered "weights depth-first"
                    elif is_depthwise:
                        subkernel_elements = int(math.ceil(subkernel_elements / 4.0) * 4)

                    ifm_block_depth_outer = clipped_ifm_block_depth if is_partkernel else 1
                    ifm_block_depth_inner = 1 if is_partkernel else clipped_ifm_block_depth
                    # IFM ublocks in IFM-block over depth for part-kernel-first mode
                    # For depth-first, IFM ublocks are traversed after subkernel elements, so this loop is ignored.
                    for ifm_ublk_outer in range(0, ifm_block_depth_outer, ifm_ublock.depth):
                        # OFM ublocks in OFM-block over depth
                        for ofm_ublk in range(0, clipped_ofm_block_depth, ofm_ublock.depth):
                            # HW kernel element traversal - cannot be a H/W loop due to the element
                            # padding requirement on depthwise/part-kernel configurations
                            for element in range(subkernel_elements):
                                kx = element % sub_width
                                ky = element // sub_width
                                # IFM ublocks in IFM-block over depth (only 1 ublock if depthwise)
                                # In case of part-kernel-first, IFM ublock traversal has already been handled
                                # and this loop is ignored.
                                for ifm_ublk_inner in range(0, ifm_block_depth_inner, ifm_ublock.depth):
                                    # Feed OFM ublock elements
                                    for ofm_ublock_z in range(ofm_ublock.depth):
                                        # Source IFM ublock elements (only 1 element deep if depthwise)
                                        for ifm_ublock_z in range(1 if is_depthwise else ifm_ublock.depth):
                                            # Source position within the current subkernel
                                            wx = subkernel_x + kx
                                            wy = subkernel_y + ky
                                            # Source IFM/OFM slices
                                            ifm_ublk = ifm_ublk_inner + ifm_ublk_outer
                                            ifm_z = ifm_block_z + ifm_ublk + ifm_ublock_z
                                            ofm_z = ofm_block_z + ofm_ublk + ofm_ublock_z
                                            if (ifm_z >= ifm_depth) or (ofm_z >= ofm_depth) or (ky >= sub_height):
                                                stream.append(0)
                                            else:
                                                stream.append(brick_weights[ofm_z][wy][wx][ifm_z])
    return stream


def core_deinterleave(hwio, core, ncores):
    # Put weights back into OHWI
    ohwi = np.transpose(hwio, (3, 0, 1, 2))
    return ohwi[core : ohwi.shape[0] : ncores]
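
# Illustrative example with assumed shapes: for ncores=2, core 0 receives OFM
# channels 0, 2, 4, ... and core 1 receives channels 1, 3, 5, ...
#
#     hwio = np.zeros((3, 3, 32, 16))  # HWIO with 16 OFM channels
#     core0 = core_deinterleave(hwio, 0, 2)  # OHWI, shape (8, 3, 3, 32)
#     core1 = core_deinterleave(hwio, 1, 2)  # OHWI, shape (8, 3, 3, 32)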


# Compress the weights
def compress_weights(arch, nng, tens, npu_block_type, ofm_block_depth, ofm_depth_step, dilation):
    assert tens.purpose == TensorPurpose.Weights
    assert tens.format == TensorFormat.WeightsCompressed

    # Check the weight cache
    if nng.weight_cache is None:
        nng.weight_cache = CompressedWeightCache()
    wcc = create_weight_compression_config(tens, npu_block_type, ofm_block_depth, ofm_depth_step, dilation)
    tens.weight_compression_config = wcc
    tens_cached = nng.weight_cache.get_tensor_with_same_compression(wcc)
    if tens_cached is not None:
        # Cache hit, copy weights from the cache
        tens.copy_compressed_weight_info(tens_cached)
        set_storage_shape(tens)
        return

    # No cache hit, perform the compression
    assert tens.quantization is not None
    assert tens.quantization.scale_f32 is not None
    assert tens.quantization.zero_point is not None

    zero_point = tens.quantization.zero_point
    quant_buf = tens.quant_values.astype(np.int64)

    # Early zero-point correction
    weights = quant_buf - zero_point

    if len(weights.shape) == 2:
        weights = np.expand_dims(np.expand_dims(weights, axis=0), axis=0)
        weights_shape = (weights.shape[0], 1, 1, weights.shape[1])
    else:
        weights_shape = weights.shape

    compression_scales = []
    compressed_offsets = []
    encoded_streams = []
    encoded_streams_substream_offsets = []
    offset = 0
    max_single_buffer_len = 0

    ifm_bitdepth = tens.consumer_list[0].inputs[0].dtype.size_in_bits()
    ifm_depth = weights.shape[-2]
    if npu_block_type == NpuBlockType.ConvolutionDepthWise:
        tens.block_traversal = TensorBlockTraversal.DepthWise
    if npu_block_type == NpuBlockType.ConvolutionMxN:
        # Determine which block traversal strategy has better DPU utilization
        kernel_size = weights_shape[0] * weights_shape[1]
        depth_utilization = weights_shape[2] / round_up(weights_shape[2], 32 if ifm_bitdepth == 8 else 16)
        part_kernel_utilization = (weights_shape[2] / round_up(weights_shape[2], 8)) * (
            kernel_size / round_up(kernel_size, 4 if ifm_bitdepth == 8 else 2)
        )
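        # Worked example (illustrative): a 3x3 kernel with IFM depth 16 and an 8-bit IFM gives
        #   depth_utilization = 16 / round_up(16, 32) = 0.5
        #   part_kernel_utilization = (16 / round_up(16, 8)) * (9 / round_up(9, 4)) = 1.0 * 0.75
        # so part-kernel-first is selected below (0.75 >= 0.5).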
        if part_kernel_utilization >= depth_utilization or ifm_depth <= 8:
            # Part-kernel first is always better for ifm depths <= 8
            tens.block_traversal = TensorBlockTraversal.PartKernelFirst
        else:
            tens.block_traversal = TensorBlockTraversal.DepthFirst

    is_depthwise = tens.block_traversal == TensorBlockTraversal.DepthWise
    is_partkernel = tens.block_traversal == TensorBlockTraversal.PartKernelFirst

    if tens.consumer_list[0].type == "Conv2DBackpropInputSwitchedBias":
        # Transpose convolution: reverse the weights in the H and W axes
        weights = np.flip(weights, axis=(0, 1))

    # Calculate brick size
    brick_size = (weights_shape[0], weights_shape[1], weights_shape[2], min(tens.shape[-1], ofm_depth_step))
    elements_in_brick = np.prod(brick_size)

    # Slice weight stream up depth-ways into bricks and compress
    full_ofm_depth = quant_buf.shape[-1]
    for idx in range(0, full_ofm_depth, ofm_depth_step):
        # Get the weights necessary for this brick
        count = min(full_ofm_depth - idx, ofm_depth_step)
        brick_weights = weights[:, :, :, idx : idx + count]

        substream_offsets = [0]
        encoded_stream = []

        # For each core, deinterleave weights from the larger volume
        # and generate separate compressed streams.
        for core in range(0, min(arch.ncores, full_ofm_depth)):
            core_weights = core_deinterleave(brick_weights, core, arch.ncores)

            block_depth = (ofm_block_depth + arch.ncores - 1 - core) // arch.ncores
            encoded_substream = []
            if block_depth != 0:
                encoded_substream = encode_weights(
                    accelerator=arch.accelerator_config,
                    weights_volume=core_weights,
                    dilation_xy=dilation,
                    ifm_bitdepth=ifm_bitdepth,
                    ofm_block_depth=block_depth,
                    is_depthwise=is_depthwise,
                    is_partkernel=is_partkernel,
                )
            encoded_stream.extend(encoded_substream)
            substream_offsets.append(len(encoded_stream))

        encoded_streams.append(encoded_stream)
        encoded_streams_substream_offsets.append(substream_offsets)

        # Remember maximum encoded length for DoubleBuffering
        max_single_buffer_len = max(max_single_buffer_len, len(encoded_stream))

        # Remember where we put it for linear addressing
        compressed_offsets.append(offset)
        offset += len(encoded_stream)
        assert offset % 16 == 0

        # Compression scale tracking
        compression_scales.append(len(encoded_stream) / elements_in_brick)

    # Track total length as last element of the offsets array
    compressed_offsets.append(offset)

    tens.weight_compression_scales = compression_scales
    tens.weight_compressed_offsets = compressed_offsets
    tens.compression_scale_for_worst_weight_stream = np.amax(compression_scales)
    tens.storage_compression_scale = tens.bandwidth_compression_scale = np.average(compression_scales)
    tens.compressed_values = encoded_streams
    tens.compressed_values_substream_offsets = encoded_streams_substream_offsets
    tens.brick_size = brick_size
    set_storage_shape(tens)
    nng.weight_cache.add(tens)


def calc_scales_and_pack_biases(tens, arch, ofm_depth_step, rescale_for_faf=False):
    assert tens.purpose == TensorPurpose.FeatureMap
    assert tens.format == TensorFormat.NHWC
    # The connected operator should expect a bias input unless it is a FullyConnected
    assert "Bias" in tens.consumer_list[0].type or tens.consumer_list[0].type.startswith("FullyConnected")
    # The input bias tensor is the same as that connected to the operator
    _, _, bias_tens, _ = tens.consumer_list[0].get_ifm_weights_biases_ofm()
    assert tens is bias_tens

    # The operator should only have a single output
    assert len(tens.consumer_list[0].outputs) == 1

    def pack_bias_and_scale(bias, scale, shift):
        bias = np.int64(bias)
        assert -(1 << (40 - 1)) <= bias < (1 << (40 - 1))  # signed 40-bit range
        assert 0 <= scale < (1 << 32)  # unsigned 32-bit range
        assert 0 <= shift < (1 << 6)  # unsigned 6-bit range

        # Pack the 80-bit value = [0(2-bits),shift(6-bits),scale(32-bits),bias(40-bits)]
        data = bytearray(10)
        data[0] = (bias >> (0 * 8)) & 0xFF
        data[1] = (bias >> (1 * 8)) & 0xFF
        data[2] = (bias >> (2 * 8)) & 0xFF
        data[3] = (bias >> (3 * 8)) & 0xFF
        data[4] = (bias >> (4 * 8)) & 0xFF
        data[5] = (scale >> (0 * 8)) & 0xFF
        data[6] = (scale >> (1 * 8)) & 0xFF
        data[7] = (scale >> (2 * 8)) & 0xFF
        data[8] = (scale >> (3 * 8)) & 0xFF
        data[9] = shift & 0x3F
        return data
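
    # Packing example (illustrative): pack_bias_and_scale(bias=1, scale=1, shift=16)
    # yields the little-endian bytes 01 00 00 00 00 | 01 00 00 00 | 10.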

    biases = tens.quant_values

    first_consumer_op = tens.consumer_list[0]
    ifm_dtype = first_consumer_op.inputs[0].dtype
    ifm_scale = first_consumer_op.inputs[0].quantization.scale_f32
    ofm_scale = first_consumer_op.outputs[0].quantization.scale_f32
    weight_scales = first_consumer_op.inputs[1].quantization.scale_f32

    # Biases can have multiple consumers for RNN cells; if so, check that they are all the same
    for op in tens.consumer_list[1:]:
        assert ifm_scale == op.inputs[0].quantization.scale_f32
        assert ofm_scale == op.outputs[0].quantization.scale_f32
        assert weight_scales == op.inputs[1].quantization.scale_f32

    if not hasattr(weight_scales, "__iter__"):
        # If weight_scales is not already an iterable, make it into a list
        weight_scales = [weight_scales]

    # Convert scales to np.double (from np.float32) to conform to TensorFlow Lite, which
    # uses double during scaling calculations
    # TensorFlow Lite casts the scales slightly differently for uint8 and int8
    if not rescale_for_faf:
        if ifm_dtype == DataType.uint8:
            scales = [np.double(ifm_scale * weight_scale) / np.double(ofm_scale) for weight_scale in weight_scales]
        elif ifm_dtype == DataType.int8 or ifm_dtype == DataType.int16:
            scales = [
                (np.double(ifm_scale) * np.double(weight_scale)) / np.double(ofm_scale)
                for weight_scale in weight_scales
            ]
        else:
            raise UnsupportedFeatureError(
                "Compression of {} is not implemented; tensor: {}".format(ifm_dtype, tens.name)
            )
    else:
        if ifm_dtype == DataType.uint8:
            scales = [np.double(ifm_scale * weight_scale * 0x3000) for weight_scale in weight_scales]
        elif ifm_dtype == DataType.int8 or ifm_dtype == DataType.int16:
            scales = [(np.double(ifm_scale * 0x3000) * np.double(weight_scale)) for weight_scale in weight_scales]
        else:
            raise UnsupportedFeatureError(
                "Compression of {} is not implemented; tensor: {}".format(ifm_dtype, tens.name)
            )

    # Quantise all of the weight scales into (scale_factor, shift)
    if ifm_dtype == DataType.int16:
        quantised_scales = [reduced_quantise_scale(scale) for scale in scales]
    else:
        quantised_scales = [quantise_scale(scale) for scale in scales]
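
    # Conceptually, each (scale_factor, shift) pair approximates the real scale as
    # scale_factor * 2 ** -shift (an illustrative reading of scaling.quantise_scale);
    # e.g. 0.5 can be represented as (1 << 30, 31).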

    for _, shift in quantised_scales:
        assert shift >= 16

    # Pack the biases and scales
    if len(quantised_scales) == 1:
        # If only 1 quantised scale is used, repeat that value for the length of the biases
        quantised_scales = [quantised_scales[0]] * len(biases)

    assert len(quantised_scales) == len(biases)
    tens.element_size_bytes = 10
    tens.compressed_values = []
    tens.compressed_values_substream_offsets = []

    total_elements = len(quantised_scales)
    alignment_bytes = 0
    for i in range(0, total_elements, ofm_depth_step):
        # Extract streams from the brick to generate substreams for each core
        stream = bytearray()
        substream_offsets = [0]
        max_len = min(ofm_depth_step, total_elements - i)
        for core in range(0, min(arch.ncores, max_len)):
            core_scales = quantised_scales[i + core : i + core + max_len : arch.ncores]
            core_biases = biases[i + core : i + core + max_len : arch.ncores]
            for j, core_bias in enumerate(core_biases):
                stream.extend(pack_bias_and_scale(core_bias, *core_scales[j]))

            # Align to 16 bytes for the start of the next substream
            remainder = len(stream) % 16
            if remainder > 0:
                stream.extend(bytearray(16 - remainder))
                alignment_bytes += 16 - remainder

            substream_offsets.append(len(stream))

        # Add the packed values and their substream offset lists to the tensor
        tens.compressed_values.append(stream)
        tens.compressed_values_substream_offsets.append(substream_offsets)

    tens.storage_shape = [total_elements + round_up_divide(alignment_bytes, tens.element_size_bytes)]


def update_pass_weight_and_scale_tensors(nng, arch):
    for sg in nng.subgraphs:
        for ps in sg.passes:
            tens = ps.weight_tensor
            if tens is not None:
                op = tens.find_npu_op()
                npu_usage_of_tensor = op.attrs["npu_block_type"]
                needs_dma = tens.needs_dma()
                if ps.cascade.strategy == SchedulingStrategy.WeightStream and needs_dma:
                    ofm_depth_step = ps.block_config[-1]
                else:
                    ofm_depth_step = tens.shape[-1]
                compress_weights(
                    arch, nng, tens, npu_usage_of_tensor, ps.block_config[-1], ofm_depth_step, op.get_dilation_h_w()
                )
                # Update source tensor
                if needs_dma:
                    src_tens = tens.get_dma_src_tensor()
                    src_tens.shape = tens.shape
                    src_tens.quant_values = tens.quant_values
                    src_tens.copy_compressed_weight_info(tens)
                    set_storage_shape(src_tens)

            if ps.scale_tensor is not None:
                rescale_for_faf = False
                activation_ops = {"Sigmoid", "Tanh"}
                if (ps.ops[-1].type in activation_ops) and (ps.npu_block_type != NpuBlockType.ElementWise):
                    rescale_for_faf = True
                calc_scales_and_pack_biases(ps.scale_tensor, arch, ofm_depth_step, rescale_for_faf)