# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Shared buffer allocation works out how to allocate the Ethos-U55 shared buffer for a given pass.
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 18 | import numpy as np |
Diego Russo | ea6111a | 2020-04-14 18:41:58 +0100 | [diff] [blame] | 19 | |
Diego Russo | e8a1045 | 2020-04-21 17:39:10 +0100 | [diff] [blame] | 20 | from .architecture_features import ArchitectureFeatures |
| 21 | from .architecture_features import Block |
Diego Russo | e8a1045 | 2020-04-21 17:39:10 +0100 | [diff] [blame] | 22 | from .architecture_features import SharedBufferArea |
| 23 | from .architecture_features import SHRAMElements |
Tim Hall | 2a7ebe3 | 2020-06-18 11:42:21 +0100 | [diff] [blame] | 24 | from .errors import VelaError |
Dwight Lidman | 7ad408b | 2020-08-11 11:55:22 +0200 | [diff] [blame] | 25 | from .ethos_u55_regs.ethos_u55_regs import resampling_mode |
Tim Hall | 4ed38bc | 2020-10-20 18:54:20 +0100 | [diff] [blame] | 26 | from .operation import Kernel |
Diego Russo | ea6111a | 2020-04-14 18:41:58 +0100 | [diff] [blame] | 27 | from .operation import NpuBlockType |
Louis Verhaard | 814cfbb | 2020-08-21 14:06:25 +0200 | [diff] [blame] | 28 | from .range_set import MemoryRangeSet |
| 29 | from .tensor import MemArea |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 30 | |
| 31 | |
class SharedBufferAllocation:
    """Works out how the Ethos-U55 shared RAM (SHRAM) banks are split between
    the Weights, IFM, Accumulator and OFM areas for a single pass.

    The Weights and OFM bank counts are fixed by the architecture (set at the
    end of __init__); the IFM and Accumulator bank counts depend on the block
    configuration and are filled in by try_block().
    """

    def __init__(self, arch, ps):
        # arch: ArchitectureFeatures describing the target accelerator.
        # ps: the pass whose primary op's tensors drive the allocation.
        self.arch = arch

        # One slot per SharedBufferArea; start bank and bank count per area.
        self.bank_locations = np.zeros(SharedBufferArea.Size)
        self.banks_required = np.zeros(SharedBufferArea.Size)

        # weight_tensor is unpacked here but not needed for SHRAM sizing.
        ifm_tensor, ifm2_tensor, weight_tensor, ofm_tensor = ps.get_primary_op_ifm_ifm2_weights_ofm()
        # has_scale is True only when every present ifm/ifm2/ofm tensor carries
        # quantization with a non-None scale_f32.
        tensors = [t for t in (ifm_tensor, ifm2_tensor, ofm_tensor) if t is not None]
        scales = [t.quantization.scale_f32 for t in tensors if t.quantization is not None]
        has_scale = len(tensors) == len(scales) and None not in scales

        # Defaults used when the pass has no primary op.
        self.kernel = Kernel(1, 1)
        self.is_elementwise = ps.npu_block_type == NpuBlockType.ElementWise
        self.uses_lut = False
        self.ifm_count = 1

        if ps.primary_op:
            self.kernel = ps.primary_op.kernel
            self.uses_lut = ps.primary_op.activation_lut is not None

        # Ops whose IFM block depth must track the OFM block depth
        # (see try_block, which substitutes ofm_block.depth for these).
        self.is_equal_depth_op = self.is_elementwise or ps.npu_block_type in (
            NpuBlockType.ConvolutionDepthWise,
            NpuBlockType.Pooling,
        )

        # SHRAM element granules to size against; refined below per IFM bitdepth.
        self.use_accumulator_element = SHRAMElements.Acc32
        if self.is_elementwise:
            self.use_ifm_element = SHRAMElements.IFM8_Elementwise
        else:
            self.use_ifm_element = SHRAMElements.IFM8

        self.ifm_resampling_mode = resampling_mode.NONE
        self.ifm_bits = 0
        self.ifm_depth = 0
        if ifm_tensor:
            self.ifm_resampling_mode = ifm_tensor.resampling_mode
            self.ifm_bits = ifm_tensor.dtype.size_in_bits()

            # shape == [] denotes a scalar tensor, which has no depth axis.
            if ifm_tensor.shape != []:
                self.ifm_depth = ifm_tensor.shape[-1]

            if self.is_elementwise:
                # Binary elementwise ops stream two IFMs by default...
                self.ifm_count = 2
                if ifm_tensor.shape == []:  # Scalar in ifm1
                    assert ifm2_tensor
                    self.ifm_depth = ifm2_tensor.shape[-1]
                    self.ifm_count = 1
                elif not ifm2_tensor or ifm2_tensor.shape == []:  # Scalar in ifm2
                    # ...but a scalar (or missing) operand needs no IFM banks.
                    self.ifm_count = 1

            if self.ifm_bits == 16:
                # 16-bit IFM with scaling (outside pooling) needs the wider
                # 40-bit accumulator.
                if ps.npu_block_type != NpuBlockType.Pooling and has_scale:
                    self.use_accumulator_element = SHRAMElements.Acc40
                # Relies on SHRAMElements ordering: IFM8(+1) -> IFM16 and
                # IFM8_Elementwise(+1) -> IFM16_Elementwise, checked below.
                self.use_ifm_element = self.use_ifm_element + 1
                assert (self.use_ifm_element == SHRAMElements.IFM16) or (
                    self.use_ifm_element == SHRAMElements.IFM16_Elementwise
                )
            elif self.ifm_bits == 32:
                assert (
                    self.is_elementwise or ps.npu_block_type == NpuBlockType.ReduceSum
                ), "Unsupported 32-bit IFM operation"
                self.use_ifm_element = SHRAMElements.IFM32
            else:
                assert self.ifm_bits == 8, "Unexpected IFM bitdepth"

        self.ifm_block_depth = arch.calc_ifm_block_depth(self.ifm_depth, self.ifm_bits)
        self.ofm_tensor = ofm_tensor

        # Weights and OFM areas have architecture-fixed bank reservations;
        # IFM and Accumulator areas are sized later by try_block().
        self.banks_required[SharedBufferArea.Weights] = arch.shram_reserved_weight_banks
        self.banks_required[SharedBufferArea.OFM] = arch.shram_reserved_output_banks

    def is_valid(self) -> bool:
        """Lay out the bank start positions from banks_required (side effect on
        bank_locations) and return whether the areas fit without overlap.

        The accumulator area is placed at the top of the banks available to
        this pass (excluding any LUT banks); everything else is packed from
        bank 0 upward, so validity reduces to the IFM area ending at or before
        the accumulator start.
        """
        # Assign zero-based bank starts (first element remains zero)
        self.bank_locations[1:] = np.cumsum(self.banks_required)[:-1]

        # Accumulator area is measured from the end of the buffer
        self.bank_locations[SharedBufferArea.Accumulators] = (
            self.arch.available_shram_banks(self.uses_lut) - self.banks_required[SharedBufferArea.Accumulators]
        )
        ifm_end = self.bank_locations[SharedBufferArea.IFM] + self.banks_required[SharedBufferArea.IFM]
        return ifm_end <= self.bank_locations[SharedBufferArea.Accumulators]

    def try_block(self, ofm_block: Block):
        """Try an OFM block shape against the SHRAM budget.

        Updates banks_required for the IFM and Accumulator areas as a side
        effect. Returns (ofm_height, ofm_width, ifm_depth, ofm_depth) when the
        layout fits, or None when either block configuration is unsupported or
        the areas do not fit in the available banks.
        """
        # Get IFM block configuration
        ifm_block_depth = ofm_block.depth if self.is_equal_depth_op else self.ifm_block_depth
        ifm_block = self.arch.get_ifm_block_size(
            ifm_block_depth, ofm_block, self.kernel, ifm_resampling_mode=self.ifm_resampling_mode
        )
        ifm_config = self.arch.get_block_config(ifm_block.width, ifm_block.height, ifm_block.depth)
        if ifm_config is None:
            return None

        # Get OFM block configuration
        ofm_config = self.arch.get_block_config(ofm_block.width, ofm_block.height, ofm_block.depth)
        if ofm_config is None:
            return None

        acc_banks = ofm_config.banks[self.use_accumulator_element]

        # Update bank counts for IFM and Accumulator
        self.banks_required[SharedBufferArea.IFM] = ifm_config.banks[self.use_ifm_element] * self.ifm_count
        # Elementwise ops bypass the accumulators entirely.
        self.banks_required[SharedBufferArea.Accumulators] = 0 if self.is_elementwise else acc_banks

        # Validating calculates bank layout and returns validity
        if not self.is_valid():
            return None

        return (ofm_block.height, ofm_block.width, ifm_block.depth, ofm_block.depth)

    def generate_used_mask(self, active_set):
        """Return a per-bank 0/1 occupancy array covering the banks used by the
        SharedBufferArea kinds listed in active_set."""
        res = np.zeros(self.arch.shram_total_banks, dtype=np.int64)
        for kind in active_set:
            start = int(self.bank_locations[kind])
            end = start + int(self.banks_required[kind])
            res[start:end] = 1
        return res

    def is_compatible(first, second):
        """See if the bank allocations of two convolutions are compatible,
        so that they can run back-to-back without a fence in between"""
        # NOTE: instance method with self deliberately named 'first'; called
        # as first.is_compatible(second). Compatible means the first pass's
        # OFM/accumulator banks don't overlap the second's IFM/weight banks.

        first_set = set((SharedBufferArea.OFM, SharedBufferArea.Accumulators))
        second_set = set((SharedBufferArea.IFM, SharedBufferArea.Weights))

        first_mask = first.generate_used_mask(first_set)
        second_mask = second.generate_used_mask(second_set)

        if np.sum(first_mask & second_mask):
            # overlap
            return False

        return True

    def get_shram_memory_access_range(self):
        # Returns the SHRAM memory access range used by this shared buffer,
        # excluding access to LUT
        return MemoryRangeSet(
            MemArea.Shram, 0, self.arch.available_shram_banks(self.uses_lut) * self.arch.shram_bank_size
        )
| 172 | |
Tim Hall | 79d07d2 | 2020-04-27 18:20:16 +0100 | [diff] [blame] | 173 | |
def shared_buffer_allocation_for_pass_and_block_config(arch, ps, block_config):
    """Build the SHRAM allocation for this pass and confirm that the given
    block_config (height, width, ifm_depth, ofm_depth) can be allocated.

    Returns the SharedBufferAllocation on success, otherwise None.
    """
    candidate = SharedBufferAllocation(arch, ps)
    height, width, ifm_depth, ofm_depth = block_config
    # Equal-depth ops derive their IFM block depth from the OFM block instead.
    assert candidate.is_equal_depth_op or candidate.ifm_block_depth == ifm_depth
    return candidate if candidate.try_block(Block(width, height, ofm_depth)) else None
| 181 | |
| 182 | |
def find_block_configs_suitable_for_pass_and_shared_buffer(arch, ps):
    """Enumerate every OFM block shape that the SHRAM budget can accommodate
    for this pass, in ublock-sized steps up to the architecture maximum."""
    alloc = SharedBufferAllocation(arch, ps)

    # An explicit override skips the search entirely; it must be allocatable.
    if arch.override_block_config:
        forced = alloc.try_block(arch.override_block_config)
        if forced is None:
            raise VelaError("Block config override '{0}' cannot be allocated".format(arch.override_block_config))
        return [forced]

    # Constrain the search space if the OFM is smaller than the max block size
    # - Add other block search constraints here if required
    ofm_shape = alloc.ofm_tensor.shape
    if len(ofm_shape) <= 2:
        max_block_height = max_block_width = ofm_shape[0]
    else:
        max_block_width = ofm_shape[-2]
        max_block_height = ofm_shape[-3]

    # Common block depth
    max_block_depth = ofm_shape[-1]

    # Constrain to valid ranges before search
    max_block_width = min(arch.ofm_block_max.width, max_block_width)
    max_block_height = min(arch.ofm_block_max.height, max_block_height)
    max_block_depth = min(arch.ofm_block_max.depth, max_block_depth)

    ublock = arch.ofm_ublock
    valid_block_configs = []
    # Sweep width/height/depth in ublock steps against this pass
    for w in range(ublock.width, max_block_width + ublock.width, ublock.width):
        for h in range(ublock.height, max_block_height + ublock.height, ublock.height):
            for c in range(ublock.depth, max_block_depth + ublock.depth, ublock.depth):
                # A depth that splits the OFM (c < max_block_depth) must be a
                # multiple of the OFM split size.
                if c < max_block_depth and (c % ArchitectureFeatures.OFMSplitDepth) != 0:
                    continue
                config = alloc.try_block(Block(w, h, c))
                if config:
                    valid_block_configs.append(config)

    assert len(valid_block_configs) > 0
    return valid_block_configs