blob: f52d3a924bf6da356dbad93554d9dec424e1905e [file] [log] [blame]
# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Shared buffer allocation works out how to allocate the Ethos-U55 shared buffer for a given pass.
Tim Hall79d07d22020-04-27 18:20:16 +010018import numpy as np
Diego Russoea6111a2020-04-14 18:41:58 +010019
Diego Russoe8a10452020-04-21 17:39:10 +010020from .architecture_features import ArchitectureFeatures
21from .architecture_features import Block
22from .architecture_features import Kernel
23from .architecture_features import SharedBufferArea
24from .architecture_features import SHRAMElements
Tim Hall2a7ebe32020-06-18 11:42:21 +010025from .errors import VelaError
Dwight Lidman7ad408b2020-08-11 11:55:22 +020026from .ethos_u55_regs.ethos_u55_regs import resampling_mode
Diego Russoea6111a2020-04-14 18:41:58 +010027from .operation import NpuBlockType
Louis Verhaardaee5d752020-09-30 09:01:52 +020028from .operation import Op
Louis Verhaard814cfbb2020-08-21 14:06:25 +020029from .range_set import MemoryRangeSet
30from .tensor import MemArea
Tim Hall79d07d22020-04-27 18:20:16 +010031
32
class SharedBufferAllocation:
    """Bank layout of the Ethos-U55 on-chip shared buffer (SHRAM) for one pass.

    The shared buffer is split into banks, and each usage area (Weights, IFM,
    Accumulators, OFM - see SharedBufferArea) gets a contiguous run of banks.
    This class derives how many banks each area needs for a given pass and
    candidate block configuration, and where each area starts.
    """

    def __init__(self, arch, ps):
        # arch: ArchitectureFeatures for the target accelerator configuration
        # ps: the pass to allocate the shared buffer for
        self.arch = arch

        # Per-area bank start index and bank count, indexed by SharedBufferArea
        self.bank_locations = np.zeros(SharedBufferArea.Size)
        self.banks_required = np.zeros(SharedBufferArea.Size)

        ifm_tensor, ifm2_tensor, weight_tensor, ofm_tensor = ps.get_primary_op_ifm_ifm2_weights_ofm()
        tensors = [t for t in (ifm_tensor, ifm2_tensor, ofm_tensor) if t is not None]
        scales = [t.quantization.scale_f32 for t in tensors if t.quantization is not None]
        # True only if every present tensor has quantization with a non-None scale
        has_scale = len(tensors) == len(scales) and None not in scales

        strides = (1, 1, 1, 1)
        dilation = (1, 1, 1, 1)
        self.kernel = Kernel(1, 1)
        self.is_elementwise = ps.npu_block_type == NpuBlockType.ElementWise
        self.uses_lut = False
        self.ifm_count = 1

        if ps.primary_op:
            strides = ps.primary_op.attrs.get("strides", strides)
            dilation = ps.primary_op.attrs.get("dilation", dilation)
            k_h = 1
            k_w = 1
            if weight_tensor:
                if ps.primary_op.type != Op.FullyConnected:
                    # Kernel height/width come from the weight tensor shape
                    k_h = weight_tensor.shape[0]
                    k_w = weight_tensor.shape[1]
            else:
                # No weight tensor (e.g. pooling): kernel size is in the op attributes
                k_h = ps.primary_op.attrs.get("filter_height", 1)
                k_w = ps.primary_op.attrs.get("filter_width", 1)

            # strides/dilation are NHWC-style 4-tuples: index 1 is height, 2 is width
            self.kernel = Kernel(k_w, k_h, strides[2], strides[1], dilation[2], dilation[1])
            self.uses_lut = ps.primary_op.activation_lut is not None

        # These block types process IFM and OFM at the same depth
        self.is_equal_depth_op = self.is_elementwise or ps.npu_block_type in (
            NpuBlockType.ConvolutionDepthWise,
            NpuBlockType.Pooling,
        )
        self.strides = strides

        # Default SHRAM element types; refined below once the IFM bit width is known
        self.use_accumulator_element = SHRAMElements.Acc32
        if self.is_elementwise:
            self.use_ifm_element = SHRAMElements.IFM8_Elementwise
        else:
            self.use_ifm_element = SHRAMElements.IFM8

        self.ifm_resampling_mode = resampling_mode.NONE
        self.ifm_bits = 0
        self.ifm_depth = 0
        if ifm_tensor:
            self.ifm_resampling_mode = ifm_tensor.resampling_mode
            self.ifm_bits = ifm_tensor.dtype.size_in_bits()

            if ifm_tensor.shape != []:
                self.ifm_depth = ifm_tensor.shape[-1]

            if self.is_elementwise:
                # Elementwise ops normally read two IFMs; a scalar operand needs no banks
                self.ifm_count = 2
                if ifm_tensor.shape == []:  # Scalar in ifm1
                    assert ifm2_tensor
                    self.ifm_depth = ifm2_tensor.shape[-1]
                    self.ifm_count = 1
                elif not ifm2_tensor or ifm2_tensor.shape == []:  # Scalar in ifm2
                    self.ifm_count = 1

            if self.ifm_bits == 16:
                # 16-bit IFM with scaling needs the wider 40-bit accumulator
                # (pooling is excluded)
                if ps.npu_block_type != NpuBlockType.Pooling and has_scale:
                    self.use_accumulator_element = SHRAMElements.Acc40
                # Bump IFM8/IFM8_Elementwise to the matching 16-bit element;
                # relies on the SHRAMElements enum ordering (verified by the assert)
                self.use_ifm_element = self.use_ifm_element + 1
                assert (self.use_ifm_element == SHRAMElements.IFM16) or (
                    self.use_ifm_element == SHRAMElements.IFM16_Elementwise
                )
            elif self.ifm_bits == 32:
                assert (
                    self.is_elementwise or ps.npu_block_type == NpuBlockType.ReduceSum
                ), "Unsupported 32-bit IFM operation"
                self.use_ifm_element = SHRAMElements.IFM32
            else:
                assert self.ifm_bits == 8, "Unexpected IFM bitdepth"

        self.ifm_block_depth = arch.calc_ifm_block_depth(self.ifm_depth, self.ifm_bits)
        self.ofm_tensor = ofm_tensor

        # Weights and OFM areas use fixed, architecture-reserved bank counts;
        # IFM and Accumulator counts are filled in by try_block()
        self.banks_required[SharedBufferArea.Weights] = arch.shram_reserved_weight_banks
        self.banks_required[SharedBufferArea.OFM] = arch.shram_reserved_output_banks

    def is_valid(self):
        """Compute the bank layout from banks_required and return whether it fits."""
        # Assign zero-based bank starts (first element remains zero)
        self.bank_locations[1:] = np.cumsum(self.banks_required)[:-1]

        # Accumulator area is measured from the end of the buffer
        self.bank_locations[SharedBufferArea.Accumulators] = (
            self.arch.available_shram_banks(self.uses_lut) - self.banks_required[SharedBufferArea.Accumulators]
        )
        # Fits when the (growing-upward) IFM area does not overlap the
        # (end-anchored) accumulator area
        ifm_end = self.bank_locations[SharedBufferArea.IFM] + self.banks_required[SharedBufferArea.IFM]
        return ifm_end <= self.bank_locations[SharedBufferArea.Accumulators]

    def try_block(self, ofm_block: Block):
        """Try to allocate the shared buffer for the given OFM block shape.

        Returns (height, width, ifm_depth, ofm_depth) on success, or None when
        the block shape is unsupported or the areas do not fit in SHRAM.
        Updates banks_required/bank_locations as a side effect.
        """
        # Get IFM block configuration
        ifm_block_depth = ofm_block.depth if self.is_equal_depth_op else self.ifm_block_depth
        ifm_block = self.arch.get_ifm_block_size(
            ifm_block_depth, ofm_block, self.kernel, ifm_resampling_mode=self.ifm_resampling_mode
        )
        ifm_config = self.arch.get_block_config(ifm_block.width, ifm_block.height, ifm_block.depth)
        if ifm_config is None:
            return None

        # Get OFM block configuration
        ofm_config = self.arch.get_block_config(ofm_block.width, ofm_block.height, ofm_block.depth)
        if ofm_config is None:
            return None

        acc_banks = ofm_config.banks[self.use_accumulator_element]

        # Update bank counts for IFM and Accumulator
        self.banks_required[SharedBufferArea.IFM] = ifm_config.banks[self.use_ifm_element] * self.ifm_count
        self.banks_required[SharedBufferArea.Accumulators] = 0 if self.is_elementwise else acc_banks

        # Validating calculates bank layout and returns validity
        if not self.is_valid():
            return None

        return (ofm_block.height, ofm_block.width, ifm_block.depth, ofm_block.depth)

    def generate_used_mask(self, active_set):
        """Return a 0/1 mask over all SHRAM banks marking banks used by the
        areas listed in active_set."""
        res = np.zeros(self.arch.shram_total_banks, dtype=np.int64)
        for kind in active_set:
            start = int(self.bank_locations[kind])
            end = start + int(self.banks_required[kind])
            res[start:end] = 1
        return res

    def is_compatible(first, second):
        """See if the bank allocations of two convolutions are compatible,
        so that they can run back-to-back without a fence in between"""
        # NOTE: instance method; `first` plays the role of `self`

        first_set = set((SharedBufferArea.OFM, SharedBufferArea.Accumulators))
        second_set = set((SharedBufferArea.IFM, SharedBufferArea.Weights))

        first_mask = first.generate_used_mask(first_set)
        second_mask = second.generate_used_mask(second_set)

        if np.sum(first_mask & second_mask):
            # overlap
            return False

        return True

    def get_shram_memory_access_range(self):
        # Returns the SHRAM memory access range used by this shared buffer,
        # excluding access to LUT
        return MemoryRangeSet(
            MemArea.Shram, 0, self.arch.available_shram_banks(self.uses_lut) * self.arch.shram_bank_size
        )
188
Tim Hall79d07d22020-04-27 18:20:16 +0100189
def shared_buffer_allocation_for_pass_and_block_config(arch, ps, block_config):
    """Build a SharedBufferAllocation for ps using the given block config.

    block_config is (height, width, ifm_depth, ofm_depth).  Returns the
    allocation when the block fits in the shared buffer, otherwise None.
    """
    allocation = SharedBufferAllocation(arch, ps)
    height, width, ifm_depth, ofm_depth = block_config
    # Equal-depth ops choose their own IFM block depth; all others must match
    assert allocation.is_equal_depth_op or (allocation.ifm_block_depth == ifm_depth)
    fitted = allocation.try_block(Block(width, height, ofm_depth))
    return allocation if fitted else None
197
198
def find_block_configs_suitable_for_pass_and_shared_buffer(arch, ps):
    """Enumerate every OFM block shape for ps that fits the shared buffer.

    Returns a non-empty list of (height, width, ifm_depth, ofm_depth) tuples.
    Raises VelaError when a user-supplied block config override cannot be
    allocated.
    """
    alloc = SharedBufferAllocation(arch, ps)

    # A command-line override bypasses the search entirely
    if arch.override_block_config:
        config = alloc.try_block(arch.override_block_config)
        if config is None:
            raise VelaError("Block config override '{0}' cannot be allocated".format(arch.override_block_config))
        return [config]

    # Constrain the search space if the OFM is smaller than the max block size
    # - Add other block search constraints here if required
    ofm_shape = alloc.ofm_tensor.shape
    if len(ofm_shape) <= 2:
        max_block_height = max_block_width = ofm_shape[0]
    else:
        max_block_width = ofm_shape[-2]
        max_block_height = ofm_shape[-3]

    # Common block depth
    max_block_depth = ofm_shape[-1]

    # Constrain to valid ranges before search
    max_block_width = min(arch.ofm_block_max.width, max_block_width)
    max_block_height = min(arch.ofm_block_max.height, max_block_height)
    max_block_depth = min(arch.ofm_block_max.depth, max_block_depth)

    ublock = arch.ofm_ublock
    valid_block_configs = []
    # Sweep candidate block shapes in microblock-sized steps
    for width in range(ublock.width, max_block_width + ublock.width, ublock.width):
        for height in range(ublock.height, max_block_height + ublock.height, ublock.height):
            for depth in range(ublock.depth, max_block_depth + ublock.depth, ublock.depth):
                # A depth that splits the OFM must be a multiple of the split size
                if depth >= max_block_depth or (depth % ArchitectureFeatures.OFMSplitDepth) == 0:
                    config = alloc.try_block(Block(width, height, depth))
                    if config:
                        valid_block_configs.append(config)

    assert len(valid_block_configs) > 0
    return valid_block_configs