# Copyright (C) 2020 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Internal representation of a Neural Network Operation.
import copy
from collections import namedtuple
from enum import Enum
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import TYPE_CHECKING

from .errors import VelaError
from .numeric_util import full_shape


if TYPE_CHECKING:
    from .tensor import Tensor

PointXY = namedtuple("PointXY", "x y")
PointXYZ = namedtuple("PointXYZ", "x y z")

class NpuBlockType(Enum):
    Default = 0
    ConvolutionMxN = 1
    VectorProduct = 2
    Pooling = 3
    ConvolutionDepthWise = 4
    ElementWise = 5
    ReduceSum = 6


class Kernel:
    """
    Kernel information for NPU operations
    """

    def __init__(self, w: int, h: int, stride_x: int = 1, stride_y: int = 1, dilation_x: int = 1, dilation_y: int = 1):
        assert stride_x > 0 and stride_y > 0
        assert dilation_x > 0 and dilation_y > 0
        self.width = w
        self.height = h
        self.stride = PointXY(stride_x, stride_y)
        self.dilation = PointXY(dilation_x, dilation_y)

    def elements_wh(self) -> int:
        return self.width * self.height

    def area_width(self) -> int:
        return (self.width - 1) * self.dilation.x + 1

    def area_height(self) -> int:
        return (self.height - 1) * self.dilation.y + 1

    def __str__(self):
        return f"w={self.width}, h={self.height}, stride={tuple(self.stride)}, dilation={tuple(self.dilation)}"

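# Example (illustrative): a 3x3 kernel with dilation 2 covers a 5x5 area of the
# IFM, since the dilated extent is (size - 1) * dilation + 1 in each dimension:
#
#   k = Kernel(3, 3, dilation_x=2, dilation_y=2)
#   assert (k.area_width(), k.area_height()) == (5, 5)
#   assert k.elements_wh() == 9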

# Classifies operators of type Custom
class CustomType(Enum):
    ThirdPartyOp = 0  # Third party custom op
    NpuOp = 1  # NPU op
    ExistingNpuOp = 2  # NPU op that was part of the input network


TensorIndices = namedtuple("TensorIndices", ["ifms", "weights", "biases"])

NO_INDICES = TensorIndices([], [], [])
IFM_INDICES = TensorIndices([0], [], [])
IFM_WEIGHTS_INDICES = TensorIndices([0], [1], [])
IFM_WEIGHTS_BIAS_INDICES = TensorIndices([0], [1], [2])
IFM_IFM2_INDICES = TensorIndices([0, 1], [], [])
CONV2D_BACKPROP_INDICES = TensorIndices([2], [1], [3])
TRANSPOSE_CONV_INDICES = TensorIndices([0], [1], [3])
CONCAT_INDICES = TensorIndices([1, 2], [], [])
SPLIT_IFM_INDICES = TensorIndices([1], [], [])
BLOCK_LSTM_INDICES = TensorIndices([3], [4], [])

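# The index lists above name which entries of an operation's inputs act as
# IFM(s), weights and bias. For example (illustrative), an operator whose
# OperatorInfo uses IFM_WEIGHTS_BIAS_INDICES treats op.inputs[0] as the IFM,
# op.inputs[1] as the weights and op.inputs[2] as the bias; the ifm/weights/bias
# properties on the Operation class below resolve tensors through these indices.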

# Static information related to operation codes
class OperatorInfo:
    __slots__ = ("id", "block_type", "indices", "is_unary")
    _id = 0

    def __init__(self, block_type=NpuBlockType.Default, indices=NO_INDICES, is_unary=False):
        OperatorInfo._id += 1
        self.id = OperatorInfo._id
        self.block_type = block_type
        self.indices = indices  # Indices of the different tensor purposes
        self.is_unary = is_unary  # Classifies elementwise operators


# Internally used operation codes
class Op(Enum):
    Abs = OperatorInfo(block_type=NpuBlockType.ElementWise, indices=IFM_INDICES, is_unary=True)
    Add = OperatorInfo(block_type=NpuBlockType.ElementWise, indices=IFM_IFM2_INDICES)
    AddN = OperatorInfo()
    Any = OperatorInfo()
    ArgMax = OperatorInfo()
    ArgMin = OperatorInfo()
    AvgPool = OperatorInfo(block_type=NpuBlockType.Pooling, indices=IFM_INDICES)
    BatchMatMul = OperatorInfo()
    BatchToSpaceND = OperatorInfo()
    BidirectionalSequenceLstm = OperatorInfo(block_type=NpuBlockType.VectorProduct, indices=IFM_WEIGHTS_INDICES)
    BidirectionalSequenceRnn = OperatorInfo(block_type=NpuBlockType.VectorProduct, indices=IFM_WEIGHTS_INDICES)
    BlockLSTM = OperatorInfo(block_type=NpuBlockType.VectorProduct, indices=BLOCK_LSTM_INDICES)

    CLZ = OperatorInfo(
        block_type=NpuBlockType.ElementWise, indices=IFM_INDICES, is_unary=True
    )  # NPU specific operation
    Call = OperatorInfo()
    Cast = OperatorInfo()
    Ceil = OperatorInfo()
    Clip = OperatorInfo()  # NPU specific fused activation function for clipping between activation.min/max
    Concat = OperatorInfo(indices=CONCAT_INDICES)
    ConcatEmbeddings = OperatorInfo()
    ConcatSliceWrite = OperatorInfo(indices=IFM_INDICES)
    ConcatTFLite = OperatorInfo(indices=CONCAT_INDICES)
    Const = OperatorInfo()  # Constant tensor, only used in CPU subgraphs
    Conv2D = OperatorInfo(block_type=NpuBlockType.ConvolutionMxN, indices=IFM_WEIGHTS_INDICES)
    Conv2DBackpropInput = OperatorInfo(block_type=NpuBlockType.ConvolutionMxN, indices=CONV2D_BACKPROP_INDICES)
    Conv2DBackpropInputSwitchedBias = OperatorInfo(
        block_type=NpuBlockType.ConvolutionMxN, indices=TRANSPOSE_CONV_INDICES
    )
    Conv2DBias = OperatorInfo(block_type=NpuBlockType.ConvolutionMxN, indices=IFM_WEIGHTS_BIAS_INDICES)
    Cos = OperatorInfo()
    Custom = OperatorInfo()  # Custom 3rd party operator, only used in CPU subgraphs
    CustomNpuOp = OperatorInfo()  # NPU custom operator, only used in CPU subgraphs
    DMA = OperatorInfo()
    Delegate = OperatorInfo()
    Densify = OperatorInfo()
    DepthToSpace = OperatorInfo()
    DepthwiseConv2DBias = OperatorInfo(block_type=NpuBlockType.ConvolutionDepthWise, indices=IFM_WEIGHTS_BIAS_INDICES)
    Dequantize = OperatorInfo(indices=IFM_INDICES)
    Div = OperatorInfo()
    Elu = OperatorInfo()
    EmbeddingLookup = OperatorInfo()
    EmbeddingLookupSparse = OperatorInfo()
    Equal = OperatorInfo()
    Exp = OperatorInfo()
    ExpandDims = OperatorInfo(indices=IFM_INDICES)
    FakeQuantWithMinMaxArgs = OperatorInfo()
    Fill = OperatorInfo()
    Floor = OperatorInfo()
    FloorDiv = OperatorInfo()
    FloorMod = OperatorInfo()
    FullyConnected = OperatorInfo(block_type=NpuBlockType.VectorProduct, indices=IFM_WEIGHTS_BIAS_INDICES)
    GatherNd = OperatorInfo()
    GatherV2 = OperatorInfo()
    Greater = OperatorInfo()
    GreaterEqual = OperatorInfo()
    HardSwish = OperatorInfo()
    HashtableLookup = OperatorInfo()
    Identity = OperatorInfo()
    If = OperatorInfo()
    L2Norm = OperatorInfo()
    L2Pool2D = OperatorInfo()
    LRN = OperatorInfo()
    LSHProjection = OperatorInfo()
    LeakyRelu = OperatorInfo(block_type=NpuBlockType.ElementWise, indices=IFM_INDICES, is_unary=True)
    Less = OperatorInfo()
    LessEqual = OperatorInfo()
    Log = OperatorInfo()
    LogSoftmax = OperatorInfo()
    LogicalAnd = OperatorInfo()
    LogicalNot = OperatorInfo()
    LogicalOr = OperatorInfo()
    Lstm = OperatorInfo(block_type=NpuBlockType.VectorProduct, indices=IFM_WEIGHTS_INDICES)
    LUT = OperatorInfo()  # NPU specific, operator has LUT, only used in fused activation functions
    MatMul = OperatorInfo(block_type=NpuBlockType.VectorProduct, indices=IFM_WEIGHTS_INDICES)
    MatrixDiag = OperatorInfo()
    MatrixSetDiag = OperatorInfo()
    Max = OperatorInfo()
    MaxPool = OperatorInfo(block_type=NpuBlockType.Pooling, indices=IFM_INDICES)
    Maximum = OperatorInfo(block_type=NpuBlockType.ElementWise, indices=IFM_IFM2_INDICES)
    Mean = OperatorInfo()
    Min = OperatorInfo()
    Minimum = OperatorInfo(block_type=NpuBlockType.ElementWise, indices=IFM_IFM2_INDICES)
    MirrorPad = OperatorInfo()
    Mul = OperatorInfo(block_type=NpuBlockType.ElementWise, indices=IFM_IFM2_INDICES)
    Neg = OperatorInfo()
    NonMaxSuppressionV4 = OperatorInfo()
    NonMaxSuppressionV5 = OperatorInfo()
    NotEqual = OperatorInfo()
    OneHot = OperatorInfo()
    Pack = OperatorInfo(indices=IFM_INDICES)
    PackReshaped = OperatorInfo(indices=IFM_INDICES)
    Pad = OperatorInfo()
    PadV2 = OperatorInfo()
    Placeholder = OperatorInfo()  # Only used in CPU subgraphs
    Pow = OperatorInfo()
    Prelu = OperatorInfo()
    Prod = OperatorInfo()
    Quantize = OperatorInfo(indices=IFM_INDICES)
    QuantizedAvgPool = OperatorInfo(block_type=NpuBlockType.Pooling, indices=IFM_INDICES)
    QuantizedConv2D = OperatorInfo(block_type=NpuBlockType.ConvolutionMxN, indices=IFM_WEIGHTS_INDICES)
    QuantizedMatMul = OperatorInfo(block_type=NpuBlockType.VectorProduct, indices=IFM_WEIGHTS_INDICES)
    QuantizedMaxPool = OperatorInfo(block_type=NpuBlockType.Pooling, indices=IFM_INDICES)
    QuantizedReshape = OperatorInfo(indices=IFM_INDICES)
    Range = OperatorInfo()
    Rank = OperatorInfo()
    ReduceSum = OperatorInfo(block_type=NpuBlockType.ReduceSum, indices=IFM_INDICES)
    Relu = OperatorInfo(indices=IFM_INDICES)
    Relu6 = OperatorInfo(indices=IFM_INDICES)
    ReluN1To1 = OperatorInfo(indices=IFM_INDICES)
    Reshape = OperatorInfo(indices=IFM_INDICES)
    ResizeBilinear = OperatorInfo(block_type=NpuBlockType.Pooling, indices=IFM_INDICES)
    ResizeNearestNeighbor = OperatorInfo()
    ReverseSequence = OperatorInfo()
    ReverseV2 = OperatorInfo()
    Rnn = OperatorInfo(block_type=NpuBlockType.VectorProduct, indices=IFM_WEIGHTS_INDICES)
    Round = OperatorInfo()
    Rsqrt = OperatorInfo()
    SHL = OperatorInfo(block_type=NpuBlockType.ElementWise, indices=IFM_IFM2_INDICES)  # NPU specific operation
    SHR = OperatorInfo(block_type=NpuBlockType.ElementWise, indices=IFM_IFM2_INDICES)  # NPU specific operation
    ScatterNd = OperatorInfo()
    SegmentSum = OperatorInfo()
    Select = OperatorInfo()
    SelectV2 = OperatorInfo()
    Shape = OperatorInfo()
    Sigmoid = OperatorInfo(indices=IFM_INDICES)
    SignBit = OperatorInfo()
    Sin = OperatorInfo()
    SkipGram = OperatorInfo()
    Slice = OperatorInfo(indices=IFM_INDICES)
    Softmax = OperatorInfo(indices=IFM_INDICES)
    SpaceToBatchND = OperatorInfo()
    SpaceToDepth = OperatorInfo()
    SparseToDense = OperatorInfo()
    Split = OperatorInfo(indices=SPLIT_IFM_INDICES)
    SplitSliceRead = OperatorInfo(indices=IFM_INDICES)
    SplitV = OperatorInfo(indices=IFM_INDICES)
    Sqrt = OperatorInfo()
    Square = OperatorInfo()
    SquaredDifference = OperatorInfo()
    Squeeze = OperatorInfo(indices=IFM_INDICES)
    StridedSlice = OperatorInfo(indices=IFM_INDICES)
    Sub = OperatorInfo(block_type=NpuBlockType.ElementWise, indices=IFM_IFM2_INDICES)
    SubgraphInput = OperatorInfo()  # Only used in CPU subgraphs
    Sum = OperatorInfo()
    Svdf = OperatorInfo()
    Tanh = OperatorInfo(indices=IFM_INDICES)
    Tile = OperatorInfo()
    TopKV2 = OperatorInfo()
    Transpose = OperatorInfo()
    UnidirectionalSequenceLstm = OperatorInfo(block_type=NpuBlockType.VectorProduct, indices=IFM_WEIGHTS_INDICES)
    UnidirectionalSequenceRnn = OperatorInfo(block_type=NpuBlockType.VectorProduct, indices=IFM_WEIGHTS_INDICES)
    Unique = OperatorInfo()
    Unpack = OperatorInfo(indices=IFM_INDICES)
    UnpackReshaped = OperatorInfo(indices=IFM_INDICES)
    Where = OperatorInfo()
    While = OperatorInfo()
    ZerosLike = OperatorInfo()

    @property
    def info(self):
        return self.value

    @property
    def npu_block_type(self):
        return self.info.block_type

    def is_conv2d_op(self):
        return self.info.block_type == NpuBlockType.ConvolutionMxN

    def is_depthwise_conv2d_op(self):
        return self.info.block_type == NpuBlockType.ConvolutionDepthWise

    def is_pool_op(self):
        return self.info.block_type == NpuBlockType.Pooling

    def is_maxpool_op(self):
        return self in (Op.MaxPool, Op.QuantizedMaxPool)

    def is_avgpool_op(self):
        return self in (Op.QuantizedAvgPool, Op.AvgPool)

    def is_elementwise_op(self):
        return self.info.block_type == NpuBlockType.ElementWise

    def is_unary_elementwise_op(self):
        return self.info.block_type == NpuBlockType.ElementWise and self.info.is_unary

    def is_binary_elementwise_op(self):
        return self.info.block_type == NpuBlockType.ElementWise and not self.info.is_unary

    def is_relu_op(self):
        return self in (Op.Relu, Op.Relu6, Op.ReluN1To1, Op.Clip)

    def is_activation_op(self):
        return self.is_relu_op() or self in (Op.Tanh, Op.Sigmoid, Op.Softmax, Op.LUT)

    def is_split_op(self):
        return self in (Op.Split, Op.SplitV, Op.StridedSlice, Op.Slice, Op.UnpackReshaped, Op.Unpack)

    def is_concat_op(self):
        return self in (Op.Concat, Op.ConcatTFLite, Op.PackReshaped, Op.Pack)

    def needs_bias(self):
        return bool(self.info.indices.biases)

    def needs_shapes(self):
        return bool(self.info.indices.ifms)

    @classmethod
    def op_set(cls, predicate):
        # Returns the set of all operator codes that fulfill the given predicate
        return {op_type for op_type in Op if predicate(op_type)}

    def __str__(self):
        return self.name

    __repr__ = __str__

    def __lt__(self, other):
        return self.value.id < other.value.id

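# Example (illustrative): Op.op_set builds a set of operator codes from a
# predicate, for instance all binary elementwise operators:
#
#   binary_elem_wise_ops = Op.op_set(Op.is_binary_elementwise_op)
#   assert Op.Add in binary_elem_wise_ops
#   assert Op.Abs not in binary_elem_wise_ops  # Abs is unary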

class Padding(Enum):
    SAME = 0
    VALID = 1


class ActivationFunction:
    """Fused activation function"""

    def __init__(self, op_type: Op):
        self.op_type = op_type  # The activation operation to be performed
        # min/max are optional; if present they are non-quantized values
        self.min: Optional[float] = None
        self.max: Optional[float] = None
        # Table lookup index, only applicable for Op.LUT activation, 0-7
        self.lut_index: int = 0

    def clone(self):
        res = copy.copy(self)
        return res


def create_activation_function(op_type: Op) -> ActivationFunction:
    """Creates activation function with min/max depending on op_type"""
    act = ActivationFunction(op_type)
    if op_type == Op.Relu:
        act.min = 0.0
    elif op_type == Op.Relu6:
        act.min = 0.0
        act.max = 6.0
    elif op_type == Op.ReluN1To1:
        act.min = -1.0
        act.max = 1.0
    elif op_type == Op.Tanh:
        act.min = -1.0
        act.max = 1.0
    elif op_type == Op.Sigmoid:
        act.min = 0.0
        act.max = 1.0
    return act

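# Example (illustrative): a fused ReLU6 activation is represented as an
# ActivationFunction with op_type Op.Relu6 and a non-quantized clamping range:
#
#   act = create_activation_function(Op.Relu6)
#   assert act.op_type == Op.Relu6 and (act.min, act.max) == (0.0, 6.0)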

def get_slice_offsets(input_shape, offset_tens, offset_mask, is_begin=True):
    # For the strided slice operator: get the start or end offsets
    offsets = len(input_shape) * [0] if is_begin else input_shape[:]
    for idx in range(len(input_shape)):
        # If the idx:th bit in the mask is set, the value in offset_tens[idx] is ignored
        if (offset_mask & (1 << idx)) == 0:
            offsets[idx] = offset_tens.values[idx]
            if offsets[idx] < 0:
                # Convert negative offset to a positive value
                offsets[idx] += input_shape[idx]
    return offsets

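# Example (illustrative): for input_shape [1, 8, 8, 3] and a begin tensor
# holding [1, 0, 2, 0], a begin mask of 0b0100 has bit 2 set, so the begin
# value for dimension 2 is ignored and the returned offsets are [1, 0, 0, 0].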

class Operation:
    """Class representing a Neural Network operation. Has a name, a type,
    input and output tensors, as well as an attribute dictionary."""

    __slots__ = (
        "type",
        "name",
        "op_index",
        "attrs",
        "inputs",
        "outputs",
        "flops",
        "scheduled_pass",
        "run_on_npu",
        "activation",
        "memory_function",
        "forced_output_quantization",
        "activation_lut",
        "_kernel",
        "ifm_shapes",
        "ofm_shapes",
    )

    def __init__(self, op_type: Op, name: str):
        self.type = op_type
        self.name = name
        self.attrs: Dict[str, Any] = {}
        self.inputs: List[Tensor] = []
        self.outputs: List[Tensor] = []
        self.flops = 0
        self.run_on_npu = True
        # Fused activation function. If not None: an ActivationFunction describing the fused activation
        self.activation: Optional[ActivationFunction] = None
        # Fused memory function, if not None: operator code
        self.memory_function = None
        # If not None: contains QuantizationParameters to be used as output quantization
        # (which overrides the ofm tensor's quantization), used in LUT
        self.forced_output_quantization = None
        self.scheduled_pass = None
        self.op_index = None  # input network operator index
        self.activation_lut = None
        self._kernel = None
        self.ifm_shapes = []
        self.ofm_shapes = []

    def clone(self, suffix="_clone"):
        res = Operation(self.type, self.name + suffix)

        res.attrs = dict(self.attrs)
        res.inputs = list(self.inputs)
        res.outputs = list(self.outputs)
        res.flops = self.flops
        res.run_on_npu = self.run_on_npu
        res.activation = None if self.activation is None else self.activation.clone()
        res.memory_function = self.memory_function
        res.forced_output_quantization = self.forced_output_quantization
        res.scheduled_pass = self.scheduled_pass
        res.op_index = None  # not relevant as not part of input network

        return res

    def __str__(self):
        return "<nng.Operation '{}' type={}>".format(self.name, self.type)

    __repr__ = __str__

    def get_kernel_size(self):
        weights = self.weights
        if weights and self.type.npu_block_type in (NpuBlockType.ConvolutionDepthWise, NpuBlockType.ConvolutionMxN):
            weight_shape = full_shape(4, weights.shape, 1)
            h = weight_shape[-4]
            w = weight_shape[-3]
        elif self.type.npu_block_type in (NpuBlockType.Pooling, NpuBlockType.ReduceSum) and "ksize" in self.attrs:
            h, w = self.attrs["ksize"][1:3]
        else:
            h = self.attrs.get("filter_height", 1)
            w = self.attrs.get("filter_width", 1)
        return w, h

    def get_kernel_stride(self):
        if "strides" in self.attrs:
            _, h, w, _ = self.attrs["strides"]
        else:
            h = self.attrs.get("stride_h", 1)
            w = self.attrs.get("stride_w", 1)
        return w, h

    def get_kernel_dilation(self):
        if "dilation" in self.attrs:
            _, h, w, _ = self.attrs["dilation"]
        else:
            h = self.attrs.get("dilation_h_factor", 1)
            w = self.attrs.get("dilation_w_factor", 1)
        return w, h

    @property
    def kernel(self):
        k_w, k_h = self.get_kernel_size()
        s_w, s_h = self.get_kernel_stride()
        d_w, d_h = self.get_kernel_dilation()
        self._kernel = Kernel(k_w, k_h, s_w, s_h, d_w, d_h)
        return self._kernel

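    # Example (illustrative): for an AvgPool operation whose attrs contain
    # {"ksize": (1, 2, 2, 1), "strides": (1, 2, 2, 1)}, the kernel property
    # above yields a Kernel with width == height == 2 and stride == (2, 2).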
    def get_ifm_ifm2_weights_ofm(self):
        return self.ifm, self.ifm2, self.weights, self.ofm

    def get_ifm_weights_biases_ofm(self):
        return self.ifm, self.weights, self.bias, self.ofm

    def get_ifm_ifm2_weights_biases_ofm(self):
        return self.ifm, self.ifm2, self.weights, self.bias, self.ofm

    def get_ifm_ofm(self):
        return self.ifm, self.ofm

    @property
    def ifm(self):
        # Gets the IFM tensor, or None if not applicable
        return self.get_input(self.type.info.indices.ifms, 0)

    @property
    def ifm2(self):
        # Gets the IFM2 tensor, or None if not applicable
        return self.get_input(self.type.info.indices.ifms, 1)

    @property
    def bias(self):
        # Gets the bias tensor, or None if not applicable
        return self.get_input(self.type.info.indices.biases, 0)

    @property
    def weights(self):
        # Gets the weight tensor, or None if not applicable
        return self.get_input(self.type.info.indices.weights, 0)

    def get_ifm_tensors(self):
        # Gets the IFM tensors, or empty list if not applicable
        return self._index_list_to_tensors(self.type.info.indices.ifms)

    def get_weight_tensors(self):
        # Gets the weight tensors, or empty list if not applicable
        return self._index_list_to_tensors(self.type.info.indices.weights)

    def get_bias_tensors(self):
        # Gets the bias tensors, or empty list if not applicable
        return self._index_list_to_tensors(self.type.info.indices.biases)

    def _index_list_to_tensors(self, index_list):
        return [self.inputs[ix] for ix in index_list if ix < len(self.inputs)]

    def get_input(self, index_list, ix):
        if ix >= len(index_list):
            return None
        if index_list[ix] >= len(self.inputs):
            return None
        return self.inputs[index_list[ix]]

    @property
    def ofm(self):
        # Gets the OFM tensor, or None if not applicable
        return self.outputs[0] if self.outputs else None

    def get_concat_inputs_axis(self):
        assert self.type.is_concat_op()

        if self.type == Op.Concat:
            # The axis is given by the first (constant) input tensor
            axis_tensor = self.inputs[0]
            inputs = self.inputs[1:]
            assert len(axis_tensor.ops) == 1 and axis_tensor.ops[0].type == Op.Const
            axis = int(axis_tensor.values)
        elif self.type == Op.ConcatTFLite:
            inputs = self.inputs
            axis = self.attrs["axis"]
        elif self.type == Op.PackReshaped:
            # Requires fixup_pack_input to be called before this point
            inputs = self.inputs
            axis = self.attrs["axis"]
            assert len(self.inputs) == self.attrs["values_count"]
        else:
            assert False, f"Unsupported concat op {self.type}"

        return inputs, axis

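    # Example (illustrative): a ConcatTFLite operation with attrs {"axis": 3}
    # and three input tensors returns (op.inputs, 3) from get_concat_inputs_axis,
    # i.e. all inputs are joined along the last axis.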
    def get_dilation_h_w(self):
        _, dilation_h, dilation_w, _ = self.attrs.get("dilation", (1, 1, 1, 1))
        return dilation_h, dilation_w

    def get_split_inputs_axis(self):
        assert self.type.is_split_op()

        offset_start = None
        offset_end = None
        axis = None
        if self.type == Op.Split:
            num_splits = self.attrs.get("num_splits")
            axis_tens = self.inputs[0]
            assert len(axis_tens.ops) == 1 and axis_tens.ops[0].type == Op.Const
            axis = int(axis_tens.values)
            input_tens = self.inputs[1]
            outputs = self.outputs
            assert num_splits == len(outputs)

        elif self.type == Op.SplitV:
            num_splits = self.attrs.get("num_splits")
            input_tens = self.inputs[0]
            size_tens = self.inputs[1]
            assert len(size_tens.ops) == 1 and size_tens.ops[0].type == Op.Const
            sizes = size_tens.values

            axis_tens = self.inputs[2]
            assert len(axis_tens.ops) == 1 and axis_tens.ops[0].type == Op.Const
            axis = int(axis_tens.values)

            for idx, size in enumerate(sizes):
                # One, and only one, of the sizes may be set to -1, indicating that it should be inferred
                if size == -1:
                    sizes[idx] = input_tens.shape[axis] - (sum(sizes) + 1)
                    break

            outputs = self.outputs
            assert num_splits == len(outputs)
            assert sum(sizes) == input_tens.shape[axis]

        elif self.type == Op.Slice:
            input_tens, begin_tens, size_tens = self.inputs
            outputs = self.outputs
            offset_start = [0] * len(input_tens.shape)
            offset_end = [0] * len(input_tens.shape)

            for idx in range(len(begin_tens.values)):
                # Check if the op should slice in dimension idx
                if size_tens.values[idx] != input_tens.shape[idx]:
                    offset_start[idx] = begin_tens.values[idx]
                    offset_end[idx] = size_tens.values[idx] + offset_start[idx]

        elif self.type == Op.StridedSlice:
            input_tens, begin_tens, end_tens, strides_tens = self.inputs
            outputs = self.outputs
            out_tens = outputs[0]

            # Extract masks
            begin_mask = self.attrs["begin_mask"]
            ellipsis_mask = self.attrs["ellipsis_mask"]
            end_mask = self.attrs["end_mask"]
            new_axis_mask = self.attrs["new_axis_mask"]
            shrink_axis_mask = self.attrs["shrink_axis_mask"]

            # shrink_axis_mask/new_axis_mask/ellipsis_mask are not supported by the Operation class, but the operation
            # may have the attributes modified and handled in the graph optimization phase.
            assert shrink_axis_mask == new_axis_mask == ellipsis_mask == 0
            assert len(input_tens.shape) == len(out_tens.shape)
            offset_start = get_slice_offsets(input_tens.shape, begin_tens, begin_mask, is_begin=True)
            offset_end = get_slice_offsets(input_tens.shape, end_tens, end_mask, is_begin=False)
        elif self.type == Op.UnpackReshaped:
            # Requires fixup_unpack_output to be called before this point
            input_tens = self.inputs[0]
            outputs = self.outputs
            axis = self.attrs["axis"]
            num_splits = self.attrs["num"]
            # The number of outputs has to equal the value of the dimension to unpack
            assert num_splits == len(outputs) == input_tens.shape[axis]
        else:
            assert False

        return input_tens, outputs, axis, offset_start, offset_end

    def set_activation_lut(self, lut_tensor):
        self.activation = ActivationFunction(Op.LUT)
        self.activation_lut = lut_tensor
        self.add_input_tensor(lut_tensor)

    def add_input_tensor(self, tens):
        self.inputs.append(tens)
        if self not in tens.consumer_list:
            tens.consumer_list.append(self)

    def set_input_tensor(self, tens, idx):
        tens_to_remove = self.inputs[idx]
        if tens_to_remove in tens.consumer_list:
            tens.consumer_list.remove(tens_to_remove)

        self.inputs[idx] = tens
        if self not in tens.consumer_list:
            tens.consumer_list.append(self)

    def set_output_tensor(self, tens):
        tens.ops = [self]
        self.outputs = [tens]

    def get_output_quantization(self):
        if self.forced_output_quantization is not None:
            return self.forced_output_quantization
        return self.ofm.quantization

    def error(self, msg):
        """
        Raises a VelaError exception for errors encountered when parsing an Operation

        :param self: Operation object that resulted in the error
        :param msg: str object that contains a description of the specific error encountered
        """

        def _print_tensors(tensors):
            lines = []
            for idx, tens in enumerate(tensors):
                tens_name = getattr(tens, "name", "Not a Tensor")
                lines.append(f" {idx} = {tens_name}")
            return lines

        if self.op_index is None:
            lines = [f"Invalid {self.type} (name = {self.name}) operator in the internal representation. {msg}"]
        else:
            lines = [f"Invalid {self.type} (op_index = {self.op_index}) operator in the input network. {msg}"]

        lines += [" Input tensors:"]
        lines += _print_tensors(self.inputs)

        lines += [" Output tensors:"]
        lines += _print_tensors(self.outputs)

        raise VelaError("\n".join(lines))

    def set_ifm_ofm_shapes(self):
        ifm_tensor, ifm2_tensor, weight_tensor, ofm_tensor = self.get_ifm_ifm2_weights_ofm()

        # Set the operation's ifm_shapes/ofm_shapes, always as 4D shapes
        if self.type == Op.FullyConnected:
            n_in_elems = weight_tensor.shape[-2]
            elms = ifm_tensor.elements()
            batch_size = elms // n_in_elems
            assert batch_size * n_in_elems == elms

            self.ifm_shapes.append([batch_size, 1, 1, n_in_elems])
            self.ofm_shapes.append(ofm_tensor.get_full_shape())
        elif self.type == Op.Softmax:
            self.ifm_shapes.append(ifm_tensor.get_full_shape())
            self.ofm_shapes.append(ofm_tensor.get_full_shape())
        elif self.type.is_split_op() or self.type.is_concat_op():
            for inp in self.inputs:
                if inp is not None:
                    self.ifm_shapes.append(full_shape(4, inp.shape, 1))
                else:
                    self.ifm_shapes.append(None)
            for out in self.outputs:
                if out is not None:
                    self.ofm_shapes.append(full_shape(4, out.shape, 1))
                else:
                    self.ofm_shapes.append(None)
        else:
            self.ifm_shapes.append(full_shape(4, ifm_tensor.shape, 1))
            if ifm2_tensor is not None:
                self.ifm_shapes.append(full_shape(4, ifm2_tensor.shape, 1))
            self.ofm_shapes.append(full_shape(4, ofm_tensor.shape, 1))
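    # Example (illustrative): for a FullyConnected operation whose weight tensor
    # has shape[-2] == 16 and whose IFM holds 16 elements, n_in_elems is 16, the
    # inferred batch size is 1, and ifm_shapes becomes [[1, 1, 1, 16]].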