# Copyright (C) 2020-2021 Arm Limited or its affiliates. All rights reserved.
#
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the License); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an AS IS BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Description:
# Internal representation of a Neural Network Operation.
import copy
from collections import namedtuple
from enum import Enum
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from typing import Tuple
from typing import TYPE_CHECKING

from .api import NpuRoundingMode
from .errors import VelaError
from .numeric_util import full_shape
from .shape4d import Shape4D


if TYPE_CHECKING:
    from .tensor import Tensor

PointXY = namedtuple("PointXY", "x y")
PointXYZ = namedtuple("PointXYZ", "x y z")


class NpuBlockType(Enum):
    Default = 0
    ConvolutionMxN = 1
    VectorProduct = 2
    Pooling = 3
    ConvolutionDepthWise = 4
    ElementWise = 5
    ReduceSum = 6


class Kernel:
    """
    Kernel information for NPU operations
    """

    def __init__(
        self,
        w: int,
        h: int,
        stride_x: int = 1,
        stride_y: int = 1,
        dilation_x: int = 1,
        dilation_y: int = 1,
        valid_padding=False,
    ):
        assert stride_x > 0 and stride_y > 0
        assert dilation_x > 0 and dilation_y > 0
        self.width = w
        self.height = h
        self.stride = PointXY(stride_x, stride_y)
        self.dilation = PointXY(dilation_x, dilation_y)
        self.valid_padding = valid_padding

    def elements_wh(self) -> int:
        return self.width * self.height

    def area_width(self) -> int:
        return (self.width - 1) * self.dilation.x + 1

    def area_height(self) -> int:
        return (self.height - 1) * self.dilation.y + 1

    def dilated_wh(self) -> Tuple[int, int]:
        """Returns the dilated kernel width/height"""
        return self.dilation.x * (self.width - 1) + 1, self.dilation.y * (self.height - 1) + 1

    def __str__(self):
        return f"w={self.width}, h={self.height}, stride={tuple(self.stride)}, dilation={tuple(self.dilation)}"

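# Illustrative example (a sketch, not part of the compiler flow): a 3x3 kernel
# with dilation 2 covers a 5x5 input area, since each dimension spans
# dilation * (size - 1) + 1 = 2 * (3 - 1) + 1 = 5:
#
#     k = Kernel(w=3, h=3, dilation_x=2, dilation_y=2)
#     assert k.dilated_wh() == (5, 5)
#     assert (k.area_width(), k.area_height()) == (5, 5)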
Tim Hall4ed38bc2020-10-20 18:54:20 +010093
Louis Verhaardaee5d752020-09-30 09:01:52 +020094# Classifies operators of type Custom
95class CustomType(Enum):
96 ThirdPartyOp = 0 # Third party custom op
97 NpuOp = 1 # NPU op
98 ExistingNpuOp = 2 # NPU op that was part of the input network
99
100
101TensorIndices = namedtuple("TensorIndices", ["ifms", "weights", "biases"])
102
103NO_INDICES = TensorIndices([], [], [])
104IFM_INDICES = TensorIndices([0], [], [])
105IFM_WEIGHTS_INDICES = TensorIndices([0], [1], [])
106IFM_WEIGHTS_BIAS_INDICES = TensorIndices([0], [1], [2])
107IFM_IFM2_INDICES = TensorIndices([0, 1], [], [])
108CONV2D_BACKPROP_INDICES = TensorIndices([2], [1], [3])
109TRANSPOSE_CONV_INDICES = TensorIndices([0], [1], [3])
110CONCAT_INDICES = TensorIndices([1, 2], [], [])
111SPLIT_IFM_INDICES = TensorIndices([1], [], [])
112BLOCK_LSTM_INDICES = TensorIndices([3], [4], [])
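
# Illustrative reading of the tables above: IFM_WEIGHTS_BIAS_INDICES = ([0], [1], [2])
# means that for e.g. Conv2DBias, inputs[0] is the IFM, inputs[1] the weights and
# inputs[2] the bias, while CONV2D_BACKPROP_INDICES = ([2], [1], [3]) reflects that
# Conv2DBackpropInput takes its IFM as the third input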


# Static information related to operation codes
class OperatorInfo:
    __slots__ = ("id", "block_type", "indices", "is_unary")
    _id = 0

    def __init__(self, block_type=NpuBlockType.Default, indices=NO_INDICES, is_unary=False):
        OperatorInfo._id += 1
        self.id = OperatorInfo._id
        self.block_type = block_type
        self.indices = indices  # Indices of the different tensor purposes
        self.is_unary = is_unary  # Classifies elementwise operators


# Internally used operation codes
class Op(Enum):
    Abs = OperatorInfo(block_type=NpuBlockType.ElementWise, indices=IFM_INDICES, is_unary=True)
    Add = OperatorInfo(block_type=NpuBlockType.ElementWise, indices=IFM_IFM2_INDICES)
    AddN = OperatorInfo()
    Any = OperatorInfo()
    ArgMax = OperatorInfo()
    ArgMin = OperatorInfo()
    AvgPool = OperatorInfo(block_type=NpuBlockType.Pooling, indices=IFM_INDICES)
    BatchMatMul = OperatorInfo()
    BatchToSpaceND = OperatorInfo()
    BidirectionalSequenceLstm = OperatorInfo(block_type=NpuBlockType.VectorProduct, indices=IFM_WEIGHTS_INDICES)
    BidirectionalSequenceRnn = OperatorInfo(block_type=NpuBlockType.VectorProduct, indices=IFM_WEIGHTS_INDICES)
    BlockLSTM = OperatorInfo(block_type=NpuBlockType.VectorProduct, indices=BLOCK_LSTM_INDICES)

    CLZ = OperatorInfo(
        block_type=NpuBlockType.ElementWise, indices=IFM_INDICES, is_unary=True
    )  # NPU specific operation
    Call = OperatorInfo()
    Cast = OperatorInfo()
    Ceil = OperatorInfo()
    Clip = OperatorInfo()  # NPU specific fused activation function for clipping between activation.min/max
    Concat = OperatorInfo(indices=CONCAT_INDICES)
    ConcatEmbeddings = OperatorInfo()
    ConcatSliceWrite = OperatorInfo(indices=IFM_INDICES)
    ConcatTFLite = OperatorInfo(indices=CONCAT_INDICES)
    Const = OperatorInfo()  # Constant tensor, only used in CPU subgraphs
    Conv2D = OperatorInfo(block_type=NpuBlockType.ConvolutionMxN, indices=IFM_WEIGHTS_INDICES)
    Conv2DBackpropInput = OperatorInfo(block_type=NpuBlockType.ConvolutionMxN, indices=CONV2D_BACKPROP_INDICES)
    Conv2DBackpropInputSwitchedBias = OperatorInfo(
        block_type=NpuBlockType.ConvolutionMxN, indices=TRANSPOSE_CONV_INDICES
    )
    Conv2DBias = OperatorInfo(block_type=NpuBlockType.ConvolutionMxN, indices=IFM_WEIGHTS_BIAS_INDICES)
    Cos = OperatorInfo()
    Cumsum = OperatorInfo()
    Custom = OperatorInfo()  # Custom 3rd party operator, only used in CPU subgraphs
    CustomNpuOp = OperatorInfo()  # NPU custom operator, only used in CPU subgraphs
    Delegate = OperatorInfo()
    Densify = OperatorInfo()
    DepthToSpace = OperatorInfo()
    DepthwiseConv2DBias = OperatorInfo(block_type=NpuBlockType.ConvolutionDepthWise, indices=IFM_WEIGHTS_BIAS_INDICES)
    Dequantize = OperatorInfo(indices=IFM_INDICES)
    Div = OperatorInfo()
    Elu = OperatorInfo()
    EmbeddingLookup = OperatorInfo()
    EmbeddingLookupSparse = OperatorInfo()
    Equal = OperatorInfo()
    Exp = OperatorInfo()
    ExpandDims = OperatorInfo(indices=IFM_INDICES)
    FakeQuantWithMinMaxArgs = OperatorInfo()
    Fill = OperatorInfo()
    Floor = OperatorInfo()
    FloorDiv = OperatorInfo()
    FloorMod = OperatorInfo()
    FullyConnected = OperatorInfo(block_type=NpuBlockType.VectorProduct, indices=IFM_WEIGHTS_BIAS_INDICES)
    GatherNd = OperatorInfo()
    GatherV2 = OperatorInfo()
    Greater = OperatorInfo()
    GreaterEqual = OperatorInfo()
    HardSwish = OperatorInfo(indices=IFM_INDICES)
    HashtableLookup = OperatorInfo()
    Identity = OperatorInfo()
    If = OperatorInfo()
    L2Norm = OperatorInfo()
    L2Pool2D = OperatorInfo()
    LRN = OperatorInfo()
    LSHProjection = OperatorInfo()
    LeakyRelu = OperatorInfo(block_type=NpuBlockType.ElementWise, indices=IFM_INDICES, is_unary=True)
    Less = OperatorInfo()
    LessEqual = OperatorInfo()
    Log = OperatorInfo()
    LogSoftmax = OperatorInfo()
    LogicalAnd = OperatorInfo()
    LogicalNot = OperatorInfo()
    LogicalOr = OperatorInfo()
    Lstm = OperatorInfo(block_type=NpuBlockType.VectorProduct, indices=IFM_WEIGHTS_INDICES)
    LUT = OperatorInfo()  # NPU specific, operator has LUT, only used in fused activation functions
    MatMul = OperatorInfo(block_type=NpuBlockType.VectorProduct, indices=IFM_WEIGHTS_INDICES)
    MatrixDiag = OperatorInfo()
    MatrixSetDiag = OperatorInfo()
    Max = OperatorInfo()
    MaxPool = OperatorInfo(block_type=NpuBlockType.Pooling, indices=IFM_INDICES)
    Maximum = OperatorInfo(block_type=NpuBlockType.ElementWise, indices=IFM_IFM2_INDICES)
    Mean = OperatorInfo(indices=IFM_INDICES)
    Min = OperatorInfo()
    Minimum = OperatorInfo(block_type=NpuBlockType.ElementWise, indices=IFM_IFM2_INDICES)
    MirrorPad = OperatorInfo()
    Mul = OperatorInfo(block_type=NpuBlockType.ElementWise, indices=IFM_IFM2_INDICES)
    Neg = OperatorInfo()
    NonMaxSuppressionV4 = OperatorInfo()
    NonMaxSuppressionV5 = OperatorInfo()
    NotEqual = OperatorInfo()
    OneHot = OperatorInfo()
    Pack = OperatorInfo(indices=IFM_INDICES)
    PackReshaped = OperatorInfo(indices=IFM_INDICES)
    Pad = OperatorInfo(indices=IFM_INDICES)
    PadV2 = OperatorInfo()
    Placeholder = OperatorInfo()  # Only used in CPU subgraphs
    Pow = OperatorInfo()
    Prelu = OperatorInfo()
    Prod = OperatorInfo()
    Quantize = OperatorInfo(indices=IFM_INDICES)
    QuantizedAvgPool = OperatorInfo(block_type=NpuBlockType.Pooling, indices=IFM_INDICES)
    QuantizedConv2D = OperatorInfo(block_type=NpuBlockType.ConvolutionMxN, indices=IFM_WEIGHTS_INDICES)
    QuantizedMatMul = OperatorInfo(block_type=NpuBlockType.VectorProduct, indices=IFM_WEIGHTS_INDICES)
    QuantizedMaxPool = OperatorInfo(block_type=NpuBlockType.Pooling, indices=IFM_INDICES)
    QuantizedReshape = OperatorInfo(indices=IFM_INDICES)
    Range = OperatorInfo()
    Rank = OperatorInfo()
    ReduceSum = OperatorInfo(block_type=NpuBlockType.ReduceSum, indices=IFM_INDICES)
    Relu = OperatorInfo(indices=IFM_INDICES)
    Relu6 = OperatorInfo(indices=IFM_INDICES)
    ReluN1To1 = OperatorInfo(indices=IFM_INDICES)
    RescaleAdd = OperatorInfo(block_type=NpuBlockType.ElementWise, indices=IFM_IFM2_INDICES)
    Reshape = OperatorInfo(indices=IFM_INDICES)
    ResizeBilinear = OperatorInfo(block_type=NpuBlockType.Pooling, indices=IFM_INDICES)
    ResizeNearestNeighbor = OperatorInfo()
    ReverseSequence = OperatorInfo()
    ReverseV2 = OperatorInfo()
    Rnn = OperatorInfo(block_type=NpuBlockType.VectorProduct, indices=IFM_WEIGHTS_INDICES)
    Round = OperatorInfo()
    Rsqrt = OperatorInfo()
    SHL = OperatorInfo(block_type=NpuBlockType.ElementWise, indices=IFM_IFM2_INDICES)  # NPU specific operation
    SHR = OperatorInfo(block_type=NpuBlockType.ElementWise, indices=IFM_IFM2_INDICES)  # NPU specific operation
    ScatterNd = OperatorInfo()
    SegmentSum = OperatorInfo()
    Select = OperatorInfo()
    SelectV2 = OperatorInfo()
    Shape = OperatorInfo()
    Sigmoid = OperatorInfo(indices=IFM_INDICES)
    SignBit = OperatorInfo()
    Sin = OperatorInfo()
    SkipGram = OperatorInfo()
    Slice = OperatorInfo(indices=IFM_INDICES)
    Softmax = OperatorInfo(indices=IFM_INDICES)
    SpaceToBatchND = OperatorInfo()
    SpaceToDepth = OperatorInfo()
    SparseToDense = OperatorInfo()
    Split = OperatorInfo(indices=SPLIT_IFM_INDICES)
    SplitSliceRead = OperatorInfo(indices=IFM_INDICES)
    SplitV = OperatorInfo(indices=IFM_INDICES)
    Sqrt = OperatorInfo()
    Square = OperatorInfo()
    SquaredDifference = OperatorInfo()
    Squeeze = OperatorInfo(indices=IFM_INDICES)
    StridedSlice = OperatorInfo(indices=IFM_INDICES)
    Sub = OperatorInfo(block_type=NpuBlockType.ElementWise, indices=IFM_IFM2_INDICES)
    SubgraphInput = OperatorInfo()  # Only used in CPU subgraphs
    Sum = OperatorInfo()
    Svdf = OperatorInfo()
    Tanh = OperatorInfo(indices=IFM_INDICES)
    Tile = OperatorInfo()
    TopKV2 = OperatorInfo()
    Transpose = OperatorInfo()
    UnidirectionalSequenceLstm = OperatorInfo(block_type=NpuBlockType.VectorProduct, indices=IFM_WEIGHTS_INDICES)
    UnidirectionalSequenceRnn = OperatorInfo(block_type=NpuBlockType.VectorProduct, indices=IFM_WEIGHTS_INDICES)
    Unique = OperatorInfo()
    Unpack = OperatorInfo(indices=IFM_INDICES)
    UnpackReshaped = OperatorInfo(indices=IFM_INDICES)
    Where = OperatorInfo()
    While = OperatorInfo()
    ZerosLike = OperatorInfo()

    @property
    def info(self):
        return self.value

    @property
    def npu_block_type(self):
        return self.info.block_type

    def is_conv2d_op(self):
        return self.info.block_type == NpuBlockType.ConvolutionMxN

    def is_depthwise_conv2d_op(self):
        return self.info.block_type == NpuBlockType.ConvolutionDepthWise

    def is_pool_op(self):
        return self.info.block_type == NpuBlockType.Pooling

    def is_maxpool_op(self):
        return self in (Op.MaxPool, Op.QuantizedMaxPool)

    def is_avgpool_op(self):
        return self in (Op.QuantizedAvgPool, Op.AvgPool)

    def is_elementwise_op(self):
        return self.info.block_type == NpuBlockType.ElementWise

    def is_unary_elementwise_op(self):
        return self.info.block_type == NpuBlockType.ElementWise and self.info.is_unary

    def is_binary_elementwise_op(self):
        return self.info.block_type == NpuBlockType.ElementWise and not self.info.is_unary

    def is_relu_op(self):
        return self in (Op.Relu, Op.Relu6, Op.ReluN1To1, Op.Clip)

    def is_activation_op(self):
        return self.is_relu_op() or self in (Op.Tanh, Op.Sigmoid, Op.Softmax, Op.LUT, Op.HardSwish)

    def is_split_op(self):
        return self in (Op.Split, Op.SplitV, Op.StridedSlice, Op.Slice, Op.UnpackReshaped, Op.Unpack)

    def is_concat_op(self):
        return self in (Op.Concat, Op.ConcatTFLite, Op.PackReshaped, Op.Pack)

    def needs_bias(self):
        return bool(self.info.indices.biases)

    def needs_shapes(self):
        return bool(self.info.indices.ifms)

    @classmethod
    def op_set(cls, predicate):
        # Returns the set of all operator codes that fulfill the given predicate
        return {op_type for op_type in Op if predicate(op_type)}

    def __str__(self):
        return self.name

    __repr__ = __str__

    def __lt__(self, other):
        return self.value.id < other.value.id
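
# Illustrative examples (a sketch, not part of the compiler flow):
#
#     Op.Conv2DBias.needs_bias()  # True, since its indices.biases is non-empty
#     Op.op_set(Op.is_relu_op)    # {Op.Relu, Op.Relu6, Op.ReluN1To1, Op.Clip}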


class Padding(Enum):
    SAME = 0
    VALID = 1
    EXPLICIT = 2  # Padding is specified in a PAD operation (only used for NPU operations)


class ActivationFunction:
    """Fused activation function"""

    def __init__(self, op_type: Op):
        self.op_type = op_type  # The activation operation to be performed
        # min/max are optional; if present they are non-quantized values
        self.min: Optional[float] = None
        self.max: Optional[float] = None
        # Table lookup index, only applicable for Op.LUT activation, 0-7
        self.lut_index: int = 0

    def clone(self):
        res = copy.copy(self)
        return res


def create_activation_function(op_type: Op) -> ActivationFunction:
    """Creates activation function with min/max depending on op_type"""
    act = ActivationFunction(op_type)
    if op_type == Op.Relu:
        act.min = 0.0
    elif op_type == Op.Relu6:
        act.min = 0.0
        act.max = 6.0
    elif op_type == Op.ReluN1To1:
        act.min = -1.0
        act.max = 1.0
    elif op_type == Op.Tanh:
        act.min = -1.0
        act.max = 1.0
    elif op_type == Op.Sigmoid:
        act.min = 0.0
        act.max = 1.0
    elif op_type == Op.HardSwish:
        act.min = 0.0
    return act
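
# Illustrative example (a sketch, not part of the compiler flow): a fused ReLU6
# clamps the non-quantized output range:
#
#     act = create_activation_function(Op.Relu6)
#     assert (act.min, act.max) == (0.0, 6.0)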


def get_slice_offsets(input_shape: List[int], offset_tens: "Tensor", offset_mask: int, is_begin: bool = True):
    # For strided slice operator: get start or end offsets
    offsets = len(input_shape) * [0] if is_begin else input_shape[:]
    for idx in range(len(input_shape)):
        # If the i:th bit in the mask is set then the value on offset_tens[i] should be ignored
        if (offset_mask & (1 << idx)) == 0:
            offsets[idx] = offset_tens.values[idx]
            if offsets[idx] < 0:
                # Convert offset to positive value
                offsets[idx] += input_shape[idx]
    return offsets
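
# Illustrative example (hypothetical values): for input_shape [1, 8, 8, 16], a
# begin tensor holding [0, 2, 0, 0] and begin_mask 0b1101 (bits 0, 2 and 3 set,
# so those dimensions are ignored), the begin offsets become [0, 2, 0, 0]; for
# the end offsets (is_begin=False) the ignored dimensions instead keep the full
# input shape [1, 8, 8, 16]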


class Operation:
    """Class representing a Neural Network operation. Has a name, a type,
    input and output tensors, as well as an attribute dictionary."""

    __slots__ = (
        "type",
        "name",
        "op_index",
        "attrs",
        "inputs",
        "outputs",
        "intermediates",
        "flops",
        "scheduled_pass",
        "run_on_npu",
        "activation",
        "memory_function",
        "forced_input_quantization",
        "forced_output_quantization",
        "activation_lut",
        "_kernel",
        "ifm_shapes",
        "ofm_shapes",
        "rescale",
        "read_offsets",
        "read_shapes",
        "rounding_mode",
        "low_precision_scaling",
        "write_offset",
        "write_shape",
    )

    def __init__(self, op_type: Op, name: str):
        self.type = op_type
        self.name = name
        self.attrs: Dict[str, Any] = {}
        self.inputs: List[Tensor] = []
        self.outputs: List[Tensor] = []
        self.intermediates: List[Tensor] = []
        self.flops = 0
        self.run_on_npu = True
        # Fused activation function, if not None
        self.activation: Optional[ActivationFunction] = None
        # Fused memory function, if not None: operator code
        self.memory_function: Optional[Op] = None
        # If not None: contains QuantizationParameters to be used as input/output quantization
        # (which overrides the tensor's quantization), used in LUT
        self.forced_input_quantization = None
        self.forced_output_quantization = None
        self.scheduled_pass = None
        self.op_index = None  # input network operator index
        self.activation_lut = None
        self._kernel = None
        self.ifm_shapes: List[Shape4D] = []
        self.ofm_shapes: List[Shape4D] = []
        # If not None: contains rescale to be used as output scaling
        # (which overrides the ofm tensor's scale)
        self.rescale = None
        self.read_offsets: List[Optional[Shape4D]] = [None, None]  # offset for [ifm, ifm2]
        self.read_shapes: List[Optional[Shape4D]] = [None, None]  # read shape for [ifm, ifm2]
        self.rounding_mode: Optional[NpuRoundingMode] = None
        # The Mean operator (implemented as a depthwise convolution) requires scaling
        # to be calculated differently in one case. In that case, this is set to True.
        self.low_precision_scaling = False
        # Write offset, for operations that only produce a part of the OFM
        self.write_offset: Optional[Shape4D] = None
        # The amount of OFM that is produced by the operation (only if write_offset is not None).
        # E.g. an operation that only fills the bottom row of an OFM of size 1x10x8x1 would have
        # write_offset 0,9,0,0, write_shape 1,1,8,1
        self.write_shape: Optional[Shape4D] = None

    def clone(self, suffix="_clone"):
        res = Operation(self.type, self.name + suffix)

        res.attrs = dict(self.attrs)
        res.inputs = list(self.inputs)
        res.outputs = list(self.outputs)
        res.intermediates = list(self.intermediates)
        res.flops = self.flops
        res.run_on_npu = self.run_on_npu
        res.activation = None if self.activation is None else self.activation.clone()
        res.memory_function = self.memory_function
        res.forced_input_quantization = self.forced_input_quantization
        res.forced_output_quantization = self.forced_output_quantization
        res.scheduled_pass = self.scheduled_pass
        res.op_index = None  # not relevant as not part of input network
        res.read_offsets = list(self.read_offsets)
        res.read_shapes = list(self.read_shapes)
        res.rounding_mode = self.rounding_mode
        res.low_precision_scaling = self.low_precision_scaling

        return res

    def __str__(self):
        return "<nng.Operation '{}' type={}>".format(self.name, self.type)

    __repr__ = __str__

    def get_kernel_size(self):
        weights = self.weights
        if weights and self.type.npu_block_type in (NpuBlockType.ConvolutionDepthWise, NpuBlockType.ConvolutionMxN):
            weight_shape = full_shape(4, weights.shape, 1)
            h = weight_shape[-4]
            w = weight_shape[-3]
        elif self.type.npu_block_type in (NpuBlockType.Pooling, NpuBlockType.ReduceSum) and "ksize" in self.attrs:
            h, w = self.attrs["ksize"][1:3]
        else:
            h = self.attrs.get("filter_height", 1)
            w = self.attrs.get("filter_width", 1)
        return w, h

    def get_kernel_stride(self):
        if "strides" in self.attrs:
            _, h, w, _ = self.attrs["strides"]
        else:
            h = self.attrs.get("stride_h", 1)
            w = self.attrs.get("stride_w", 1)
        return w, h

    def get_kernel_dilation(self):
        if "dilation" in self.attrs:
            _, h, w, _ = self.attrs["dilation"]
        else:
            h = self.attrs.get("dilation_h_factor", 1)
            w = self.attrs.get("dilation_w_factor", 1)
        return w, h

    @property
    def kernel(self):
        k_w, k_h = self.get_kernel_size()
        s_w, s_h = self.get_kernel_stride()
        d_w, d_h = self.get_kernel_dilation()
        self._kernel = Kernel(k_w, k_h, s_w, s_h, d_w, d_h)
        return self._kernel
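
    # Illustrative example (hypothetical attrs): a depthwise convolution with
    # attrs = {"strides": (1, 2, 2, 1), "dilation": (1, 1, 1, 1)} and a 3x3
    # weight tensor yields Kernel(3, 3, 2, 2, 1, 1) from this property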

    def get_ifm_ifm2_weights_ofm(self):
        return self.ifm, self.ifm2, self.weights, self.ofm

    def get_ifm_weights_biases_ofm(self):
        return self.ifm, self.weights, self.bias, self.ofm

    def get_ifm_ifm2_weights_biases_ofm(self):
        return self.ifm, self.ifm2, self.weights, self.bias, self.ofm

    def get_ifm_ofm(self):
        return self.ifm, self.ofm

    @property
    def ifm(self):
        # Gets the IFM tensor, or None if not applicable
        return self.get_input(self.type.info.indices.ifms, 0)

    @property
    def ifm2(self):
        # Gets the IFM2 tensor, or None if not applicable
        return self.get_input(self.type.info.indices.ifms, 1)

    @property
    def bias(self):
        # Gets the bias tensor, or None if not applicable
        return self.get_input(self.type.info.indices.biases, 0)

    @property
    def weights(self):
        # Gets the weight tensor, or None if not applicable
        return self.get_input(self.type.info.indices.weights, 0)

    def get_ifm_tensors(self):
        # Gets the IFM tensors, or empty list if not applicable
        return self._index_list_to_tensors(self.type.info.indices.ifms)

    def get_weight_tensors(self):
        # Gets the weight tensors, or empty list if not applicable
        return self._index_list_to_tensors(self.type.info.indices.weights)

    def get_bias_tensors(self):
        # Gets the bias tensors, or empty list if not applicable
        return self._index_list_to_tensors(self.type.info.indices.biases)

    def _index_list_to_tensors(self, index_list):
        return [self.inputs[ix] for ix in index_list if ix < len(self.inputs)]

    def get_input(self, index_list, ix):
        if ix >= len(index_list):
            return None
        if index_list[ix] >= len(self.inputs):
            return None
        return self.inputs[index_list[ix]]
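
    # Illustrative example: for an Op.Conv2DBias operation the indices are
    # IFM_WEIGHTS_BIAS_INDICES, so op.ifm is op.inputs[0], op.weights is
    # op.inputs[1] and op.bias is op.inputs[2]; a missing input yields None
    # rather than an IndexError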

    @property
    def ofm(self):
        # Gets the OFM tensor, or None if not applicable
        return self.outputs[0] if self.outputs else None

    def get_concat_inputs_axis(self):
        assert self.type.is_concat_op()

        if self.type == Op.Concat:
            axis_tensor = self.inputs[0]
            inputs = self.inputs[1:]
            # The axis is given by a constant input tensor
            assert len(axis_tensor.ops) == 1 and axis_tensor.ops[0].type == Op.Const
            axis = int(axis_tensor.values)
        elif self.type == Op.ConcatTFLite:
            inputs = self.inputs
            axis = self.attrs["axis"]
        elif self.type == Op.PackReshaped:
            # Requires fixup_pack_input to be called before this point
            inputs = self.inputs
            axis = self.attrs["axis"]
            assert len(self.inputs) == self.attrs["values_count"]
        else:
            assert False, f"Unexpected concat op {self.type}"

        return inputs, axis

    def get_dilation_h_w(self):
        _, dilation_h, dilation_w, _ = self.attrs.get("dilation", (1, 1, 1, 1))
        return dilation_h, dilation_w

    def get_split_inputs_axis(self):
        assert self.type.is_split_op()

        offset_start = None
        offset_end = None
        axis = None
        if self.type == Op.Split:
            num_splits = self.attrs.get("num_splits")
            axis_tens = self.inputs[0]
            assert len(axis_tens.ops) == 1 and axis_tens.ops[0].type == Op.Const
            axis = int(axis_tens.values)
            input_tens = self.inputs[1]
            outputs = self.outputs
            assert num_splits == len(outputs)

        elif self.type == Op.SplitV:
            num_splits = self.attrs.get("num_splits")
            input_tens = self.inputs[0]
            size_tens = self.inputs[1]
            assert len(size_tens.ops) == 1 and size_tens.ops[0].type == Op.Const
            sizes = size_tens.values

            axis_tens = self.inputs[2]
            assert len(axis_tens.ops) == 1 and axis_tens.ops[0].type == Op.Const
            axis = int(axis_tens.values)

            for idx, size in enumerate(sizes):
                # One but only one size might be set to -1, indicating that size should be inferred
                if size == -1:
                    sizes[idx] = input_tens.shape[axis] - (sum(sizes) + 1)
                    break
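
            # Illustrative example of the inference above: input dim 10 with
            # sizes [2, -1, 3] gives 10 - (sum([2, -1, 3]) + 1) = 10 - 5 = 5,
            # i.e. the sizes become [2, 5, 3]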

            outputs = self.outputs
            assert num_splits == len(outputs)
            assert sum(sizes) == input_tens.shape[axis]

        elif self.type == Op.Slice:
            input_tens, begin_tens, size_tens = self.inputs
            outputs = self.outputs
            offset_start = [0] * len(input_tens.shape)
            offset_end = [0] * len(input_tens.shape)

            for idx in range(len(begin_tens.values)):
                # Check if the op should slice in dimension idx
                if size_tens.values[idx] != input_tens.shape[idx]:
                    offset_start[idx] = begin_tens.values[idx]
                    offset_end[idx] = size_tens.values[idx] + offset_start[idx]

        elif self.type == Op.StridedSlice:
            input_tens, begin_tens, end_tens, strides_tens = self.inputs
            outputs = self.outputs

            # Extract masks
            begin_mask = self.attrs["begin_mask"]
            ellipsis_mask = self.attrs["ellipsis_mask"]
            end_mask = self.attrs["end_mask"]
            new_axis_mask = self.attrs["new_axis_mask"]
            shrink_axis_mask = self.attrs["shrink_axis_mask"]

            # shrink_axis_mask/new_axis_mask/ellipsis_mask are not supported by the Operation class but the operation
            # may have the attribute modified and handled in the graph optimization phase.
            assert shrink_axis_mask == new_axis_mask == ellipsis_mask == 0
            offset_start = get_slice_offsets(input_tens.shape, begin_tens, begin_mask, is_begin=True)
            offset_end = get_slice_offsets(input_tens.shape, end_tens, end_mask, is_begin=False)
        elif self.type == Op.UnpackReshaped:
            # Requires fixup_unpack_output to be called before this point
            input_tens = self.inputs[0]
            outputs = self.outputs
            axis = self.attrs["axis"]
            num_splits = self.attrs["num"]
            # The number of outputs has to equal the value of the dimension to unpack
            assert num_splits == len(outputs) == input_tens.shape[axis]
        else:
            assert False

        return input_tens, outputs, axis, offset_start, offset_end

    def set_activation_lut(self, lut_tensor):
        self.activation = ActivationFunction(Op.LUT)
        self.activation_lut = lut_tensor
        self.add_input_tensor(lut_tensor)

    def add_input_tensor(self, tens):
        self.inputs.append(tens)
        if self not in tens.consumer_list:
            tens.consumer_list.append(self)

    def set_input_tensor(self, tens, idx):
        tens_to_remove = self.inputs[idx]
        if self in tens_to_remove.consumer_list:
            tens_to_remove.consumer_list.remove(self)

        self.inputs[idx] = tens
        if self not in tens.consumer_list:
            tens.consumer_list.append(self)
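
    # Illustrative note on the consumer bookkeeping above: after
    # op.set_input_tensor(new_tens, 0) the operation is removed from the
    # replaced tensor's consumer_list and appears in new_tens.consumer_list
    # exactly once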

    def get_input_quantization(self):
        if self.forced_input_quantization is not None:
            return self.forced_input_quantization
        return self.ifm.quantization

    def set_output_tensor(self, tens):
        tens.ops = [self]
        self.outputs = [tens]

    def get_output_quantization(self):
        if self.forced_output_quantization is not None:
            return self.forced_output_quantization
        return self.ofm.quantization

    def error(self, msg):
        """
        Raises a VelaError exception for errors encountered when parsing an Operation

        :param self: Operation object that resulted in the error
        :param msg: str object that contains a description of the specific error encountered
        """

        def _print_tensors(tensors):
            lines = []
            for idx, tens in enumerate(tensors):
                tens_name = getattr(tens, "name", "Not a Tensor")
                lines.append(f"        {idx} = {tens_name}")
            return lines

        if self.op_index is None:
            lines = [f"Invalid {self.type} (name = {self.name}) operator in the internal representation. {msg}"]
        else:
            lines = [f"Invalid {self.type} (op_index = {self.op_index}) operator in the input network. {msg}"]

        lines += ["    Input tensors:"]
        lines += _print_tensors(self.inputs)

        lines += ["    Output tensors:"]
        lines += _print_tensors(self.outputs)

        raise VelaError("\n".join(lines))

    def set_ifm_ofm_shapes(self):
        self.ifm_shapes = []
        self.ofm_shapes = []

        ifm_tensor, ifm2_tensor, weight_tensor, ofm_tensor = self.get_ifm_ifm2_weights_ofm()

        # Set all shapes for the op, as 4D
        if self.type == Op.FullyConnected:
            if len(self.ifm.shape) == 2:
                self.ifm_shapes.append(Shape4D([self.ifm.shape[0], 1, 1, self.ifm.shape[1]]))
            else:
                # Special case, handled in graph optimization
                self.ifm_shapes.append(Shape4D(ifm_tensor.get_full_shape()))
            if len(self.ofm.shape) == 2:
                self.ofm_shapes.append(Shape4D([self.ofm.shape[0], 1, 1, self.ofm.shape[1]]))
            else:
                self.ofm_shapes.append(Shape4D(ofm_tensor.get_full_shape()))
        elif self.type == Op.Softmax:
            self.ifm_shapes.append(Shape4D(ifm_tensor.get_full_shape()))
            self.ofm_shapes.append(Shape4D(ofm_tensor.get_full_shape()))
        elif self.type.is_split_op() or self.type.is_concat_op():
            for inp in self.inputs:
                if inp is not None:
                    self.ifm_shapes.append(Shape4D(full_shape(4, inp.shape, 1)))
                else:
                    self.ifm_shapes.append(None)
            for out in self.outputs:
                if out is not None:
                    self.ofm_shapes.append(Shape4D(full_shape(4, out.shape, 1)))
                else:
                    self.ofm_shapes.append(None)
        else:
            if ifm_tensor is not None:
                self.ifm_shapes.append(Shape4D(full_shape(4, ifm_tensor.shape, 1)))
            if ifm2_tensor is not None:
                self.ifm_shapes.append(Shape4D(full_shape(4, ifm2_tensor.shape, 1)))
            if ofm_tensor is not None:
                self.ofm_shapes.append(Shape4D(full_shape(4, ofm_tensor.shape, 1)))
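
    # Illustrative example: full_shape(4, shape, 1) left-pads a shape to rank
    # 4, so an elementwise Add with a [20, 30] IFM ends up with
    # ifm_shapes[0] == Shape4D([1, 1, 20, 30])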

    def has_scaling(self):
        scaled = True
        for tensor in [self.ifm, self.ifm2, self.ofm]:
            if tensor is not None:
                if tensor.quantization is None:
                    scaled = False
                    break

        return scaled