# Copyright (c) 2021-2023, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import itertools
import math
import warnings

import generator.tosa_utils as gtu
import numpy as np
from generator.tosa_error_if import ErrorIf
from generator.tosa_error_if import TosaErrorIfArgGen
from serializer.tosa_serializer import DTypeNames
from tosa.DType import DType
from tosa.Op import Op
from tosa.ResizeMode import ResizeMode

# DTypeNames, DType, Op and ResizeMode are convenience aliases for the
# flatc-generated types that should be enums, but aren't


class TosaQuantGen:
    """QuantizedInfo random generator helper functions.

    Specify with 'qgen': in the operator definition.
    """

    def __init__(self):
        pass

    @staticmethod
    def getZeroPoint(testGen, dtype, error_name=None):

        if dtype == DType.INT8:
            if testGen.args.zeropoint is not None:
                return min(127, max(-128, testGen.args.zeropoint))
            return testGen.randInt(-128, 128)
        elif dtype == DType.UINT8:
            if testGen.args.zeropoint is not None:
                return min(255, max(0, testGen.args.zeropoint))
            return testGen.randInt(0, 256)
        elif error_name in [
            ErrorIf.InputZeroPointNotZero,
            ErrorIf.WeightZeroPointNotZero,
            ErrorIf.OutputZeroPointNotZero,
        ]:
            zero_point = testGen.randInt(-128, 128)
            if zero_point == 0:
                zero_point = 1
            return zero_point
        return 0

    @staticmethod
    def qgUnary(testGen, op, dtype, error_name=None):
        if error_name == ErrorIf.InputZeroPointNotZero:
            qinfo = [
                TosaQuantGen.getZeroPoint(testGen, dtype, error_name),
                TosaQuantGen.getZeroPoint(testGen, dtype),
            ]
        elif error_name == ErrorIf.OutputZeroPointNotZero:
            qinfo = [
                TosaQuantGen.getZeroPoint(testGen, dtype),
                TosaQuantGen.getZeroPoint(testGen, dtype, error_name),
            ]
        else:
            qinfo = [
                TosaQuantGen.getZeroPoint(testGen, dtype),
                TosaQuantGen.getZeroPoint(testGen, dtype),
            ]
        return qinfo

    @staticmethod
    def qgConv(testGen, op, dtype_or_dtypeList, error_name=None):
        if isinstance(dtype_or_dtypeList, list):
            # a list of [input, weights, accumulator] dtypes
            dtypeList = dtype_or_dtypeList
        else:
            # an int, [input, weights, accumulator] dtypes are the same
            dtypeList = [dtype_or_dtypeList] * 3

        if error_name == ErrorIf.InputZeroPointNotZero:
            qinfo = [
                TosaQuantGen.getZeroPoint(testGen, dtypeList[0], error_name),
                TosaQuantGen.getZeroPoint(testGen, dtypeList[1]),
            ]
        elif error_name == ErrorIf.WeightZeroPointNotZero:
            qinfo = [
                TosaQuantGen.getZeroPoint(testGen, dtypeList[0]),
                TosaQuantGen.getZeroPoint(testGen, dtypeList[1], error_name),
            ]
        else:
            qinfo = [
                TosaQuantGen.getZeroPoint(testGen, dtypeList[0]),
                TosaQuantGen.getZeroPoint(testGen, dtypeList[1]),
            ]
        return qinfo

    @staticmethod
    def qgMatmul(testGen, op, dtype, error_name=None):
        if error_name == ErrorIf.InputZeroPointNotZero:
            qinfo = [
                TosaQuantGen.getZeroPoint(testGen, dtype, error_name),
                TosaQuantGen.getZeroPoint(testGen, dtype, error_name),
            ]
        else:
            qinfo = [
                TosaQuantGen.getZeroPoint(testGen, dtype),
                TosaQuantGen.getZeroPoint(testGen, dtype),
            ]
        return qinfo

    @staticmethod
    def computeMultiplierAndShift(scaleFp, scale32):
        # Derived from computeMultiplierAndShiftTosaScale32
        # Provide a floating-point scaling factor and the scale32 parameter
        # to compute the multiplier and shift

        if scale32:
            scaleBits = 31
        else:
            scaleBits = 15

        m, shift = math.frexp(scaleFp)

        if scaleFp < 0.0:
            m = -m

        multiplier = round(m * (1 << scaleBits))
        assert multiplier <= (1 << scaleBits)

        if multiplier == (1 << scaleBits):
            multiplier = multiplier // 2
            shift = shift + 1

        shift = (-shift) + scaleBits
        # print('scalefp {} scaleBits {} m {} mult {} shift {}'.format(
        #    scaleFp, scaleBits, m, multiplier, shift))

        # Adjust multiplier such that shift is in allowed value range.
        if shift == 0:
            multiplier = multiplier // 4
            shift = shift + 2
        elif shift == 1:
            multiplier = multiplier // 2
            shift = shift + 1
        elif shift == 63:
            multiplier = multiplier * 2
            shift = shift - 1

        assert multiplier <= (1 << scaleBits)
        assert shift >= 2 and shift <= 62

        return multiplier, shift
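
    # Worked example (illustrative only): computeMultiplierAndShift(0.5, True)
    # gives math.frexp(0.5) == (0.5, 0), so multiplier = round(0.5 * (1 << 31))
    # = 1 << 30 and shift = (-0) + 31 = 31; scaling a value then becomes
    # (value * (1 << 30)) >> 31, which is approximately value * 0.5.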


class TosaTensorGen:
    """Tensor generators create a shape list for the placeholder and const tensor
    data operands for the operator.

    The actual random data is generated separately for each test.
    """

    def __init__(self):
        pass

    @staticmethod
    def tgBasic(testGen, opName, rank, error_name=None):
        pl, const = opName["operands"]
        shape = testGen.makeShape(rank)

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            shape = TosaErrorIfArgGen.eiRestrictDimensions(shape)

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

            # Generates an input rank mismatch for operators with more than one input
            if error_name == ErrorIf.RankMismatch:
                if rank == 1 and i != 1:
                    shape = testGen.makeShape(rank + testGen.rng.choice([1, 2, 3]))
                elif i != 1:
                    shape = testGen.makeShape(rank + testGen.rng.choice([-1, 1]))

        return shape_list

    @staticmethod
    def tgNHWC(testGen, opName, rank, error_name=None):
        pl, const = opName["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 4

        shape = testGen.makeShape(rank)
        shape = testGen.constrictBatchSize(shape)

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name and error_name != ErrorIf.MaxDimExceeded:
            shape = TosaErrorIfArgGen.eiRestrictDimensions(shape)

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgScatter(testGen, opName, rank, error_name=None):
        pl, const = opName["operands"]

        assert pl == 2
        assert const == 0
        if error_name != ErrorIf.WrongRank:
            assert rank == 3

        values_in_shape = testGen.makeShape(rank)

        # ignore max batch size if target shape is set
        if testGen.args.max_batch_size and not testGen.args.target_shapes:
            values_in_shape[0] = min(values_in_shape[0], testGen.args.max_batch_size)

        W = testGen.randInt(
            testGen.args.tensor_shape_range[0], testGen.args.tensor_shape_range[1]
        )
        # Constrict W if one dimension is too large to keep tensor size reasonable
        if max(values_in_shape) > 5000:
            W = testGen.randInt(0, 16)

        input_shape = [values_in_shape[0], W, values_in_shape[2]]

        shape_list = []
        shape_list.append(values_in_shape.copy())
        shape_list.append(input_shape.copy())

        return shape_list

    @staticmethod
    def tgBroadcastFuzz(testGen, op, rank, error_name=None):
        shape = testGen.makeShape(rank)

        pl, const = op["operands"]

        shape_list = []

        # Choose one of the inputs to broadcast
        # Note: Simplifies OutputShaper code if we don't change first shape for errors
        bcast_idx = testGen.randInt(0 if error_name is None else 1, pl + const)
        fuzz_idx = testGen.randInt(0, rank)

        for i in range(pl + const):
            shape_bcast = shape.copy()

            # To test broadcasting, the chosen fuzz index dimension should not be 1
            if shape_bcast[fuzz_idx] == 1:
                shape_bcast[fuzz_idx] += 1

            # If the chosen input, pick a random index to broadcast
            if i == bcast_idx:
                if error_name == ErrorIf.RankMismatch:
                    # Add one rank to the shape (or more for rank of 1)
                    extra_ranks = testGen.rng.choice([1, 2, 3]) if rank == 1 else 1
                    shape_bcast = np.concatenate(
                        (shape_bcast, testGen.makeShape(extra_ranks))
                    )
                    if rank != 1:
                        # Either keep the extra rank, or remove it
                        new_len = testGen.rng.choice([-2, len(shape_bcast)])
                        shape_bcast = shape_bcast[:new_len]
                elif error_name == ErrorIf.BroadcastShapesMismatch:
                    shape_bcast[fuzz_idx] += 2
                else:
                    shape_bcast[fuzz_idx] = 1

            shape_list.append(shape_bcast)

        return shape_list
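
    # Worked example (illustrative only): for shape [2, 3, 4] with fuzz_idx 1
    # and bcast_idx 0, tgBroadcastFuzz returns [[2, 1, 4], [2, 3, 4]]; the
    # chosen input gets a 1 in the fuzzed dimension so that it must broadcast.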

    @staticmethod
    def tgConv2D(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 4

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)
        ifm_shape = testGen.constrictBatchSize(ifm_shape)

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            ifm_shape = TosaErrorIfArgGen.eiRestrictDimensions(
                ifm_shape, max_dim=24, max_items=10000
            )

        # Get the filter height/width from the operator parameters
        filter_hw = op["filter"]

        # Generate a random OFM depth
        ofm_depth = testGen.makeDimension()

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        # The bias is OC
        bias_shape = np.asarray([ofm_depth])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgConv3D(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 5

        # IFM dimensions are NDHWC
        ifm_shape = testGen.makeShape(rank)
        ifm_shape = testGen.constrictBatchSize(ifm_shape)

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            ifm_shape = TosaErrorIfArgGen.eiRestrictDimensions(
                ifm_shape, max_dim=24, max_items=10000
            )

        # Get the filter depth/height/width from the operator parameters
        filter_dhw = op["filter"]

        # Generate a random OFM channel
        ofm_channel = testGen.makeDimension()

        # The filter dimensions are ODHWI
        filter_shape = np.asarray(
            [ofm_channel, filter_dhw[0], filter_dhw[1], filter_dhw[2], ifm_shape[4]]
        )

        # The bias is OC
        bias_shape = np.asarray([ofm_channel])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgTransposeConv2D(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 4

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)
        ifm_shape = testGen.constrictBatchSize(ifm_shape)

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            ifm_shape = TosaErrorIfArgGen.eiRestrictDimensions(
                ifm_shape, max_dim=24, max_items=10000
            )

        # Get the filter height/width from the operator parameters
        filter_hw = op["filter"]

        # Generate a random OFM depth
        ofm_depth = testGen.makeDimension()

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        # The bias is OC
        bias_shape = np.asarray([ofm_depth])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgDepthwiseConv2D(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 4
        assert pl == 1 and const == 2

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)
        ifm_shape = testGen.constrictBatchSize(ifm_shape)

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            ifm_shape = TosaErrorIfArgGen.eiRestrictDimensions(
                ifm_shape, max_dim=24, max_items=10000
            )

        # Get the filter height/width from the operator parameters
        # Filter is KH, KW, C, M
        filter_hw = op["filter"]

        # Generate a random OFM depth, but don't let it get too big because
        # the output depth is M * C
        filter_m = (
            testGen.makeDimension() % (testGen.args.tensor_shape_range[1] // 4)
        ) + 1

        # The filter dimensions are HWCM
        filter_shape = np.asarray([filter_hw[0], filter_hw[1], ifm_shape[3], filter_m])

        # The bias is M * C
        bias_shape = np.asarray([ifm_shape[3] * filter_m])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgFFT2d(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 3
        assert pl == 2 and const == 0

        # IFM dimensions are NHW
        ifm_shape = testGen.makeShape(rank)

        # Select nearest lower power of two from input height and width
        ifm_shape[1] = 2 ** int(math.log(ifm_shape[1], 2))
        ifm_shape[2] = 2 ** int(math.log(ifm_shape[2], 2))

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            ifm_shape = TosaErrorIfArgGen.eiRestrictDimensions(ifm_shape)

        # Generate an invalid kernel that is not a power of two
        if error_name == ErrorIf.KernelNotPowerOfTwo:
            inc_h = 2 if ifm_shape[1] == 1 else 1
            inc_w = 2 if ifm_shape[2] == 1 else 1
            inc_choices = [(inc_h, 0), (0, inc_w), (inc_h, inc_w)]
            selected_inc = testGen.rng.choice(inc_choices)
            ifm_shape[1] += selected_inc[0]
            ifm_shape[2] += selected_inc[1]

        ifm_shape = testGen.constrictBatchSize(ifm_shape)

        ifm_shapes = [ifm_shape.copy(), ifm_shape.copy()]
        if error_name == ErrorIf.FFTInputShapeMismatch:
            modify_shape = testGen.rng.choice([0, 1])
            # Only modify kernel (H, W)
            modify_dim = testGen.rng.choice([1, 2])
            ifm_shapes[modify_shape][modify_dim] *= 2

        return [ifm_shapes[0], ifm_shapes[1]]

    @staticmethod
    def tgRFFT2d(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 3
        assert pl == 1 and const == 0

        # IFM dimensions are NHW
        ifm_shape = testGen.makeShape(rank)

        # Select nearest lower power of two from input height and width
        ifm_shape[1] = 2 ** int(math.log(ifm_shape[1], 2))
        ifm_shape[2] = 2 ** int(math.log(ifm_shape[2], 2))

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            ifm_shape = TosaErrorIfArgGen.eiRestrictDimensions(ifm_shape)

        # Generate an invalid kernel that is not a power of two
        if error_name == ErrorIf.KernelNotPowerOfTwo:
            # We must increment by 2 if current size is 1
            inc_h = 2 if ifm_shape[1] == 1 else 1
            inc_w = 2 if ifm_shape[2] == 1 else 1
            inc_choices = [(inc_h, 0), (0, inc_w), (inc_h, inc_w)]
            selected_inc = testGen.rng.choice(inc_choices)
            ifm_shape[1] += selected_inc[0]
            ifm_shape[2] += selected_inc[1]

        ifm_shape = testGen.constrictBatchSize(ifm_shape)

        return [ifm_shape]

    @staticmethod
    def tgFullyConnected(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 2

        input_shape = testGen.makeShape(rank)

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            input_shape = TosaErrorIfArgGen.eiRestrictDimensions(input_shape)

        filter_oc = testGen.rng.integers(
            low=testGen.args.tensor_shape_range[0],
            high=testGen.args.tensor_shape_range[1],
            size=1,
        )[0]
        filter_shape = np.asarray([filter_oc, input_shape[1]])

        bias_shape = np.asarray([filter_oc])

        return [input_shape, filter_shape, bias_shape]

    @staticmethod
    def tgMatmul(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 3
        assert pl == 2 and const == 0

        a_shape = testGen.makeShape(rank)

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            a_shape = TosaErrorIfArgGen.eiRestrictDimensions(a_shape)

        # Get a random number for b_oc even if target shape is defined
        b_oc = np.int32(
            testGen.rng.integers(
                low=testGen.args.tensor_shape_range[0],
                high=testGen.args.tensor_shape_range[1],
                size=1,
            )
        )[0]
        # If N or H is large let b_oc be 1 to reduce output tensor size
        if max(a_shape) > 1000:
            b_oc = 1

        b_shape = np.asarray([a_shape[0], a_shape[2], b_oc])
        return [a_shape, b_shape]

    @staticmethod
    def tgConcat(testGen, opName, rank, error_name=None):
        pl, const = opName["operands"]
        shape = testGen.makeShape(rank)

        # Create extra tensors to concat.
        # Take into account value of pl when getting maximum number of concats
        num_tensors = testGen.randInt(0, 4)
        shape_list = []
        for i in range(pl + const + num_tensors):
            if error_name == ErrorIf.ConcatInputRankMismatch and i != 0:
                remove = testGen.rng.choice([True, False])
                wrongShape = shape.copy()

                if remove and len(shape) > 1:
                    wrongShape = wrongShape[1:]
                else:
                    wrongShape = list(wrongShape)
                    wrongShape.append(testGen.rng.integers(1, 10))

                shape_list.append(wrongShape)
            else:
                shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgConcatConstInput(testGen, shapeList, axis, error_name=None):
        if error_name in [
            ErrorIf.AxisSmallerZero,
            ErrorIf.AxisLargerRank,
            ErrorIf.ConcatInputRankMismatch,
        ]:
            return shapeList

        # Split concat shape along axis to allow for multiple const inputs
        # without making too many large tensors
        if len(shapeList) == 2 or shapeList[0][axis] < len(shapeList):
            # If axis can't be split we still need to invalidate other dimensions
            if error_name == ErrorIf.ConcatInputDimMismatch:
                for shape in shapeList[1:]:
                    # Negative test shapeLists are created individually for each test,
                    # so no need to copy the shape before altering it.
                    shape[(axis + 1) % len(shape)] += testGen.rng.integers(5, 10)
            return shapeList

        # Create copy of shape we are going to split (so we don't alter shapeList)
        shape = shapeList[0].copy()
        # Add original shape as first input
        new_shapeList = [shape.copy()]
        length_on_axis = shape[axis]
        remaining_length = length_on_axis
        for i in range(len(shapeList) - 2):
            # Calculate split on axis and remaining value
            split_shape_val = int(shape[axis] / 2)
            remaining_length = remaining_length - split_shape_val

            # Append new shape, and set remaining shape
            shape[axis] = split_shape_val
            new_shapeList.append(shape.copy())

            # invalidate dimensions
            if error_name == ErrorIf.ConcatInputDimMismatch:
                shape[(axis + 1) % len(shape)] += testGen.rng.integers(5, 10)
            else:
                shape[axis] = remaining_length

            if i == len(shapeList) - 3:
                new_shapeList.append(shape.copy())

        return new_shapeList
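
    # Worked example (illustrative only): four shapes of [2, 8] concatenated
    # on axis 1 become [2, 8], [2, 4], [2, 2], [2, 2]; each iteration halves
    # the axis and the splits sum back to the original length (4 + 2 + 2 == 8).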


class TosaTensorValuesGen:
    """Tensor Value generators create the random data for each tensor in each test."""

    def __init__(self):
        pass

    class TVGInfo:
        """Enhanced tensor values information including data gen dict."""

        def __init__(self, tensorList, dataGenDict):
            self.tensorList = tensorList
            self.dataGenDict = dataGenDict

    @staticmethod
    def tvgDefault(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        pCount, cCount = op["operands"]

        tens = []
        tens.extend(
            testGen.buildPlaceholderTensors(shapeList[0:pCount], dtypeList[0:pCount])
        )
        tens.extend(testGen.buildConstTensors(shapeList[pCount:], dtypeList[pCount:]))

        return tens

    @staticmethod
    def tvgLazyGenDefault(
        testGen, opName, dtypeList, shapeList, argsDict, error_name=None
    ):
        # Variable inputs versus constants
        pCount, cCount = testGen.TOSA_OP_LIST[opName]["operands"]

        if error_name is not None or not gtu.dtypeIsSupportedByCompliance(dtypeList[0]):
            # Fall back to original path when dealing with unsupported types

            # First turn off lazy data gen so we always produce data
            lazy_data_gen = testGen.args.lazy_data_gen
            testGen.args.lazy_data_gen = False

            tens_ser_list = TosaTensorValuesGen.tvgDefault(
                testGen,
                testGen.TOSA_OP_LIST[opName],
                dtypeList,
                shapeList,
                [],
                error_name,
            )
            # Restore lazy data gen setting
            testGen.args.lazy_data_gen = lazy_data_gen
            return TosaTensorValuesGen.TVGInfo(tens_ser_list, None)

        # Create data generator meta-data
        dg_type = argsDict["dg_type"]
        dg_tens_meta = {}
        tens_ser_list = []
        for idx, shape in enumerate(shapeList):

            tens_meta = {}
            tens_meta["generator"] = gtu.DataGenType(dg_type).name
            tens_meta["data_type"] = gtu.DTYPE_ATTRIBUTES[dtypeList[idx]]["json"]
            tens_meta["shape"] = [int(i) for i in shape]
            tens_meta["input_pos"] = idx
            tens_meta["op"] = opName

            if idx < pCount:
                tens_meta["input_type"] = "variable"
                tens = testGen.ser.addPlaceholder(shape, dtypeList[idx], None)
            else:
                tens_meta["input_type"] = "constant"
                tens = testGen.ser.addConst(shape, dtypeList[idx], None)
            tens_ser_list.append(tens)

            if dg_type == gtu.DataGenType.PSEUDO_RANDOM:
                info = {}
                # TODO - generate seed for this generator based on test
                info["rng_seed"] = -1
                info["range"] = [
                    str(v)
                    for v in testGen.getDTypeRange(dtypeList[idx], high_inclusive=True)
                ]
                tens_meta["pseudo_random_info"] = info
            elif dg_type == gtu.DataGenType.DOT_PRODUCT:
                info = {}
                info["s"] = argsDict["s"]
                info["ks"] = argsDict["ks"]
                for key in gtu.DG_DOT_PRODUCT_OPTIONAL_INFO:
                    if key in argsDict:
                        if key.endswith("_type"):
                            info[key] = gtu.DTYPE_ATTRIBUTES[argsDict[key]]["json"]
                        else:
                            info[key] = argsDict[key]
                tens_meta["dot_product_info"] = info
            else:
                # TODO - other data gen type
                assert False, "TODO: support other data gen types"
            dg_tens_meta[tens.name] = tens_meta

        tens_data = {
            "version": "0.1",
            "tensors": dg_tens_meta,
        }
        return TosaTensorValuesGen.TVGInfo(tens_ser_list, tens_data)
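
    # A sketch of the resulting data-gen dict (tensor names come from the
    # serializer; all field values here are illustrative):
    #
    #     {
    #         "version": "0.1",
    #         "tensors": {
    #             "input-0": {
    #                 "generator": "PSEUDO_RANDOM",
    #                 "data_type": "FP32",
    #                 "shape": [1, 8, 8],
    #                 "input_pos": 0,
    #                 "op": "matmul",
    #                 "input_type": "variable",
    #                 "pseudo_random_info": {"rng_seed": -1, "range": ["-1.0", "1.0"]},
    #             },
    #         },
    #     }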

    @staticmethod
    def tvgNegate(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        if dtypeList[0] == DType.INT32 and error_name is None:
            pCount, cCount = op["operands"]
            assert (
                pCount == 1 and cCount == 0
715 ), "Op.NEGATE must have 1 placeholders, 0 consts"
            # Must create tensors with values within accumulator (int32) negatable
            # range
            max_val = (1 << 31) - 1
            min_val = -max_val
            arr = np.int32(
                testGen.rng.integers(low=min_val, high=(max_val + 1), size=shapeList[0])
            )
            placeholders = []
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], arr)
            )
            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, error_name
            )

    @staticmethod
    def tvgAddSub(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        if dtypeList[0] == DType.INT32 and error_name is None:
            # Make sure the operation does not cause value saturation - where
            # the number wraps due to limited number of bits to store the answer
            pCount, cCount = op["operands"]
            assert (
                pCount == 2 and cCount == 0
            ), "Op.ADD / Op.SUB must have 2 placeholders, 0 consts"
            placeholders = []
            add = op["op"] == Op.ADD
            a_arr = testGen.getRandTensor(shapeList[0], dtypeList[0])
            b_arr = testGen.getRandTensor(shapeList[1], dtypeList[1])
            if add:
                res_arr = np.add(a_arr, b_arr, dtype=np.int64)
            else:
                res_arr = np.subtract(a_arr, b_arr, dtype=np.int64)

            # Work out the saturation limits
            max_i32 = (1 << 31) - 1
            min_i32 = -(1 << 31)
            max_arr = np.full(shapeList[1], max_i32)
            min_arr = np.full(shapeList[1], min_i32)

            # Find how much values exceed the maximum/minimums
            sat_max_arr = np.maximum(res_arr - max_arr, 0)
            sat_min_arr = np.minimum(res_arr - min_arr, 0)

            if not add:
                # Swap saturation values and negate values as we need to perform opposite operations
                sat_max_arr, sat_min_arr = -sat_min_arr, -sat_max_arr

            # Create new array of unsaturated values by clipping values as needed
            b_unsat_arr = b_arr
            if (sat_max_arr != 0).any():
                # Clip values that cause saturation
                b_unsat_arr = np.subtract(b_unsat_arr, sat_max_arr, dtype=np.int32)
                # Reduce axes in unsaturated tensor to match original tensor
                for axis, dim in enumerate(b_arr.shape):
                    if dim != b_unsat_arr.shape[axis]:
                        assert (
                            dim == 1
                        ), "Op.ADD / SUB dimension must be 1 or matching to be broadcastable"
                        b_unsat_arr = np.amin(b_unsat_arr, axis=axis, keepdims=True)

            if (sat_min_arr != 0).any():
                # Clip values that cause saturation
                b_unsat_arr = np.subtract(b_unsat_arr, sat_min_arr, dtype=np.int32)
                # Reduce axes in unsaturated tensor to match original tensor
                for axis, dim in enumerate(b_arr.shape):
                    if dim != b_unsat_arr.shape[axis]:
                        assert (
                            dim == 1
                        ), "Op.ADD / SUB dimension must be 1 or matching to be broadcastable"
                        b_unsat_arr = np.amax(b_unsat_arr, axis=axis, keepdims=True)

            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
            )
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], b_unsat_arr)
            )

            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, error_name
            )
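
    # Worked example (illustrative only) of the saturation avoidance in
    # tvgAddSub above, for ADD: if a = 2**31 - 1 and b = 5 then
    # res = 2**31 + 4, which exceeds max_i32 by 5, so sat_max = 5 and b is
    # clipped to b - 5 = 0; a + 0 then lands exactly on the int32 maximum
    # without wrapping.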

    @staticmethod
    def tvgCondIfWhileLoop(
        testGen, op, dtypeList, shapeList, testArgs, error_name=None
    ):
        if dtypeList[0] in (
            DType.INT32,
            DType.INT16,
            DType.INT8,
        ):
            # Limit input tensors with cond_if_binary or while_loop to stop
            # saturation of add/sub ops with int32 and keep all logical shift
            # values between 0 to 31 for int16 or int8
            pCount, cCount = op["operands"]
            pRemain = pCount
            placeholders = []
            for idx, shape in enumerate(shapeList[:]):
                if dtypeList[0] == DType.INT32:
                    arr = testGen.getRandTensor(shapeList[idx], DType.INT16)
                else:
                    arr = np.int32(
                        testGen.rng.integers(low=0, high=32, size=shapeList[idx])
                    )
                if pRemain > 0:
                    placeholders.append(
                        testGen.ser.addPlaceholder(shape, dtypeList[idx], arr)
                    )
                    pRemain -= 1
                else:
                    placeholders.append(
                        testGen.ser.addConst(shape, dtypeList[idx], arr)
                    )

            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, error_name
            )

    @staticmethod
    def tvgArithmeticRightShift(
        testGen, op, dtypeList, shapeList, testArgs, error_name=None
    ):
        pCount, cCount = op["operands"]
        # Force value of operand[1] to be within [0, num_bits]
        assert (
            pCount == 2 and cCount == 0
        ), "Op.ArithmeticRightShift must have 2 placeholders, 0 consts"

        placeholders = []
        for idx, shape in enumerate(shapeList[:]):
            if idx == 1:
                if dtypeList[idx] == DType.INT8:
                    arr = np.int32(testGen.rng.integers(low=0, high=8, size=shape))
                elif dtypeList[idx] == DType.INT16:
                    arr = np.int32(testGen.rng.integers(low=0, high=16, size=shape))
                elif dtypeList[idx] == DType.INT32:
                    arr = np.int32(testGen.rng.integers(low=0, high=32, size=shape))
                elif error_name == ErrorIf.WrongInputType:
                    arr = np.int32(testGen.rng.integers(low=0, high=8, size=shape))
                else:
                    raise Exception("OpArithmeticRightShift: invalid input dtype")
            else:
                arr = testGen.getRandTensor(shape, dtypeList[idx])
            placeholders.append(testGen.ser.addPlaceholder(shape, dtypeList[idx], arr))

        return placeholders

    @staticmethod
    def tvgSelect(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        # Set datatype of condition tensor to boolean
        dtypeList[0] = DType.BOOL

        return TosaTensorValuesGen.tvgDefault(
            testGen, op, dtypeList, shapeList, testArgs, error_name
        )

    @staticmethod
    def tvgIntDiv(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        if error_name is None:
            pCount, cCount = op["operands"]
            assert (
                pCount == 2 and cCount == 0
            ), "Op.INTDIV must have 2 placeholders, 0 consts"

            placeholders = []

            # Two invalid cases for Op.INTDIV:
            # 1. divisor == 0
            # 2. dividend == -(1<<31) and divisor == -1
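            # (Case 2 would overflow: -(1 << 31) // -1 == 1 << 31, which is one
            # more than the int32 maximum of (1 << 31) - 1.)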
            while True:
                dividend_arr = testGen.getRandTensor(shapeList[0], dtypeList[0])
                divisor_arr = testGen.getRandTensor(shapeList[1], dtypeList[1])

                if (divisor_arr == 0).any():
                    continue

                if (dividend_arr == -(2**31)).any() and (divisor_arr == -1).any():
                    continue

                break

            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], dividend_arr)
            )
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], divisor_arr)
            )

            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, error_name
            )

    @staticmethod
    def tvgMul(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        if error_name is None:
            pCount, cCount = op["operands"]
            assert (
                pCount == 2 and cCount == 0
            ), "Op.MUL must have 2 placeholders, 0 consts"

            tens = []
            if dtypeList[0] in (DType.FP16, DType.BF16, DType.FP32):
                tens.extend(testGen.buildPlaceholderTensors(shapeList[:], dtypeList[:]))
            else:
                placeholders = []

                # Make sure multiply result in int32 range
                shift = testArgs[0]
                if dtypeList[0] == DType.INT8:
                    num_bits = 8
                elif dtypeList[0] == DType.INT16:
                    num_bits = 16
                elif dtypeList[0] == DType.INT32:
                    num_bits = 32
                elif error_name == ErrorIf.WrongInputType:
                    num_bits = 8
                else:
                    raise Exception("OpMul: invalid input dtype")

                for idx, shape in enumerate(shapeList[:]):
                    low = -(2 ** (num_bits - 1))
                    high = (2 ** (num_bits - 1)) - 1

                    a_arr = np.int32(
                        testGen.rng.integers(low=low, high=high, size=shapeList[0])
                    )
                    b_arr = np.int32(
                        testGen.rng.integers(low=low, high=high, size=shapeList[1])
                    )

                i = 0
                while True:

                    a_arr_64 = a_arr.astype(np.int64)
                    b_arr_64 = b_arr.astype(np.int64)

                    if shift > 0:
                        rounding = 1 << (shift - 1)
                        result_arr = ((a_arr_64 * b_arr_64) + rounding) >> shift
                    else:
                        result_arr = a_arr_64 * b_arr_64

                    if (result_arr > -(2**31)).all() and (
                        result_arr <= ((2**31) - 1)
                    ).all():
                        break

                    i = i + 1
                    a_arr = a_arr // 2
                    b_arr = b_arr // 2

                placeholders.append(
                    testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
                )
                placeholders.append(
                    testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr)
                )

                tens.extend(placeholders)

            return tens
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, error_name
            )

    @staticmethod
    def tvgConcat(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        count = len(shapeList) - testGen.args.num_const_inputs_concat
        if count < 1:
            count = 1
        if testGen.args.num_const_inputs_concat == 0:
            count = len(shapeList)

        # Ensure axis is an int
        testArgs[0] = int(testArgs[0])

        shapeList = TosaTensorGen.tgConcatConstInput(
            testGen, shapeList, testArgs[0], error_name
        )

        tens = []
        tens.extend(
            testGen.buildPlaceholderTensors(shapeList[0:count], dtypeList[0:count])
        )
        tens.extend(testGen.buildConstTensors(shapeList[count:], dtypeList[count:]))

        return tens

    @staticmethod
    def tvgLogicalShift(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        pCount, cCount = op["operands"]
        assert (
            pCount == 2 and cCount == 0
        ), "Op.LOGICAL_LEFT_SHIFT or Op.LOGICAL_RIGHT_SHIFT must have 2 placeholders, 0 consts"
        values_arr = testGen.getRandTensor(shapeList[0], dtypeList[0])
        shift_arr = np.int32(testGen.rng.integers(low=0, high=32, size=shapeList[1]))
        placeholders = []
        placeholders.append(
            testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], values_arr)
        )
        placeholders.append(
            testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], shift_arr)
        )

        return placeholders

    @staticmethod
    def tvgEqual(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        if error_name is None:
            pCount, cCount = op["operands"]
            assert (
                pCount == 2 and cCount == 0
            ), "Op.EQUAL must have 2 placeholders, 0 consts"
            a_arr = testGen.getRandTensor(shapeList[0], dtypeList[0])
            b_arr = testGen.getRandTensor(shapeList[1], dtypeList[1])
            # Using random numbers means that it will be very unlikely that
            # there are any matching (equal) values, therefore force that
            # there are twice the number of matching values as the tensor rank
            for num in range(0, len(shapeList[0]) * 2):
                a_index = []
                b_index = []
                # Choose an index in each axis for the whole shape
                for axis in range(0, len(shapeList[0])):
                    # Index can be up to the largest dimension in both shapes
                    index = np.int32(
                        testGen.rng.integers(
                            0, max(shapeList[0][axis], shapeList[1][axis])
                        )
                    )
                    # Reduce the index down to a shape's dim for broadcasting
                    a_index.append(min(shapeList[0][axis] - 1, index))
                    b_index.append(min(shapeList[1][axis] - 1, index))

                a_arr[tuple(a_index)] = b_arr[tuple(b_index)]

            placeholders = []
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
            )
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr)
            )
            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, error_name
            )

    @staticmethod
    def tvgReduceSum(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        if dtypeList[0] == DType.INT32:
            pCount, cCount = op["operands"]
            assert (
                pCount == 1 and cCount == 0
1079 ), "Op.REDUCE_SUM must have 1 placeholders, 0 consts"
            # Limit values so that the sum cannot exceed the range of an int32 during
            # summation of any axis
            range_val = int((1 << 31) / max(shapeList[0]))
            values_arr = np.int32(
                testGen.rng.integers(low=-range_val, high=range_val, size=shapeList[0])
            )
            placeholders = []
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], values_arr)
            )
            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, error_name
            )


class TosaArgGen:
    """Argument generators create exhaustive or random lists of attributes for
    operators that take attributes or other parameters.

    The return value is a list of (descriptive_name, [arglist]) tuples where
    the descriptive_name is appended to the test name and the arglist is expanded
    as arguments to the operator build function.
    """

    def __init__(self):
        pass

    @staticmethod
    def _add_data_generators(testGen, opName, dtype, arg_list, error_name, **kwargs):
        """Add extra tests for each type of data generator for this op."""
        if (
            error_name is None
            and "data_gen" in testGen.TOSA_OP_LIST[opName]
            and gtu.dtypeIsSupportedByCompliance(dtype)
        ):
            if dtype in [DType.FP16, DType.FP32, DType.BF16]:
                dataGenTypesList = testGen.TOSA_OP_LIST[opName]["data_gen"]["fp"]
            else:
                dataGenTypesList = testGen.TOSA_OP_LIST[opName]["data_gen"]["int"]
        else:
            # Error test or no data generator types listed - assume pseudo-random
            dataGenTypesList = (gtu.DataGenType.PSEUDO_RANDOM,)

        # Expand arg list with other data generator types
        new_arg_list = []
        for dg_type in dataGenTypesList:
            for arg_str, arg_attrs in arg_list:
                arg_dict = arg_attrs[0]
                arg_dict["dg_type"] = dg_type

                if dg_type == gtu.DataGenType.PSEUDO_RANDOM:
                    # Default test
                    new_arg_list.append((arg_str, [arg_dict]))

                elif dg_type == gtu.DataGenType.DOT_PRODUCT:
                    # Extra tests for each dot product test set
                    dot_products = kwargs["dot_products"]
                    if dot_products < testGen.TOSA_MI_DOT_PRODUCT_MIN:
                        print(
                            f"Skipping {opName} dot product test as too few calculations {dot_products} < {testGen.TOSA_MI_DOT_PRODUCT_MIN}"
                        )
                        continue
                    arg_dict["ks"] = kwargs["ks"]
                    for key in gtu.DG_DOT_PRODUCT_OPTIONAL_INFO:
                        if key in kwargs:
                            arg_dict[key] = kwargs[key]

                    for s in testGen.TOSA_MI_DOT_PRODUCT_TEST_SETS:
                        new_arg_str = f"{arg_str}_s{s}"
                        new_arg_dict = arg_dict.copy()
                        new_arg_dict["s"] = s
                        new_arg_list.append((new_arg_str, [new_arg_dict]))

        return new_arg_list
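
    # Sketch of the expansion (argument values are illustrative): an incoming
    # entry ("accf32", [{"acc_type": DType.FP32}]) becomes the pseudo-random
    # default plus one dot product entry per test set, e.g.
    # ("accf32_s0", [{"acc_type": DType.FP32, "dg_type": DOT_PRODUCT,
    # "ks": 64, "s": 0}]).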

    @staticmethod
    def agNone(testGen, opName, shapeList, dtype, error_name=None):
        """A trivial argument generator for operators that don't take any
        non-tensor arguments"""
        return [("", [])]

    @staticmethod
    def agAxis(testGen, opName, shapeList, dtype, error_name=None):
        """Build the axis argument for operators that take a single axis"""
        axes = []
        shape = shapeList[0]

        if error_name == ErrorIf.AxisSmallerZero:
            small_axis = testGen.rng.integers(-5, 0)
            axes.append(("axis{}".format(small_axis), [small_axis]))
        elif error_name == ErrorIf.AxisLargerRank:
            large_axis = testGen.rng.integers(len(shape) + 1, len(shape) + 10)
            axes.append(("axis{}".format(large_axis), [large_axis]))
        else:
            for a in range(0, len(shape)):
                axes.append(("axis{}".format(a), [a]))

        return axes

    @staticmethod
    def _calculate_sparsity(num_tests, sparsity_factor):
        sparsity = num_tests // sparsity_factor + 1
        # If there are only a small number of tests, just select them all
        if sparsity < 13:
            sparsity = 1
        # To get a variety of parameter combinations sparsity should not be a
        # multiple of 2, 3 or 5
        while sparsity % 2 == 0 or sparsity % 3 == 0 or sparsity % 5 == 0:
            sparsity += 1
        return sparsity
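
    # Worked example: _calculate_sparsity(10000, 120) starts at
    # 10000 // 120 + 1 = 84, then steps past the multiples of 2, 3 and 5
    # (84, 85, 86, 87, 88) to return 89, so roughly one in every 89 parameter
    # combinations is emitted.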

    @staticmethod
    def agConv(testGen, opName, shapeList, dtypes, error_name=None):
        # Used by CONV2D, CONV3D and DEPTHWISE_CONV2D
        arg_list = []

        if testGen.args.level8k and error_name is not None:
            # Don't produce negative large tests
            return arg_list

        # Shape: Batches, (Depth), Height, Width, Channels
        ifm_shape = shapeList[0]
        # Shape: (OFM channels), (KD), KH, KW, IFM channels
        filter_shape = shapeList[1]

        accum_dtype = gtu.get_accum_dtype_from_tgTypes(dtypes)

        # Check the rank
        conv3d = opName.startswith("conv3d")
        rank = 5 if conv3d else 4
        if error_name != ErrorIf.WrongRank:
            assert len(ifm_shape) == rank
            assert len(filter_shape) == rank

        # kernel rank omits channels
        k_rank = rank - 2
        k_pos = 0 if opName.startswith("depthwise") else 1
        k_shape = tuple(filter_shape[k_pos : (k_pos + k_rank)])

        if not testGen.args.level8k:
            # Generate comprehensive argument lists
            # - except for named errors, which use specific invalid value(s)
            if error_name == ErrorIf.PadSmallerZero:
                p_vals = [testGen.rng.choice(range(-5, 0))]
            else:
                p_vals = [x for x in range(0, testGen.args.max_conv_padding + 1)]
            paddings = {x for x in itertools.product(*([p_vals] * k_rank * 2))}
            if error_name == ErrorIf.StrideSmallerOne:
                # Can't use stride=0, as it is used to derive output shape, as a divisor
                s_vals = [testGen.rng.choice(range(-5, 0))]
            else:
                # Stride must be greater than 1 to force non-integer error
                startStride = (
                    1 if error_name != ErrorIf.ConvOutputShapeNonInteger else 2
                )
                s_vals = [
                    x for x in range(startStride, testGen.args.max_conv_stride + 1)
                ]
            strides = {x for x in itertools.product(*([s_vals] * k_rank))}
            if error_name == ErrorIf.DilationSmallerOne:
                d_vals = [testGen.rng.choice(range(-5, 1))]
            else:
                d_vals = [x for x in range(1, testGen.args.max_conv_dilation + 1)]
            dilations = {x for x in itertools.product(*([d_vals] * k_rank))}

            if not error_name and testGen.args.oversize:
                # add some oversize argument values
                if max(ifm_shape) < 64:
                    bigPadding = 9
                    paddings.update(
                        {
                            x
                            for x in itertools.product(
                                *([[0, bigPadding]] * (k_rank * 2))
                            )
                        }
                    )
                bigStride = 8
                strides.update(
                    {x for x in itertools.product(*([[1, bigStride]] * k_rank))}
                )
                bigDilation = 7
                dilations.update(
                    {x for x in itertools.product(*([[1, bigDilation]] * k_rank))}
                )
            max_dim_size = None

            # There are too many parameter combinations, so generate them sparsely,
            # very sparse for negative tests
            sparsity_factor = 2 if error_name else 120
            sparsity = TosaArgGen._calculate_sparsity(
                len(paddings) * len(strides) * len(dilations), sparsity_factor
            )
        else:
            # Only test 8k levels boundaries
            bigStride = testGen.TOSA_8K_LEVEL_MAX_STRIDE
            bigKernel = testGen.TOSA_8K_LEVEL_MAX_KERNEL
            bigPadding = bigKernel

            dilation_shape = [1] * k_rank
            pad_shape = [0] * k_rank * 2
            if conv3d:
                # Small stride apart from for big kernel (see below) to keep
                # tensor size/calculation small
                stride_shape = [1] * k_rank
                for idx in range(k_rank):
                    pad_offset = idx * 2
                    if k_shape[idx] == bigKernel:
                        # Padding shape needs to account for tensor shape
                        pad_shape[pad_offset] = bigPadding - ifm_shape[idx + 1]
                        pad_shape[pad_offset + 1] = bigPadding - dilation_shape[idx] + 1
                        # Big stride to reduce output size
                        stride_shape[idx] = bigKernel
                    else:
                        # Account for kernel size
                        pad_shape[pad_offset] = k_shape[idx] - 1
            else:
                # Always have a large stride with extra padding and dilation to keep
                # tensor calculation reasonable
                stride_shape = [bigKernel] * k_rank
                for idx in range(k_rank):
                    # Dilation shape must account for kernel size
                    dilation_shape[idx] = bigKernel // k_shape[idx]
                    # Padding shape needs to accommodate tensor/kernel & dilation
                    pad_offset = idx * 2
                    pad_shape[pad_offset] = bigPadding - ifm_shape[idx + 1]
                    pad_shape[pad_offset + 1] = bigPadding - dilation_shape[idx] + 1

            strides = {tuple(stride_shape)}
            dilations = {tuple(dilation_shape)}
            paddings = {tuple(pad_shape)}
            # Create a limit for the output dimensions size
            max_dim_size = testGen.TOSA_8K_LEVEL_MAX_KERNEL

            # Currently allow all combinations that are reasonable size
            sparsity = 1

        n = 0
        for s in sorted(list(strides)):
            for p in sorted(list(paddings)):
                for d in sorted(list(dilations)):
                    if (
                        n % sparsity == 0
                        # the padded shape must exceed the dilation * kernel to get a
                        # positive sized output shape
                        and (ifm_shape[1] - 1 + p[0] + p[1]) > d[0] * (k_shape[0] - 1)
                        and (ifm_shape[2] - 1 + p[2] + p[3]) > d[1] * (k_shape[1] - 1)
                        and (
                            k_rank < 3
                            or (
                                (ifm_shape[3] - 1 + p[4] + p[5])
                                > d[2] * (k_shape[2] - 1)
                            )
                        )
                    ):
                        remainders = []
                        outputs = []
                        for index in range(k_rank):
                            pad_offset = index * 2
                            partial = (
                                ifm_shape[index + 1]
                                - 1
                                + p[pad_offset]
                                + p[pad_offset + 1]
                                - (k_shape[index] - 1) * d[index]
                            )
                            remainders.append(partial % s[index])
                            outputs.append((partial // s[index]) + 1)

                        if (
                            # the parameters must produce integer exact output
                            error_name != ErrorIf.ConvOutputShapeNonInteger
                            and max(remainders) == 0
                        ) or (
                            error_name == ErrorIf.ConvOutputShapeNonInteger
                            and max(remainders) > 0
                        ):
                            if (
                                max_dim_size is not None
                                and max(outputs) >= max_dim_size
                            ):
                                # Test will consume too much memory - skip it
                                continue

                            # Support for larger values than 9 needs different delimiter
                            delim = "" if max(s + p + d) <= 9 else "x"
                            arg_list.append(
                                (
                                    "acc{}_st{}_pad{}_dilat{}".format(
                                        testGen.typeStr(accum_dtype),
                                        delim.join([str(x) for x in s]),
                                        delim.join([str(x) for x in p]),
                                        delim.join([str(x) for x in d]),
                                    ),
                                    [accum_dtype, s, p, d],
                                )
                            )
                        n += 1

        return arg_list
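
    # A sample agConv entry (illustrative values):
    # ("accf32_st11_pad0000_dilat11", [DType.FP32, (1, 1), (0, 0, 0, 0), (1, 1)]);
    # stride, padding and dilation are encoded both in the test-name suffix and
    # in the argument list passed to the build function.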
1382
1383 @staticmethod
James Ward8b390432022-08-12 20:48:56 +01001384 def agFullyConnected(testGen, opName, shapeList, dtypes, error_name=None):
1385
Jeremy Johnsonbc2a3db2022-09-27 13:50:00 +01001386 assert isinstance(dtypes, list) or isinstance(
1387 dtypes, tuple
1388 ), f"{dtypes} unexpected"
1389 input_dtype = dtypes[0]
James Ward8b390432022-08-12 20:48:56 +01001390
1391 if error_name == ErrorIf.WrongOutputType:
Jeremy Johnson1271c442023-09-05 11:39:26 +01001392 accum_dtype = gtu.get_wrong_output_type(opName, testGen.rng, input_dtype)
James Ward8b390432022-08-12 20:48:56 +01001393 elif error_name == ErrorIf.WrongInputType:
1394 # Pick some potentially correct output dtype if input type is incorrect
1395 accum_dtype = DType.INT32
1396 else:
Jeremy Johnson1271c442023-09-05 11:39:26 +01001397 accum_dtype = gtu.get_accum_dtype_from_tgTypes(dtypes)
James Ward8b390432022-08-12 20:48:56 +01001398
1399 return [(f"acc{testGen.typeStr(accum_dtype)}", [accum_dtype])]
1400
1401 @staticmethod
1402 def agMatMul(testGen, opName, shapeList, dtype, error_name=None):
1403 # Get valid accumulate type(s)
1404 if dtype == DType.INT8:
1405 accum_dtypes = [DType.INT32]
1406 elif dtype == DType.INT16:
1407 accum_dtypes = [DType.INT48]
1408 elif dtype == DType.FP16:
Jeremy Johnsonbc2a3db2022-09-27 13:50:00 +01001409 accum_dtypes = [DType.FP16, DType.FP32]
James Ward24dbc422022-10-19 12:20:31 +01001410 elif dtype == DType.BF16:
1411 accum_dtypes = [DType.FP32]
Jeremy Johnsonbc2a3db2022-09-27 13:50:00 +01001412 elif dtype == DType.FP32:
1413 accum_dtypes = [DType.FP32]
James Ward8b390432022-08-12 20:48:56 +01001414 elif error_name is None:
1415 assert False, f"Invalid I/O DType for MatMul: {DTypeNames[dtype]}"
1416
1417 if error_name == ErrorIf.WrongOutputType:
1418 # Get incorrect output dtype for ErrorIf case
Jeremy Johnson1271c442023-09-05 11:39:26 +01001419 accum_dtypes = [gtu.get_wrong_output_type(opName, testGen.rng, dtype)]
James Ward8b390432022-08-12 20:48:56 +01001420 elif error_name == ErrorIf.WrongInputType:
1421 # Pick some potentially correct output dtype if input type is incorrect
1422 accum_dtypes = [DType.INT32]
1423
Jeremy Johnson1271c442023-09-05 11:39:26 +01001424 arg_list = [
1425 (f"acc{testGen.typeStr(a)}", [{"acc_type": a}]) for a in accum_dtypes
1426 ]
1427
1428 arg_list = TosaArgGen._add_data_generators(
1429 testGen,
1430 opName,
1431 dtype,
1432 arg_list,
1433 error_name,
1434 ks=int(shapeList[0][2]), # Set KS = C, from input A (N,H,C)
1435 # Set dot_products = N*H*W
1436 dot_products=gtu.product(
1437 (shapeList[0][0], shapeList[0][1], shapeList[1][2])
1438 ),
1439 )
1440 return arg_list
James Ward8b390432022-08-12 20:48:56 +01001441
1442 @staticmethod
1443 def agTransposeConv2D(testGen, opName, shapeList, dtypes, error_name=None):
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001444 arg_list = []
1445
Jeremy Johnson0c716862023-04-13 17:18:19 +01001446 if testGen.args.level8k and error_name is not None:
1447 # Don't produce negative large tests
1448 return arg_list
1449
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001450 ifm_shape = shapeList[0]
1451 filter_shape = shapeList[1]
1452
Jeremy Johnson1271c442023-09-05 11:39:26 +01001453 accum_dtype = gtu.get_accum_dtype_from_tgTypes(dtypes)
James Ward8b390432022-08-12 20:48:56 +01001454
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001455 # Must be rank 4
1456 if error_name != ErrorIf.WrongRank:
1457 assert len(ifm_shape) == 4
1458 assert len(filter_shape) == 4
1459
Jeremy Johnson0c716862023-04-13 17:18:19 +01001460 k_shape = tuple(filter_shape[1:3])
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001461
Jeremy Johnson0c716862023-04-13 17:18:19 +01001462 if not testGen.args.level8k:
1463 # Generate comprehensive argument lists
1464 # - except for named errors, which use specific invalid value(s)
1465 smallest_padding_size = -min(k_shape[0], k_shape[1]) + 1
1466 if error_name == ErrorIf.PadLargerEqualKernel:
1467 max_filter_size = -max(k_shape[0], k_shape[1])
1468 p_vals = [
1469 testGen.rng.choice(range(max_filter_size - 10, max_filter_size))
1470 ]
1471 else:
1472 p_vals = [
1473 x
1474 for x in range(
1475 smallest_padding_size, testGen.args.max_conv_padding + 1
1476 )
1477 ]
1478 paddings = {x for x in itertools.product(*([p_vals] * 4))}
1479 if error_name == ErrorIf.StrideSmallerOne:
1480 # Can't use stride=0, as it is used to derive output shape, as a divisor
1481 s_vals = [testGen.rng.choice(range(-5, 0))]
1482 else:
1483 s_vals = [x for x in range(1, testGen.args.max_conv_stride + 1)]
1484 strides = {x for x in itertools.product(*([s_vals] * 2))}
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001485
Jeremy Johnson0c716862023-04-13 17:18:19 +01001486 if not error_name and testGen.args.oversize:
1487 # add some oversize argument values
1488 if max(ifm_shape) < 64:
1489 bigPadding = 9
1490 paddings.update(
1491 {
1492 x
1493 for x in itertools.product(
1494 *([[smallest_padding_size, bigPadding]] * 4)
1495 )
1496 }
1497 )
1498 bigStride = 8
1499 strides.update({x for x in itertools.product(*([[1, bigStride]] * 2))})
1500
1501 # There are too many parameter combinations, so generate them sparsely,
1502 # very sparse for negative tests
1503 sparsity_factor = 2 if error_name else 10
1504 sparsity = len(paddings) * len(strides) // sparsity_factor + 1
1505 # If there are only a small number of tests, just select them all
1506 if sparsity < 13:
1507 sparsity = 1
1508 # To get a variety of parameter combinations sparsity should not be a
1509 # multiple of 2, 3 or 5
1510 while sparsity % 2 == 0 or sparsity % 3 == 0 or sparsity % 5 == 0:
1511 sparsity += 1
1512 else:
1513 # Only test 8k levels boundaries
1514 bigStride = testGen.TOSA_8K_LEVEL_MAX_STRIDE
1515 bigKernel = testGen.TOSA_8K_LEVEL_MAX_KERNEL
1516 bigPadding = bigKernel
1517
1518 pad_shape = [0] * (len(k_shape) * 2)
1519 stride_shape = [1] * len(k_shape)
1520 # The input dimension size above which, combined with the large
1521 # stride, the output sizes would become excessively large
1522 LARGE_SIZE = 2
1523 for idx in range(len(k_shape)):
1524 pad_offset = idx * 2
1525 if k_shape[idx] == bigKernel:
1526 # Set large stride
1527 stride_shape[idx] = bigKernel
1528 # Use negative output padding to reduce shape size
1529 pad_shape[pad_offset] = -(bigPadding - 1)
1530 if ifm_shape[idx + 1] > LARGE_SIZE:
1531 pad_shape[pad_offset + 1] = -(bigPadding - 1)
1532 else:
1533 # This dimension is not the bigKernel - check whether the other one is
1534 alt_idx = 1 - idx
1535 if (
1536 k_shape[alt_idx] == bigKernel
1537 and ifm_shape[alt_idx + 1] < LARGE_SIZE
1538 ):
1539 # As the input is small, the large stride won't
1540 # affect the output so we can add some padding
1541 pad_shape[pad_offset + 1] = bigPadding
1542
1543 strides = {tuple(stride_shape)}
1544 paddings = {tuple(pad_shape)}
1545
1546 # Currently allow all combinations that are of a reasonable size
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001547 sparsity = 1
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001548
1549 n = 0
1550 for s in sorted(list(strides)):
1551 for p in sorted(list(paddings)):
TatWai Chong24594f52022-06-08 00:48:04 -07001552 if n % sparsity == 0:
1553 # Determine the output shape
Jeremy Johnson0c716862023-04-13 17:18:19 +01001554 oh = (ifm_shape[1] - 1) * s[0] + p[0] + p[1] + k_shape[0]
1555 ow = (ifm_shape[2] - 1) * s[1] + p[2] + p[3] + k_shape[1]
TatWai Chong24594f52022-06-08 00:48:04 -07001556 os = [ifm_shape[0], oh, ow, filter_shape[0]]
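                    # Worked example (illustrative): ifm H=8, s[0]=2,
                    # p[0]=p[1]=1, k_shape[0]=3 gives
                    # oh = (8 - 1) * 2 + 1 + 1 + 3 = 19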
Jeremy Johnson0c716862023-04-13 17:18:19 +01001557
1558 # Values larger than 9 need a delimiter to keep the name unambiguous
1559 delim = "" if max(s + p) <= 9 else "x"
TatWai Chong24594f52022-06-08 00:48:04 -07001560 arg_list.append(
1561 (
James Ward8b390432022-08-12 20:48:56 +01001562 "acc{}_st{}_pad{}_os{}".format(
1563 testGen.typeStr(accum_dtype),
Jeremy Johnson0c716862023-04-13 17:18:19 +01001564 delim.join([str(x) for x in s]),
1565 delim.join([str(x) for x in p]),
TatWai Chong24594f52022-06-08 00:48:04 -07001566 "x".join([str(x) for x in os]),
1567 ),
James Ward8b390432022-08-12 20:48:56 +01001568 [accum_dtype, s, p, os],
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001569 )
TatWai Chong24594f52022-06-08 00:48:04 -07001570 )
1571 n += 1
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001572
1573 return arg_list
1574
1575 @staticmethod
1576 def agPad(testGen, opName, shapeList, dtype, error_name=None):
1577 arg_list = []
1578 rank = len(shapeList[0])
1579
1580 # Exhaustively test combinations of padding on each side of each dimension
1581 # - the range of padding values is defined by pad_min and pad_max
1582 # - for padding >9, the name format needs to be more distinctive
1583 pad_min, pad_max = 0, 1
1584 pad_values = [x for x in range(pad_min, pad_max + 1)]
1585 if error_name == ErrorIf.PadSmallerZero:
1586 pad_values = [x for x in range(-2, 0)]
1587 axis_pad_values = [x for x in itertools.product(pad_values, pad_values)]
1588 shape_pad_values = itertools.product(*([axis_pad_values] * rank))
1589
1590 if dtype in [DType.BOOL, DType.INT8, DType.INT16, DType.INT32]:
1591 pad_const_int = testGen.getRandNumberDType(dtype)
1592 pad_const_fp = 0
James Wardf0890992022-11-17 11:15:14 +00001593 elif dtype in (DType.FP16, DType.BF16, DType.FP32):
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001594 pad_const_int = 0
1595 pad_const_fp = testGen.getRandNumberDType(dtype)
1596 else:
1597 return []
1598
Jeremy Johnsonfd05bb32023-02-07 16:39:24 +00001599 list_shape_pad_values = list(shape_pad_values)
1600 # If we are producing tests for rank 6 or greater, use sparsity
1601 if len(list_shape_pad_values) > 1024:
1602 sparsity_factor = 2 if error_name else 120
1603 sparsity = TosaArgGen._calculate_sparsity(
1604 len(list_shape_pad_values), sparsity_factor
1605 )
1606 else:
1607 sparsity = 1
1608
1609 for n, paddings in enumerate(list_shape_pad_values):
James Ward8b390432022-08-12 20:48:56 +01001610 paddings = list(paddings)
1611 args_valid = True
1612
1613 if error_name == ErrorIf.PadSmallerZero:
1614 # Prevent negative output shapes while still ensuring negative padding is tested
1615 for i in range(rank):
1616 dim_after_padding = (
1617 paddings[i][0] + paddings[i][1] + shapeList[0][i]
1618 )
1619 if dim_after_padding < 1:
1620 paddings[i] = (0, 0)
1621 if all([p > -1 for p in paddings[i]]):
1622 args_valid = False
Jeremy Johnsonfd05bb32023-02-07 16:39:24 +00001623 if args_valid and n % sparsity == 0:
James Ward8b390432022-08-12 20:48:56 +01001624 name = "pad"
1625 for r in range(rank):
1626 before, after = paddings[r]
1627 name = f"{name}{before}{after}"
1628 arg_list.append(
1629 (name, [np.array(paddings), pad_const_int, pad_const_fp])
1630 )
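                # e.g. (illustrative) rank 2 with paddings [(0, 1), (1, 0)]
                # produces the test name "pad0110"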
1631
1632 if error_name == ErrorIf.PadSmallerZero and len(arg_list) == 0:
1633 warnings.warn(f"No ErrorIf test created for input shape: {shapeList[0]}")
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001634
1635 return arg_list
1636
1637 @staticmethod
1638 def agPooling(testGen, opName, shapeList, dtype, error_name=None):
1639 arg_list = []
1640
1641 shape = shapeList[0]
1642 if error_name != ErrorIf.WrongRank:
1643 assert len(shape) == 4
1644
Jeremy Johnson0c716862023-04-13 17:18:19 +01001645 test_level8k = testGen.args.level8k and error_name is None
1646
Jeremy Johnson4a6fb9b2022-04-26 15:47:21 +01001647 startStride = 1 if error_name != ErrorIf.PoolingOutputShapeNonInteger else 2
Jeremy Johnson0c716862023-04-13 17:18:19 +01001648 startKernel = 2
1649 startPad = 0
1650 if not test_level8k:
1651 # Generate comprehensive argument lists
1652 p_vals = [x for x in range(startPad, testGen.args.max_pooling_padding + 1)]
1653 paddings = {x for x in itertools.product(*([p_vals] * 4))}
1654 # Stride must be greater than 1 to force non-integer error
1655 s_vals = [
1656 x for x in range(startStride, testGen.args.max_pooling_stride + 1)
1657 ]
1658 strides = {x for x in itertools.product(*([s_vals] * 2))}
1659 k_vals = [
1660 x for x in range(startKernel, testGen.args.max_pooling_kernel + 1)
1661 ]
1662 kernels = {x for x in itertools.product(*([k_vals] * 2))}
1663 max_dim_size = None
1664 else:
1665 # Only test 8k levels
1666 bigStride = testGen.TOSA_8K_LEVEL_MAX_STRIDE
1667 bigKernel = testGen.TOSA_8K_LEVEL_MAX_KERNEL
1668 strides = {(1, bigStride), (bigStride, 4)}
1669 kernels = {(1, bigKernel), (bigKernel, 3)}
1670 paddings = set()
1671 for s in sorted(list(strides)):
1672 for k in sorted(list(kernels)):
1673 padding = []
1674 for idx in range(len(k)):
1675 total_padding = s[idx] - shape[idx + 1] + k[idx]
1676 while total_padding < 0:
1677 # Must meet: shape + padding > kernel
1678 total_padding += s[idx]
1679 if total_padding < k[idx]:
1680 padding.extend([0, total_padding])
1681 else:
1682 # Note this may produce padding >= k[idx] which is not
1683 # allowed - but will be ignored in the creation loop below
1684 padding.extend([k[idx] - 1, total_padding - (k[idx] - 1)])
1685 paddings.add(tuple(padding))
1686 # Create a limit for the output dimension sizes
1687 max_dim_size = testGen.TOSA_8K_LEVEL_MAX_KERNEL
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001688
James Ward8b390432022-08-12 20:48:56 +01001689 if opName == "max_pool2d":
1690 accum_dtypes = [None] # max_pool has no accumulate dtype
1691 elif dtype == DType.INT8 or dtype == DType.INT16:
1692 accum_dtypes = [DType.INT32]
1693 elif dtype == DType.FP16:
Jeremy Johnsonbc2a3db2022-09-27 13:50:00 +01001694 accum_dtypes = [DType.FP16, DType.FP32]
James Ward24dbc422022-10-19 12:20:31 +01001695 elif dtype == DType.BF16 or dtype == DType.FP32:
Jeremy Johnsonbc2a3db2022-09-27 13:50:00 +01001696 accum_dtypes = [DType.FP32]
James Ward8b390432022-08-12 20:48:56 +01001697 elif error_name is None:
1698 assert False, f"Invalid I/O DType for pooling: {DTypeNames[dtype]}"
1699 else:
1700 # Set to something for the ErrorIf case which has
1701 # incorrect input data-type
1702 accum_dtypes = [DType.INT32]
1703
Jeremy Johnson0c716862023-04-13 17:18:19 +01001704 if not test_level8k:
1705 if testGen.args.oversize:
1706 # add some oversize argument values
1707 bigStride = 7
1708 bigKernel = 9
1709 strides.update(
1710 {x for x in itertools.product(*([[startStride, bigStride]] * 2))}
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001711 )
Jeremy Johnson0c716862023-04-13 17:18:19 +01001712 kernels.update(
1713 {x for x in itertools.product(*([[startKernel, bigKernel]] * 2))}
1714 )
1715 if max(shape) < 64:
1716 # padding must be less than the kernel size
1717 bigPadding = bigKernel - 1
1718 paddings.update(
1719 {x for x in itertools.product(*([[startPad, bigPadding]] * 4))}
1720 )
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001721
Jeremy Johnson0c716862023-04-13 17:18:19 +01001722 # There are too many parameter combinations, so generate them sparsely,
1723 # very sparse for negative tests
1724 sparsity_factor = 2 if error_name else 500
1725 sparsity = (
1726 len(paddings) * len(strides) * len(kernels) // sparsity_factor + 1
1727 )
1728 else:
1729 # We have already limited test output combinations for 8k tests
1730 sparsity = 1
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001731
James Ward8b390432022-08-12 20:48:56 +01001732 arg_str = (
1733 "acc{}_st{}_kern{}_pad{}"
1734 if accum_dtypes[0] is not None
1735 else "st{}_kern{}_pad{}"
1736 )
1737
1738 def get_arg_list_element(accum, stride, pad, kern):
1739 # Return tuple containing the formatted argument string and
1740 # the corresponding argument values
Jeremy Johnson0c716862023-04-13 17:18:19 +01001741
1742 # Values larger than 9 need a delimiter to keep the name unambiguous
1743 delim = "" if max(stride + kern + pad) <= 9 else "x"
James Ward8b390432022-08-12 20:48:56 +01001744 arg_str_elems = [
Jeremy Johnson0c716862023-04-13 17:18:19 +01001745 delim.join([str(x) for x in stride]),
1746 delim.join([str(x) for x in kern]),
1747 delim.join([str(x) for x in pad]),
James Ward8b390432022-08-12 20:48:56 +01001748 ]
1749 # Note: the value order differs from the string order
1750 arg_val_elems = [stride, pad, kern]
1751
1752 if accum is not None:
1753 arg_str_elems.insert(0, testGen.typeStr(accum))
1754 arg_val_elems.insert(0, accum)
1755 return (arg_str.format(*arg_str_elems), arg_val_elems)
1756
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001757 n = 0
James Ward8b390432022-08-12 20:48:56 +01001758 for a in accum_dtypes:
1759 for s in sorted(list(strides)):
1760 for p in sorted(list(paddings)):
1761 for k in sorted(list(kernels)):
1762 if error_name in [
1763 ErrorIf.StrideSmallerOne,
1764 ErrorIf.KernelSmallerOne,
1765 ErrorIf.PadSmallerZero,
1766 ErrorIf.PadLargerEqualKernel,
1767 ]:
1768 sNew, pNew, kNew = TosaErrorIfArgGen.eiPoolingErrorIf(
1769 testGen, error_name, s, p, k
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001770 )
James Ward8b390432022-08-12 20:48:56 +01001771 if None not in [sNew, pNew, kNew] and n % sparsity == 0:
1772 arg_vals = [a, sNew, pNew, kNew]
1773 arg_list.append(get_arg_list_element(*arg_vals))
1774 elif (
1775 n % sparsity == 0
1776 # padding must not exceed the kernel size
1777 and p[0] < k[0]
1778 and p[1] < k[0]
1779 and p[2] < k[1]
1780 and p[3] < k[1]
1781 # the padded shape must exceed the kernel size
1782 and (shape[1] + p[0] + p[1]) > k[0]
1783 and (shape[2] + p[2] + p[3]) > k[1]
Jeremy Johnson4a6fb9b2022-04-26 15:47:21 +01001784 ):
Jeremy Johnson0c716862023-04-13 17:18:19 +01001785 partial_h = shape[1] + p[0] + p[1] - k[0]
1786 partial_w = shape[2] + p[2] + p[3] - k[1]
1787 remainder_h = partial_h % s[0]
1788 remainder_w = partial_w % s[1]
1789 output_h = partial_h // s[0] + 1
1790 output_w = partial_w // s[1] + 1
1791 # debug print(shape, remainder_h, remainder_w, "/", output_h, output_w)
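                            # Worked example (illustrative): H=32, p=(0, 1),
                            # k[0]=3, s[0]=2: partial_h = 32 + 0 + 1 - 3 = 30,
                            # remainder_h = 0, output_h = 30 // 2 + 1 = 16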
James Ward8b390432022-08-12 20:48:56 +01001792 if (
1793 # the parameters must produce integer exact output
1794 error_name != ErrorIf.PoolingOutputShapeNonInteger
1795 and remainder_h == 0
1796 and remainder_w == 0
1797 ) or (
1798 error_name == ErrorIf.PoolingOutputShapeNonInteger
1799 and (remainder_h != 0 or remainder_w != 0)
1800 ):
Jeremy Johnson0c716862023-04-13 17:18:19 +01001801 if (
1802 max_dim_size is not None
1803 and max(output_h, output_w) > max_dim_size
1804 ):
1805 # Test will consume too much memory - skip it
1806 continue
James Ward8b390432022-08-12 20:48:56 +01001807 arg_vals = [a, s, p, k]
1808 arg_list.append(get_arg_list_element(*arg_vals))
1809 n += 1
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001810
1811 return arg_list
1812
1813 @staticmethod
1814 def agCast(testGen, opName, shapeList, inDtype, error_name=None):
1815 arg_list = []
1816
1817 # Enumerate the output types here
1818 if error_name == ErrorIf.WrongOutputType:
1819 dtypeList = TosaErrorIfArgGen.eiCastErrorIf(testGen, inDtype)
1820 elif inDtype == DType.INT8:
James Ward736fd1a2023-01-23 17:13:37 +00001821 dtypeList = [
1822 DType.BOOL,
1823 DType.INT16,
1824 DType.INT32,
1825 DType.FP16,
1826 DType.BF16,
1827 DType.FP32,
1828 ]
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001829 elif inDtype == DType.INT16:
James Ward736fd1a2023-01-23 17:13:37 +00001830 dtypeList = [
1831 DType.BOOL,
1832 DType.INT8,
1833 DType.INT32,
1834 DType.FP16,
1835 DType.BF16,
1836 DType.FP32,
1837 ]
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001838 elif inDtype == DType.INT32:
James Ward736fd1a2023-01-23 17:13:37 +00001839 dtypeList = [
1840 DType.BOOL,
1841 DType.INT8,
1842 DType.INT16,
1843 DType.FP16,
1844 DType.BF16,
1845 DType.FP32,
1846 ]
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001847 elif inDtype == DType.BOOL:
1848 dtypeList = [DType.INT8, DType.INT16, DType.INT32]
James Ward8b390432022-08-12 20:48:56 +01001849 elif inDtype == DType.FP16:
James Ward736fd1a2023-01-23 17:13:37 +00001850 dtypeList = [DType.INT8, DType.INT16, DType.INT32, DType.FP32]
James Ward24dbc422022-10-19 12:20:31 +01001851 elif inDtype == DType.BF16:
James Ward736fd1a2023-01-23 17:13:37 +00001852 dtypeList = [DType.INT8, DType.INT16, DType.INT32, DType.FP32]
Jeremy Johnsonbc2a3db2022-09-27 13:50:00 +01001853 elif inDtype == DType.FP32:
James Ward736fd1a2023-01-23 17:13:37 +00001854 dtypeList = [DType.INT8, DType.INT16, DType.INT32, DType.FP16, DType.BF16]
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001855 elif error_name == ErrorIf.WrongInputType:
1856 # Pick some potentially correct output type for incorrect input type
Jeremy Johnsonbc2a3db2022-09-27 13:50:00 +01001857 dtypeList = [DType.BOOL, DType.INT8, DType.INT16, DType.FP32]
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001858 else:
1859 raise Exception("Unexpected input dtype: {}".format(inDtype))
1860
1861 for dtype in dtypeList:
Jeremy Johnson3b0544c2022-10-18 16:32:19 +01001862 arg_list.append(("out{}".format(testGen.typeStr(dtype)), [dtype]))
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001863
1864 return arg_list
1865
1866 @staticmethod
1867 def agRescale(testGen, opName, shapeList, inDtype, error_name=None):
1868 arg_list = []
1869
1870 # Enumerate the output types here
Jeremy Johnsonf7f78ae2022-05-25 15:26:38 +01001871 for outDtype in [
1872 DType.UINT8,
1873 DType.INT8,
1874 DType.INT16,
1875 DType.INT32,
1876 DType.UINT16,
1877 ]:
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001878 if (
Jeremy Johnsonf7f78ae2022-05-25 15:26:38 +01001879 outDtype in [DType.UINT8, DType.INT8, DType.UINT16]
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001880 and error_name == ErrorIf.OutputZeroPointNotZero
1881 ):
1882 continue
1883 if (
Jeremy Johnsonf7f78ae2022-05-25 15:26:38 +01001884 outDtype != DType.UINT16
1885 and error_name == ErrorIf.U16OutputZeroPointNotValid
1886 ) or (
1887 inDtype != DType.UINT16
1888 and error_name == ErrorIf.U16InputZeroPointNotValid
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001889 ):
Jeremy Johnsonf7f78ae2022-05-25 15:26:38 +01001890 # ErrorIfs only valid with UINT16
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001891 continue
1892 if (
Jeremy Johnsonf7f78ae2022-05-25 15:26:38 +01001893 inDtype == DType.UINT8
1894 and outDtype not in [DType.INT8, DType.INT16]
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001895 and error_name != ErrorIf.WrongOutputType
1896 ):
Jeremy Johnsonf7f78ae2022-05-25 15:26:38 +01001897 # The only output dtypes for UINT8 are INT8/INT16, skip all others
1898 continue
1899 if (
1900 inDtype not in [DType.INT8, DType.INT16]
1901 and outDtype == DType.UINT8
1902 and error_name != ErrorIf.WrongOutputType
1903 ):
1904 # The only input dtypes for UINT8 are INT8/INT16, skip all others
1905 continue
1906 if (
1907 inDtype == DType.UINT16
1908 and outDtype != DType.INT16
1909 and error_name != ErrorIf.WrongOutputType
1910 ):
1911 # The only output dtype for UINT16 is INT16, skip all others
1912 continue
1913 if (
1914 inDtype != DType.INT16
1915 and outDtype == DType.UINT16
1916 and error_name != ErrorIf.WrongOutputType
1917 ):
1918 # The only input dtype for UINT16 is INT16, skip all others
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001919 continue
1920 if (
1921 error_name == ErrorIf.WrongOutputType
Jeremy Johnsonf7f78ae2022-05-25 15:26:38 +01001922 and not TosaErrorIfArgGen.eiRescaleWrongOutputType(inDtype, outDtype)
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001923 ):
1924 continue
1925
1926 for scale32 in [False, True]:
1927 if error_name == ErrorIf.ScaleTrue and not scale32:
1928 continue
1929 elif error_name == ErrorIf.ScaleNotTrue and scale32:
1930 continue
1931 for double_round in [False, True]:
1932 if error_name == ErrorIf.ScaleNotTrue and not double_round:
1933 continue
1934 for per_channel in [False, True]:
1935
1936 if (
1937 inDtype == DType.INT48
1938 and scale32
1939 and error_name != ErrorIf.ScaleTrue
1940 ):
1941 # Illegal condition. Must be scale32=False
1942 continue
1943 if (
1944 double_round
1945 and not scale32
1946 and error_name != ErrorIf.ScaleNotTrue
1947 ):
1948 # Illegal condition. ERROR_IF(!scale32 && double_round)
1949 continue
1950
1951 arg_list.append(
1952 (
1953 "out{}_sc{}_dr{}_pc{}".format(
Jeremy Johnson3b0544c2022-10-18 16:32:19 +01001954 testGen.typeStr(outDtype),
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001955 int(scale32),
1956 int(double_round),
1957 int(per_channel),
1958 ),
Jeremy Johnsonf7f78ae2022-05-25 15:26:38 +01001959 [outDtype, scale32, double_round, per_channel],
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001960 )
1961 )
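                        # e.g. (illustrative) INT16 input to INT8 output with
                        # scale32 and double_round on, per_channel off, is
                        # named "outi8_sc1_dr1_pc0", assuming
                        # typeStr(DType.INT8) returns "i8"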
1962
1963 return arg_list
1964
1965 @staticmethod
1966 def agMul(testGen, opName, shapeList, dtype, error_name=None):
1967 arg_list = []
1968
1969 if dtype is DType.INT32:
1970 for p in range(testGen.args.num_rand_permutations):
1971
1972 shift = testGen.randInt(0, 32)
1973
1974 arg_list.append(("perm{}_shift{}".format(p, shift), [shift]))
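                # e.g. (illustrative) p=0 with a random shift of 17 appends
                # ("perm0_shift17", [17])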
1975 else:
1976 arg_list.append(("perm0_shift0", [0]))
1977
1978 return arg_list
1979
1980 @staticmethod
1981 def agArithmeticRightShift(testGen, opName, shapeList, dtype, error_name=None):
1982 arg_list = []
1983
1984 arg_list.append(("roundTrue", [True]))
1985 arg_list.append(("roundFalse", [False]))
1986
1987 return arg_list
1988
Luke Hutton57287132023-02-06 14:54:18 +00001989 @staticmethod
1990 def agFFT2d(testGen, opName, shapeList, dtype, error_name=None):
1991 arg_list = []
1992
1993 arg_list.append(("inverseTrue", [True]))
1994 arg_list.append(("inverseFalse", [False]))
1995
1996 return arg_list
1997
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001998 # Helper function for reshape. Gets some factors of a larger number.
1999 @staticmethod
2000 def getFactors(val, start=1):
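        # e.g. (illustrative) getFactors(12) returns [1, 2, 3] - the
        # divisors of 12 that are no greater than sqrt(12)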
2001 factors = []
2002
2003 for i in range(start, int(np.sqrt(val)) + 1):
2004 if (val % i) == 0:
2005 factors.append(i)
2006
2007 return factors
2008
2009 @staticmethod
2010 def agReshape(testGen, opName, shapeList, dtype, error_name=None):
2011 arg_list = []
2012
2013 origShape = shapeList[0]
2014
2015 totalElements = 1
2016 for s in origShape:
2017 totalElements *= s
2018
2019 # This code is NOT fast. Fortunately, the numbers are fairly small.
2020 factors = TosaArgGen.getFactors(totalElements)
2021
2022 for p in range(testGen.args.num_rand_permutations):
Jeremy Johnsonfd05bb32023-02-07 16:39:24 +00002023 # Rank from 1 to TOSA_TENSOR_MAX_RANK
2024 newRank = testGen.randInt(1, (testGen.TOSA_TENSOR_MAX_RANK + 1))
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002025 if len(factors) < newRank:
2026 continue
2027
2028 found = True
2029 # escape_counter breaks the while loop if it runs for too long
2030 escape_counter = 0
2031 while found:
2032 newShape = []
Jerry Ge264f7fa2023-04-21 22:49:57 +00002033 new_shape_inferred = []
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002034 # Generate newShape ensuring it isn't a duplicate
2035 remainingElements = totalElements
2036 shuffledFactors = testGen.rng.permutation(factors)
Jerry Ge264f7fa2023-04-21 22:49:57 +00002037 inferred_dim = testGen.rng.integers(1, newRank + 1)
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002038 for i in range(1, newRank):
2039 # pick rank-1 factors
2040 newShape.append(shuffledFactors[0])
2041 remainingElements = remainingElements // shuffledFactors[0]
Jerry Ge264f7fa2023-04-21 22:49:57 +00002042 if i == inferred_dim:
2043 new_shape_inferred.append(-1)
2044 else:
2045 new_shape_inferred.append(shuffledFactors[0])
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002046 shuffledFactors = testGen.rng.permutation(
2047 TosaArgGen.getFactors(remainingElements)
2048 )
2049 newShape.append(remainingElements)
Jerry Ge264f7fa2023-04-21 22:49:57 +00002050 if inferred_dim == newRank:
2051 new_shape_inferred.append(-1)
2052 else:
2053 new_shape_inferred.append(remainingElements)
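                # e.g. (illustrative) totalElements=24 and newRank=3 could
                # build newShape=[2, 3, 4] with new_shape_inferred=[2, -1, 4]
                # when inferred_dim lands on the middle axis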
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002054
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002055 # Check for duplicates
2056 found = False
2057 for name, other_shape in arg_list:
2058 if other_shape[0] == newShape:
2059 found = True
2060 break
2061
2062 escape_counter += 1
2063 if escape_counter >= 100:
2064 break
2065
2066 if not found:
Jerry Ge264f7fa2023-04-21 22:49:57 +00002067 if error_name in [
2068 ErrorIf.ReshapeOutputSizeNonInteger,
2069 ErrorIf.ReshapeOutputSizeMultiInference,
2070 ]:
2071 if newRank < 2:
2072 # Need at least two dimensions
2073 continue
2074 # NOTE: Convert inferred_dim from a 1-based to a 0-based index
2075 inferred_dim -= 1
2076 extra_dim = inferred_dim + testGen.rng.integers(1, newRank)
2077 extra_dim = extra_dim % newRank
2078 assert extra_dim != inferred_dim
2079 if error_name == ErrorIf.ReshapeOutputSizeNonInteger:
2080 elements = 1
2081 for i, dim_value in enumerate(new_shape_inferred):
2082 if i != inferred_dim and i != extra_dim:
2083 elements *= dim_value
2084 dim_value = new_shape_inferred[extra_dim]
2085 while totalElements % (elements * dim_value) == 0:
2086 dim_value += 1
2087 new_shape_inferred[extra_dim] = dim_value
2088 else:
2089 assert error_name == ErrorIf.ReshapeOutputSizeMultiInference
2090 new_shape_inferred[extra_dim] = -1
2091 else:
2092 arg_list.append(
2093 ("perm{}_rank{}_outdefined".format(p, newRank), [newShape])
2094 )
2095 if error_name != ErrorIf.TensorSizeInputOutputMismatch:
2096 arg_list.append(
2097 (
2098 "perm{}_rank{}_outinferred".format(p, newRank),
2099 [new_shape_inferred],
2100 )
2101 )
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002102
2103 return arg_list
2104
2105 @staticmethod
2106 def agTranspose(testGen, opName, shapeList, dtype, error_name=None):
2107 arg_list = []
2108
2109 ifm_shape = shapeList[0]
2110
2111 if error_name == ErrorIf.IndexOutsideBounds:
2112 incorrect_large_index = range(len(ifm_shape) + 1, 2 * len(ifm_shape) + 1)
2113 incorrect_small_index = range(-len(ifm_shape), 0)
2114 permutations = [p for p in itertools.permutations(incorrect_large_index)]
2115 permutations.extend(
2116 [p for p in itertools.permutations(incorrect_small_index)]
2117 )
2118 elif error_name == ErrorIf.IndexUsedTwice:
2119 # Create list with a duplicated index
2120 perm_range = list(range(len(ifm_shape)))
2121 index_choice = testGen.rng.choice(range(len(perm_range)))
2122 perm_range[(index_choice + 1) % len(perm_range)] = perm_range[index_choice]
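            # e.g. (illustrative) rank 3 with index_choice=1 turns [0, 1, 2]
            # into [0, 1, 1], so every generated permutation repeats index 1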
2123 permutations = [p for p in itertools.permutations(perm_range)]
2124
2125 else:
2126 # Get all permutations
2127 permutations = [p for p in itertools.permutations(range(len(ifm_shape)))]
2128
2129 # Limit to possible permutations from shape dimension or argument setting
2130 limit = min(len(permutations), testGen.args.num_rand_permutations)
2131
2132 # Shuffle the full set of permutations so they are sampled randomly
2133 random_permutations = testGen.rng.permutation(permutations)
2134
2135 # Create a list with the required number of permutations
2136 arg_list = [
2137 ("perm{}".format(p), [random_permutations[p].tolist()])
2138 for p in range(limit)
2139 ]
2140 return arg_list
2141
2142 @staticmethod
2143 def agSlice(testGen, opName, shapeList, dtype, error_name=None):
2144 arg_list = []
2145
2146 ifm_shape = shapeList[0]
2147 rank = len(ifm_shape)
2148
2149 for p in range(testGen.args.num_rand_permutations):
2150 start = []
2151 size = []
2152
2153 valid = True
2154
2155 for i in range(rank):
2156 if ifm_shape[i] > 1:
2157 start.append(testGen.randInt(0, ifm_shape[i]))
2158 size.append(testGen.randInt(0, ifm_shape[i] - start[i]))
2159
2160 # Invalid slice size?
2161 if size[i] == 0:
2162 valid = False
2163 else:
2164 start.append(0)
2165 size.append(1)
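            # e.g. (illustrative) ifm_shape [1, 8, 4] keeps dim 0 whole
            # (start 0, size 1) and could give start=[0, 2, 1], size=[1, 3, 2]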
2166
2167 if valid:
2168 # If ERROR_IF test required then incorrect start, size will be returned
2169 start, size = TosaErrorIfArgGen.eiSliceErrorIf(
2170 testGen, error_name, ifm_shape, start, size
2171 )
2172 arg_list.append(("perm{}".format(p), [start, size]))
2173 return arg_list
2174
2175 @staticmethod
2176 def agTile(testGen, opName, shapeList, dtype, error_name=None):
2177 arg_list = []
2178
2179 ifm_shape = shapeList[0]
2180 rank = len(ifm_shape)
2181
2182 for p in range(testGen.args.num_rand_permutations):
2183
2184 # Pick a few random, but small multiple values
2185 # because otherwise this has a tendency to generate
2186 # enormous tensors
2187 multiples = []
2188 for i in range(rank):
2189 if ifm_shape[i] > 1000:
2190 # Use a multiple of 1 when the ifm_shape dimension is large, to
2191 # limit the tensor size
2192 multiples.append(1)
2193 elif max(ifm_shape) > 1000:
2194 multiples.append(2)
2195 else:
2196 multiples.append(testGen.randInt(1, 4))
2197 arg_list.append(("perm{}".format(p), [multiples]))
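            # e.g. (illustrative) ifm_shape [2, 2000, 3] always gives
            # multiples [2, 1, 2]: the oversize axis is pinned to 1 and the
            # others to 2 because max(ifm_shape) > 1000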
2198
2199 return arg_list
2200
2201 @staticmethod
2202 def agResize(testGen, opName, shapeList, dtype, error_name=None):
2203 arg_list = []
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002204 ifm_shape = shapeList[0]
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002205
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002206 def get_aspect_ratio_resize_params():
2207 common_aspect_ratios = ((3, 2), (16, 9), (4, 3))
2208 aspect_ratio = testGen.rng.choice(common_aspect_ratios)
2209 invert = testGen.rng.choice((False, True))
2210 letterbox = testGen.rng.choice((False, True))
2211
2212 scale_y_n = aspect_ratio[0] if invert else aspect_ratio[1]
2213 scale_x_n = aspect_ratio[1] if invert else aspect_ratio[0]
2214 scale_y_d = scale_x_d = 1
2215 offset_x = offset_y = 0
2216
2217 if letterbox:
2218 max_border = scale_y_n
2219 border_y = testGen.randInt(low=0, high=max_border)
2220 border_x = 0
2221 else:
2222 # Pillarboxing
2223 border_y = 0
2224 max_border = scale_x_n
2225 border_x = testGen.randInt(low=0, high=max_border)
2226
2227 scale = (scale_y_n, scale_y_d, scale_x_n, scale_x_d)
2228 offset = (offset_y, offset_x)
2229 border = (border_y, border_x)
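            # e.g. (illustrative) aspect ratio (16, 9), not inverted, with
            # letterboxing gives scale = (9, 1, 16, 1), offset = (0, 0) and
            # border = (border_y, 0) with border_y drawn from [0, 9)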
2230
2231 return scale, offset, border
2232
2233 def get_upscale_downscale_params():
2234 valid_params = False
2235 while not valid_params:
2236 upscale = testGen.rng.choice((False, True))
2237
2238 # True if sampling begins from (0,0). Otherwise (-0.5,-0.5)
2239 origin_sampling = testGen.rng.choice((False, True))
2240
2241 if upscale:
2242 shift = testGen.randInt(low=1, high=4)
2243 scale_x_d = scale_y_d = 1
2244 scale_x_n = scale_y_n = (
2245 1 << shift if origin_sampling else 2 << shift
2246 )
2247 border_x = border_y = 0 if origin_sampling else (1 << shift) - 1
2248 offset_x = offset_y = 0 if origin_sampling else -(1 << shift) + 1
2249 else:
2250 scale_x_n = 1
2251 scale_y_n = 1
2252
2253 # Return list of valid scale_*_d values (max value 4) given input dim shape
2254 def get_valid_denom(ifm_dim):
2255 return [x for x in range(1, 5) if ifm_dim % x == 1]
2256
2257 # Generate list of valid downscale values and choose one randomly
2258 valid_scale_y_ds = get_valid_denom(ifm_shape[1])
2259 valid_scale_x_ds = get_valid_denom(ifm_shape[2])
2260
2261 if not valid_scale_y_ds and not valid_scale_x_ds:
2262 # Bad parameters, skip
2263 continue
2264
2265 if not valid_scale_y_ds:
2266 scale_y_d = 1
2267 else:
2268 scale_y_d = testGen.rng.choice(valid_scale_y_ds)
2269
2270 if not valid_scale_x_ds:
2271 scale_x_d = 1
2272 else:
2273 scale_x_d = testGen.rng.choice(valid_scale_x_ds)
2274
2275 border_x = border_y = 0
2276 offset_y = testGen.randInt(0, 16 * scale_y_n)
2277 offset_x = testGen.randInt(0, 16 * scale_x_n)
2278 valid_params = True
2279
2280 scale = (scale_y_n, scale_y_d, scale_x_n, scale_x_d)
2281 offset = (offset_y, offset_x)
2282 border = (border_y, border_x)
2283 return scale, offset, border
2284
2285 def get_rand_params():
Jeremy Johnsonb2099702023-04-12 15:59:01 +01002286 def fix_scale_to_max_scale(scale_n, scale_d, max_scale):
2287 scale = scale_n / scale_d
2288 if scale > max_scale:
2289 factor = scale / max_scale
2290 new_scale_d = math.ceil(scale_d * factor)
2291 assert scale_n / new_scale_d <= max_scale
2292 scale_d = new_scale_d
2293 return scale_d
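            # Worked example (illustrative): fix_scale_to_max_scale(2048, 16, 64)
            # sees scale = 128 > 64, factor = 2, and returns
            # new_scale_d = ceil(16 * 2) = 32, since 2048 / 32 == 64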
2294
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002295 # Scale
2296 scale_y_n = testGen.randInt(low=1, high=(1 << 11))
2297 scale_x_n = testGen.randInt(low=1, high=(1 << 11))
2298
2299 scale_y_d = testGen.randInt(low=1, high=(16 * scale_y_n))
2300 scale_x_d = testGen.randInt(low=1, high=(16 * scale_x_n))
2301
Jeremy Johnsonb2099702023-04-12 15:59:01 +01002302 scale_y_d = fix_scale_to_max_scale(
2303 scale_y_n, scale_y_d, testGen.TOSA_8K_LEVEL_MAX_SCALE
2304 )
2305 scale_x_d = fix_scale_to_max_scale(
2306 scale_x_n, scale_x_d, testGen.TOSA_8K_LEVEL_MAX_SCALE
2307 )
2308
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002309 # Offsets and border within the scale
2310 offset_y = testGen.randInt(low=-scale_y_n, high=(16 * scale_y_n))
2311 offset_x = testGen.randInt(low=-scale_x_n, high=(16 * scale_x_n))
2312 border_y = testGen.randInt(low=(-16 * scale_y_n), high=scale_y_n)
2313 border_x = testGen.randInt(low=(-16 * scale_x_n), high=scale_x_n)
2314
2315 scale = (scale_y_n, scale_y_d, scale_x_n, scale_x_d)
2316 offset = (offset_y, offset_x)
2317 border = (border_y, border_x)
2318 return scale, offset, border
2319
Jeremy Johnsonb2099702023-04-12 15:59:01 +01002320 def get_level_8k_params():
2321 # Create 64x scale - 64/1 to 2048/32
2322 scale_d = testGen.randInt(
2323 low=1, high=(1 << 11) / testGen.TOSA_8K_LEVEL_MAX_SCALE
2324 )
2325 scale_n = scale_d * testGen.TOSA_8K_LEVEL_MAX_SCALE
2326 # Create half to fifth scaling
2327 scale_d_alt = testGen.randInt(low=2, high=6)
2328 scale_n_alt = 1
2329 switch = testGen.rng.choice((False, True))
2330 if switch:
2331 scale = (scale_n_alt, scale_d_alt, scale_n, scale_d)
2332 else:
2333 scale = (scale_n, scale_d, scale_n_alt, scale_d_alt)
2334
2335 offset_y = testGen.rng.choice((-scale[0], 0, (16 * scale[0]) - 1))
2336 offset_x = testGen.rng.choice((-scale[2], 0, (16 * scale[2]) - 1))
2337 offset = (offset_y, offset_x)
2338 border_y = testGen.rng.choice((-16 * scale[0], 0, scale[0] - 1))
2339 border_x = testGen.rng.choice((-16 * scale[2], 0, scale[2] - 1))
2340 border = (border_y, border_x)
2341 return scale, offset, border
2342
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002343 for mode in [ResizeMode.NEAREST, ResizeMode.BILINEAR]:
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002344 # Exclude illegal {mode, type} configurations. Pick legal output types
2345 if mode == ResizeMode.NEAREST and dtype == DType.INT8:
2346 outputDTypeList = [DType.INT8]
2347 elif mode == ResizeMode.NEAREST and dtype == DType.INT16:
2348 outputDTypeList = [DType.INT16]
2349 elif mode == ResizeMode.BILINEAR and dtype == DType.INT8:
2350 outputDTypeList = [DType.INT32]
2351 elif mode == ResizeMode.BILINEAR and dtype == DType.INT16:
2352 outputDTypeList = [DType.INT48]
James Ward8b390432022-08-12 20:48:56 +01002353 elif dtype == DType.FP16:
2354 outputDTypeList = [DType.FP16]
James Ward24dbc422022-10-19 12:20:31 +01002355 elif dtype == DType.BF16:
2356 outputDTypeList = [DType.BF16]
Jeremy Johnsonbc2a3db2022-09-27 13:50:00 +01002357 elif dtype == DType.FP32:
2358 outputDTypeList = [DType.FP32]
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002359 elif error_name == ErrorIf.WrongInputType:
2360 # If an incorrect input type is used then we set a 'correct'
2361 # output type to avoid other errors
2362 outputDTypeList = [DType.INT8, DType.INT16, DType.INT32]
2363 else:
2364 continue
2365
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002366 arg_str = "mode{}_out{}_sc{}x{}x{}x{}_off{}x{}_bor{}x{}"
2367
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002368 for outputDType in outputDTypeList:
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002369 perm = 0
2370 while perm < testGen.args.num_rand_permutations:
2371 # Random choice of type of params we are testing
Jeremy Johnsonb2099702023-04-12 15:59:01 +01002372 if not testGen.args.level8k:
2373 _rnd_param_fn = testGen.rng.choice(
2374 (
2375 get_rand_params,
2376 get_upscale_downscale_params,
2377 get_aspect_ratio_resize_params,
2378 )
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002379 )
Jeremy Johnsonb2099702023-04-12 15:59:01 +01002380 scale, offset, border = _rnd_param_fn()
2381 else:
2382 scale, offset, border = get_level_8k_params()
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002383
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002384 # Expand params for bounds-checking
2385 (scale_y_n, scale_y_d, scale_x_n, scale_x_d) = scale
2386 (offset_y, offset_x) = offset
2387 (border_y, border_x) = border
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002388
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002389 # Make sure output dimensions OH and OW are integers
2390 partial_output_y = (
2391 (ifm_shape[1] - 1) * scale_y_n - offset_y + border_y
2392 )
2393 partial_output_x = (
2394 (ifm_shape[2] - 1) * scale_x_n - offset_x + border_x
2395 )
2396 if error_name == ErrorIf.ResizeOutputShapeNonInteger:
Jeremy Johnsonb2099702023-04-12 15:59:01 +01002397 # Look for non-integer test
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002398 if (
2399 partial_output_y % scale_y_d == 0
2400 and partial_output_x % scale_x_d == 0
2401 ):
2402 # Skip this test as it doesn't produce NonInteger output
Jeremy Johnsonb2099702023-04-12 15:59:01 +01002403 if perm > 0:
2404 perm += 1
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002405 continue
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002406 else:
Jeremy Johnsonb2099702023-04-12 15:59:01 +01002407 # Alter the scaling factors to make the output integer
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002408 while partial_output_y % scale_y_d != 0:
2409 scale_y_d -= 1
2410 while partial_output_x % scale_x_d != 0:
2411 scale_x_d -= 1
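                        # Worked example (illustrative): partial_output_y=30
                        # with scale_y_d=4 steps down to scale_y_d=3, as
                        # 30 % 3 == 0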
Jeremy Johnsonb2099702023-04-12 15:59:01 +01002412 # Make sure we are still within max scaling
2413 if (
2414 scale_y_n / scale_y_d
2415 ) > testGen.TOSA_8K_LEVEL_MAX_SCALE or (
2416 scale_x_n / scale_x_d
2417 ) > testGen.TOSA_8K_LEVEL_MAX_SCALE:
2418 # Skip the test as it is using too large a scaling factor
2419 if perm > 0:
2420 perm += 1
2421 continue
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002422
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002423 output_y = partial_output_y // scale_y_d + 1
2424 output_x = partial_output_x // scale_x_d + 1
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002425
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002426 if (
2427 output_y >= testGen.args.max_resize_output_dim
2428 or output_x >= testGen.args.max_resize_output_dim
2429 ) and error_name is None:
2430 # Skip positive test if output dim will be too high
2431 # Avoid high test latency and OOM issues
Jeremy Johnsonb2099702023-04-12 15:59:01 +01002432 if not testGen.args.level8k or perm > 0:
2433 perm += 1
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002434 continue
2435
2436 if (
2437 output_y <= 0
Jeremy Johnson1271c442023-09-05 11:39:26 +01002438 or output_y >= gtu.MAX_RESIZE_DIMENSION
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002439 or output_x <= 0
Jeremy Johnson1271c442023-09-05 11:39:26 +01002440 or output_x >= gtu.MAX_RESIZE_DIMENSION
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002441 ):
2442 # Output dimensions out of scope
2443 if error_name is not None and perm > 0:
2444 # As long as we have one ERROR_IF test, don't worry
2445 # about creating all the other permutations
2446 perm += 1
2447 continue
2448
2449 if error_name == ErrorIf.ResizeOutputShapeMismatch and (
2450 (
Jeremy Johnson1271c442023-09-05 11:39:26 +01002451 output_y + scale_y_d >= gtu.MAX_RESIZE_DIMENSION
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002452 and output_y - scale_y_d < 1
2453 )
2454 or (
Jeremy Johnson1271c442023-09-05 11:39:26 +01002455 output_x + scale_x_d >= gtu.MAX_RESIZE_DIMENSION
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002456 and output_x - scale_x_d < 1
2457 )
2458 ):
2459 # Can't create a negative test with these params as it
2460 # will create invalid output size
2461 if perm > 0:
2462 perm += 1
2463 continue
2464
2465 scale = [scale_y_n, scale_y_d, scale_x_n, scale_x_d]
2466 offset = [offset_y, offset_x]
2467 border = [border_y, border_x]
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002468
2469 # Common for all data types
2470 if error_name is not None:
2471 (
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002472 scale,
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002473 offset,
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002474 border,
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002475 outputDTypeNew,
2476 ) = TosaErrorIfArgGen.eiResizeErrorIf(
2477 testGen,
2478 error_name,
2479 mode,
2480 dtype,
2481 shapeList,
2482 outputDType,
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002483 scale,
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002484 offset,
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002485 border,
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002486 )
2487 else:
2488 outputDTypeNew = outputDType
2489
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002490 arg_to_append = (
2491 arg_str.format(
2492 "N" if mode == ResizeMode.NEAREST else "B",
2493 testGen.typeStr(outputDTypeNew),
2494 scale[0],
2495 scale[1],
2496 scale[2],
2497 scale[3],
2498 offset[0],
2499 offset[1],
2500 border[0],
2501 border[1],
2502 ),
2503 [
2504 mode,
2505 scale,
2506 offset,
2507 border,
2508 dtype,
2509 outputDTypeNew,
2510 ],
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002511 )
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002512 if arg_to_append in arg_list:
2513 # Skip already generated test params
2514 continue
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002515
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002516 # Valid permutation
2517 perm += 1
2518 arg_list.append(arg_to_append)
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002519 return arg_list
2520
2521 @staticmethod
2522 def agTable(testGen, opName, shapeList, dtype, error_name=None):
2523 arg_list = []
2524
2525 if dtype == DType.INT8:
2526 table = np.int32(
2527 testGen.rng.integers(low=-128, high=128, size=[256])
2528 ).tolist()
2529 else: # INT16
2530 table = np.int32(
2531 testGen.rng.integers(low=-32768, high=32768, size=[513])
2532 ).tolist()
Jerry Ged511f9e2022-08-12 16:12:40 -07002533 # Make sure all slopes are within the REQUIRE min/max 16-bit int range
2534 for idx in range(len(table) - 1):
2535 slope = table[idx + 1] - table[idx]
2536 # Alter the next table entry to force the slope to be ok
2537 if slope > 32767:
2538 table[idx + 1] -= slope - 32767
2539 if slope < -32768:
2540 table[idx + 1] -= slope + 32768
2541 slope = table[idx + 1] - table[idx]
2542 assert slope <= 32767 and slope >= -32768
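            # Worked example (illustrative): table[idx]=-30000,
            # table[idx+1]=10000 gives slope 40000 > 32767, so table[idx+1]
            # becomes 10000 - 7233 = 2767 and the new slope is exactly 32767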
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002543 arg_list.append(
2544 (
2545 "",
2546 [table],
2547 )
2548 )
2549 return arg_list
2550
    @staticmethod
2551 def agCondIf(testGen, opName, shapeList, dtype, error_name=None):
2552 # CondIf generates the condition values here.
2553 # They are converted to tensors in the build function, along with
2554 # the then and else blocks
2555 arg_list = []
2556
2557 for c in [False, True]:
2558 arg_list.append(("cond{}".format(int(c)), [c]))
2559
2560 return arg_list
2561
    @staticmethod
2562 def agWhileLoop(testGen, opName, shapeList, dtype, error_name=None):
2563 # While loop: test 0 iterations, 1 iteration, and more than 1
2564 arg_list = []
2565
2566 for iter in [0, 1, 4]:
2567 arg_list.append(("iter{}".format(iter), [iter]))
2568
2569 return arg_list