# Copyright (c) 2021-2023, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import itertools
import math
import warnings

import numpy as np
from generator.tosa_error_if import ErrorIf
from generator.tosa_error_if import TosaErrorIfArgGen
from generator.tosa_utils import get_accum_dtype_from_tgTypes
from generator.tosa_utils import get_wrong_output_type
from generator.tosa_utils import MAX_RESIZE_DIMENSION
from serializer.tosa_serializer import DTypeNames
from tosa.DType import DType
from tosa.Op import Op
from tosa.ResizeMode import ResizeMode

# DTypeNames, DType, Op and ResizeMode are convenience variables to the
# flatc-generated types that should be enums, but aren't


class TosaQuantGen:
    """QuantizedInfo random generator helper functions.

    Specify with 'qgen': in the operator definition.
    """

    def __init__(self):
        pass

    @staticmethod
    def getZeroPoint(testGen, dtype, error_name=None):

        if dtype == DType.INT8:
            if testGen.args.zeropoint is not None:
                return min(127, max(-128, testGen.args.zeropoint))
            return testGen.randInt(-128, 128)
        elif dtype == DType.UINT8:
            if testGen.args.zeropoint is not None:
                return min(255, max(0, testGen.args.zeropoint))
            return testGen.randInt(0, 256)
        elif error_name in [
            ErrorIf.InputZeroPointNotZero,
            ErrorIf.WeightZeroPointNotZero,
            ErrorIf.OutputZeroPointNotZero,
        ]:
            zero_point = testGen.randInt(-128, 128)
            if zero_point == 0:
                zero_point = 1
            return zero_point
        return 0

    @staticmethod
    def qgUnary(testGen, op, dtype, error_name=None):
        if error_name == ErrorIf.InputZeroPointNotZero:
            qinfo = [
                TosaQuantGen.getZeroPoint(testGen, dtype, error_name),
                TosaQuantGen.getZeroPoint(testGen, dtype),
            ]
        elif error_name == ErrorIf.OutputZeroPointNotZero:
            qinfo = [
                TosaQuantGen.getZeroPoint(testGen, dtype),
                TosaQuantGen.getZeroPoint(testGen, dtype, error_name),
            ]
        else:
            qinfo = [
                TosaQuantGen.getZeroPoint(testGen, dtype),
                TosaQuantGen.getZeroPoint(testGen, dtype),
            ]
        return qinfo

    @staticmethod
    def qgConv(testGen, op, dtype_or_dtypeList, error_name=None):
        if isinstance(dtype_or_dtypeList, list):
            # a list of [input, weights, accumulator] dtypes
            dtypeList = dtype_or_dtypeList
        else:
            # an int, [input, weights, accumulator] dtypes are the same
            dtypeList = [dtype_or_dtypeList] * 3

        if error_name == ErrorIf.InputZeroPointNotZero:
            qinfo = [
                TosaQuantGen.getZeroPoint(testGen, dtypeList[0], error_name),
                TosaQuantGen.getZeroPoint(testGen, dtypeList[1]),
            ]
        elif error_name == ErrorIf.WeightZeroPointNotZero:
            qinfo = [
                TosaQuantGen.getZeroPoint(testGen, dtypeList[0]),
                TosaQuantGen.getZeroPoint(testGen, dtypeList[1], error_name),
            ]
        else:
            qinfo = [
                TosaQuantGen.getZeroPoint(testGen, dtypeList[0]),
                TosaQuantGen.getZeroPoint(testGen, dtypeList[1]),
            ]
        return qinfo

    @staticmethod
    def qgMatmul(testGen, op, dtype, error_name=None):
        if error_name == ErrorIf.InputZeroPointNotZero:
            qinfo = [
                TosaQuantGen.getZeroPoint(testGen, dtype, error_name),
                TosaQuantGen.getZeroPoint(testGen, dtype, error_name),
            ]
        else:
            qinfo = [
                TosaQuantGen.getZeroPoint(testGen, dtype),
                TosaQuantGen.getZeroPoint(testGen, dtype),
            ]
        return qinfo

    @staticmethod
    def computeMultiplierAndShift(scaleFp, scale32):
        # Derived from computeMultiplierAndShiftTosaScale32
        # Provide a floating-point scaling factor and the scale32 parameter
        # to compute the multiplier and shift
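        # Worked example (illustrative): scaleFp=0.5 with scale32=True gives
        # frexp -> (0.5, 0), multiplier = round(0.5 * 2**31) = 2**30 and
        # shift = -0 + 31 = 31, so multiplier * 2**-shift == 0.5 as required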

        if scale32:
            scaleBits = 31
        else:
            scaleBits = 15

        m, shift = math.frexp(scaleFp)

        if scaleFp < 0.0:
            m = -m

        multiplier = round(m * (1 << scaleBits))
        assert multiplier <= (1 << scaleBits)

        if multiplier == (1 << scaleBits):
            multiplier = multiplier // 2
            shift = shift + 1

        shift = (-shift) + scaleBits
        # print('scalefp {} scaleBits {} m {} mult {} shift {}'.format(
        #   scaleFp, scaleBits, m, multiplier, shift))

        # Adjust multiplier such that shift is in allowed value range.
        if shift == 0:
            multiplier = multiplier // 4
            shift = shift + 2
        elif shift == 1:
            multiplier = multiplier // 2
            shift = shift + 1
        elif shift == 63:
            multiplier = multiplier * 2
            shift = shift - 1

        assert multiplier <= (1 << scaleBits)
        assert shift >= 2 and shift <= 62

        return multiplier, shift


class TosaTensorGen:
    """Tensor generators create a shape list for the placeholder and const tensor
    data operands for the operator.

    The actual random data is generated separately for each test.
    """

    def __init__(self):
        pass

    @staticmethod
    def tgBasic(testGen, opName, rank, error_name=None):
        pl, const = opName["operands"]
        shape = testGen.makeShape(rank)

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            shape = TosaErrorIfArgGen.eiRestrictDimensions(shape)

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

            # Generates an input rank mismatch for operators with more than one input
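            # e.g. with two rank-3 inputs, the second appended shape is
            # regenerated below with rank 2 or 4 so the ranks no longer match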
            if error_name == ErrorIf.RankMismatch:
                if rank == 1 and i != 1:
                    shape = testGen.makeShape(rank + testGen.rng.choice([1, 2, 3]))
                elif i != 1:
                    shape = testGen.makeShape(rank + testGen.rng.choice([-1, 1]))

        return shape_list

    @staticmethod
    def tgNHWC(testGen, opName, rank, error_name=None):
        pl, const = opName["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 4

        shape = testGen.makeShape(rank)
        shape = testGen.constrictBatchSize(shape)

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name and error_name != ErrorIf.MaxDimExceeded:
            shape = TosaErrorIfArgGen.eiRestrictDimensions(shape)

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgScatter(testGen, opName, rank, error_name=None):
        pl, const = opName["operands"]

        assert pl == 2
        assert const == 0
        if error_name != ErrorIf.WrongRank:
            assert rank == 3

        values_in_shape = testGen.makeShape(rank)

        # ignore max batch size if target shape is set
        if testGen.args.max_batch_size and not testGen.args.target_shapes:
            values_in_shape[0] = min(values_in_shape[0], testGen.args.max_batch_size)

        W = testGen.randInt(
            testGen.args.tensor_shape_range[0], testGen.args.tensor_shape_range[1]
        )
        # Constrict W if one dimension is too large to keep tensor size reasonable
        if max(values_in_shape) > 5000:
            W = testGen.randInt(0, 16)

        input_shape = [values_in_shape[0], W, values_in_shape[2]]

        shape_list = []
        shape_list.append(values_in_shape.copy())
        shape_list.append(input_shape.copy())

        return shape_list

    @staticmethod
    def tgBroadcastFuzz(testGen, op, rank, error_name=None):
        shape = testGen.makeShape(rank)

        pl, const = op["operands"]

        shape_list = []

        # Choose one of the inputs to broadcast
        # Note: Simplifies OutputShaper code if we don't change first shape for errors
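        # Illustrative outcome: with shape [3, 4, 5] and bcast_idx == 1, the
        # second entry in shape_list might become [3, 1, 5]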
        bcast_idx = testGen.randInt(0 if error_name is None else 1, pl + const)
        for i in range(pl + const):
            shape_bcast = shape.copy()

            # If the chosen input, pick a random index to broadcast
            if i == bcast_idx:
                fuzz_idx = testGen.randInt(0, rank)
                if error_name == ErrorIf.DimensionMismatch:
                    shape_bcast[fuzz_idx] += 1
                elif error_name == ErrorIf.RankMismatch:
                    # Add one rank to the shape (or more for rank of 1)
                    extra_ranks = testGen.rng.choice([1, 2, 3]) if rank == 1 else 1
                    shape_bcast = np.concatenate(
                        (shape_bcast, testGen.makeShape(extra_ranks))
                    )
                    if rank != 1:
                        # Either keep the extra rank, or remove it
                        new_len = testGen.rng.choice([-2, len(shape_bcast)])
                        shape_bcast = shape_bcast[:new_len]
                else:
                    shape_bcast[fuzz_idx] = 1

            shape_list.append(shape_bcast)

        return shape_list

    @staticmethod
    def tgConv2D(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 4

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)
        ifm_shape = testGen.constrictBatchSize(ifm_shape)

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            ifm_shape = TosaErrorIfArgGen.eiRestrictDimensions(
                ifm_shape, max_dim=24, max_items=10000
            )

        # Get the filter height/width from the operator parameters
        filter_hw = op["filter"]

        # Generate a random OFM depth
        ofm_depth = testGen.makeDimension()

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        # The bias is OC
        bias_shape = np.asarray([ofm_depth])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgConv3D(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 5

        # IFM dimensions are NDHWC
        ifm_shape = testGen.makeShape(rank)
        ifm_shape = testGen.constrictBatchSize(ifm_shape)

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            ifm_shape = TosaErrorIfArgGen.eiRestrictDimensions(
                ifm_shape, max_dim=24, max_items=10000
            )

        # Get the filter depth/height/width from the operator parameters
        filter_dhw = op["filter"]

        # Generate a random OFM channel
        ofm_channel = testGen.makeDimension()

        # The filter dimensions are ODHWI
        filter_shape = np.asarray(
            [ofm_channel, filter_dhw[0], filter_dhw[1], filter_dhw[2], ifm_shape[4]]
        )

        # The bias is OC
        bias_shape = np.asarray([ofm_channel])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgTransposeConv2D(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 4

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)
        ifm_shape = testGen.constrictBatchSize(ifm_shape)

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            ifm_shape = TosaErrorIfArgGen.eiRestrictDimensions(
                ifm_shape, max_dim=24, max_items=10000
            )

        # Get the filter height/width from the operator parameters
        filter_hw = op["filter"]

        # Generate a random OFM depth
        ofm_depth = testGen.makeDimension()

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        # The bias is OC
        bias_shape = np.asarray([ofm_depth])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgDepthwiseConv2D(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 4
        assert pl == 1 and const == 2

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)
        ifm_shape = testGen.constrictBatchSize(ifm_shape)

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            ifm_shape = TosaErrorIfArgGen.eiRestrictDimensions(
                ifm_shape, max_dim=24, max_items=10000
            )

        # Get the filter height/width from the operator parameters
        # Filter is KH, KW, C, M
        filter_hw = op["filter"]

        # Generate a random OFM depth, but don't let it get too big because
        # the output depth is M * C
        filter_m = (
            testGen.makeDimension() % (testGen.args.tensor_shape_range[1] // 4)
        ) + 1

        # The filter dimensions are HWCM
        filter_shape = np.asarray([filter_hw[0], filter_hw[1], ifm_shape[3], filter_m])

        # The bias is M * C
        bias_shape = np.asarray([ifm_shape[3] * filter_m])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgFFT2d(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 3
        assert pl == 2 and const == 0

        # IFM dimensions are NHW
        ifm_shape = testGen.makeShape(rank)

        # Select nearest lower power of two from input height and width
        ifm_shape[1] = 2 ** int(math.log(ifm_shape[1], 2))
        ifm_shape[2] = 2 ** int(math.log(ifm_shape[2], 2))
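        # e.g. a height or width of 10 becomes 2 ** int(log2(10)) == 8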

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            ifm_shape = TosaErrorIfArgGen.eiRestrictDimensions(ifm_shape)

        # Generate an invalid kernel that is not a power of two
        if error_name == ErrorIf.KernelNotPowerOfTwo:
            inc_h = 2 if ifm_shape[1] == 1 else 1
            inc_w = 2 if ifm_shape[2] == 1 else 1
            inc_choices = [(inc_h, 0), (0, inc_w), (inc_h, inc_w)]
            selected_inc = testGen.rng.choice(inc_choices)
            ifm_shape[1] += selected_inc[0]
            ifm_shape[2] += selected_inc[1]

        ifm_shape = testGen.constrictBatchSize(ifm_shape)

        ifm_shapes = [ifm_shape.copy(), ifm_shape.copy()]
        if error_name == ErrorIf.FFTInputShapeMismatch:
            modify_shape = testGen.rng.choice([0, 1])
            # Only modify kernel (H, W)
            modify_dim = testGen.rng.choice([1, 2])
            ifm_shapes[modify_shape][modify_dim] *= 2

        return [ifm_shapes[0], ifm_shapes[1]]

    @staticmethod
    def tgRFFT2d(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 3
        assert pl == 1 and const == 0

        # IFM dimensions are NHW
        ifm_shape = testGen.makeShape(rank)

        # Select nearest lower power of two from input height and width
        ifm_shape[1] = 2 ** int(math.log(ifm_shape[1], 2))
        ifm_shape[2] = 2 ** int(math.log(ifm_shape[2], 2))

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            ifm_shape = TosaErrorIfArgGen.eiRestrictDimensions(ifm_shape)

        # Generate an invalid kernel that is not a power of two
        if error_name == ErrorIf.KernelNotPowerOfTwo:
            # We must increment by 2 if current size is 1
            inc_h = 2 if ifm_shape[1] == 1 else 1
            inc_w = 2 if ifm_shape[2] == 1 else 1
            inc_choices = [(inc_h, 0), (0, inc_w), (inc_h, inc_w)]
            selected_inc = testGen.rng.choice(inc_choices)
            ifm_shape[1] += selected_inc[0]
            ifm_shape[2] += selected_inc[1]

        ifm_shape = testGen.constrictBatchSize(ifm_shape)

        return [ifm_shape]

    @staticmethod
    def tgFullyConnected(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 2

        input_shape = testGen.makeShape(rank)

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            input_shape = TosaErrorIfArgGen.eiRestrictDimensions(input_shape)

        filter_oc = testGen.rng.integers(
            low=testGen.args.tensor_shape_range[0],
            high=testGen.args.tensor_shape_range[1],
            size=1,
        )[0]
        filter_shape = np.asarray([filter_oc, input_shape[1]])

        bias_shape = np.asarray([filter_oc])

        return [input_shape, filter_shape, bias_shape]

    @staticmethod
    def tgMatmul(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 3
        assert pl == 2 and const == 0

        a_shape = testGen.makeShape(rank)

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            a_shape = TosaErrorIfArgGen.eiRestrictDimensions(a_shape)

        # Get a random number for b_oc even if target shape is defined
        b_oc = np.int32(
            testGen.rng.integers(
                low=testGen.args.tensor_shape_range[0],
                high=testGen.args.tensor_shape_range[1],
                size=1,
            )
        )[0]
        # If N or H is large let b_oc be 1 to reduce output tensor size
        if max(a_shape) > 1000:
            b_oc = 1

        b_shape = np.asarray([a_shape[0], a_shape[2], b_oc])
        return [a_shape, b_shape]

    @staticmethod
    def tgConcat(testGen, opName, rank, error_name=None):
        pl, const = opName["operands"]
        shape = testGen.makeShape(rank)

        # Create extra tensors to concat.
        # Take into account value of pl when getting maximum number of concats
        num_tensors = testGen.randInt(0, 4)
        shape_list = []
        for i in range(pl + const + num_tensors):
            if error_name == ErrorIf.ConcatInputRankMismatch and i != 0:
                remove = testGen.rng.choice([True, False])
                wrongShape = shape.copy()

                if remove and len(shape) > 1:
                    wrongShape = wrongShape[1:]
                else:
                    wrongShape = list(wrongShape)
                    wrongShape.append(testGen.rng.integers(1, 10))

                shape_list.append(wrongShape)
            else:
                shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgConcatConstInput(testGen, shapeList, axis, error_name=None):
        if error_name in [
            ErrorIf.AxisSmallerZero,
            ErrorIf.AxisLargerRank,
            ErrorIf.ConcatInputRankMismatch,
        ]:
            return shapeList

        # Split concat shape along axis to allow for multiple const inputs
        # without making too many large tensors
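        # Illustrative trace: four [2, 6, 2] shapes with axis=1 come back as
        # [2, 6, 2] (the unchanged first input) plus the splits [2, 3, 2],
        # [2, 1, 2] and [2, 2, 2], whose axis-1 lengths sum to the original 6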
        if len(shapeList) == 2 or shapeList[0][axis] < len(shapeList):
            # If axis can't be split we still need to invalidate other dimensions
            if error_name == ErrorIf.ConcatInputDimMismatch:
                for shape in shapeList[1:]:
                    # Negative test shapeLists are created individually for each test,
                    # so no need to copy the shape before altering it.
                    shape[(axis + 1) % len(shape)] += testGen.rng.integers(5, 10)
            return shapeList

        # Create copy of shape we are going to split (so we don't alter shapeList)
        shape = shapeList[0].copy()
        # Add original shape as first input
        new_shapeList = [shape.copy()]
        length_on_axis = shape[axis]
        remaining_length = length_on_axis
        for i in range(len(shapeList) - 2):
            # Calculate split on axis and remaining value
            split_shape_val = int(shape[axis] / 2)
            remaining_length = remaining_length - split_shape_val

            # Append new shape, and set remaining shape
            shape[axis] = split_shape_val
            new_shapeList.append(shape.copy())

            # invalidate dimensions
            if error_name == ErrorIf.ConcatInputDimMismatch:
                shape[(axis + 1) % len(shape)] += testGen.rng.integers(5, 10)
            else:
                shape[axis] = remaining_length

            if i == len(shapeList) - 3:
                new_shapeList.append(shape.copy())

        return new_shapeList


class TosaTensorValuesGen:
    """Tensor Value generators create the random data for each test."""

    def __init__(self):
        pass

    @staticmethod
    def tvgDefault(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        pCount, cCount = op["operands"]

        tens = []
        tens.extend(
            testGen.buildPlaceholderTensors(shapeList[0:pCount], dtypeList[0:pCount])
        )
        tens.extend(testGen.buildConstTensors(shapeList[pCount:], dtypeList[pCount:]))

        return tens

    @staticmethod
    def tvgNegate(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        if dtypeList[0] == DType.INT32 and error_name is None:
            pCount, cCount = op["operands"]
            assert (
                pCount == 1 and cCount == 0
            ), "Op.NEGATE must have 1 placeholder, 0 consts"
            # Must create tensors with values within accumulator (int32) negatable
            # range
            max_val = (1 << 31) - 1
            min_val = -max_val
            arr = np.int32(
                testGen.rng.integers(low=min_val, high=(max_val + 1), size=shapeList[0])
            )
            placeholders = []
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], arr)
            )
            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, error_name
            )

    @staticmethod
    def tvgAddSub(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        if dtypeList[0] == DType.INT32 and error_name is None:
            # Make sure the operation does not cause value saturation - where
            # the number wraps due to limited number of bits to store the answer
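            # e.g. if a + b overshoots INT32_MAX by k, the sat_max/sat_min
            # arrays below reduce b by k so the result stays representable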
            pCount, cCount = op["operands"]
            assert (
                pCount == 2 and cCount == 0
            ), "Op.ADD / Op.SUB must have 2 placeholders, 0 consts"
            placeholders = []
            add = op["op"] == Op.ADD
            a_arr = testGen.getRandTensor(shapeList[0], dtypeList[0])
            b_arr = testGen.getRandTensor(shapeList[1], dtypeList[1])
            if add:
                res_arr = np.add(a_arr, b_arr, dtype=np.int64)
            else:
                res_arr = np.subtract(a_arr, b_arr, dtype=np.int64)

            # Work out the saturation limits
            max_i32 = (1 << 31) - 1
            min_i32 = -(1 << 31)
            max_arr = np.full(shapeList[1], max_i32)
            min_arr = np.full(shapeList[1], min_i32)

            # Find how much values exceed the maximum/minimums
            sat_max_arr = np.maximum(res_arr - max_arr, 0)
            sat_min_arr = np.minimum(res_arr - min_arr, 0)

            if not add:
                # Swap saturation values and negate values as we need to perform opposite operations
                sat_max_arr, sat_min_arr = -sat_min_arr, -sat_max_arr

            # Create new array of unsaturated values by clipping values as needed
            b_unsat_arr = b_arr
            if (sat_max_arr != 0).any():
                # Clip values that cause saturation
                b_unsat_arr = np.subtract(b_unsat_arr, sat_max_arr, dtype=np.int32)
                # Reduce axes in unsaturated tensor to match original tensor
                for axis, dim in enumerate(b_arr.shape):
                    if dim != b_unsat_arr.shape[axis]:
                        assert (
                            dim == 1
                        ), "Op.ADD / SUB dimension must be 1 or matching to be broadcastable"
                        b_unsat_arr = np.amin(b_unsat_arr, axis=axis, keepdims=True)

            if (sat_min_arr != 0).any():
                # Clip values that cause saturation
                b_unsat_arr = np.subtract(b_unsat_arr, sat_min_arr, dtype=np.int32)
                # Reduce axes in unsaturated tensor to match original tensor
                for axis, dim in enumerate(b_arr.shape):
                    if dim != b_unsat_arr.shape[axis]:
                        assert (
                            dim == 1
                        ), "Op.ADD / SUB dimension must be 1 or matching to be broadcastable"
                        b_unsat_arr = np.amax(b_unsat_arr, axis=axis, keepdims=True)

            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
            )
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], b_unsat_arr)
            )

            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, error_name
            )

    @staticmethod
    def tvgCondIfWhileLoop(
        testGen, op, dtypeList, shapeList, testArgs, error_name=None
    ):
        if dtypeList[0] in (
            DType.INT32,
            DType.INT16,
            DType.INT8,
        ):
            # Limit input tensors with cond_if_binary or while_loop to stop
            # saturation of add/sub ops with int32 and keep all logical shift
            # values between 0 to 31 for int16 or int8
            pCount, cCount = op["operands"]
            pRemain = pCount
            placeholders = []
            for idx, shape in enumerate(shapeList[:]):
                if dtypeList[0] == DType.INT32:
                    arr = testGen.getRandTensor(shapeList[idx], DType.INT16)
                else:
                    arr = np.int32(
                        testGen.rng.integers(low=0, high=32, size=shapeList[idx])
                    )
                if pRemain > 0:
                    placeholders.append(
                        testGen.ser.addPlaceholder(shape, dtypeList[idx], arr)
                    )
                    pRemain -= 1
                else:
                    placeholders.append(
                        testGen.ser.addConst(shape, dtypeList[idx], arr)
                    )

            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, error_name
            )

    @staticmethod
    def tvgArithmeticRightShift(
        testGen, op, dtypeList, shapeList, testArgs, error_name=None
    ):
        pCount, cCount = op["operands"]
        # Force value of operand[1] to be within [0, num_bits]
        assert (
            pCount == 2 and cCount == 0
        ), "Op.ArithmeticRightShift must have 2 placeholders, 0 consts"

        placeholders = []
        for idx, shape in enumerate(shapeList[:]):
            if idx == 1:
                if dtypeList[idx] == DType.INT8:
                    arr = np.int32(testGen.rng.integers(low=0, high=8, size=shape))
                elif dtypeList[idx] == DType.INT16:
                    arr = np.int32(testGen.rng.integers(low=0, high=16, size=shape))
                elif dtypeList[idx] == DType.INT32:
                    arr = np.int32(testGen.rng.integers(low=0, high=32, size=shape))
                elif error_name == ErrorIf.WrongInputType:
                    arr = np.int32(testGen.rng.integers(low=0, high=8, size=shape))
                else:
                    raise Exception("OpArithmeticRightShift: invalid input dtype")
            else:
                arr = testGen.getRandTensor(shape, dtypeList[idx])
            placeholders.append(testGen.ser.addPlaceholder(shape, dtypeList[idx], arr))

        return placeholders

    @staticmethod
    def tvgSelect(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        # Set datatype of condition tensor to boolean
        dtypeList[0] = DType.BOOL

        return TosaTensorValuesGen.tvgDefault(
            testGen, op, dtypeList, shapeList, testArgs, error_name
        )

    @staticmethod
    def tvgIntDiv(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        if error_name is None:
            pCount, cCount = op["operands"]
            assert (
                pCount == 2 and cCount == 0
            ), "Op.INTDIV must have 2 placeholders, 0 consts"

            placeholders = []

            # Two invalid cases for Op.INTDIV:
            # 1. divisor == 0
            # 2. dividend == -(1<<31) and divisor == -1
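            #    (the exact result +2**31 is not representable in int32)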
            while True:
                dividend_arr = testGen.getRandTensor(shapeList[0], dtypeList[0])
                divisor_arr = testGen.getRandTensor(shapeList[1], dtypeList[1])

                if (divisor_arr == 0).any():
                    continue

                if (dividend_arr == -(2**31)).any() and (divisor_arr == -1).any():
                    continue

                break

            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], dividend_arr)
            )
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], divisor_arr)
            )

            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, error_name
            )

    @staticmethod
    def tvgMul(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        if error_name is None:
            pCount, cCount = op["operands"]
            assert (
                pCount == 2 and cCount == 0
            ), "Op.MUL must have 2 placeholders, 0 consts"

            tens = []
            if dtypeList[0] in (DType.FP16, DType.BF16, DType.FP32):
                tens.extend(testGen.buildPlaceholderTensors(shapeList[:], dtypeList[:]))
            else:
                placeholders = []

                # Make sure multiply result in int32 range
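                # If the scaled product ((a * b + round) >> shift) leaves the
                # int32 range, both inputs are halved and retried (see the
                # while loop below)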
                shift = testArgs[0]
                if dtypeList[0] == DType.INT8:
                    num_bits = 8
                elif dtypeList[0] == DType.INT16:
                    num_bits = 16
                elif dtypeList[0] == DType.INT32:
                    num_bits = 32
                elif error_name == ErrorIf.WrongInputType:
                    num_bits = 8
                else:
                    raise Exception("OpMul: invalid input dtype")

                for idx, shape in enumerate(shapeList[:]):
                    low = -(2 ** (num_bits - 1))
                    high = (2 ** (num_bits - 1)) - 1

                    a_arr = np.int32(
                        testGen.rng.integers(low=low, high=high, size=shapeList[0])
                    )
                    b_arr = np.int32(
                        testGen.rng.integers(low=low, high=high, size=shapeList[1])
                    )

                i = 0
                while True:

                    a_arr_64 = a_arr.astype(np.int64)
                    b_arr_64 = b_arr.astype(np.int64)

                    if shift > 0:
                        rounding = 1 << (shift - 1)
                        result_arr = ((a_arr_64 * b_arr_64) + rounding) >> shift
                    else:
                        result_arr = a_arr_64 * b_arr_64

                    if (result_arr > -(2**31)).all() and (
                        result_arr <= ((2**31) - 1)
                    ).all():
                        break

                    i = i + 1
                    a_arr = a_arr // 2
                    b_arr = b_arr // 2

                placeholders.append(
                    testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
                )
                placeholders.append(
                    testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr)
                )

                tens.extend(placeholders)

            return tens
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, error_name
            )

    @staticmethod
    def tvgConcat(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        count = len(shapeList) - testGen.args.num_const_inputs_concat
        if count < 1:
            count = 1
        if testGen.args.num_const_inputs_concat == 0:
            count = len(shapeList)

        # Ensure axis is an int
        testArgs[0] = int(testArgs[0])

        shapeList = TosaTensorGen.tgConcatConstInput(
            testGen, shapeList, testArgs[0], error_name
        )

        tens = []
        tens.extend(
            testGen.buildPlaceholderTensors(shapeList[0:count], dtypeList[0:count])
        )
        tens.extend(testGen.buildConstTensors(shapeList[count:], dtypeList[count:]))

        return tens

    @staticmethod
    def tvgLogicalShift(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        pCount, cCount = op["operands"]
        assert (
            pCount == 2 and cCount == 0
        ), "Op.LOGICAL_LEFT_SHIFT or Op.LOGICAL_RIGHT_SHIFT must have 2 placeholders, 0 consts"
        values_arr = testGen.getRandTensor(shapeList[0], dtypeList[0])
        shift_arr = np.int32(testGen.rng.integers(low=0, high=32, size=shapeList[1]))
        placeholders = []
        placeholders.append(
            testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], values_arr)
        )
        placeholders.append(
            testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], shift_arr)
        )

        return placeholders

    @staticmethod
    def tvgEqual(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        if error_name is None:
            pCount, cCount = op["operands"]
            assert (
                pCount == 2 and cCount == 0
            ), "Op.EQUAL must have 2 placeholders, 0 consts"
            a_arr = testGen.getRandTensor(shapeList[0], dtypeList[0])
            b_arr = testGen.getRandTensor(shapeList[1], dtypeList[1])
            # Using random numbers means that it will be very unlikely that
            # there are any matching (equal) values, therefore force that
            # there are twice the number of matching values as the tensor rank
            for num in range(0, len(shapeList[0]) * 2):
                a_index = []
                b_index = []
                # Choose an index in each axis for the whole shape
                for axis in range(0, len(shapeList[0])):
                    # Index can be up to the largest dimension in both shapes
                    index = np.int32(
                        testGen.rng.integers(
                            0, max(shapeList[0][axis], shapeList[1][axis])
                        )
                    )
                    # Reduce the index down to a shape's dim for broadcasting
                    a_index.append(min(shapeList[0][axis] - 1, index))
                    b_index.append(min(shapeList[1][axis] - 1, index))

                a_arr[tuple(a_index)] = b_arr[tuple(b_index)]

            placeholders = []
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
            )
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr)
            )
            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, error_name
            )

    @staticmethod
    def tvgReduceSum(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        if dtypeList[0] == DType.INT32:
            pCount, cCount = op["operands"]
            assert (
                pCount == 1 and cCount == 0
            ), "Op.REDUCE_SUM must have 1 placeholder, 0 consts"
            # Limit values so that the sum cannot exceed the range of an int32 during
            # summation of any axis
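            # e.g. a longest axis of 1024 limits magnitudes to 2**31 // 1024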
            range_val = int((1 << 31) / max(shapeList[0]))
            values_arr = np.int32(
                testGen.rng.integers(low=-range_val, high=range_val, size=shapeList[0])
            )
            placeholders = []
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], values_arr)
            )
            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, error_name
            )


class TosaArgGen:
    """Argument generators create exhaustive or random lists of attributes for
    operators that take attributes or other parameters.

    The return value is a list of (descriptive_name, [arglist]) tuples where
    the descriptive_name is appended to the test name and the arglist is expanded
    as arguments to the operator build function.
    """

    def __init__(self):
        pass

    @staticmethod
    def agNone(testGen, opName, shapeList, dtype, error_name=None):
        """A trivial argument generator for operators that don't take any
        non-tensor arguments"""
        return [("", [])]

    @staticmethod
    def agAxis(testGen, opName, shapeList, dtype, error_name=None):
        """Build the axis argument for operators that take a single axis"""
        axes = []
        shape = shapeList[0]

        if error_name == ErrorIf.AxisSmallerZero:
            small_axis = testGen.rng.integers(-5, 0)
            axes.append(("axis{}".format(small_axis), [small_axis]))
        elif error_name == ErrorIf.AxisLargerRank:
            large_axis = testGen.rng.integers(len(shape) + 1, len(shape) + 10)
            axes.append(("axis{}".format(large_axis), [large_axis]))
        else:
            for a in range(0, len(shape)):
                axes.append(("axis{}".format(a), [a]))

        return axes

    @staticmethod
    def _calculate_sparsity(num_tests, sparsity_factor):
        sparsity = num_tests // sparsity_factor + 1
        # If there are only a small number of tests, just select them all
        if sparsity < 13:
            sparsity = 1
        # To get a variety of parameter combinations sparsity should not be a
        # multiple of 2, 3 or 5
        while sparsity % 2 == 0 or sparsity % 3 == 0 or sparsity % 5 == 0:
            sparsity += 1
        return sparsity
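    # Worked example (illustrative): num_tests=10000 and sparsity_factor=120
    # give 10000 // 120 + 1 = 84, stepped past multiples of 2, 3 and 5 to 89,
    # i.e. roughly one in every 89 parameter combinations is kept.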

    @staticmethod
    def agConv(testGen, opName, shapeList, dtypes, error_name=None):
        # Used by CONV2D, CONV3D and DEPTHWISE_CONV2D
        arg_list = []

        if testGen.args.level8k and error_name is not None:
            # Don't produce negative large tests
            return arg_list

        # Shape: Batches, (Depth), Height, Width, Channels
        ifm_shape = shapeList[0]
        # Shape: (OFM channels), (KD), KH, KW, IFM channels
        filter_shape = shapeList[1]

        accum_dtype = get_accum_dtype_from_tgTypes(dtypes)

        # Check the rank
        conv3d = opName.startswith("conv3d")
        rank = 5 if conv3d else 4
        if error_name != ErrorIf.WrongRank:
            assert len(ifm_shape) == rank
            assert len(filter_shape) == rank

        # kernel rank omits channels
        k_rank = rank - 2
        k_pos = 0 if opName.startswith("depthwise") else 1
        k_shape = tuple(filter_shape[k_pos : (k_pos + k_rank)])

        if not testGen.args.level8k:
            # Generate comprehensive argument lists
            # - except for named errors, which use specific invalid value(s)
            if error_name == ErrorIf.PadSmallerZero:
                p_vals = [testGen.rng.choice(range(-5, 0))]
            else:
                p_vals = [x for x in range(0, testGen.args.max_conv_padding + 1)]
            paddings = {x for x in itertools.product(*([p_vals] * k_rank * 2))}
            if error_name == ErrorIf.StrideSmallerOne:
                # Can't use stride=0, as it is used to derive output shape, as a divisor
                s_vals = [testGen.rng.choice(range(-5, 0))]
            else:
                # Stride must be greater than 1 to force non-integer error
                startStride = (
                    1 if error_name != ErrorIf.ConvOutputShapeNonInteger else 2
                )
                s_vals = [
                    x for x in range(startStride, testGen.args.max_conv_stride + 1)
                ]
            strides = {x for x in itertools.product(*([s_vals] * k_rank))}
            if error_name == ErrorIf.DilationSmallerOne:
                d_vals = [testGen.rng.choice(range(-5, 1))]
            else:
                d_vals = [x for x in range(1, testGen.args.max_conv_dilation + 1)]
            dilations = {x for x in itertools.product(*([d_vals] * k_rank))}

            if not error_name and testGen.args.oversize:
                # add some oversize argument values
                if max(ifm_shape) < 64:
                    bigPadding = 9
                    paddings.update(
                        {
                            x
                            for x in itertools.product(
                                *([[0, bigPadding]] * (k_rank * 2))
                            )
                        }
                    )
                bigStride = 8
                strides.update(
                    {x for x in itertools.product(*([[1, bigStride]] * k_rank))}
                )
                bigDilation = 7
                dilations.update(
                    {x for x in itertools.product(*([[1, bigDilation]] * k_rank))}
                )
            max_dim_size = None

            # There are too many parameter combinations, so generate them sparsely,
            # very sparse for negative tests
            sparsity_factor = 2 if error_name else 120
            sparsity = TosaArgGen._calculate_sparsity(
                len(paddings) * len(strides) * len(dilations), sparsity_factor
            )
        else:
            # Only test 8k levels boundaries
            bigStride = testGen.TOSA_8K_LEVEL_MAX_STRIDE
            bigKernel = testGen.TOSA_8K_LEVEL_MAX_KERNEL
            bigPadding = bigKernel

            dilation_shape = [1] * k_rank
            pad_shape = [0] * k_rank * 2
            if conv3d:
                # Small stride apart from for big kernel (see below) to keep
                # tensor size/calculation small
                stride_shape = [1] * k_rank
                for idx in range(k_rank):
                    pad_offset = idx * 2
                    if k_shape[idx] == bigKernel:
                        # Padding shape needs to account for tensor shape
                        pad_shape[pad_offset] = bigPadding - ifm_shape[idx + 1]
                        pad_shape[pad_offset + 1] = bigPadding - dilation_shape[idx] + 1
                        # Big stride to reduce output size
                        stride_shape[idx] = bigKernel
                    else:
                        # Account for kernel size
                        pad_shape[pad_offset] = k_shape[idx] - 1
            else:
                # Always have a large stride with extra padding and dilation to keep
                # tensor calculation reasonable
                stride_shape = [bigKernel] * k_rank
                for idx in range(k_rank):
                    # Dilation shape must account for kernel size
                    dilation_shape[idx] = bigKernel // k_shape[idx]
                    # Padding shape needs to accommodate tensor/kernel & dilation
                    pad_offset = idx * 2
                    pad_shape[pad_offset] = bigPadding - ifm_shape[idx + 1]
                    pad_shape[pad_offset + 1] = bigPadding - dilation_shape[idx] + 1

            strides = {tuple(stride_shape)}
            dilations = {tuple(dilation_shape)}
            paddings = {tuple(pad_shape)}
            # Create a limit for the output dimensions size
            max_dim_size = testGen.TOSA_8K_LEVEL_MAX_KERNEL

            # Currently allow all combinations that are reasonable size
            sparsity = 1

        n = 0
        for s in sorted(list(strides)):
            for p in sorted(list(paddings)):
                for d in sorted(list(dilations)):
                    if (
                        n % sparsity == 0
                        # the padded shape must exceed the dilation * kernel to get a positive
                        # sized output shape
                        and (ifm_shape[1] - 1 + p[0] + p[1]) > d[0] * (k_shape[0] - 1)
                        and (ifm_shape[2] - 1 + p[2] + p[3]) > d[1] * (k_shape[1] - 1)
                        and (
                            k_rank < 3
                            or (
                                (ifm_shape[3] - 1 + p[4] + p[5])
                                > d[2] * (k_shape[2] - 1)
                            )
                        )
                    ):
                        remainders = []
                        outputs = []
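                        # Per spatial dimension (illustrative formula):
                        # out = (ifm - 1 + pad_before + pad_after
                        #        - (kernel - 1) * dilation) // stride + 1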
                        for index in range(k_rank):
                            pad_offset = index * 2
                            partial = (
                                ifm_shape[index + 1]
                                - 1
                                + p[pad_offset]
                                + p[pad_offset + 1]
                                - (k_shape[index] - 1) * d[index]
                            )
                            remainders.append(partial % s[index])
                            outputs.append((partial // s[index]) + 1)

                        if (
                            # the parameters must produce integer exact output
                            error_name != ErrorIf.ConvOutputShapeNonInteger
                            and max(remainders) == 0
                        ) or (
                            error_name == ErrorIf.ConvOutputShapeNonInteger
                            and max(remainders) > 0
                        ):
                            if (
                                max_dim_size is not None
                                and max(outputs) >= max_dim_size
                            ):
                                # Test will consume too much memory - skip it
                                continue

                            # Support for larger values than 9 needs different delimiter
                            delim = "" if max(s + p + d) <= 9 else "x"
                            arg_list.append(
                                (
                                    "acc{}_st{}_pad{}_dilat{}".format(
                                        testGen.typeStr(accum_dtype),
                                        delim.join([str(x) for x in s]),
                                        delim.join([str(x) for x in p]),
                                        delim.join([str(x) for x in d]),
                                    ),
                                    [accum_dtype, s, p, d],
                                )
                            )
                    n += 1

        return arg_list

    @staticmethod
    def agFullyConnected(testGen, opName, shapeList, dtypes, error_name=None):

        assert isinstance(dtypes, list) or isinstance(
            dtypes, tuple
        ), f"{dtypes} unexpected"
        input_dtype = dtypes[0]

        if error_name == ErrorIf.WrongOutputType:
            accum_dtype = get_wrong_output_type(opName, testGen.rng, input_dtype)
        elif error_name == ErrorIf.WrongInputType:
            # Pick some potentially correct output dtype if input type is incorrect
            accum_dtype = DType.INT32
        else:
            accum_dtype = get_accum_dtype_from_tgTypes(dtypes)

        return [(f"acc{testGen.typeStr(accum_dtype)}", [accum_dtype])]

    @staticmethod
    def agMatMul(testGen, opName, shapeList, dtype, error_name=None):
        # Get valid accumulate type(s)
        if dtype == DType.INT8:
            accum_dtypes = [DType.INT32]
        elif dtype == DType.INT16:
            accum_dtypes = [DType.INT48]
        elif dtype == DType.FP16:
            accum_dtypes = [DType.FP16, DType.FP32]
        elif dtype == DType.BF16:
            accum_dtypes = [DType.FP32]
        elif dtype == DType.FP32:
            accum_dtypes = [DType.FP32]
        elif error_name is None:
            assert False, f"Invalid I/O DType for MatMul: {DTypeNames[dtype]}"

        if error_name == ErrorIf.WrongOutputType:
            # Get incorrect output dtype for ErrorIf case
            accum_dtypes = [get_wrong_output_type(opName, testGen.rng, dtype)]
        elif error_name == ErrorIf.WrongInputType:
            # Pick some potentially correct output dtype if input type is incorrect
            accum_dtypes = [DType.INT32]

        return [(f"acc{testGen.typeStr(a)}", [a]) for a in accum_dtypes]

    @staticmethod
    def agTransposeConv2D(testGen, opName, shapeList, dtypes, error_name=None):
        arg_list = []

        if testGen.args.level8k and error_name is not None:
            # Don't produce negative large tests
            return arg_list

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]

        accum_dtype = get_accum_dtype_from_tgTypes(dtypes)

        # Must be rank 4
        if error_name != ErrorIf.WrongRank:
            assert len(ifm_shape) == 4
            assert len(filter_shape) == 4

        k_shape = tuple(filter_shape[1:3])

        if not testGen.args.level8k:
            # Generate comprehensive argument lists
            # - except for named errors, which use specific invalid value(s)
            smallest_padding_size = -min(k_shape[0], k_shape[1]) + 1
            if error_name == ErrorIf.PadLargerEqualKernel:
                max_filter_size = -max(k_shape[0], k_shape[1])
                p_vals = [
                    testGen.rng.choice(range(max_filter_size - 10, max_filter_size))
                ]
            else:
                p_vals = [
                    x
                    for x in range(
                        smallest_padding_size, testGen.args.max_conv_padding + 1
                    )
                ]
            paddings = {x for x in itertools.product(*([p_vals] * 4))}
            if error_name == ErrorIf.StrideSmallerOne:
                # Can't use stride=0, as it is used to derive output shape, as a divisor
                s_vals = [testGen.rng.choice(range(-5, 0))]
            else:
                s_vals = [x for x in range(1, testGen.args.max_conv_stride + 1)]
            strides = {x for x in itertools.product(*([s_vals] * 2))}

            if not error_name and testGen.args.oversize:
                # add some oversize argument values
                if max(ifm_shape) < 64:
                    bigPadding = 9
                    paddings.update(
                        {
                            x
                            for x in itertools.product(
                                *([[smallest_padding_size, bigPadding]] * 4)
                            )
                        }
                    )
                bigStride = 8
                strides.update({x for x in itertools.product(*([[1, bigStride]] * 2))})

            # There are too many parameter combinations, so generate them sparsely,
            # very sparse for negative tests
            sparsity_factor = 2 if error_name else 10
            sparsity = len(paddings) * len(strides) // sparsity_factor + 1
            # If there are only a small number of tests, just select them all
            if sparsity < 13:
                sparsity = 1
            # To get a variety of parameter combinations sparsity should not be a
            # multiple of 2, 3 or 5
            while sparsity % 2 == 0 or sparsity % 3 == 0 or sparsity % 5 == 0:
                sparsity += 1
        else:
            # Only test 8k levels boundaries
            bigStride = testGen.TOSA_8K_LEVEL_MAX_STRIDE
            bigKernel = testGen.TOSA_8K_LEVEL_MAX_KERNEL
            bigPadding = bigKernel

            pad_shape = [0] * (len(k_shape) * 2)
            stride_shape = [1] * len(k_shape)
            # The point at which input dimension combined with the stride will
            # create large output sizes!
            LARGE_SIZE = 2
            for idx in range(len(k_shape)):
                pad_offset = idx * 2
                if k_shape[idx] == bigKernel:
                    # Set large stride
                    stride_shape[idx] = bigKernel
                    # Use negative output padding to reduce shape size
                    pad_shape[pad_offset] = -(bigPadding - 1)
                    if ifm_shape[idx + 1] > LARGE_SIZE:
                        pad_shape[pad_offset + 1] = -(bigPadding - 1)
                else:
                    # The other dimension should be the bigKernel
                    alt_idx = 1 - idx
                    if (
                        k_shape[alt_idx] == bigKernel
                        and ifm_shape[alt_idx + 1] < LARGE_SIZE
                    ):
                        # As the input is small, the large stride won't
                        # affect the output so we can add some padding
                        pad_shape[pad_offset + 1] = bigPadding

            strides = {tuple(stride_shape)}
            paddings = {tuple(pad_shape)}

            # Currently allow all combinations that are reasonable size
            sparsity = 1

        n = 0
        for s in sorted(list(strides)):
            for p in sorted(list(paddings)):
                if n % sparsity == 0:
                    # Determine the output shape
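                    # e.g. IH=14, stride=2, pads=(0, 0), KH=3 gives
                    # OH = (14 - 1) * 2 + 0 + 0 + 3 = 29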
Jeremy Johnson0c716862023-04-13 17:18:19 +01001402 oh = (ifm_shape[1] - 1) * s[0] + p[0] + p[1] + k_shape[0]
1403 ow = (ifm_shape[2] - 1) * s[1] + p[2] + p[3] + k_shape[1]
TatWai Chong24594f52022-06-08 00:48:04 -07001404 os = [ifm_shape[0], oh, ow, filter_shape[0]]
Jeremy Johnson0c716862023-04-13 17:18:19 +01001405
1406 # Support for larger values than 9 needs different delimiter
1407 delim = "" if max(s + p) <= 9 else "x"
TatWai Chong24594f52022-06-08 00:48:04 -07001408 arg_list.append(
1409 (
James Ward8b390432022-08-12 20:48:56 +01001410 "acc{}_st{}_pad{}_os{}".format(
1411 testGen.typeStr(accum_dtype),
Jeremy Johnson0c716862023-04-13 17:18:19 +01001412 delim.join([str(x) for x in s]),
1413 delim.join([str(x) for x in p]),
TatWai Chong24594f52022-06-08 00:48:04 -07001414 "x".join([str(x) for x in os]),
1415 ),
James Ward8b390432022-08-12 20:48:56 +01001416 [accum_dtype, s, p, os],
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001417 )
TatWai Chong24594f52022-06-08 00:48:04 -07001418 )
1419 n += 1
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001420
1421 return arg_list
1422
1423 @staticmethod
1424 def agPad(testGen, opName, shapeList, dtype, error_name=None):
1425 arg_list = []
1426 rank = len(shapeList[0])
1427
1428 # Exhaustively test combinations of padding on each side of each dimension
1429 # - the range of padding values is defined by pad_min and pad_max
1430 # - for padding >9, the name format needs to be more distinctive
1431 pad_min, pad_max = 0, 1
1432 pad_values = [x for x in range(pad_min, pad_max + 1)]
1433 if error_name == ErrorIf.PadSmallerZero:
1434 pad_values = [x for x in range(-2, 0)]
1435 axis_pad_values = [x for x in itertools.product(pad_values, pad_values)]
1436 shape_pad_values = itertools.product(*([axis_pad_values] * rank))
1437
1438 if dtype in [DType.BOOL, DType.INT8, DType.INT16, DType.INT32]:
1439 pad_const_int = testGen.getRandNumberDType(dtype)
1440 pad_const_fp = 0
James Wardf0890992022-11-17 11:15:14 +00001441 elif dtype in (DType.FP16, DType.BF16, DType.FP32):
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001442 pad_const_int = 0
1443 pad_const_fp = testGen.getRandNumberDType(dtype)
1444 else:
1445 return []
1446
Jeremy Johnsonfd05bb32023-02-07 16:39:24 +00001447 list_shape_pad_values = list(shape_pad_values)
1448 # If we are producing tests for rank 6 or greater use sparsity
1449 if len(list_shape_pad_values) > 1024:
1450 sparsity_factor = 2 if error_name else 120
1451 sparsity = TosaArgGen._calculate_sparsity(
1452 len(list_shape_pad_values), sparsity_factor
1453 )
1454 else:
1455 sparsity = 1
1456
1457 for n, paddings in enumerate(list_shape_pad_values):
James Ward8b390432022-08-12 20:48:56 +01001458 paddings = list(paddings)
1459 args_valid = True
1460
1461 if error_name == ErrorIf.PadSmallerZero:
1462 # Prevent negative output shapes while ensuring still testing for negative padding
1463 for i in range(rank):
1464 dim_after_padding = (
1465 paddings[i][0] + paddings[i][1] + shapeList[0][i]
1466 )
1467 if dim_after_padding < 1:
1468 paddings[i] = (0, 0)
1469 if all([p > -1 for p in paddings[i]]):
1470 args_valid = False
Jeremy Johnsonfd05bb32023-02-07 16:39:24 +00001471 if args_valid and n % sparsity == 0:
James Ward8b390432022-08-12 20:48:56 +01001472 name = "pad"
1473 for r in range(rank):
1474 before, after = paddings[r]
1475 name = f"{name}{before}{after}"
1476 arg_list.append(
1477 (name, [np.array(paddings), pad_const_int, pad_const_fp])
1478 )
1479
1480 if error_name == ErrorIf.PadSmallerZero and len(arg_list) == 0:
1481 warnings.warn(f"No ErrorIf test created for input shape: {shapeList[0]}")
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001482
1483 return arg_list
1484
1485 @staticmethod
1486 def agPooling(testGen, opName, shapeList, dtype, error_name=None):
1487 arg_list = []
1488
1489 shape = shapeList[0]
1490 if error_name != ErrorIf.WrongRank:
1491 assert len(shape) == 4
1492
Jeremy Johnson0c716862023-04-13 17:18:19 +01001493 test_level8k = testGen.args.level8k and error_name is None
1494
Jeremy Johnson4a6fb9b2022-04-26 15:47:21 +01001495 startStride = 1 if error_name != ErrorIf.PoolingOutputShapeNonInteger else 2
Jeremy Johnson0c716862023-04-13 17:18:19 +01001496 startKernel = 2
1497 startPad = 0
1498 if not test_level8k:
1499 # Generate comprehensive argument lists
1500 p_vals = [x for x in range(startPad, testGen.args.max_pooling_padding + 1)]
1501 paddings = {x for x in itertools.product(*([p_vals] * 4))}
1502 # Stride must be greater than 1 to force non-integer error
1503 s_vals = [
1504 x for x in range(startStride, testGen.args.max_pooling_stride + 1)
1505 ]
1506 strides = {x for x in itertools.product(*([s_vals] * 2))}
1507 k_vals = [
1508 x for x in range(startKernel, testGen.args.max_pooling_kernel + 1)
1509 ]
1510 kernels = {x for x in itertools.product(*([k_vals] * 2))}
1511 max_dim_size = None
1512 else:
1513 # Only test 8k levels
1514 bigStride = testGen.TOSA_8K_LEVEL_MAX_STRIDE
1515 bigKernel = testGen.TOSA_8K_LEVEL_MAX_KERNEL
1516 strides = {(1, bigStride), (bigStride, 4)}
1517 kernels = {(1, bigKernel), (bigKernel, 3)}
1518 paddings = set()
1519 for s in sorted(list(strides)):
1520 for k in sorted(list(kernels)):
1521 padding = []
1522 for idx in range(len(k)):
1523 total_padding = s[idx] - shape[idx + 1] + k[idx]
1524 while total_padding < 0:
1525 # Must meet: shape + padding > kernel
1526 total_padding += s[idx]
1527 if total_padding < k[idx]:
1528 padding.extend([0, total_padding])
1529 else:
1530 # Note this may produce padding >= k[idx] which is not
1531 # allowed - but will be ignored in the creation loop below
1532 padding.extend([k[idx] - 1, total_padding - (k[idx] - 1)])
1533 paddings.add(tuple(padding))
1534 # Create a limit for the output dimensions size
1535 max_dim_size = testGen.TOSA_8K_LEVEL_MAX_KERNEL
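            # Editor's note: the while loop above picks total_padding so that
            # (shape[idx + 1] + total_padding - k[idx]) is a positive multiple
            # m * s[idx], making the output dimension exactly m + 1 and keeping
            # these big-kernel/big-stride tests small.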
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001536
James Ward8b390432022-08-12 20:48:56 +01001537 if opName == "max_pool2d":
1538 accum_dtypes = [None] # max_pool has no accumulate dtype
1539 elif dtype == DType.INT8 or dtype == DType.INT16:
1540 accum_dtypes = [DType.INT32]
1541 elif dtype == DType.FP16:
Jeremy Johnsonbc2a3db2022-09-27 13:50:00 +01001542 accum_dtypes = [DType.FP16, DType.FP32]
James Ward24dbc422022-10-19 12:20:31 +01001543 elif dtype == DType.BF16 or dtype == DType.FP32:
Jeremy Johnsonbc2a3db2022-09-27 13:50:00 +01001544 accum_dtypes = [DType.FP32]
James Ward8b390432022-08-12 20:48:56 +01001545 elif error_name is None:
1546 assert False, f"Invalid I/O DType for pooling: {DTypeNames[dtype]}"
1547 else:
1548 # Set to something for the ErrorIf case which has
1549 # incorrect input data-type
1550 accum_dtypes = [DType.INT32]
1551
Jeremy Johnson0c716862023-04-13 17:18:19 +01001552 if not test_level8k:
1553 if testGen.args.oversize:
1554 # add some oversize argument values
1555 bigStride = 7
1556 bigKernel = 9
1557 strides.update(
1558 {x for x in itertools.product(*([[startStride, bigStride]] * 2))}
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001559 )
Jeremy Johnson0c716862023-04-13 17:18:19 +01001560 kernels.update(
1561 {x for x in itertools.product(*([[startKernel, bigKernel]] * 2))}
1562 )
1563 if max(shape) < 64:
1564 # padding must be less than the kernel size
1565 bigPadding = bigKernel - 1
1566 paddings.update(
1567 {x for x in itertools.product(*([[startPad, bigPadding]] * 4))}
1568 )
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001569
Jeremy Johnson0c716862023-04-13 17:18:19 +01001570 # There are too many parameter combinations, so generate them sparsely,
1571 # and very sparsely for negative (ERROR_IF) tests
1572 sparsity_factor = 2 if error_name else 500
1573 sparsity = (
1574 len(paddings) * len(strides) * len(kernels) // sparsity_factor + 1
1575 )
1576 else:
1577 # We have already limited test output combinations for 8k tests
1578 sparsity = 1
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001579
James Ward8b390432022-08-12 20:48:56 +01001580 arg_str = (
1581 "acc{}_st{}_kern{}_pad{}"
1582 if accum_dtypes[0] is not None
1583 else "st{}_kern{}_pad{}"
1584 )
1585
1586 def get_arg_list_element(accum, stride, pad, kern):
1587 # Return tuple containing the formatted argument string and
1588 # the corresponding argument values
Jeremy Johnson0c716862023-04-13 17:18:19 +01001589
1590 # Support for larger values than 9 needs different delimiter
1591 delim = "" if max(stride + kern + pad) <= 9 else "x"
James Ward8b390432022-08-12 20:48:56 +01001592 arg_str_elems = [
Jeremy Johnson0c716862023-04-13 17:18:19 +01001593 delim.join([str(x) for x in stride]),
1594 delim.join([str(x) for x in kern]),
1595 delim.join([str(x) for x in pad]),
James Ward8b390432022-08-12 20:48:56 +01001596 ]
1597 # Note: the value order differs from the argument string order above
1598 arg_val_elems = [stride, pad, kern]
1599
1600 if accum is not None:
1601 arg_str_elems.insert(0, testGen.typeStr(accum))
1602 arg_val_elems.insert(0, accum)
1603 return (arg_str.format(*arg_str_elems), arg_val_elems)
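        # Illustrative example (exact dtype strings come from testGen.typeStr):
        # accum=FP32, stride (2, 2), pad (0, 0, 1, 1), kernel (3, 3) gives a
        # name like "accf32_st22_kern33_pad0011" and the value list
        # [FP32, (2, 2), (0, 0, 1, 1), (3, 3)].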
1604
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001605 n = 0
James Ward8b390432022-08-12 20:48:56 +01001606 for a in accum_dtypes:
1607 for s in sorted(list(strides)):
1608 for p in sorted(list(paddings)):
1609 for k in sorted(list(kernels)):
1610 if error_name in [
1611 ErrorIf.StrideSmallerOne,
1612 ErrorIf.KernelSmallerOne,
1613 ErrorIf.PadSmallerZero,
1614 ErrorIf.PadLargerEqualKernel,
1615 ]:
1616 sNew, pNew, kNew = TosaErrorIfArgGen.eiPoolingErrorIf(
1617 testGen, error_name, s, p, k
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001618 )
James Ward8b390432022-08-12 20:48:56 +01001619 if None not in [sNew, pNew, kNew] and n % sparsity == 0:
1620 arg_vals = [a, sNew, pNew, kNew]
1621 arg_list.append(get_arg_list_element(*arg_vals))
1622 elif (
1623 n % sparsity == 0
1624 # padding must not exceed the kernel size
1625 and p[0] < k[0]
1626 and p[1] < k[0]
1627 and p[2] < k[1]
1628 and p[3] < k[1]
1629 # the padded shape must exceed the kernel size
1630 and (shape[1] + p[0] + p[1]) > k[0]
1631 and (shape[2] + p[2] + p[3]) > k[1]
Jeremy Johnson4a6fb9b2022-04-26 15:47:21 +01001632 ):
Jeremy Johnson0c716862023-04-13 17:18:19 +01001633 partial_h = shape[1] + p[0] + p[1] - k[0]
1634 partial_w = shape[2] + p[2] + p[3] - k[1]
1635 remainder_h = partial_h % s[0]
1636 remainder_w = partial_w % s[1]
1637 output_h = partial_h // s[0] + 1
1638 output_w = partial_w // s[1] + 1
1639 # debug print(shape, remainder_h, remainder_w, "/", output_h, output_w)
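                            # Worked example (illustrative): H=32, pad (1, 1),
                            # kernel 3, stride 2 gives partial_h = 32+1+1-3 = 31
                            # and remainder_h = 1 - only valid for the
                            # PoolingOutputShapeNonInteger negative test; with
                            # stride 1 instead, remainder_h = 0 and output_h = 32.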
James Ward8b390432022-08-12 20:48:56 +01001640 if (
1641 # the parameters must produce integer exact output
1642 error_name != ErrorIf.PoolingOutputShapeNonInteger
1643 and remainder_h == 0
1644 and remainder_w == 0
1645 ) or (
1646 error_name == ErrorIf.PoolingOutputShapeNonInteger
1647 and (remainder_h != 0 or remainder_w != 0)
1648 ):
Jeremy Johnson0c716862023-04-13 17:18:19 +01001649 if (
1650 max_dim_size is not None
1651 and max(output_h, output_w) > max_dim_size
1652 ):
1653 # Test will consume too much memory - skip it
1654 continue
James Ward8b390432022-08-12 20:48:56 +01001655 arg_vals = [a, s, p, k]
1656 arg_list.append(get_arg_list_element(*arg_vals))
1657 n += 1
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001658
1659 return arg_list
1660
1661 @staticmethod
1662 def agCast(testGen, opName, shapeList, inDtype, error_name=None):
1663 arg_list = []
1664
1665 # Enumerate the output types here
1666 if error_name == ErrorIf.WrongOutputType:
1667 dtypeList = TosaErrorIfArgGen.eiCastErrorIf(testGen, inDtype)
1668 elif inDtype == DType.INT8:
James Ward736fd1a2023-01-23 17:13:37 +00001669 dtypeList = [
1670 DType.BOOL,
1671 DType.INT16,
1672 DType.INT32,
1673 DType.FP16,
1674 DType.BF16,
1675 DType.FP32,
1676 ]
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001677 elif inDtype == DType.INT16:
James Ward736fd1a2023-01-23 17:13:37 +00001678 dtypeList = [
1679 DType.BOOL,
1680 DType.INT8,
1681 DType.INT32,
1682 DType.FP16,
1683 DType.BF16,
1684 DType.FP32,
1685 ]
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001686 elif inDtype == DType.INT32:
James Ward736fd1a2023-01-23 17:13:37 +00001687 dtypeList = [
1688 DType.BOOL,
1689 DType.INT8,
1690 DType.INT16,
1691 DType.FP16,
1692 DType.BF16,
1693 DType.FP32,
1694 ]
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001695 elif inDtype == DType.BOOL:
1696 dtypeList = [DType.INT8, DType.INT16, DType.INT32]
James Ward8b390432022-08-12 20:48:56 +01001697 elif inDtype == DType.FP16:
James Ward736fd1a2023-01-23 17:13:37 +00001698 dtypeList = [DType.INT8, DType.INT16, DType.INT32, DType.FP32]
James Ward24dbc422022-10-19 12:20:31 +01001699 elif inDtype == DType.BF16:
James Ward736fd1a2023-01-23 17:13:37 +00001700 dtypeList = [DType.INT8, DType.INT16, DType.INT32, DType.FP32]
Jeremy Johnsonbc2a3db2022-09-27 13:50:00 +01001701 elif inDtype == DType.FP32:
James Ward736fd1a2023-01-23 17:13:37 +00001702 dtypeList = [DType.INT8, DType.INT16, DType.INT32, DType.FP16, DType.BF16]
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001703 elif error_name == ErrorIf.WrongInputType:
1704 # Pick some potentially correct output type for incorrect input type
Jeremy Johnsonbc2a3db2022-09-27 13:50:00 +01001705 dtypeList = [DType.BOOL, DType.INT8, DType.INT16, DType.FP32]
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001706 else:
1707 raise Exception("Unexpected input dtype: {}".format(inDtype))
1708
1709 for dtype in dtypeList:
Jeremy Johnson3b0544c2022-10-18 16:32:19 +01001710 arg_list.append(("out{}".format(testGen.typeStr(dtype)), [dtype]))
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001711
1712 return arg_list
1713
1714 @staticmethod
1715 def agRescale(testGen, opName, shapeList, inDtype, error_name=None):
1716 arg_list = []
1717
1718 # Enumerate the output types here
Jeremy Johnsonf7f78ae2022-05-25 15:26:38 +01001719 for outDtype in [
1720 DType.UINT8,
1721 DType.INT8,
1722 DType.INT16,
1723 DType.INT32,
1724 DType.UINT16,
1725 ]:
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001726 if (
Jeremy Johnsonf7f78ae2022-05-25 15:26:38 +01001727 outDtype in [DType.UINT8, DType.INT8, DType.UINT16]
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001728 and error_name == ErrorIf.OutputZeroPointNotZero
1729 ):
1730 continue
1731 if (
Jeremy Johnsonf7f78ae2022-05-25 15:26:38 +01001732 outDtype != DType.UINT16
1733 and error_name == ErrorIf.U16OutputZeroPointNotValid
1734 ) or (
1735 inDtype != DType.UINT16
1736 and error_name == ErrorIf.U16InputZeroPointNotValid
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001737 ):
Jeremy Johnsonf7f78ae2022-05-25 15:26:38 +01001738 # ErrorIfs only valid with UINT16
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001739 continue
1740 if (
Jeremy Johnsonf7f78ae2022-05-25 15:26:38 +01001741 inDtype == DType.UINT8
1742 and outDtype not in [DType.INT8, DType.INT16]
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001743 and error_name != ErrorIf.WrongOutputType
1744 ):
Jeremy Johnsonf7f78ae2022-05-25 15:26:38 +01001745 # The only output dtypes for UINT8 are INT8/INT16, skip all others
1746 continue
1747 if (
1748 inDtype not in [DType.INT8, DType.INT16]
1749 and outDtype == DType.UINT8
1750 and error_name != ErrorIf.WrongOutputType
1751 ):
1752 # The only input dtypes for UINT8 are INT8/INT16, skip all others
1753 continue
1754 if (
1755 inDtype == DType.UINT16
1756 and outDtype != DType.INT16
1757 and error_name != ErrorIf.WrongOutputType
1758 ):
1759 # The only output dtype for UINT16 is INT16, skip all others
1760 continue
1761 if (
1762 inDtype != DType.INT16
1763 and outDtype == DType.UINT16
1764 and error_name != ErrorIf.WrongOutputType
1765 ):
1766 # The only input dtype for UINT16 is INT16, skip all others
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001767 continue
1768 if (
1769 error_name == ErrorIf.WrongOutputType
Jeremy Johnsonf7f78ae2022-05-25 15:26:38 +01001770 and not TosaErrorIfArgGen.eiRescaleWrongOutputType(inDtype, outDtype)
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001771 ):
1772 continue
1773
1774 for scale32 in [False, True]:
1775 if error_name == ErrorIf.ScaleTrue and not scale32:
1776 continue
1777 elif error_name == ErrorIf.ScaleNotTrue and scale32:
1778 continue
1779 for double_round in [False, True]:
1780 if error_name == ErrorIf.ScaleNotTrue and not double_round:
1781 continue
1782 for per_channel in [False, True]:
1783
1784 if (
1785 inDtype == DType.INT48
1786 and scale32
1787 and error_name != ErrorIf.ScaleTrue
1788 ):
1789 # Illegal condition. Must be scale32=False
1790 continue
1791 if (
1792 double_round
1793 and not scale32
1794 and error_name != ErrorIf.ScaleNotTrue
1795 ):
1796 # Illegal condition. ERROR_IF(!scale32 && double_round)
1797 continue
1798
1799 arg_list.append(
1800 (
1801 "out{}_sc{}_dr{}_pc{}".format(
Jeremy Johnson3b0544c2022-10-18 16:32:19 +01001802 testGen.typeStr(outDtype),
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001803 int(scale32),
1804 int(double_round),
1805 int(per_channel),
1806 ),
Jeremy Johnsonf7f78ae2022-05-25 15:26:38 +01001807 [outDtype, scale32, double_round, per_channel],
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001808 )
1809 )
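                    # Example test name (illustrative; dtype string from
                    # testGen.typeStr): INT8 output with scale32=True,
                    # double_round=True, per_channel=False is named
                    # "outi8_sc1_dr1_pc0".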
1810
1811 return arg_list
1812
1813 @staticmethod
1814 def agMul(testGen, opName, shapeList, dtype, error_name=None):
1815 arg_list = []
1816
1817 if dtype is DType.INT32:
1818 for p in range(testGen.args.num_rand_permutations):
1819
1820 shift = testGen.randInt(0, 32)
1821
1822 arg_list.append(("perm{}_shift{}".format(p, shift), [shift]))
1823 else:
1824 arg_list.append(("perm0_shift0", [0]))
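        # Note: per the TOSA specification a non-zero shift is only legal for
        # INT32 MUL, where the widened 64-bit product is rounded and
        # right-shifted by 'shift'; all other dtypes therefore use shift 0.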
1825
1826 return arg_list
1827
1828 @staticmethod
1829 def agArithmeticRightShift(testGen, opName, shapeList, dtype, error_name=None):
1830 arg_list = []
1831
1832 arg_list.append(("roundTrue", [True]))
1833 arg_list.append(("roundFalse", [False]))
1834
1835 return arg_list
1836
Luke Hutton57287132023-02-06 14:54:18 +00001837 @staticmethod
1838 def agFFT2d(testGen, opName, shapeList, dtype, error_name=None):
1839 arg_list = []
1840
1841 arg_list.append(("inverseTrue", [True]))
1842 arg_list.append(("inverseFalse", [False]))
1843
1844 return arg_list
1845
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001846 # Helper function for reshape. Returns the factors of val between start and sqrt(val).
1847 @staticmethod
1848 def getFactors(val, start=1):
1849 factors = []
1850
1851 for i in range(start, int(np.sqrt(val)) + 1):
1852 if (val % i) == 0:
1853 factors.append(i)
1854
1855 return factors
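    # e.g. getFactors(24) returns [1, 2, 3, 4] - only divisors up to
    # sqrt(24) ~= 4.9 are collected; co-factors are recovered later by the
    # integer division in agReshape.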
1856
1857 @staticmethod
1858 def agReshape(testGen, opName, shapeList, dtype, error_name=None):
1859 arg_list = []
1860
1861 origShape = shapeList[0]
1862
1863 totalElements = 1
1864 for s in origShape:
1865 totalElements *= s
1866
1867 # This code is NOT fast. Fortunately, the numbers are fairly small.
1868 factors = TosaArgGen.getFactors(totalElements)
1869
1870 for p in range(testGen.args.num_rand_permutations):
Jeremy Johnsonfd05bb32023-02-07 16:39:24 +00001871 # Rank from 1 to TOSA_TENSOR_MAX_RANK
1872 newRank = testGen.randInt(1, (testGen.TOSA_TENSOR_MAX_RANK + 1))
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001873 if len(factors) < newRank:
1874 continue
1875
1876 found = True
1877 # escape_counter breaks the while loop if it runs for too long
1878 escape_counter = 0
1879 while found:
1880 newShape = []
1881 # Generate newShape ensuring it isn't a duplicate
1882 remainingElements = totalElements
1883 shuffledFactors = testGen.rng.permutation(factors)
1884 for i in range(1, newRank):
1885 # pick rank-1 factors
1886 newShape.append(shuffledFactors[0])
1887 remainingElements = remainingElements // shuffledFactors[0]
1888 shuffledFactors = testGen.rng.permutation(
1889 TosaArgGen.getFactors(remainingElements)
1890 )
1891 newShape.append(remainingElements)
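                # Illustrative walk-through: totalElements=24, newRank=3 -
                # picking factor 2 leaves 12, then picking 3 leaves 4, giving
                # newShape [2, 3, 4] (the last dim absorbs the remainder).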
1892
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01001893 # Check for duplicates
1894 found = False
1895 for name, other_shape in arg_list:
1896 if other_shape[0] == newShape:
1897 found = True
1898 break
1899
1900 escape_counter += 1
1901 if escape_counter >= 100:
1902 break
1903
1904 if not found:
1905 arg_list.append(("perm{}_rank{}".format(p, newRank), [newShape]))
1906
1907 return arg_list
1908
1909 @staticmethod
1910 def agTranspose(testGen, opName, shapeList, dtype, error_name=None):
1911 arg_list = []
1912
1913 ifm_shape = shapeList[0]
1914
1915 if error_name == ErrorIf.IndexOutsideBounds:
1916 incorrect_large_index = range(len(ifm_shape) + 1, 2 * len(ifm_shape) + 1)
1917 incorrect_small_index = range(-len(ifm_shape), 0)
1918 permutations = [p for p in itertools.permutations(incorrect_large_index)]
1919 permutations.extend(
1920 [p for p in itertools.permutations(incorrect_small_index)]
1921 )
1922 elif error_name == ErrorIf.IndexUsedTwice:
1923 # Create list with a duplicated index
1924 perm_range = list(range(len(ifm_shape)))
1925 index_choice = testGen.rng.choice(range(len(perm_range)))
1926 perm_range[(index_choice + 1) % len(perm_range)] = perm_range[index_choice]
1927 permutations = [p for p in itertools.permutations(perm_range)]
1928
1929 else:
1930 # Get all permutations
1931 permutations = [p for p in itertools.permutations(range(len(ifm_shape)))]
1932
1933 # Limit to the permutations available from the shape rank or the argument setting
1934 limit = min(len(permutations), testGen.args.num_rand_permutations)
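        # e.g. a rank-3 input has only 3! = 6 candidate permutations, so at
        # most six tests are generated however large num_rand_permutations is.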
1935
1936 # Get random permutation generator that uses all permutations
1937 random_permutations = testGen.rng.permutation(permutations)
1938
1939 # Create list of required amount of permutations
1940 arg_list = [
1941 ("perm{}".format(p), [random_permutations[p].tolist()])
1942 for p in range(limit)
1943 ]
1944 return arg_list
1945
1946 @staticmethod
1947 def agSlice(testGen, opName, shapeList, dtype, error_name=None):
1948 arg_list = []
1949
1950 ifm_shape = shapeList[0]
1951 rank = len(ifm_shape)
1952
1953 for p in range(testGen.args.num_rand_permutations):
1954 start = []
1955 size = []
1956
1957 valid = True
1958
1959 for i in range(rank):
1960 if ifm_shape[i] > 1:
1961 start.append(testGen.randInt(0, ifm_shape[i]))
1962 size.append(testGen.randInt(0, ifm_shape[i] - start[i]))
1963
1964 # Invalid slice size?
1965 if size[i] == 0:
1966 valid = False
1967 else:
1968 start.append(0)
1969 size.append(1)
1970
1971 if valid:
1972 # If ERROR_IF test required then incorrect start, size will be returned
1973 start, size = TosaErrorIfArgGen.eiSliceErrorIf(
1974 testGen, error_name, ifm_shape, start, size
1975 )
1976 arg_list.append(("perm{}".format(p), [start, size]))
1977 return arg_list
1978
1979 @staticmethod
1980 def agTile(testGen, opName, shapeList, dtype, error_name=None):
1981 arg_list = []
1982
1983 ifm_shape = shapeList[0]
1984 rank = len(ifm_shape)
1985
1986 for p in range(testGen.args.num_rand_permutations):
1987
1988 # Pick a few random, but small multiple values
1989 # because otherwise this has a tendency to generate
1990 # enormous tensors
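            # e.g. (illustrative) ifm_shape [2, 1500, 3]: dim 1 is capped at
            # multiple 1 (dimension > 1000), the other dims get 2 because
            # max(shape) > 1000, giving multiples [2, 1, 2].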
1991 multiples = []
1992 for i in range(rank):
1993 if ifm_shape[i] > 1000:
1994 # Multiple of 1 if ifm_shape dimension is large to reduce
1995 # tensor size
1996 multiples.append(1)
1997 elif max(ifm_shape) > 1000:
1998 multiples.append(2)
1999 else:
2000 multiples.append(testGen.randInt(1, 4))
2001 arg_list.append(("perm{}".format(p), [multiples]))
2002
2003 return arg_list
2004
2005 @staticmethod
2006 def agResize(testGen, opName, shapeList, dtype, error_name=None):
2007 arg_list = []
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002008 ifm_shape = shapeList[0]
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002009
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002010 def get_aspect_ratio_resize_params():
2011 common_aspect_ratios = ((3, 2), (16, 9), (4, 3))
2012 aspect_ratio = testGen.rng.choice(common_aspect_ratios)
2013 invert = testGen.rng.choice((False, True))
2014 letterbox = testGen.rng.choice((False, True))
2015
2016 scale_y_n = aspect_ratio[0] if invert else aspect_ratio[1]
2017 scale_x_n = aspect_ratio[1] if invert else aspect_ratio[0]
2018 scale_y_d = scale_x_d = 1
2019 offset_x = offset_y = 0
2020
2021 if letterbox:
2022 max_border = scale_y_n
2023 border_y = testGen.randInt(low=0, high=max_border)
2024 border_x = 0
2025 else:
2026 # Pillarboxing
2027 border_y = 0
2028 max_border = scale_x_n
2029 border_x = testGen.randInt(low=0, high=max_border)
2030
2031 scale = (scale_y_n, scale_y_d, scale_x_n, scale_x_d)
2032 offset = (offset_y, offset_x)
2033 border = (border_y, border_x)
2034
2035 return scale, offset, border
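        # Illustrative outcome: aspect_ratio (4, 3) without inversion gives
        # scale (3, 1, 4, 1); letterboxing then adds a random border_y in
        # [0, 3), pillarboxing a random border_x in [0, 4).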
2036
2037 def get_upscale_downscale_params():
2038 valid_params = False
2039 while not valid_params:
2040 upscale = testGen.rng.choice((False, True))
2041
2042 # True if sampling begins from (0,0). Otherwise (-0.5,-0.5)
2043 origin_sampling = testGen.rng.choice((False, True))
2044
2045 if upscale:
2046 shift = testGen.randInt(low=1, high=4)
2047 scale_x_d = scale_y_d = 1
2048 scale_x_n = scale_y_n = (
2049 1 << shift if origin_sampling else 2 << shift
2050 )
2051 border_x = border_y = 0 if origin_sampling else (1 << shift) - 1
2052 offset_x = offset_y = 0 if origin_sampling else -(1 << shift) + 1
2053 else:
2054 scale_x_n = 1
2055 scale_y_n = 1
2056
2057 # Return list of valid scale_*_d values (max value 4) given the input dim
# shape - valid here means ifm_dim % d == 1
2058 def get_valid_denom(ifm_dim):
2059 return [x for x in range(1, 5) if ifm_dim % x == 1]
2060
2061 # Generate list of valid downscale values and choose one randomly
2062 valid_scale_y_ds = get_valid_denom(ifm_shape[1])
2063 valid_scale_x_ds = get_valid_denom(ifm_shape[2])
2064
2065 if not valid_scale_y_ds and not valid_scale_x_ds:
2066 # Bad parameters, skip
2067 continue
2068
2069 if not valid_scale_y_ds:
2070 scale_y_d = 1
2071 else:
2072 scale_y_d = testGen.rng.choice(valid_scale_y_ds)
2073
2074 if not valid_scale_x_ds:
2075 scale_x_d = 1
2076 else:
2077 scale_x_d = testGen.rng.choice(valid_scale_x_ds)
2078
2079 border_x = border_y = 0
2080 offset_y = testGen.randInt(0, 16 * scale_y_n)
2081 offset_x = testGen.randInt(0, 16 * scale_x_n)
2082 valid_params = True
2083
2084 scale = (scale_y_n, scale_y_d, scale_x_n, scale_x_d)
2085 offset = (offset_y, offset_x)
2086 border = (border_y, border_x)
2087 return scale, offset, border
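        # Illustrative values for the upscale branch above: shift=1 with
        # origin_sampling=False gives scale 4/1, border 1 and offset -1 (the
        # (-0.5, -0.5) sampling origin); with origin_sampling=True it is 2/1
        # with zero border and offset.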
2088
2089 def get_rand_params():
Jeremy Johnsonb2099702023-04-12 15:59:01 +01002090 def fix_scale_to_max_scale(scale_n, scale_d, max_scale):
2091 scale = scale_n / scale_d
2092 if scale > max_scale:
2093 factor = scale / max_scale
2094 new_scale_d = math.ceil(scale_d * factor)
2095 assert scale_n / new_scale_d <= max_scale
2096 scale_d = new_scale_d
2097 return scale_d
2098
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002099 # Scale
2100 scale_y_n = testGen.randInt(low=1, high=(1 << 11))
2101 scale_x_n = testGen.randInt(low=1, high=(1 << 11))
2102
2103 scale_y_d = testGen.randInt(low=1, high=(16 * scale_y_n))
2104 scale_x_d = testGen.randInt(low=1, high=(16 * scale_x_n))
2105
Jeremy Johnsonb2099702023-04-12 15:59:01 +01002106 scale_y_d = fix_scale_to_max_scale(
2107 scale_y_n, scale_y_d, testGen.TOSA_8K_LEVEL_MAX_SCALE
2108 )
2109 scale_x_d = fix_scale_to_max_scale(
2110 scale_x_n, scale_x_d, testGen.TOSA_8K_LEVEL_MAX_SCALE
2111 )
2112
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002113 # Offsets and border within the scale
2114 offset_y = testGen.randInt(low=-scale_y_n, high=(16 * scale_y_n))
2115 offset_x = testGen.randInt(low=-scale_x_n, high=(16 * scale_x_n))
2116 border_y = testGen.randInt(low=(-16 * scale_y_n), high=scale_y_n)
2117 border_x = testGen.randInt(low=(-16 * scale_x_n), high=scale_x_n)
2118
2119 scale = (scale_y_n, scale_y_d, scale_x_n, scale_x_d)
2120 offset = (offset_y, offset_x)
2121 border = (border_y, border_x)
2122 return scale, offset, border
2123
Jeremy Johnsonb2099702023-04-12 15:59:01 +01002124 def get_level_8k_params():
2125 # Create 64x scale - 64/1 to 2048/32
2126 scale_d = testGen.randInt(
2127 low=1, high=(1 << 11) / testGen.TOSA_8K_LEVEL_MAX_SCALE
2128 )
2129 scale_n = scale_d * testGen.TOSA_8K_LEVEL_MAX_SCALE
2130 # Create half to fifth scaling
2131 scale_d_alt = testGen.randInt(low=2, high=6)
2132 scale_n_alt = 1
2133 switch = testGen.rng.choice((False, True))
2134 if switch:
2135 scale = (scale_n_alt, scale_d_alt, scale_n, scale_d)
2136 else:
2137 scale = (scale_n, scale_d, scale_n_alt, scale_d_alt)
2138
2139 offset_y = testGen.rng.choice((-scale[0], 0, (16 * scale[0]) - 1))
2140 offset_x = testGen.rng.choice((-scale[2], 0, (16 * scale[2]) - 1))
2141 offset = (offset_y, offset_x)
2142 border_y = testGen.rng.choice((-16 * scale[0], 0, scale[0] - 1))
2143 border_x = testGen.rng.choice((-16 * scale[2], 0, scale[2] - 1))
2144 border = (border_y, border_x)
2145 return scale, offset, border
2146
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002147 for mode in [ResizeMode.NEAREST, ResizeMode.BILINEAR]:
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002148 # Exclude illegal {mode, type} configurations. Pick legal output types
2149 if mode == ResizeMode.NEAREST and dtype == DType.INT8:
2150 outputDTypeList = [DType.INT8]
2151 elif mode == ResizeMode.NEAREST and dtype == DType.INT16:
2152 outputDTypeList = [DType.INT16]
2153 elif mode == ResizeMode.BILINEAR and dtype == DType.INT8:
2154 outputDTypeList = [DType.INT32]
2155 elif mode == ResizeMode.BILINEAR and dtype == DType.INT16:
2156 outputDTypeList = [DType.INT48]
James Ward8b390432022-08-12 20:48:56 +01002157 elif dtype == DType.FP16:
2158 outputDTypeList = [DType.FP16]
James Ward24dbc422022-10-19 12:20:31 +01002159 elif dtype == DType.BF16:
2160 outputDTypeList = [DType.BF16]
Jeremy Johnsonbc2a3db2022-09-27 13:50:00 +01002161 elif dtype == DType.FP32:
2162 outputDTypeList = [DType.FP32]
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002163 elif error_name == ErrorIf.WrongInputType:
2164 # If an incorrect input type is used then we set a 'correct'
2165 # output type to avoid other errors
2166 outputDTypeList = [DType.INT8, DType.INT16, DType.INT32]
2167 else:
2168 continue
2169
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002170 arg_str = "mode{}_out{}_sc{}x{}x{}x{}_off{}x{}_bor{}x{}"
2171
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002172 for outputDType in outputDTypeList:
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002173 perm = 0
2174 while perm < testGen.args.num_rand_permutations:
2175 # Random choice of type of params we are testing
Jeremy Johnsonb2099702023-04-12 15:59:01 +01002176 if not testGen.args.level8k:
2177 _rnd_param_fn = testGen.rng.choice(
2178 (
2179 get_rand_params,
2180 get_upscale_downscale_params,
2181 get_aspect_ratio_resize_params,
2182 )
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002183 )
Jeremy Johnsonb2099702023-04-12 15:59:01 +01002184 scale, offset, border = _rnd_param_fn()
2185 else:
2186 scale, offset, border = get_level_8k_params()
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002187
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002188 # Expand params for bounds-checking
2189 (scale_y_n, scale_y_d, scale_x_n, scale_x_d) = scale
2190 (offset_y, offset_x) = offset
2191 (border_y, border_x) = border
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002192
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002193 # Make sure output dimensions OH and OW are integers
2194 partial_output_y = (
2195 (ifm_shape[1] - 1) * scale_y_n - offset_y + border_y
2196 )
2197 partial_output_x = (
2198 (ifm_shape[2] - 1) * scale_x_n - offset_x + border_x
2199 )
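                    # Worked example (illustrative): H=16, scale_y_n/d = 4/2,
                    # offset_y = border_y = 0 gives partial_output_y =
                    # (16-1)*4 = 60; 60 % 2 == 0, so the output is integer
                    # exact with output_y = 60//2 + 1 = 31.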
2200 if error_name == ErrorIf.ResizeOutputShapeNonInteger:
Jeremy Johnsonb2099702023-04-12 15:59:01 +01002201 # Look for non-integer test
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002202 if (
2203 partial_output_y % scale_y_d == 0
2204 and partial_output_x % scale_x_d == 0
2205 ):
2206 # Skip this test as it doesn't produce NonInteger output
Jeremy Johnsonb2099702023-04-12 15:59:01 +01002207 if perm > 0:
2208 perm += 1
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002209 continue
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002210 else:
Jeremy Johnsonb2099702023-04-12 15:59:01 +01002211 # Alter the scaling factors to make the output integer
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002212 while partial_output_y % scale_y_d != 0:
2213 scale_y_d -= 1
2214 while partial_output_x % scale_x_d != 0:
2215 scale_x_d -= 1
Jeremy Johnsonb2099702023-04-12 15:59:01 +01002216 # Make sure we are still within max scaling
2217 if (
2218 scale_y_n / scale_y_d
2219 ) > testGen.TOSA_8K_LEVEL_MAX_SCALE or (
2220 scale_x_n / scale_x_d
2221 ) > testGen.TOSA_8K_LEVEL_MAX_SCALE:
2222 # Skip the test as it is using too large a scaling factor
2223 if perm > 0:
2224 perm += 1
2225 continue
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002226
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002227 output_y = partial_output_y // scale_y_d + 1
2228 output_x = partial_output_x // scale_x_d + 1
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002229
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002230 if (
2231 output_y >= testGen.args.max_resize_output_dim
2232 or output_x >= testGen.args.max_resize_output_dim
2233 ) and error_name is None:
2234 # Skip positive test if output dim will be too high
2235 # Avoid high test latency and OOM issues
Jeremy Johnsonb2099702023-04-12 15:59:01 +01002236 if not testGen.args.level8k or perm > 0:
2237 perm += 1
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002238 continue
2239
2240 if (
2241 output_y <= 0
2242 or output_y >= MAX_RESIZE_DIMENSION
2243 or output_x <= 0
2244 or output_x >= MAX_RESIZE_DIMENSION
2245 ):
2246 # Output dimensions out of scope
2247 if error_name is not None and perm > 0:
2248 # As long as we have one ERROR_IF test, don't worry
2249 # about creating all the other permutations
2250 perm += 1
2251 continue
2252
2253 if error_name == ErrorIf.ResizeOutputShapeMismatch and (
2254 (
2255 output_y + scale_y_d >= MAX_RESIZE_DIMENSION
2256 and output_y - scale_y_d < 1
2257 )
2258 or (
2259 output_x + scale_x_d >= MAX_RESIZE_DIMENSION
2260 and output_x - scale_x_d < 1
2261 )
2262 ):
2263 # Can't create a negative test with these params as it
2264 # will create invalid output size
2265 if perm > 0:
2266 perm += 1
2267 continue
2268
2269 scale = [scale_y_n, scale_y_d, scale_x_n, scale_x_d]
2270 offset = [offset_y, offset_x]
2271 border = [border_y, border_x]
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002272
2273 # Common for all data types
2274 if error_name is not None:
2275 (
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002276 scale,
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002277 offset,
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002278 border,
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002279 outputDTypeNew,
2280 ) = TosaErrorIfArgGen.eiResizeErrorIf(
2281 testGen,
2282 error_name,
2283 mode,
2284 dtype,
2285 shapeList,
2286 outputDType,
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002287 scale,
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002288 offset,
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002289 border,
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002290 )
2291 else:
2292 outputDTypeNew = outputDType
2293
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002294 arg_to_append = (
2295 arg_str.format(
2296 "N" if mode == ResizeMode.NEAREST else "B",
2297 testGen.typeStr(outputDTypeNew),
2298 scale[0],
2299 scale[1],
2300 scale[2],
2301 scale[3],
2302 offset[0],
2303 offset[1],
2304 border[0],
2305 border[1],
2306 ),
2307 [
2308 mode,
2309 scale,
2310 offset,
2311 border,
2312 dtype,
2313 outputDTypeNew,
2314 ],
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002315 )
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002316 if arg_to_append in arg_list:
2317 # Skip already generated test params
2318 continue
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002319
Jeremy Johnsona0e03f32022-06-13 17:48:09 +01002320 # Valid permutation
2321 perm += 1
2322 arg_list.append(arg_to_append)
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002323 return arg_list
2324
2325 @staticmethod
2326 def agTable(testGen, opName, shapeList, dtype, error_name=None):
2327 arg_list = []
2328
2329 if dtype == DType.INT8:
2330 table = np.int32(
2331 testGen.rng.integers(low=-128, high=128, size=[256])
2332 ).tolist()
2333 else: # INT16
2334 table = np.int32(
2335 testGen.rng.integers(low=-32768, high=32768, size=[513])
2336 ).tolist()
Jerry Ged511f9e2022-08-12 16:12:40 -07002337 # Make sure all slopes are within REQUIRE min/max 16-bit int
2338 for idx in range(len(table) - 1):
2339 slope = table[idx + 1] - table[idx]
2340 # Alter the next table entry to force the slope to be ok
2341 if slope > 32767:
2342 table[idx + 1] -= slope - 32767
2343 if slope < -32768:
2344 table[idx + 1] -= slope + 32768
2345 slope = table[idx + 1] - table[idx]
2346 assert slope <= 32767 and slope >= -32768
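            # Worked example (illustrative): table[idx] = -32768 and
            # table[idx + 1] = 32767 give slope 65535; the next entry is
            # reduced by 65535 - 32767 = 32768 to -1, leaving a legal slope
            # of 32767.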
Jeremy Johnson9a66abb2022-04-07 11:29:20 +01002347 arg_list.append(
2348 (
2349 "",
2350 [table],
2351 )
2352 )
2353 return arg_list
2354
    @staticmethod
2355 def agCondIf(testGen, opName, shapeList, dtype, error_name=None):
2356 # CondIf generates the condition values here.
2357 # Convert to tensors in the build function, along with the
2358 # then and else blocks
2359 arg_list = []
2360
2361 for c in [False, True]:
2362 arg_list.append(("cond{}".format(int(c)), [c]))
2363
2364 return arg_list
2365
    @staticmethod
2366 def agWhileLoop(testGen, opName, shapeList, dtype, error_name=None):
2367 # While loop: 0 iterations, 1, more than 1
2368 arg_list = []
2369
2370 for iterations in [0, 1, 4]:
2371 arg_list.append(("iter{}".format(iterations), [iterations]))
2372
2373 return arg_list