# Copyright (c) 2021-2022, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import itertools
import math

import numpy as np
from generator.tosa_error_if import ErrorIf
from generator.tosa_error_if import TosaErrorIfArgGen
from generator.tosa_utils import MAX_RESIZE_DIMENSION
from serializer.tosa_serializer import DTypeNames
from tosa.DType import DType
from tosa.Op import Op
from tosa.ResizeMode import ResizeMode

# DTypeNames, DType, Op and ResizeMode are convenience variables to the
# flatc-generated types that should be enums, but aren't


class TosaQuantGen:
    """QuantizedInfo random generator helper functions.

    Specify with 'qgen': in the operator definition.
    """

    def __init__(self):
        pass

    @staticmethod
    def getZeroPoint(testGen, dtype, error_name=None):

        if dtype == DType.INT8:
            return testGen.randInt(-128, 128)
        elif dtype == DType.UINT8:
            return testGen.randInt(0, 256)
        elif error_name in [
            ErrorIf.InputZeroPointNotZero,
            ErrorIf.WeightZeroPointNotZero,
            ErrorIf.OutputZeroPointNotZero,
        ]:
            zero_point = testGen.randInt(-128, 128)
            if zero_point == 0:
                zero_point = 1
            return zero_point
        return 0

    @staticmethod
    def qgUnary(testGen, op, dtype, error_name=None):
        if error_name == ErrorIf.InputZeroPointNotZero:
            qinfo = [
                TosaQuantGen.getZeroPoint(testGen, dtype, error_name),
                TosaQuantGen.getZeroPoint(testGen, dtype),
            ]
        elif error_name == ErrorIf.OutputZeroPointNotZero:
            qinfo = [
                TosaQuantGen.getZeroPoint(testGen, dtype),
                TosaQuantGen.getZeroPoint(testGen, dtype, error_name),
            ]
        else:
            qinfo = [
                TosaQuantGen.getZeroPoint(testGen, dtype),
                TosaQuantGen.getZeroPoint(testGen, dtype),
            ]
        return qinfo

    @staticmethod
    def qgConv(testGen, op, dtype_or_dtypeList, error_name=None):
        if isinstance(dtype_or_dtypeList, list):
            # a list of [input, weights, accumulator] dtypes
            dtypeList = dtype_or_dtypeList
        else:
            # an int, [input, weights, accumulator] dtypes are the same
            dtypeList = [dtype_or_dtypeList] * 3

        if error_name == ErrorIf.InputZeroPointNotZero:
            qinfo = [
                TosaQuantGen.getZeroPoint(testGen, dtypeList[0], error_name),
                TosaQuantGen.getZeroPoint(testGen, dtypeList[1]),
            ]
        elif error_name == ErrorIf.WeightZeroPointNotZero:
            qinfo = [
                TosaQuantGen.getZeroPoint(testGen, dtypeList[0]),
                TosaQuantGen.getZeroPoint(testGen, dtypeList[1], error_name),
            ]
        else:
            qinfo = [
                TosaQuantGen.getZeroPoint(testGen, dtypeList[0]),
                TosaQuantGen.getZeroPoint(testGen, dtypeList[1]),
            ]
        return qinfo

    @staticmethod
    def qgMatmul(testGen, op, dtype, error_name=None):
        if error_name == ErrorIf.InputZeroPointNotZero:
            qinfo = [
                TosaQuantGen.getZeroPoint(testGen, dtype, error_name),
                TosaQuantGen.getZeroPoint(testGen, dtype, error_name),
            ]
        else:
            qinfo = [
                TosaQuantGen.getZeroPoint(testGen, dtype),
                TosaQuantGen.getZeroPoint(testGen, dtype),
            ]
        return qinfo

    @staticmethod
    def computeMultiplierAndShift(scaleFp, scale32):
        # Derived from computeMultiplierAndShiftTosaScale32
        # Provide a floating-point scaling factor and the scale32 parameter
        # to compute the multiplier and shift

        if scale32:
            scaleBits = 31
        else:
            scaleBits = 15

        m, shift = math.frexp(scaleFp)

        if scaleFp < 0.0:
            m = -m

        multiplier = round(m * (1 << scaleBits))
        assert multiplier <= (1 << scaleBits)

        if multiplier == (1 << scaleBits):
            multiplier = multiplier // 2
            shift = shift + 1

        shift = (-shift) + scaleBits
        # print('scalefp {} scaleBits {} m {} mult {} shift {}'.format(
        #     scaleFp, scaleBits, m, multiplier, shift))

        # Adjust multiplier such that shift is in allowed value range.
        if shift == 0:
            multiplier = multiplier // 4
            shift = shift + 2
        elif shift == 1:
            multiplier = multiplier // 2
            shift = shift + 1
        elif shift == 63:
            multiplier = multiplier * 2
            shift = shift - 1

        assert multiplier <= (1 << scaleBits)
        assert shift >= 2 and shift <= 62

        return multiplier, shift
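    # Worked example (illustrative values, not executed here): for
    # scaleFp=0.8 with scale32=True, math.frexp(0.8) returns (0.8, 0), so the
    # multiplier is round(0.8 * (1 << 31)) == 1717986918 and the final shift
    # is 0 + 31 == 31; a rescale of (value * 1717986918) >> 31 then
    # approximates value * 0.8.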


class TosaTensorGen:
    """Tensor generators create a shape list for the placeholder and const tensor
    data operands for the operator.

    The actual random data is generated separately for each test.
    """

    def __init__(self):
        pass

    @staticmethod
    def tgBasic(testGen, opName, rank, error_name=None):
        pl, const = opName["operands"]
        shape = testGen.makeShape(rank)

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            shape = TosaErrorIfArgGen.eiRestrictDimensions(shape)

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

            if error_name == ErrorIf.RankMismatch:
                if rank == 1 and i != 1:
                    shape = testGen.makeShape(rank + testGen.rng.choice([1, 2, 3]))
                elif i != 1:
                    shape = testGen.makeShape(rank + testGen.rng.choice([-1, 1]))

        return shape_list

    @staticmethod
    def tgNHWC(testGen, opName, rank, error_name=None):
        pl, const = opName["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 4

        shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            shape[0] = (shape[0] % testGen.args.max_batch_size) + 1

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name and error_name != ErrorIf.MaxDimExceeded:
            shape = TosaErrorIfArgGen.eiRestrictDimensions(shape)

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgScatter(testGen, opName, rank, error_name=None):
        pl, const = opName["operands"]

        assert pl == 2
        assert const == 0
        if error_name != ErrorIf.WrongRank:
            assert rank == 3

        values_in_shape = testGen.makeShape(rank)

        # ignore max batch size if target shape is set
        if testGen.args.max_batch_size and not testGen.args.target_shapes:
            values_in_shape[0] = (values_in_shape[0] % testGen.args.max_batch_size) + 1

        W = testGen.randInt(
            testGen.args.tensor_shape_range[0], testGen.args.tensor_shape_range[1]
        )
        # Constrict W if one dimension is too large to keep tensor size reasonable
        if max(values_in_shape) > 5000:
            W = testGen.randInt(0, 16)

        input_shape = [values_in_shape[0], W, values_in_shape[2]]

        shape_list = []
        shape_list.append(values_in_shape.copy())
        shape_list.append(input_shape.copy())

        return shape_list

    @staticmethod
    def tgBroadcastFuzz(testGen, op, rank, error_name=None):
        shape = testGen.makeShape(rank)

        pl, const = op["operands"]

        shape_list = []

        # Choose one of the inputs to broadcast
        # Note: Simplifies OutputShaper code if we don't change first shape for errors
        bcast_idx = testGen.randInt(0 if error_name is None else 1, pl + const)
        for i in range(pl + const):
            shape_bcast = shape.copy()

            # If the chosen input, pick a random index to broadcast
            if i == bcast_idx:
                fuzz_idx = testGen.randInt(0, rank)
                if error_name == ErrorIf.DimensionMismatch:
                    shape_bcast[fuzz_idx] += 1
                elif error_name == ErrorIf.RankMismatch:
                    # Add one rank to the shape (or more for rank of 1)
                    extra_ranks = testGen.rng.choice([1, 2, 3]) if rank == 1 else 1
                    shape_bcast = np.concatenate(
                        (shape_bcast, testGen.makeShape(extra_ranks))
                    )
                    if rank != 1:
                        # Either keep the extra rank, or remove it
                        new_len = testGen.rng.choice([-2, len(shape_bcast)])
                        shape_bcast = shape_bcast[:new_len]
                else:
                    shape_bcast[fuzz_idx] = 1

            shape_list.append(shape_bcast)

        return shape_list
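    # For example (illustrative values): with rank 3, shape [4, 5, 6] and two
    # operands, if bcast_idx selects the second operand and fuzz_idx == 1, the
    # result is shape_list == [[4, 5, 6], [4, 1, 6]], exercising broadcast
    # along axis 1.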

    @staticmethod
    def tgConv2D(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 4

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            ifm_shape = TosaErrorIfArgGen.eiRestrictDimensions(
                ifm_shape, max_dim=24, max_items=10000
            )

        # Get the filter height/width from the operator parameters
        filter_hw = op["filter"]

        # Generate a random OFM depth
        ofm_depth = testGen.makeShape(1)[0]

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        # The bias is OC
        bias_shape = np.asarray([ofm_depth])

        return [ifm_shape, filter_shape, bias_shape]
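    # For example (illustrative values): an NHWC input of [1, 16, 16, 8] with
    # a 3x3 kernel and a random OFM depth of 4 produces an OHWI filter shape
    # of [4, 3, 3, 8] and a bias shape of [4].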

    @staticmethod
    def tgConv3D(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 5

        # IFM dimensions are NDHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            ifm_shape = TosaErrorIfArgGen.eiRestrictDimensions(
                ifm_shape, max_dim=24, max_items=10000
            )

        # Get the filter depth/height/width from the operator parameters
        filter_dhw = op["filter"]

        # Generate a random OFM channel
        ofm_channel = testGen.makeShape(1)[0]

        # The filter dimensions are ODHWI
        filter_shape = np.asarray(
            [ofm_channel, filter_dhw[0], filter_dhw[1], filter_dhw[2], ifm_shape[4]]
        )

        # The bias is OC
        bias_shape = np.asarray([ofm_channel])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgTransposeConv2D(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 4

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            ifm_shape = TosaErrorIfArgGen.eiRestrictDimensions(
                ifm_shape, max_dim=24, max_items=10000
            )

        # Get the filter height/width from the operator parameters
        filter_hw = op["filter"]

        # Generate a random OFM depth
        ofm_depth = testGen.makeShape(1)[0]

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        # The bias is OC
        bias_shape = np.asarray([ofm_depth])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgDepthwiseConv2D(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 4
        assert pl == 1 and const == 2

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            ifm_shape = TosaErrorIfArgGen.eiRestrictDimensions(
                ifm_shape, max_dim=24, max_items=10000
            )

        # Get the filter height/width from the operator parameters
        # Filter is KH, KW, C, M
        filter_hw = op["filter"]

        # Generate a random OFM depth, but don't let it get too big because
        # the output depth is M * C
        filter_m = (
            testGen.makeShape(1)[0] % (testGen.args.tensor_shape_range[1] // 4)
        ) + 1

        # The filter dimensions are HWCM
        filter_shape = np.asarray([filter_hw[0], filter_hw[1], ifm_shape[3], filter_m])

        # The bias is M * C
        bias_shape = np.asarray([ifm_shape[3] * filter_m])

        return [ifm_shape, filter_shape, bias_shape]
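    # For example (illustrative values): an input of [1, 8, 8, 3] with a 2x2
    # kernel and a channel multiplier M of 2 gives an HWCM filter shape of
    # [2, 2, 3, 2] and a bias shape of [6], since the output depth is M * C.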

    @staticmethod
    def tgFullyConnected(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 2

        input_shape = testGen.makeShape(rank)

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            input_shape = TosaErrorIfArgGen.eiRestrictDimensions(input_shape)

        filter_oc = testGen.rng.integers(
            low=testGen.args.tensor_shape_range[0],
            high=testGen.args.tensor_shape_range[1],
            size=1,
        )[0]
        filter_shape = np.asarray([filter_oc, input_shape[1]])

        bias_shape = np.asarray([filter_oc])

        return [input_shape, filter_shape, bias_shape]

    @staticmethod
    def tgMatmul(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 3
        assert pl == 2 and const == 0

        a_shape = testGen.makeShape(rank)

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            a_shape = TosaErrorIfArgGen.eiRestrictDimensions(a_shape)

        # Get a random number for b_oc even if target shape is defined
        b_oc = np.int32(
            testGen.rng.integers(
                low=testGen.args.tensor_shape_range[0],
                high=testGen.args.tensor_shape_range[1],
                size=1,
            )
        )[0]
        # If N or H is large let b_oc be 1 to reduce output tensor size
        if max(a_shape) > 1000:
            b_oc = 1

        b_shape = np.asarray([a_shape[0], a_shape[2], b_oc])
        return [a_shape, b_shape]
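    # For example (illustrative values): a_shape [2, 5, 7] pairs with b_shape
    # [2, 7, b_oc], so MATMUL contracts the shared dimension 7 and produces an
    # output of [2, 5, b_oc].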

    @staticmethod
    def tgConcat(testGen, opName, rank, error_name=None):
        pl, const = opName["operands"]
        shape = testGen.makeShape(rank)

        # Create extra tensors to concat.
        # Take into account value of pl when getting maximum number of concats
        num_tensors = testGen.randInt(0, 4)
        shape_list = []
        for i in range(pl + const + num_tensors):
            if error_name == ErrorIf.ConcatInputRankMismatch and i != 0:
                remove = testGen.rng.choice([True, False])
                wrongShape = shape.copy()

                if remove and len(shape) > 1:
                    wrongShape = wrongShape[1:]
                else:
                    wrongShape = list(wrongShape)
                    wrongShape.append(testGen.rng.integers(1, 10))

                shape_list.append(wrongShape)
            else:
                shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgConcatConstInput(testGen, shapeList, axis, error_name=None):
        if error_name in [
            ErrorIf.AxisSmallerZero,
            ErrorIf.AxisLargerRank,
            ErrorIf.ConcatInputRankMismatch,
        ]:
            return shapeList

        # Split concat shape along axis to allow for multiple const inputs
        # without making too many large tensors
        if len(shapeList) == 2 or shapeList[0][axis] < len(shapeList):
            # If axis can't be split we still need to invalidate other dimensions
            if error_name == ErrorIf.ConcatInputDimMismatch:
                for shape in shapeList[1:]:
                    # Negative test shapeLists are created individually for each test,
                    # so no need to copy the shape before altering it.
                    shape[(axis + 1) % len(shape)] += testGen.rng.integers(5, 10)
            return shapeList

        # Create copy of shape we are going to split (so we don't alter shapeList)
        shape = shapeList[0].copy()
        # Add original shape as first input
        new_shapeList = [shape.copy()]
        length_on_axis = shape[axis]
        remaining_length = length_on_axis
        for i in range(len(shapeList) - 2):
            # Calculate split on axis and remaining value
            split_shape_val = int(shape[axis] / 2)
            remaining_length = remaining_length - split_shape_val

            # Append new shape, and set remaining shape
            shape[axis] = split_shape_val
            new_shapeList.append(shape.copy())

            # invalidate dimensions
            if error_name == ErrorIf.ConcatInputDimMismatch:
                shape[(axis + 1) % len(shape)] += testGen.rng.integers(5, 10)
            else:
                shape[axis] = remaining_length

            if i == len(shapeList) - 3:
                new_shapeList.append(shape.copy())

        return new_shapeList
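    # For example (illustrative values): four input shapes of [2, 16, 3] with
    # axis=1 are rewritten to [[2, 16, 3], [2, 8, 3], [2, 4, 3], [2, 4, 3]],
    # repeatedly halving the axis so later (const) inputs stay small.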


class TosaTensorValuesGen:
    """Tensor Value generators create the random data for each test."""

    def __init__(self):
        pass

    @staticmethod
    def tvgDefault(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        pCount, cCount = op["operands"]

        tens = []
        tens.extend(
            testGen.buildPlaceholderTensors(shapeList[0:pCount], dtypeList[0:pCount])
        )
        tens.extend(testGen.buildConstTensors(shapeList[pCount:], dtypeList[pCount:]))

        return tens

    @staticmethod
    def tvgNegate(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        if dtypeList[0] == DType.INT32 and error_name is None:
            pCount, cCount = op["operands"]
            assert (
                pCount == 1 and cCount == 0
            ), "Op.NEGATE must have 1 placeholder, 0 consts"
            # Must create tensors with values within accumulator (int32) negatable
            # range
            max_val = (1 << 31) - 1
            min_val = -max_val
            arr = np.int32(
                testGen.rng.integers(low=min_val, high=(max_val + 1), size=shapeList[0])
            )
            placeholders = []
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], arr)
            )
            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, error_name
            )

    @staticmethod
    def tvgAddSub(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        if dtypeList[0] == DType.INT32 and error_name is None:
            # Make sure the operation does not cause value saturation - where
            # the number wraps due to limited number of bits to store the answer
            pCount, cCount = op["operands"]
            assert (
                pCount == 2 and cCount == 0
            ), "Op.ADD / Op.SUB must have 2 placeholders, 0 consts"
            placeholders = []
            add = op["op"] == Op.ADD
            a_arr = testGen.getRandTensor(shapeList[0], dtypeList[0])
            b_arr = testGen.getRandTensor(shapeList[1], dtypeList[1])
            if add:
                res_arr = np.add(a_arr, b_arr, dtype=np.int64)
            else:
                res_arr = np.subtract(a_arr, b_arr, dtype=np.int64)

            # Work out the saturation limits
            max_i32 = (1 << 31) - 1
            min_i32 = -(1 << 31)
            max_arr = np.full(shapeList[1], max_i32)
            min_arr = np.full(shapeList[1], min_i32)

            # Find how much values exceed the maximum/minimums
            sat_max_arr = np.maximum(res_arr - max_arr, 0)
            sat_min_arr = np.minimum(res_arr - min_arr, 0)

            if not add:
                # Swap saturation values and negate values as we need to perform opposite operations
                sat_max_arr, sat_min_arr = -sat_min_arr, -sat_max_arr

            # Create new array of unsaturated values by clipping values as needed
            b_unsat_arr = b_arr
            if (sat_max_arr != 0).any():
                # Clip values that cause saturation
                b_unsat_arr = np.subtract(b_unsat_arr, sat_max_arr, dtype=np.int32)
                # Reduce axes in unsaturated tensor to match original tensor
                for axis, dim in enumerate(b_arr.shape):
                    if dim != b_unsat_arr.shape[axis]:
                        assert (
                            dim == 1
                        ), "Op.ADD / SUB dimension must be 1 or matching to be broadcastable"
                        b_unsat_arr = np.amin(b_unsat_arr, axis=axis, keepdims=True)

            if (sat_min_arr != 0).any():
                # Clip values that cause saturation
                b_unsat_arr = np.subtract(b_unsat_arr, sat_min_arr, dtype=np.int32)
                # Reduce axes in unsaturated tensor to match original tensor
                for axis, dim in enumerate(b_arr.shape):
                    if dim != b_unsat_arr.shape[axis]:
                        assert (
                            dim == 1
                        ), "Op.ADD / SUB dimension must be 1 or matching to be broadcastable"
                        b_unsat_arr = np.amax(b_unsat_arr, axis=axis, keepdims=True)

            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
            )
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], b_unsat_arr)
            )

            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, error_name
            )
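    # For example (illustrative values): if a == 2**31 - 10 and b == 25, the
    # int64 ADD result exceeds INT32_MAX by 16, so b is clipped down by that
    # excess to 9, making a + b land exactly on INT32_MAX without saturating.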

    @staticmethod
    def tvgCondIfWhileLoop(
        testGen, op, dtypeList, shapeList, testArgs, error_name=None
    ):
        if dtypeList[0] in (
            DType.INT32,
            DType.INT16,
            DType.INT8,
        ):
            # Limit input tensors with cond_if_binary or while_loop to stop
            # saturation of add/sub ops with int32 and keep all logical shift
            # values between 0 to 31 for int16 or int8
            pCount, cCount = op["operands"]
            pRemain = pCount
            placeholders = []
            for idx, shape in enumerate(shapeList[:]):
                if dtypeList[0] == DType.INT32:
                    arr = testGen.getRandTensor(shapeList[idx], DType.INT16)
                else:
                    arr = np.int32(
                        testGen.rng.integers(low=0, high=32, size=shapeList[idx])
                    )
                if pRemain > 0:
                    placeholders.append(
                        testGen.ser.addPlaceholder(shape, dtypeList[idx], arr)
                    )
                    pRemain -= 1
                else:
                    placeholders.append(
                        testGen.ser.addConst(shape, dtypeList[idx], arr)
                    )

            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, error_name
            )

    @staticmethod
    def tvgArithmeticRightShift(
        testGen, op, dtypeList, shapeList, testArgs, error_name=None
    ):
        pCount, cCount = op["operands"]
        # Force value of operand[1] to be within [0, num_bits]
        assert (
            pCount == 2 and cCount == 0
        ), "Op.ArithmeticRightShift must have 2 placeholders, 0 consts"

        placeholders = []
        for idx, shape in enumerate(shapeList[:]):
            if idx == 1:
                if dtypeList[idx] == DType.INT8:
                    arr = np.int32(testGen.rng.integers(low=0, high=8, size=shape))
                elif dtypeList[idx] == DType.INT16:
                    arr = np.int32(testGen.rng.integers(low=0, high=16, size=shape))
                elif dtypeList[idx] == DType.INT32:
                    arr = np.int32(testGen.rng.integers(low=0, high=32, size=shape))
                elif error_name == ErrorIf.WrongInputType:
                    arr = np.int32(testGen.rng.integers(low=0, high=8, size=shape))
                else:
                    raise Exception("OpArithmeticRightShift: invalid input dtype")
            else:
                arr = testGen.getRandTensor(shape, dtypeList[idx])
            placeholders.append(testGen.ser.addPlaceholder(shape, dtypeList[idx], arr))

        return placeholders

    @staticmethod
    def tvgSelect(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        # Set datatype of condition tensor to boolean
        dtypeList[0] = DType.BOOL

        return TosaTensorValuesGen.tvgDefault(
            testGen, op, dtypeList, shapeList, testArgs, error_name
        )

    @staticmethod
    def tvgIntDiv(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        if error_name is None:
            pCount, cCount = op["operands"]
            assert (
                pCount == 2 and cCount == 0
            ), "Op.INTDIV must have 2 placeholders, 0 consts"

            placeholders = []

            # Two invalid cases for Op.INTDIV:
            # 1. divisor == 0
            # 2. dividend == -(1<<31) and divisor == -1
            while True:
                dividend_arr = testGen.getRandTensor(shapeList[0], dtypeList[0])
                divisor_arr = testGen.getRandTensor(shapeList[1], dtypeList[1])

                if (divisor_arr == 0).any():
                    continue

                if (dividend_arr == -(2**31)).any() and (divisor_arr == -1).any():
                    continue

                break

            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], dividend_arr)
            )
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], divisor_arr)
            )

            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, error_name
            )
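    # The two rejected cases mirror the INTDIV ERROR_IF rules: division by
    # zero is undefined, and -(1 << 31) // -1 == 1 << 31, which overflows
    # int32, so random tensors containing either pattern are simply redrawn.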

    @staticmethod
    def tvgMul(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        if error_name is None:
            pCount, cCount = op["operands"]
            assert (
                pCount == 2 and cCount == 0
            ), "Op.MUL must have 2 placeholders, 0 consts"

            tens = []
            if dtypeList[0] == DType.FLOAT:
                tens.extend(testGen.buildPlaceholderTensors(shapeList[:], dtypeList[:]))
            else:
                placeholders = []

                # Make sure multiply result in int32 range
                shift = testArgs[0]
                if dtypeList[0] == DType.INT8:
                    num_bits = 8
                elif dtypeList[0] == DType.INT16:
                    num_bits = 16
                elif dtypeList[0] == DType.INT32:
                    num_bits = 32
                elif error_name == ErrorIf.WrongInputType:
                    num_bits = 8
                else:
                    raise Exception("OpMul: invalid input dtype")

                for idx, shape in enumerate(shapeList[:]):
                    low = -(2 ** (num_bits - 1))
                    high = (2 ** (num_bits - 1)) - 1

                    a_arr = np.int32(
                        testGen.rng.integers(low=low, high=high, size=shapeList[0])
                    )
                    b_arr = np.int32(
                        testGen.rng.integers(low=low, high=high, size=shapeList[1])
                    )

                i = 0
                while True:

                    a_arr_64 = a_arr.astype(np.int64)
                    b_arr_64 = b_arr.astype(np.int64)

                    if shift > 0:
                        rounding = 1 << (shift - 1)
                        result_arr = ((a_arr_64 * b_arr_64) + rounding) >> shift
                    else:
                        result_arr = a_arr_64 * b_arr_64

                    if (result_arr > -(2**31)).all() and (
                        result_arr <= ((2**31) - 1)
                    ).all():
                        break

                    i = i + 1
                    a_arr = a_arr // 2
                    b_arr = b_arr // 2

                placeholders.append(
                    testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
                )
                placeholders.append(
                    testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr)
                )

                tens.extend(placeholders)

            return tens
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, error_name
            )
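    # For example (illustrative values): with shift == 4 the checked product
    # is (a * b + 8) >> 4, i.e. rounded-to-nearest; if any element falls
    # outside the int32 range, both operands are halved and the check retried.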

    @staticmethod
    def tvgConcat(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        count = len(shapeList) - testGen.args.num_const_inputs_concat
        if count < 1:
            count = 1
        if testGen.args.num_const_inputs_concat == 0:
            count = len(shapeList)

        # Ensure axis is an int
        testArgs[0] = int(testArgs[0])

        shapeList = TosaTensorGen.tgConcatConstInput(
            testGen, shapeList, testArgs[0], error_name
        )

        tens = []
        tens.extend(
            testGen.buildPlaceholderTensors(shapeList[0:count], dtypeList[0:count])
        )
        tens.extend(testGen.buildConstTensors(shapeList[count:], dtypeList[count:]))

        return tens

    @staticmethod
    def tvgLogicalShift(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        pCount, cCount = op["operands"]
        assert (
            pCount == 2 and cCount == 0
        ), "Op.LOGICAL_LEFT_SHIFT or Op.LOGICAL_RIGHT_SHIFT must have 2 placeholders, 0 consts"
        values_arr = testGen.getRandTensor(shapeList[0], dtypeList[0])
        shift_arr = np.int32(testGen.rng.integers(low=0, high=32, size=shapeList[1]))
        placeholders = []
        placeholders.append(
            testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], values_arr)
        )
        placeholders.append(
            testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], shift_arr)
        )

        return placeholders

    @staticmethod
    def tvgEqual(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        if error_name is None:
            pCount, cCount = op["operands"]
            assert (
                pCount == 2 and cCount == 0
            ), "Op.EQUAL must have 2 placeholders, 0 consts"
            a_arr = testGen.getRandTensor(shapeList[0], dtypeList[0])
            b_arr = testGen.getRandTensor(shapeList[1], dtypeList[1])
            # Using random numbers means that it will be very unlikely that
            # there are any matching (equal) values, therefore force that
            # there are twice the number of matching values as the tensor rank
            for num in range(0, len(shapeList[0]) * 2):
                a_index = []
                b_index = []
                # Choose an index in each axis for the whole shape
                for axis in range(0, len(shapeList[0])):
                    # Index can be up to the largest dimension in both shapes
                    index = np.int32(
                        testGen.rng.integers(
                            0, max(shapeList[0][axis], shapeList[1][axis])
                        )
                    )
                    # Reduce the index down to a shape's dim for broadcasting
                    a_index.append(min(shapeList[0][axis] - 1, index))
                    b_index.append(min(shapeList[1][axis] - 1, index))

                a_arr[tuple(a_index)] = b_arr[tuple(b_index)]

            placeholders = []
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
            )
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr)
            )
            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, error_name
            )

    @staticmethod
    def tvgReduceSum(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
        if dtypeList[0] == DType.INT32:
            pCount, cCount = op["operands"]
            assert (
                pCount == 1 and cCount == 0
            ), "Op.REDUCE_SUM must have 1 placeholder, 0 consts"
            # Limit values so that the sum cannot exceed the range of an int32 during
            # summation of any axis
            range_val = int((1 << 31) / max(shapeList[0]))
            values_arr = np.int32(
                testGen.rng.integers(low=-range_val, high=range_val, size=shapeList[0])
            )
            placeholders = []
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], values_arr)
            )
            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, error_name
            )


class TosaArgGen:
    """Argument generators create exhaustive or random lists of attributes for
    operators that take attributes or other parameters.

    The return value is a list of (descriptive_name, [arglist]) tuples where
    the descriptive_name is appended to the test name and the arglist is expanded
    as arguments to the operator build function.
    """

    def __init__(self):
        pass

    @staticmethod
    def agNone(testGen, opName, shapeList, dtype, error_name=None):
        """A trivial argument generator for operators that don't take any
        non-tensor arguments"""
        return [("", [])]

    @staticmethod
    def agAxis(testGen, opName, shapeList, dtype, error_name=None):
        """Build the axis argument for operators that take a single axis"""
        axes = []
        shape = shapeList[0]

        if error_name == ErrorIf.AxisSmallerZero:
            small_axis = testGen.rng.integers(-5, 0)
            axes.append(("axis{}".format(small_axis), [small_axis]))
        elif error_name == ErrorIf.AxisLargerRank:
            large_axis = testGen.rng.integers(len(shape) + 1, len(shape) + 10)
            axes.append(("axis{}".format(large_axis), [large_axis]))
        else:
            for a in range(0, len(shape)):
                axes.append(("axis{}".format(a), [a]))

        return axes

    @staticmethod
    def agConv(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]
        # determine the kernel shape from operator name (e.g. "conv2d_3x3" => [3,3])
        k = [int(x) for x in opName.split("_")[-1].split("x")]

        # Check the rank
        rank = 5 if opName.startswith("conv3d") else 4
        if error_name != ErrorIf.WrongRank:
            assert len(ifm_shape) == rank
            assert len(filter_shape) == rank

        # kernel rank omits batch and channels
        k_rank = rank - 2
        assert len(k) == k_rank

        # Generate comprehensive argument lists
        # - except for named errors, which use specific invalid value(s)
        if error_name == ErrorIf.PadSmallerZero:
            p_vals = [testGen.rng.choice(range(-5, 0))]
        else:
            p_vals = [x for x in range(0, testGen.args.max_conv_padding + 1)]
        paddings = {x for x in itertools.product(*([p_vals] * k_rank * 2))}
        if error_name == ErrorIf.StrideSmallerOne:
            # Can't use stride=0, as it is used to derive output shape, as a divisor
            s_vals = [testGen.rng.choice(range(-5, 0))]
        else:
            # Stride must be greater than 1 to force non-integer error
            startStride = 1 if error_name != ErrorIf.PoolingOutputShapeNonInteger else 2
            s_vals = [x for x in range(startStride, testGen.args.max_conv_stride + 1)]
        strides = {x for x in itertools.product(*([s_vals] * k_rank))}
        if error_name == ErrorIf.DilationSmallerOne:
            d_vals = [testGen.rng.choice(range(-5, 1))]
        else:
            d_vals = [x for x in range(1, testGen.args.max_conv_dilation + 1)]
        dilations = {x for x in itertools.product(*([d_vals] * k_rank))}

        if not error_name and testGen.args.oversize:
            # add some oversize argument values
            if max(ifm_shape) < 64:
                bigPadding = 9
                paddings.update(
                    {x for x in itertools.product(*([[0, bigPadding]] * (k_rank * 2)))}
                )
            bigStride = 8
            strides.update({x for x in itertools.product(*([[1, bigStride]] * k_rank))})
            bigDilation = 7
            dilations.update(
                {x for x in itertools.product(*([[1, bigDilation]] * k_rank))}
            )

        # There are too many parameter combinations, so generate them sparsely,
        # very sparse for negative tests
        sparsity_factor = 2 if error_name else 120
        sparsity = len(paddings) * len(strides) * len(dilations) // sparsity_factor + 1
        # If there are only a small number of tests, just select them all
        if sparsity < 13:
            sparsity = 1
        # To get a variety of parameter combinations sparsity should not be a
        # multiple of 2, 3 or 5
        while sparsity % 2 == 0 or sparsity % 3 == 0 or sparsity % 5 == 0:
            sparsity += 1
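        # For example (illustrative values): 1560 combinations with the
        # default sparsity_factor of 120 give sparsity == 14, stepped up to 17
        # (the next value with no factor of 2, 3 or 5), so roughly every 17th
        # valid combination is emitted below.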

        n = 0
        for s in sorted(list(strides)):
            for p in sorted(list(paddings)):
                for d in sorted(list(dilations)):
                    if (
                        n % sparsity == 0
                        # padding must not exceed the kernel size ?
                        # and p[0] < k[0] and p[1] < k[0]
                        # and p[2] < k[1] and p[3] < k[1]
                        # and (k_rank < 3 or (p[4] < k[2] and p[5] < k[2]))
                        # the padded shape must exceed the kernel size
                        and (ifm_shape[1] + p[0] + p[1]) > k[0]
                        and (ifm_shape[2] + p[2] + p[3]) > k[1]
                        and (k_rank < 3 or ((ifm_shape[3] + p[4] + p[5]) > k[2]))
                        # the padded shape must exceed the dilation
                        and (ifm_shape[1] + p[0] + p[1]) > d[0]
                        and (ifm_shape[2] + p[2] + p[3]) > d[1]
                        and (k_rank < 3 or ((ifm_shape[3] + p[4] + p[5]) > d[2]))
                    ):
                        remainders = []
                        for index in range(k_rank):
                            pad_offset = index * 2
                            remainders.append(
                                (
                                    ifm_shape[index + 1]
                                    - 1
                                    + p[pad_offset]
                                    + p[pad_offset + 1]
                                    - (k[index] - 1) * d[index]
                                )
                                % s[index]
                            )
                        if (
                            # the parameters must produce integer exact output
                            error_name != ErrorIf.ConvOutputShapeNonInteger
                            and max(remainders) == 0
                        ) or (
                            error_name == ErrorIf.ConvOutputShapeNonInteger
                            and max(remainders) > 0
                        ):
                            arg_list.append(
                                (
                                    "st{}_pad{}_dilat{}".format(
                                        "".join([str(x) for x in s]),
                                        "".join([str(x) for x in p]),
                                        "".join([str(x) for x in d]),
                                    ),
                                    [s, p, d],
                                )
                            )
                    n += 1

        return arg_list

    @staticmethod
    def agTransposeConv2D(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]

        # Must be rank 4
        if error_name != ErrorIf.WrongRank:
            assert len(ifm_shape) == 4
            assert len(filter_shape) == 4

        # Generate comprehensive argument lists
        # - except for named errors, which use specific invalid value(s)
        if error_name == ErrorIf.PadSmallerZero:
            p_vals = [testGen.rng.choice(range(-5, 0))]
        else:
            p_vals = [x for x in range(0, testGen.args.max_conv_padding + 1)]
        paddings = {x for x in itertools.product(*([p_vals] * 4))}
        if error_name == ErrorIf.StrideSmallerOne:
            # Can't use stride=0, as it is used to derive output shape, as a divisor
            s_vals = [testGen.rng.choice(range(-5, 0))]
        else:
            s_vals = [x for x in range(1, testGen.args.max_conv_stride + 1)]
        strides = {x for x in itertools.product(*([s_vals] * 2))}

        if not error_name and testGen.args.oversize:
            # add some oversize argument values
            if max(ifm_shape) < 64:
                bigPadding = 9
                paddings.update(
                    {x for x in itertools.product(*([[0, bigPadding]] * 4))}
                )
            bigStride = 8
            strides.update({x for x in itertools.product(*([[1, bigStride]] * 2))})

        # There are too many parameter combinations, so generate them sparsely,
        # very sparse for negative tests
        sparsity_factor = 2 if error_name else 10
        sparsity = len(paddings) * len(strides) // sparsity_factor + 1
        # If there are only a small number of tests, just select them all
        if sparsity < 13:
            sparsity = 1
        # To get a variety of parameter combinations sparsity should not be a
        # multiple of 2, 3 or 5
        while sparsity % 2 == 0 or sparsity % 3 == 0 or sparsity % 5 == 0:
            sparsity += 1

        n = 0
        for s in sorted(list(strides)):
            for p in sorted(list(paddings)):
                if n % sparsity == 0:
                    # Determine the output shape
                    oh = (ifm_shape[1] - 1) * s[0] - p[0] - p[1] + filter_shape[1]
                    ow = (ifm_shape[2] - 1) * s[1] - p[2] - p[3] + filter_shape[2]
                    os = [ifm_shape[0], oh, ow, filter_shape[0]]
                    arg_list.append(
                        (
                            "st{}_pad{}_os{}".format(
                                "".join([str(x) for x in s]),
                                "".join([str(x) for x in p]),
                                "x".join([str(x) for x in os]),
                            ),
                            [s, p, os],
                        )
                    )
                n += 1

        return arg_list

    @staticmethod
    def agPad(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []
        rank = len(shapeList[0])

        # Exhaustively test combinations of padding on each side of each dimension
        # - the range of padding values is defined by pad_min and pad_max
        # - for padding >9, the name format needs to be more distinctive
        pad_min, pad_max = 0, 1
        pad_values = [x for x in range(pad_min, pad_max + 1)]
        if error_name == ErrorIf.PadSmallerZero:
            pad_values = [x for x in range(-2, 0)]
        axis_pad_values = [x for x in itertools.product(pad_values, pad_values)]
        shape_pad_values = itertools.product(*([axis_pad_values] * rank))

        if dtype in [DType.BOOL, DType.INT8, DType.INT16, DType.INT32]:
            pad_const_int = testGen.getRandNumberDType(dtype)
            pad_const_fp = 0
        elif dtype == DType.FLOAT:
            pad_const_int = 0
            pad_const_fp = testGen.getRandNumberDType(dtype)
        else:
            return []

        for paddings in shape_pad_values:
            name = "pad"
            for r in range(rank):
                before, after = paddings[r]
                name = f"{name}{before}{after}"
            arg_list.append((name, [np.array(paddings), pad_const_int, pad_const_fp]))

        return arg_list

    @staticmethod
    def agPooling(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        shape = shapeList[0]
        if error_name != ErrorIf.WrongRank:
            assert len(shape) == 4

        # Generate comprehensive argument lists
        p_vals = [x for x in range(0, testGen.args.max_pooling_padding + 1)]
        paddings = {x for x in itertools.product(*([p_vals] * 4))}
        # Stride must be greater than 1 to force non-integer error
        startStride = 1 if error_name != ErrorIf.PoolingOutputShapeNonInteger else 2
        s_vals = [x for x in range(startStride, testGen.args.max_pooling_stride + 1)]
        strides = {x for x in itertools.product(*([s_vals] * 2))}
        k_vals = [x for x in range(2, testGen.args.max_pooling_kernel + 1)]
        kernels = {x for x in itertools.product(*([k_vals] * 2))}

        if testGen.args.oversize:
            # add some oversize argument values
            bigStride = 7
            strides.update(
                {x for x in itertools.product(*([[startStride, bigStride]] * 2))}
            )
            bigKernel = 9
            kernels.update({x for x in itertools.product(*([[2, bigKernel]] * 2))})
            if max(shape) < 64:
                # padding must be less than the kernel size
                bigPadding = bigKernel - 1
                paddings.update(
                    {x for x in itertools.product(*([[0, bigPadding]] * 4))}
                )

        # There are too many parameter combinations, so generate them sparsely,
        # very sparse for negative tests
        sparsity_factor = 2 if error_name else 500
        sparsity = len(paddings) * len(strides) * len(kernels) // sparsity_factor + 1

        n = 0
        for s in sorted(list(strides)):
            for p in sorted(list(paddings)):
                for k in sorted(list(kernels)):
                    if error_name in [
                        ErrorIf.StrideSmallerOne,
                        ErrorIf.KernelSmallerOne,
                        ErrorIf.PadSmallerZero,
                        ErrorIf.PadLargerEqualKernel,
                    ]:
                        sNew, pNew, kNew = TosaErrorIfArgGen.eiPoolingErrorIf(
                            testGen, error_name, s, p, k
                        )
                        if None not in [sNew, pNew, kNew] and n % sparsity == 0:
                            arg_list.append(
                                (
                                    "st{}_kern{}_pad{}".format(
                                        "".join([str(x) for x in sNew]),
                                        "".join([str(x) for x in kNew]),
                                        "".join([str(x) for x in pNew]),
                                    ),
                                    [sNew, pNew, kNew],
                                )
                            )
                    elif (
                        n % sparsity == 0
                        # padding must not exceed the kernel size
                        and p[0] < k[0]
                        and p[1] < k[0]
                        and p[2] < k[1]
                        and p[3] < k[1]
                        # the padded shape must exceed the kernel size
                        and (shape[1] + p[0] + p[1]) > k[0]
                        and (shape[2] + p[2] + p[3]) > k[1]
                    ):
                        remainder_h = (shape[1] + p[0] + p[1] - k[0]) % s[0]
                        remainder_w = (shape[2] + p[2] + p[3] - k[1]) % s[1]
                        if (
                            # the parameters must produce integer exact output
                            error_name != ErrorIf.PoolingOutputShapeNonInteger
                            and remainder_h == 0
                            and remainder_w == 0
                        ) or (
                            error_name == ErrorIf.PoolingOutputShapeNonInteger
                            and (remainder_h != 0 or remainder_w != 0)
                        ):
                            arg_list.append(
                                (
                                    "st{}_kern{}_pad{}".format(
                                        "".join([str(x) for x in s]),
                                        "".join([str(x) for x in k]),
                                        "".join([str(x) for x in p]),
                                    ),
                                    [s, p, k],
                                )
                            )
                    n += 1

        return arg_list
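    # For example (illustrative values): a 4x4 spatial input with kernel 2x2,
    # stride 2x2 and no padding gives remainder (4 + 0 + 0 - 2) % 2 == 0 in
    # each axis, so the 2x2 output is integer exact and the case is kept.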

    @staticmethod
    def agCast(testGen, opName, shapeList, inDtype, error_name=None):
        arg_list = []

        # Enumerate the output types here
        if error_name == ErrorIf.WrongOutputType:
            dtypeList = TosaErrorIfArgGen.eiCastErrorIf(testGen, inDtype)
        elif inDtype == DType.INT8:
            dtypeList = [DType.BOOL, DType.INT16, DType.INT32, DType.FLOAT]
        elif inDtype == DType.INT16:
            dtypeList = [DType.BOOL, DType.INT8, DType.INT32, DType.FLOAT]
        elif inDtype == DType.INT32:
            dtypeList = [DType.BOOL, DType.INT8, DType.INT16, DType.FLOAT]
        elif inDtype == DType.BOOL:
            dtypeList = [DType.INT8, DType.INT16, DType.INT32]
        elif inDtype == DType.FLOAT:
            dtypeList = [DType.INT8, DType.INT16, DType.INT32]
        elif error_name == ErrorIf.WrongInputType:
            # Pick some potentially correct output type for incorrect input type
            dtypeList = [DType.BOOL, DType.INT8, DType.INT16, DType.FLOAT]
        else:
            raise Exception("Unexpected input dtype: {}".format(inDtype))

        for dtype in dtypeList:
            arg_list.append(("out{}".format(DTypeNames[dtype]), [dtype]))

        return arg_list

    @staticmethod
    def agRescale(testGen, opName, shapeList, inDtype, error_name=None):
        arg_list = []

        # Enumerate the output types here
        for outDtype in [
            DType.UINT8,
            DType.INT8,
            DType.INT16,
            DType.INT32,
            DType.UINT16,
        ]:
            if (
                outDtype in [DType.UINT8, DType.INT8, DType.UINT16]
                and error_name == ErrorIf.OutputZeroPointNotZero
            ):
                continue
            if (
                outDtype != DType.UINT16
                and error_name == ErrorIf.U16OutputZeroPointNotValid
            ) or (
                inDtype != DType.UINT16
                and error_name == ErrorIf.U16InputZeroPointNotValid
            ):
                # ErrorIfs only valid with UINT16
                continue
            if (
                inDtype == DType.UINT8
                and outDtype not in [DType.INT8, DType.INT16]
                and error_name != ErrorIf.WrongOutputType
            ):
                # The only output dtypes for UINT8 are INT8/INT16, skip all others
                continue
            if (
                inDtype not in [DType.INT8, DType.INT16]
                and outDtype == DType.UINT8
                and error_name != ErrorIf.WrongOutputType
            ):
                # The only input dtypes for UINT8 are INT8/INT16, skip all others
                continue
            if (
                inDtype == DType.UINT16
                and outDtype != DType.INT16
                and error_name != ErrorIf.WrongOutputType
            ):
                # The only output dtype for UINT16 is INT16, skip all others
                continue
            if (
                inDtype != DType.INT16
                and outDtype == DType.UINT16
                and error_name != ErrorIf.WrongOutputType
            ):
                # The only input dtype for UINT16 is INT16, skip all others
                continue
            if (
                error_name == ErrorIf.WrongOutputType
                and not TosaErrorIfArgGen.eiRescaleWrongOutputType(inDtype, outDtype)
            ):
                continue

            for scale32 in [False, True]:
                if error_name == ErrorIf.ScaleTrue and not scale32:
                    continue
                elif error_name == ErrorIf.ScaleNotTrue and scale32:
                    continue
                for double_round in [False, True]:
                    if error_name == ErrorIf.ScaleNotTrue and not double_round:
                        continue
                    for per_channel in [False, True]:

                        if (
                            inDtype == DType.INT48
                            and scale32
                            and error_name != ErrorIf.ScaleTrue
                        ):
                            # Illegal condition.  Must be scale32=False
                            continue
                        if (
                            double_round
                            and not scale32
                            and error_name != ErrorIf.ScaleNotTrue
                        ):
                            # Illegal condition.  ERROR_IF(!scale32 && double_round)
                            continue

                        arg_list.append(
                            (
                                "out{}_sc{}_dr{}_pc{}".format(
                                    DTypeNames[outDtype],
                                    int(scale32),
                                    int(double_round),
                                    int(per_channel),
                                ),
                                [outDtype, scale32, double_round, per_channel],
                            )
                        )

        return arg_list

    @staticmethod
    def agMul(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        if dtype is DType.INT32:
            for p in range(testGen.args.num_rand_permutations):

                shift = testGen.randInt(0, 32)

                arg_list.append(("perm{}_shift{}".format(p, shift), [shift]))
        else:
            arg_list.append(("perm0_shift0", [0]))

        return arg_list

    @staticmethod
    def agArithmeticRightShift(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        arg_list.append(("roundTrue", [True]))
        arg_list.append(("roundFalse", [False]))

        return arg_list

    # Helper function for reshape.  Gets some factors of a larger number.
    @staticmethod
    def getFactors(val, start=1):
        factors = []

        for i in range(start, int(np.sqrt(val)) + 1):
            if (val % i) == 0:
                factors.append(i)

        return factors

    @staticmethod
    def agReshape(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        origShape = shapeList[0]

        totalElements = 1
        for s in origShape:
            totalElements *= s

        # This code is NOT fast.  Fortunately, the numbers are fairly small.
        factors = TosaArgGen.getFactors(totalElements)

        for p in range(testGen.args.num_rand_permutations):
            newRank = testGen.randInt(1, 7)
            if len(factors) < newRank:
                continue

            found = True
            # escape_counter breaks while loop if it continues on for too long
            escape_counter = 0
            while found:
                newShape = []
                # Generate newShape ensuring it isn't a duplicate
                remainingElements = totalElements
                shuffledFactors = testGen.rng.permutation(factors)
                for i in range(1, newRank):
                    # pick rank-1 factors
                    newShape.append(shuffledFactors[0])
                    remainingElements = remainingElements // shuffledFactors[0]
                    shuffledFactors = testGen.rng.permutation(
                        TosaArgGen.getFactors(remainingElements)
                    )
                newShape.append(remainingElements)

                # Check for duplicates
                found = False
                for name, other_shape in arg_list:
                    if other_shape[0] == newShape:
                        found = True
                        break

                escape_counter += 1
                if escape_counter >= 100:
                    break

            if not found:
                arg_list.append(("perm{}_rank{}".format(p, newRank), [newShape]))

        return arg_list
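    # For example (illustrative values): reshaping a [4, 6] input (24
    # elements) to newRank 3 might pick factors 2 and 3, leaving
    # 24 // 2 // 3 == 4 for the last dimension and giving newShape == [2, 3, 4].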
1513
1514 @staticmethod
1515 def agTranspose(testGen, opName, shapeList, dtype, error_name=None):
1516 arg_list = []
1517
1518 ifm_shape = shapeList[0]
1519
1520 if error_name == ErrorIf.IndexOutsideBounds:
1521 incorrect_large_index = range(len(ifm_shape) + 1, 2 * len(ifm_shape) + 1)
1522 incorrect_small_index = range(-len(ifm_shape), 0)
1523 permutations = [p for p in itertools.permutations(incorrect_large_index)]
1524 permutations.extend(
1525 [p for p in itertools.permutations(incorrect_small_index)]
1526 )
1527 elif error_name == ErrorIf.IndexUsedTwice:
1528 # Create list with a duplicated index
1529 perm_range = list(range(len(ifm_shape)))
1530 index_choice = testGen.rng.choice(range(len(perm_range)))
1531 perm_range[(index_choice + 1) % len(perm_range)] = perm_range[index_choice]
1532 permutations = [p for p in itertools.permutations(perm_range)]
1533
1534 else:
1535 # Get all permutations
1536 permutations = [p for p in itertools.permutations(range(len(ifm_shape)))]
1537
1538 # Limit to possible permutations from shape dimension or argument setting
1539 limit = min(len(permutations), testGen.args.num_rand_permutations)
1540
1541 # Get random permutation generator that uses all permutations
1542 random_permutations = testGen.rng.permutation(permutations)
1543
1544 # Create list of required amount of permutations
1545 arg_list = [
1546 ("perm{}".format(p), [random_permutations[p].tolist()])
1547 for p in range(limit)
1548 ]
1549 return arg_list
1550
1551 @staticmethod
1552 def agSlice(testGen, opName, shapeList, dtype, error_name=None):
1553 arg_list = []
1554
1555 ifm_shape = shapeList[0]
1556 rank = len(ifm_shape)
1557
1558 for p in range(testGen.args.num_rand_permutations):
1559 start = []
1560 size = []
1561
1562 valid = True
1563
1564 for i in range(rank):
1565 if ifm_shape[i] > 1:
1566 start.append(testGen.randInt(0, ifm_shape[i]))
1567 size.append(testGen.randInt(0, ifm_shape[i] - start[i]))
1568
1569 # Invalid slice size?
1570 if size[i] == 0:
1571 valid = False
1572 else:
1573 start.append(0)
1574 size.append(1)
1575
1576 if valid:
1577 # If ERROR_IF test required then incorrect start, size will be returned
1578 start, size = TosaErrorIfArgGen.eiSliceErrorIf(
1579 testGen, error_name, ifm_shape, start, size
1580 )
1581 arg_list.append(("perm{}".format(p), [start, size]))
1582 return arg_list
1583
1584 @staticmethod
1585 def agTile(testGen, opName, shapeList, dtype, error_name=None):
1586 arg_list = []
1587
1588 ifm_shape = shapeList[0]
1589 rank = len(ifm_shape)
1590
1591 for p in range(testGen.args.num_rand_permutations):
1592
1593 # Pick a few random, but small multiple values
1594 # because otherwise this has a tendency to generate
1595 # enormous tensors
1596 multiples = []
1597 for i in range(rank):
1598 if ifm_shape[i] > 1000:
1599 # Multiple of 1 if ifm_shape dimension is large to reduce
1600 # tensor size
1601 multiples.append(1)
1602 elif max(ifm_shape) > 1000:
1603 multiples.append(2)
1604 else:
1605 multiples.append(testGen.randInt(1, 4))
1606 arg_list.append(("perm{}".format(p), [multiples]))
1607
1608 return arg_list
1609
    @staticmethod
    def agResize(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []
        ifm_shape = shapeList[0]

        def get_aspect_ratio_resize_params():
            common_aspect_ratios = ((3, 2), (16, 9), (4, 3))
            aspect_ratio = testGen.rng.choice(common_aspect_ratios)
            invert = testGen.rng.choice((False, True))
            letterbox = testGen.rng.choice((False, True))

            scale_y_n = aspect_ratio[0] if invert else aspect_ratio[1]
            scale_x_n = aspect_ratio[1] if invert else aspect_ratio[0]
            scale_y_d = scale_x_d = 1
            offset_x = offset_y = 0

            if letterbox:
                max_border = scale_y_n
                border_y = testGen.randInt(low=0, high=max_border)
                border_x = 0
            else:
                # Pillarboxing
                border_y = 0
                max_border = scale_x_n
                border_x = testGen.randInt(low=0, high=max_border)

            scale = (scale_y_n, scale_y_d, scale_x_n, scale_x_d)
            offset = (offset_y, offset_x)
            border = (border_y, border_x)

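            # Worked example (one assumed draw): aspect_ratio=(16, 9),
            # invert=False and letterbox=True give scale=(9, 1, 16, 1), with
            # border_y drawn from [0, 9), i.e. letterbox bars on the Y axis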
            return scale, offset, border

        def get_upscale_downscale_params():
            valid_params = False
            while not valid_params:
                upscale = testGen.rng.choice((False, True))

                # True if sampling begins from (0,0). Otherwise (-0.5,-0.5)
                origin_sampling = testGen.rng.choice((False, True))

                if upscale:
                    shift = testGen.randInt(low=1, high=4)
                    scale_x_d = scale_y_d = 1
                    scale_x_n = scale_y_n = (
                        1 << shift if origin_sampling else 2 << shift
                    )
                    border_x = border_y = 0 if origin_sampling else (1 << shift) - 1
                    offset_x = offset_y = 0 if origin_sampling else -(1 << shift) + 1
                else:
                    scale_x_n = 1
                    scale_y_n = 1

                    # Return the list of valid scale_*_d values (max value 4)
                    # for an input dimension: ifm_dim % x == 1 means
                    # (ifm_dim - 1) is divisible by x, which keeps the
                    # output dimension an integer
                    def get_valid_denom(ifm_dim):
                        return [x for x in range(1, 5) if ifm_dim % x == 1]

                    # Generate the list of valid downscale values and choose
                    # one randomly
                    valid_scale_y_ds = get_valid_denom(ifm_shape[1])
                    valid_scale_x_ds = get_valid_denom(ifm_shape[2])

                    if not valid_scale_y_ds and not valid_scale_x_ds:
                        # Bad parameters, skip
                        continue

                    if not valid_scale_y_ds:
                        scale_y_d = 1
                    else:
                        scale_y_d = testGen.rng.choice(valid_scale_y_ds)

                    if not valid_scale_x_ds:
                        scale_x_d = 1
                    else:
                        scale_x_d = testGen.rng.choice(valid_scale_x_ds)

                    border_x = border_y = 0
                    offset_y = testGen.randInt(0, 16 * scale_y_n)
                    offset_x = testGen.randInt(0, 16 * scale_x_n)
                valid_params = True

            scale = (scale_y_n, scale_y_d, scale_x_n, scale_x_d)
            offset = (offset_y, offset_x)
            border = (border_y, border_x)
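            # Worked example (one assumed draw): upscale with shift=1 and
            # origin_sampling=True gives scale=(2, 1, 2, 1), offset=(0, 0)
            # and border=(0, 0); the output height becomes (IH - 1) * 2 + 1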
            return scale, offset, border

        def get_rand_params():
            # Scale
            scale_y_n = testGen.randInt(low=1, high=(1 << 11))
            scale_x_n = testGen.randInt(low=1, high=(1 << 11))

            scale_y_d = testGen.randInt(low=1, high=(16 * scale_y_n))
            scale_x_d = testGen.randInt(low=1, high=(16 * scale_x_n))

            # Offsets and borders within the scale
            offset_y = testGen.randInt(low=-scale_y_n, high=(16 * scale_y_n))
            offset_x = testGen.randInt(low=-scale_x_n, high=(16 * scale_x_n))
            border_y = testGen.randInt(low=(-16 * scale_y_n), high=scale_y_n)
            border_x = testGen.randInt(low=(-16 * scale_x_n), high=scale_x_n)

            scale = (scale_y_n, scale_y_d, scale_x_n, scale_x_d)
            offset = (offset_y, offset_x)
            border = (border_y, border_x)
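            # These ranges appear to mirror the RESIZE ERROR_IF bounds in
            # the TOSA specification: offset in [-scale_n, 16 * scale_n)
            # and border in [-16 * scale_n, scale_n)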
            return scale, offset, border

        for mode in [ResizeMode.NEAREST, ResizeMode.BILINEAR]:
            # Exclude illegal {mode, type} configurations. Pick legal output types
            if mode == ResizeMode.NEAREST and dtype == DType.INT8:
                outputDTypeList = [DType.INT8]
            elif mode == ResizeMode.NEAREST and dtype == DType.INT16:
                outputDTypeList = [DType.INT16]
            elif mode == ResizeMode.BILINEAR and dtype == DType.INT8:
                outputDTypeList = [DType.INT32]
            elif mode == ResizeMode.BILINEAR and dtype == DType.INT16:
                outputDTypeList = [DType.INT48]
            elif dtype == DType.FLOAT:
                outputDTypeList = [DType.FLOAT]
            elif error_name == ErrorIf.WrongInputType:
                # If an incorrect input type is used then we set a 'correct'
                # output type to avoid other errors
                outputDTypeList = [DType.INT8, DType.INT16, DType.INT32]
            else:
                continue

            arg_str = "mode{}_out{}_sc{}x{}x{}x{}_off{}x{}_bor{}x{}"
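            # Example test name (assuming typeStr(DType.INT32) yields "i32"):
            #   "modeB_outi32_sc2x1x2x1_off0x0_bor0x0"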

            for outputDType in outputDTypeList:
                perm = 0
                while perm < testGen.args.num_rand_permutations:
                    # Randomly choose the type of params we are testing
                    _rnd_param_fn = testGen.rng.choice(
                        (
                            get_rand_params,
                            get_upscale_downscale_params,
                            get_aspect_ratio_resize_params,
                        )
                    )
                    scale, offset, border = _rnd_param_fn()

                    # Expand params for bounds-checking
                    (scale_y_n, scale_y_d, scale_x_n, scale_x_d) = scale
                    (offset_y, offset_x) = offset
                    (border_y, border_x) = border

                    # Make sure output dimensions OH and OW are integers
                    partial_output_y = (
                        (ifm_shape[1] - 1) * scale_y_n - offset_y + border_y
                    )
                    partial_output_x = (
                        (ifm_shape[2] - 1) * scale_x_n - offset_x + border_x
                    )
                    if error_name == ErrorIf.ResizeOutputShapeNonInteger:
                        if (
                            partial_output_y % scale_y_d == 0
                            and partial_output_x % scale_x_d == 0
                        ):
                            # Skip this test as it doesn't produce a
                            # non-integer output
                            perm += 1
                            continue
                    else:
                        # Reduce the denominators until both divisions are exact
                        while partial_output_y % scale_y_d != 0:
                            scale_y_d -= 1
                        while partial_output_x % scale_x_d != 0:
                            scale_x_d -= 1

                    output_y = partial_output_y // scale_y_d + 1
                    output_x = partial_output_x // scale_x_d + 1
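                    # This is the TOSA RESIZE output-size relation:
                    #   OH = ((IH - 1) * scale_y_n - offset_y + border_y) // scale_y_d + 1
                    # e.g. (assumed values) IH=8, scale_y=(2, 1), offset_y=0
                    # and border_y=0 give OH = 7 * 2 // 1 + 1 = 15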

                    if (
                        output_y >= testGen.args.max_resize_output_dim
                        or output_x >= testGen.args.max_resize_output_dim
                    ) and error_name is None:
                        # Skip positive tests whose output dims would be too
                        # high, to avoid high test latency and OOM issues
                        perm += 1
                        continue

                    if (
                        output_y <= 0
                        or output_y >= MAX_RESIZE_DIMENSION
                        or output_x <= 0
                        or output_x >= MAX_RESIZE_DIMENSION
                    ):
                        # Output dimensions out of scope
                        if error_name is not None and perm > 0:
                            # As long as we have one ERROR_IF test, don't worry
                            # about creating all the other permutations
                            perm += 1
                        continue

                    if error_name == ErrorIf.ResizeOutputShapeMismatch and (
                        (
                            output_y + scale_y_d >= MAX_RESIZE_DIMENSION
                            and output_y - scale_y_d < 1
                        )
                        or (
                            output_x + scale_x_d >= MAX_RESIZE_DIMENSION
                            and output_x - scale_x_d < 1
                        )
                    ):
                        # Can't create a negative test with these params as
                        # they would produce an invalid output size
                        if perm > 0:
                            perm += 1
                        continue

                    scale = [scale_y_n, scale_y_d, scale_x_n, scale_x_d]
                    offset = [offset_y, offset_x]
                    border = [border_y, border_x]

                    # Common for all data types
                    if error_name is not None:
                        (
                            scale,
                            offset,
                            border,
                            outputDTypeNew,
                        ) = TosaErrorIfArgGen.eiResizeErrorIf(
                            testGen,
                            error_name,
                            mode,
                            dtype,
                            shapeList,
                            outputDType,
                            scale,
                            offset,
                            border,
                        )
                    else:
                        outputDTypeNew = outputDType

                    arg_to_append = (
                        arg_str.format(
                            "N" if mode == ResizeMode.NEAREST else "B",
                            testGen.typeStr(outputDTypeNew),
                            scale[0],
                            scale[1],
                            scale[2],
                            scale[3],
                            offset[0],
                            offset[1],
                            border[0],
                            border[1],
                        ),
                        [
                            mode,
                            scale,
                            offset,
                            border,
                            dtype,
                            outputDTypeNew,
                        ],
                    )
                    if arg_to_append in arg_list:
                        # Skip already generated test params; perm is not
                        # incremented, so another draw is attempted
                        continue

                    # Valid permutation
                    perm += 1
                    arg_list.append(arg_to_append)
        return arg_list

    @staticmethod
    def agTable(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        if dtype == DType.INT8:
            # A 256-entry table covers every INT8 input value
            table = np.int32(
                testGen.rng.integers(low=-128, high=128, size=[256])
            ).tolist()
        else:  # INT16
            # 513 entries: 512 segment end-points plus one extra value so
            # the final segment can be interpolated
            table = np.int32(
                testGen.rng.integers(low=-32768, high=32768, size=[513])
            ).tolist()

        arg_list.append(
            (
                "",
                [table],
            )
        )
        return arg_list

    @staticmethod
    def agCondIf(testGen, opName, shapeList, dtype, error_name=None):
        # CondIf generates the condition values here.
        # Convert to tensors in the build function, along with the
        # then and else blocks
        arg_list = []

        for c in [False, True]:
            arg_list.append(("cond{}".format(int(c)), [c]))
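        # Produces [("cond0", [False]), ("cond1", [True])]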
        return arg_list

    @staticmethod
    def agWhileLoop(testGen, opName, shapeList, dtype, error_name=None):
        # While loop: 0 iterations, 1, more than 1
        arg_list = []

        for iterations in [0, 1, 4]:
            arg_list.append(("iter{}".format(iterations), [iterations]))
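        # Produces [("iter0", [0]), ("iter1", [1]), ("iter4", [4])]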
        return arg_list