# Copyright (c) 2021-2022, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import itertools
import math

import numpy as np
import serializer.tosa_serializer as ts
from generator.tosa_error_if import ErrorIf
from generator.tosa_error_if import TosaErrorIfArgGen
from serializer.tosa_serializer import DTypeNames
from tosa.DType import DType
from tosa.Op import Op
from tosa.ResizeMode import ResizeMode

# DTypeNames, DType, Op and ResizeMode are convenience variables to the
# flatc-generated types that should be enums, but aren't


class TosaQuantGen:
    """QuantizedInfo random generator helper functions.

    Specify with 'qgen': in the operator definition.
    """

    def __init__(self):
        pass

    @staticmethod
    def getQinfo(testGen, dtype, error_name=None):

        if dtype == DType.INT8:
            return testGen.randInt(-128, 128)
        elif dtype == DType.UINT8:
            return testGen.randInt(0, 256)
        elif error_name in [
            ErrorIf.InputZeroPointNotZero,
            ErrorIf.WeightZeroPointNotZero,
            ErrorIf.OutputZeroPointNotZero,
        ]:
            zero_point = testGen.randInt(-128, 128)
            if zero_point == 0:
                zero_point = 1
            return zero_point
        return 0

    @staticmethod
    def qgUnary(testGen, op, dtype, error_name=None):
        qinfo = ts.TosaSerializerQuantInfo()
        if error_name == ErrorIf.InputZeroPointNotZero:
            qinfo.UnaryQuantInfo(
                TosaQuantGen.getQinfo(testGen, dtype, error_name),
                TosaQuantGen.getQinfo(testGen, dtype),
            )
        elif error_name == ErrorIf.OutputZeroPointNotZero:
            qinfo.UnaryQuantInfo(
                TosaQuantGen.getQinfo(testGen, dtype),
                TosaQuantGen.getQinfo(testGen, dtype, error_name),
            )
        else:
            qinfo.UnaryQuantInfo(
                TosaQuantGen.getQinfo(testGen, dtype),
                TosaQuantGen.getQinfo(testGen, dtype),
            )
        return qinfo

    @staticmethod
    def qgConv(testGen, op, dtype_or_dtypeList, error_name=None):
        qinfo = ts.TosaSerializerQuantInfo()
        if isinstance(dtype_or_dtypeList, list):
            # a list of [input, weights, accumulator] dtypes
            dtypeList = dtype_or_dtypeList
        else:
            # an int, [input, weights, accumulator] dtypes are the same
            dtypeList = [dtype_or_dtypeList] * 3

        if error_name == ErrorIf.InputZeroPointNotZero:
            input_zp = TosaQuantGen.getQinfo(testGen, dtypeList[0], error_name)
            weights_zp = TosaQuantGen.getQinfo(testGen, dtypeList[1])
        elif error_name == ErrorIf.WeightZeroPointNotZero:
            input_zp = TosaQuantGen.getQinfo(testGen, dtypeList[0])
            weights_zp = TosaQuantGen.getQinfo(testGen, dtypeList[1], error_name)
        else:
            input_zp = TosaQuantGen.getQinfo(testGen, dtypeList[0])
            weights_zp = TosaQuantGen.getQinfo(testGen, dtypeList[1])

        qinfo.ConvQuantInfo(input_zp, weights_zp)
        return qinfo

    @staticmethod
    def qgMatmul(testGen, op, dtype, error_name=None):
        qinfo = ts.TosaSerializerQuantInfo()
        if error_name == ErrorIf.InputZeroPointNotZero:
            qinfo.MatMulQuantInfo(
                TosaQuantGen.getQinfo(testGen, dtype, error_name),
                TosaQuantGen.getQinfo(testGen, dtype, error_name),
            )
        else:
            qinfo.MatMulQuantInfo(
                TosaQuantGen.getQinfo(testGen, dtype),
                TosaQuantGen.getQinfo(testGen, dtype),
            )
        return qinfo

    @staticmethod
    def qgPad(testGen, op, dtype, error_name=None):
        qinfo = ts.TosaSerializerQuantInfo()
        if error_name == ErrorIf.InputZeroPointNotZero:
            qinfo.PadQuantInfo(TosaQuantGen.getQinfo(testGen, dtype, error_name))
        else:
            qinfo.PadQuantInfo(TosaQuantGen.getQinfo(testGen, dtype))
        return qinfo

    @staticmethod
    def computeMultiplierAndShift(scaleFp, scale32):
        # Derived from computeMultiplierAndShiftTosaScale32
        # Provide a floating-point scaling factor and the scale32 parameter
        # to compute the multiplier and shift

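        # Worked example of the scale32 path: scaleFp = 0.5 gives
        # math.frexp(0.5) == (0.5, 0), so multiplier = round(0.5 * (1 << 31))
        # = 1 << 30 and shift = -0 + 31 = 31, i.e. 0.5 == multiplier * 2**-shift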
        if scale32:
            scaleBits = 31
        else:
            scaleBits = 15

        m, shift = math.frexp(scaleFp)

        if scaleFp < 0.0:
            m = -m

        multiplier = round(m * (1 << scaleBits))
        assert multiplier <= (1 << scaleBits)

        if multiplier == (1 << scaleBits):
            multiplier = multiplier // 2
            shift = shift + 1

        shift = (-shift) + scaleBits
        # print('scalefp {} scaleBits {} m {} mult {} shift {}'.format(
        #     scaleFp, scaleBits, m, multiplier, shift))

        # Adjust multiplier such that shift is in allowed value range.
        if shift == 0:
            multiplier = multiplier // 4
            shift = shift + 2
        elif shift == 1:
            multiplier = multiplier // 2
            shift = shift + 1
        elif shift == 63:
            multiplier = multiplier * 2
            shift = shift - 1

        assert multiplier <= (1 << scaleBits)
        assert shift >= 2 and shift <= 62

        return multiplier, shift


class TosaTensorGen:
    """Tensor generators create a shape list for the placeholder and const tensor
    data operands for the operator.

    The actual random data is generated separately for each test.
    """

    def __init__(self):
        pass

    @staticmethod
    def tgBasic(testGen, opName, rank, error_name=None):
        pl, const = opName["operands"]
        shape = testGen.makeShape(rank)

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            shape = TosaErrorIfArgGen.eiRestrictDimensions(shape)

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

            if error_name == ErrorIf.RankMismatch:
                if rank == 1 and i != 1:
                    shape = testGen.makeShape(rank + testGen.rng.choice([1, 2, 3]))
                elif i != 1:
                    shape = testGen.makeShape(rank + testGen.rng.choice([-1, 1]))

        return shape_list

    @staticmethod
    def tgNHWC(testGen, opName, rank, error_name=None):
        pl, const = opName["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 4

        shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            shape[0] = (shape[0] % testGen.args.max_batch_size) + 1

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name and error_name != ErrorIf.MaxDimExceeded:
            shape = TosaErrorIfArgGen.eiRestrictDimensions(shape)

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgScatter(testGen, opName, rank, error_name=None):
        pl, const = opName["operands"]

        assert pl == 2
        assert const == 0
        if error_name != ErrorIf.WrongRank:
            assert rank == 3

        values_in_shape = testGen.makeShape(rank)

        # ignore max batch size if target shape is set
        if testGen.args.max_batch_size and not testGen.args.target_shapes:
            values_in_shape[0] = (values_in_shape[0] % testGen.args.max_batch_size) + 1

        W = testGen.randInt(
            testGen.args.tensor_shape_range[0], testGen.args.tensor_shape_range[1]
        )
        # Constrict W if one dimension is too large to keep tensor size reasonable
        if max(values_in_shape) > 5000:
            W = testGen.randInt(0, 16)

        input_shape = [values_in_shape[0], W, values_in_shape[2]]

        shape_list = []
        shape_list.append(values_in_shape.copy())
        shape_list.append(input_shape.copy())

        return shape_list

    @staticmethod
    def tgBroadcastFuzz(testGen, op, rank, error_name=None):
        shape = testGen.makeShape(rank)

        pl, const = op["operands"]

        shape_list = []

        # Choose one of the inputs to broadcast
        # Note: Simplifies OutputShaper code if we don't change first shape for errors
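        # e.g. for shape [3, 4] the chosen operand may become [3, 1] or [1, 4],
        # which still broadcasts against the unmodified [3, 4] shapes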
        bcast_idx = testGen.randInt(0 if error_name is None else 1, pl + const)
        for i in range(pl + const):
            shape_bcast = shape.copy()

            # If this is the chosen input, pick a random index to broadcast
            if i == bcast_idx:
                fuzz_idx = testGen.randInt(0, rank)
                if error_name == ErrorIf.DimensionMismatch:
                    shape_bcast[fuzz_idx] += 1
                elif error_name == ErrorIf.RankMismatch:
                    # Add one rank to the shape (or more for rank of 1)
                    extra_ranks = testGen.rng.choice([1, 2, 3]) if rank == 1 else 1
                    shape_bcast = np.concatenate(
                        (shape_bcast, testGen.makeShape(extra_ranks))
                    )
                    if rank != 1:
                        # Either keep the extra rank, or remove it
                        new_len = testGen.rng.choice([-2, len(shape_bcast)])
                        shape_bcast = shape_bcast[:new_len]
                else:
                    shape_bcast[fuzz_idx] = 1

            shape_list.append(shape_bcast)

        return shape_list

    @staticmethod
    def tgConv2D(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 4

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            ifm_shape = TosaErrorIfArgGen.eiRestrictDimensions(
                ifm_shape, max_dim=24, max_items=10000
            )

        # Get the filter height/width from the operator parameters
        filter_hw = op["filter"]

        # Generate a random OFM depth
        ofm_depth = testGen.makeShape(1)[0]

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        # The bias is OC
        bias_shape = np.asarray([ofm_depth])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgConv3D(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 5

        # IFM dimensions are NDHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            ifm_shape = TosaErrorIfArgGen.eiRestrictDimensions(
                ifm_shape, max_dim=24, max_items=10000
            )

        # Get the filter depth/height/width from the operator parameters
        filter_dhw = op["filter"]

        # Generate a random OFM channel
        ofm_channel = testGen.makeShape(1)[0]

        # The filter dimensions are ODHWI
        filter_shape = np.asarray(
            [ofm_channel, filter_dhw[0], filter_dhw[1], filter_dhw[2], ifm_shape[4]]
        )

        # The bias is OC
        bias_shape = np.asarray([ofm_channel])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgTransposeConv2D(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 4

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            ifm_shape = TosaErrorIfArgGen.eiRestrictDimensions(
                ifm_shape, max_dim=24, max_items=10000
            )

        # Get the filter height/width from the operator parameters
        filter_hw = op["filter"]

        # Generate a random OFM depth
        ofm_depth = testGen.makeShape(1)[0]

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        # The bias is OC
        bias_shape = np.asarray([ofm_depth])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgDepthwiseConv2D(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 4
        assert pl == 1 and const == 2

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            ifm_shape = TosaErrorIfArgGen.eiRestrictDimensions(
                ifm_shape, max_dim=24, max_items=10000
            )

        # Get the filter height/width from the operator parameters
        # Filter is KH, KW, C, M
        filter_hw = op["filter"]

        # Generate a random OFM depth, but don't let it get too big because
        # the output depth is M * C
        filter_m = (
            testGen.makeShape(1)[0] % (testGen.args.tensor_shape_range[1] // 4)
        ) + 1

        # The filter dimensions are HWCM
        filter_shape = np.asarray([filter_hw[0], filter_hw[1], ifm_shape[3], filter_m])

        # The bias is M * C
        bias_shape = np.asarray([ifm_shape[3] * filter_m])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgFullyConnected(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 2

        input_shape = testGen.makeShape(rank)

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            input_shape = TosaErrorIfArgGen.eiRestrictDimensions(input_shape)

        filter_oc = testGen.rng.integers(
            low=testGen.args.tensor_shape_range[0],
            high=testGen.args.tensor_shape_range[1],
            size=1,
        )[0]
        filter_shape = np.asarray([filter_oc, input_shape[1]])

        bias_shape = np.asarray([filter_oc])

        return [input_shape, filter_shape, bias_shape]

    @staticmethod
    def tgMatmul(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 3
        assert pl == 2 and const == 0

        a_shape = testGen.makeShape(rank)

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            a_shape = TosaErrorIfArgGen.eiRestrictDimensions(a_shape)

        # Get a random number for b_oc even if target shape is defined
        b_oc = np.int32(
            testGen.rng.integers(
                low=testGen.args.tensor_shape_range[0],
                high=testGen.args.tensor_shape_range[1],
                size=1,
            )
        )[0]
        # If N or H is large let b_oc be 1 to reduce output tensor size
        if max(a_shape) > 1000:
            b_oc = 1

        b_shape = np.asarray([a_shape[0], a_shape[2], b_oc])
        return [a_shape, b_shape]

    @staticmethod
    def tgConcat(testGen, opName, rank, error_name=None):
        pl, const = opName["operands"]
        shape = testGen.makeShape(rank)

        # Create extra tensors to concat.
        # Take into account value of pl when getting maximum number of concats
        num_tensors = testGen.randInt(0, 4)
        shape_list = []
        for i in range(pl + const + num_tensors):
            if error_name == ErrorIf.ConcatInputRankMismatch and i != 0:
                remove = testGen.rng.choice([True, False])
                wrongShape = shape.copy()

                if remove and len(shape) > 1:
                    wrongShape = wrongShape[1:]
                else:
                    wrongShape = list(wrongShape)
                    wrongShape.append(testGen.rng.integers(1, 10))

                shape_list.append(wrongShape)
            else:
                shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgConcatConstInput(testGen, shapeList, axis, error_name=None):
        if error_name in [
            ErrorIf.AxisSmallerZero,
            ErrorIf.AxisLargerRank,
            ErrorIf.ConcatInputRankMismatch,
        ]:
            return shapeList

        # Split concat shape along axis to allow for multiple const inputs
        # without making too many large tensors
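        # e.g. with shapeList[0] == [2, 8, 3], four inputs and axis == 1, the
        # returned shapes are [2, 8, 3], [2, 4, 3], [2, 2, 3] and [2, 2, 3]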
        if len(shapeList) == 2 or shapeList[0][axis] < len(shapeList):
            # If axis can't be split we still need to invalidate other dimensions
            if error_name == ErrorIf.ConcatInputDimMismatch:
                for shape in shapeList[1:]:
                    # Negative test shapeLists are created individually for each test,
                    # so no need to copy the shape before altering it.
                    shape[(axis + 1) % len(shape)] += testGen.rng.integers(5, 10)
            return shapeList

        # Create copy of shape we are going to split (so we don't alter shapeList)
        shape = shapeList[0].copy()
        # Add original shape as first input
        new_shapeList = [shape.copy()]
        length_on_axis = shape[axis]
        remaining_length = length_on_axis
        for i in range(len(shapeList) - 2):
            # Calculate split on axis and remaining value
            split_shape_val = int(shape[axis] / 2)
            remaining_length = remaining_length - split_shape_val

            # Append new shape, and set remaining shape
            shape[axis] = split_shape_val
            new_shapeList.append(shape.copy())

            # invalidate dimensions
            if error_name == ErrorIf.ConcatInputDimMismatch:
                shape[(axis + 1) % len(shape)] += testGen.rng.integers(5, 10)
            else:
                shape[axis] = remaining_length

            if i == len(shapeList) - 3:
                new_shapeList.append(shape.copy())

        return new_shapeList


class TosaTensorValuesGen:
    """Tensor Value generators create the random data for each test."""

    def __init__(self):
        pass

    @staticmethod
    def tvgDefault(testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None):
        pCount, cCount = op["operands"]

        tens = []
        tens.extend(
            testGen.buildPlaceholderTensors(shapeList[0:pCount], dtypeList[0:pCount])
        )
        tens.extend(testGen.buildConstTensors(shapeList[pCount:], dtypeList[pCount:]))

        return tens

    @staticmethod
    def tvgNegate(testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None):
        if dtypeList[0] != DType.FLOAT and error_name is None:
            pCount, cCount = op["operands"]
            assert (
                pCount == 1 and cCount == 0
            ), "Op.NEGATE must have 1 placeholder, 0 consts"
            # Must create tensors with values within negatable ranges
            if dtypeList[0] == DType.INT8:
                # Values must be negatable and, after adjusting by input_zp,
                # still representable within int8
                # For use: qinfo.ints[0][1] = input_zp, qinfo.ints[1][1] = output_zp
                max_val = min(127, 127 + qinfo.ints[0][1])
                min_val = max(-127, -127 + qinfo.ints[0][1])
            elif dtypeList[0] == DType.INT16:
                max_val = 32767
                min_val = -max_val
            else:
                assert (
                    dtypeList[0] == DType.INT32
                ), "Op.NEGATE found with unsupported input type"
                max_val = (1 << 31) - 1
                min_val = -max_val
            arr = np.int32(
                testGen.rng.integers(low=min_val, high=(max_val + 1), size=shapeList[0])
            )
            placeholders = []
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], arr)
            )
            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name
            )

    @staticmethod
    def tvgAddSub(testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None):
        if dtypeList[0] == DType.INT32 and error_name is None:
            # Make sure the operation does not cause value saturation - where
            # the number wraps due to limited number of bits to store the answer
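            # e.g. a = 0x7FFFFFFF and b = 1 would wrap around to -2**31 in
            # int32; the int64 result below is used to detect and clip such cases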
            pCount, cCount = op["operands"]
            assert (
                pCount == 2 and cCount == 0
            ), "Op.ADD / Op.SUB must have 2 placeholders, 0 consts"
            placeholders = []
            add = op["op"] == Op.ADD
            a_arr = testGen.getRandTensor(shapeList[0], dtypeList[0])
            b_arr = testGen.getRandTensor(shapeList[1], dtypeList[1])
            if add:
                res_arr = np.add(a_arr, b_arr, dtype=np.int64)
            else:
                res_arr = np.subtract(a_arr, b_arr, dtype=np.int64)

            # Work out the saturation limits
            max_i32 = (1 << 31) - 1
            min_i32 = -(1 << 31)
            max_arr = np.full(shapeList[1], max_i32)
            min_arr = np.full(shapeList[1], min_i32)

            # Find how much the values exceed the maximum/minimum
            sat_max_arr = np.maximum(res_arr - max_arr, 0)
            sat_min_arr = np.minimum(res_arr - min_arr, 0)

            if not add:
                # Swap the saturation values and negate them, as we need to
                # perform the opposite operations
                sat_max_arr, sat_min_arr = -sat_min_arr, -sat_max_arr

            # Create new array of unsaturated values by clipping values as needed
            b_unsat_arr = b_arr
            if (sat_max_arr != 0).any():
                # Clip values that cause saturation
                b_unsat_arr = np.subtract(b_unsat_arr, sat_max_arr, dtype=np.int32)
                # Reduce axes in unsaturated tensor to match original tensor
                for axis, dim in enumerate(b_arr.shape):
                    if dim != b_unsat_arr.shape[axis]:
                        assert (
                            dim == 1
                        ), "Op.ADD / SUB dimension must be 1 or matching to be broadcastable"
                        b_unsat_arr = np.amin(b_unsat_arr, axis=axis, keepdims=True)

            if (sat_min_arr != 0).any():
                # Clip values that cause saturation
                b_unsat_arr = np.subtract(b_unsat_arr, sat_min_arr, dtype=np.int32)
                # Reduce axes in unsaturated tensor to match original tensor
                for axis, dim in enumerate(b_arr.shape):
                    if dim != b_unsat_arr.shape[axis]:
                        assert (
                            dim == 1
                        ), "Op.ADD / SUB dimension must be 1 or matching to be broadcastable"
                        b_unsat_arr = np.amax(b_unsat_arr, axis=axis, keepdims=True)

            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
            )
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], b_unsat_arr)
            )

            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name
            )

    @staticmethod
    def tvgCondIfWhileLoop(
        testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None
    ):
        if dtypeList[0] in (
            DType.INT32,
            DType.INT16,
            DType.INT8,
        ):
            # Limit input tensors with cond_if_binary or while_loop to stop
            # saturation of add/sub ops with int32 and keep all logical shift
            # values between 0 to 31 for int16 or int8
            pCount, cCount = op["operands"]
            pRemain = pCount
            placeholders = []
            for idx, shape in enumerate(shapeList[:]):
                if dtypeList[0] == DType.INT32:
                    arr = testGen.getRandTensor(shapeList[idx], DType.INT16)
                else:
                    arr = np.int32(
                        testGen.rng.integers(low=0, high=32, size=shapeList[idx])
                    )
                if pRemain > 0:
                    placeholders.append(
                        testGen.ser.addPlaceholder(shape, dtypeList[idx], arr)
                    )
                    pRemain -= 1
                else:
                    placeholders.append(
                        testGen.ser.addConst(shape, dtypeList[idx], arr)
                    )

            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name
            )

    @staticmethod
    def tvgArithmeticRightShift(
        testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None
    ):
        pCount, cCount = op["operands"]
        # Force the value of operand[1] to be within [0, num_bits)
        assert (
            pCount == 2 and cCount == 0
        ), "Op.ArithmeticRightShift must have 2 placeholders, 0 consts"

        placeholders = []
        for idx, shape in enumerate(shapeList[:]):
            if idx == 1:
                if dtypeList[idx] == DType.INT8:
                    arr = np.int32(testGen.rng.integers(low=0, high=8, size=shape))
                elif dtypeList[idx] == DType.INT16:
                    arr = np.int32(testGen.rng.integers(low=0, high=16, size=shape))
                elif dtypeList[idx] == DType.INT32:
                    arr = np.int32(testGen.rng.integers(low=0, high=32, size=shape))
                elif error_name == ErrorIf.WrongInputType:
                    arr = np.int32(testGen.rng.integers(low=0, high=8, size=shape))
                else:
                    raise Exception("OpArithmeticRightShift: invalid input dtype")
            else:
                arr = testGen.getRandTensor(shape, dtypeList[idx])
            placeholders.append(testGen.ser.addPlaceholder(shape, dtypeList[idx], arr))

        return placeholders

    @staticmethod
    def tvgSelect(testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None):
        # Set datatype of condition tensor to boolean
        dtypeList[0] = DType.BOOL

        return TosaTensorValuesGen.tvgDefault(
            testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name
        )

    @staticmethod
    def tvgIntDiv(testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None):
        if error_name is None:
            pCount, cCount = op["operands"]
            assert (
                pCount == 2 and cCount == 0
            ), "Op.INTDIV must have 2 placeholders, 0 consts"

            placeholders = []

            # Two invalid cases for Op.INTDIV:
            # 1. divisor == 0
            # 2. dividend == -(1<<31) and divisor == -1
            while True:
                dividend_arr = testGen.getRandTensor(shapeList[0], dtypeList[0])
                divisor_arr = testGen.getRandTensor(shapeList[1], dtypeList[1])

                if (divisor_arr == 0).any():
                    continue

                if (dividend_arr == -(2**31)).any() and (divisor_arr == -1).any():
                    continue

                break

            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], dividend_arr)
            )
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], divisor_arr)
            )

            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name
            )

    @staticmethod
    def tvgMul(testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None):
        if error_name is None:
            pCount, cCount = op["operands"]
            assert (
                pCount == 2 and cCount == 0
            ), "Op.MUL must have 2 placeholders, 0 consts"

            tens = []
            if dtypeList[0] == DType.FLOAT:
                tens.extend(testGen.buildPlaceholderTensors(shapeList[:], dtypeList[:]))
            else:
                placeholders = []

                # Make sure the multiply result fits within int32 range
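                # Sketch of the semantics being guarded against: for shift > 0
                # the result is (a * b + (1 << (shift - 1))) >> shift, computed
                # at 64 bits below; operands are halved until the result fits int32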
                shift = testArgs[0]
                if dtypeList[0] == DType.INT8:
                    num_bits = 8
                elif dtypeList[0] == DType.INT16:
                    num_bits = 16
                elif dtypeList[0] == DType.INT32:
                    num_bits = 32
                elif error_name == ErrorIf.WrongInputType:
                    num_bits = 8
                else:
                    raise Exception("OpMul: invalid input dtype")

                for idx, shape in enumerate(shapeList[:]):
                    low = -(2 ** (num_bits - 1))
                    high = (2 ** (num_bits - 1)) - 1

                    a_arr = np.int32(
                        testGen.rng.integers(low=low, high=high, size=shapeList[0])
                    )
                    b_arr = np.int32(
                        testGen.rng.integers(low=low, high=high, size=shapeList[1])
                    )

                i = 0
                while True:

                    a_arr_64 = a_arr.astype(np.int64)
                    b_arr_64 = b_arr.astype(np.int64)

                    if shift > 0:
                        rounding = 1 << (shift - 1)
                        result_arr = ((a_arr_64 * b_arr_64) + rounding) >> shift
                    else:
                        result_arr = a_arr_64 * b_arr_64

                    if (result_arr > -(2**31)).all() and (
                        result_arr <= ((2**31) - 1)
                    ).all():
                        break

                    i = i + 1
                    a_arr = a_arr // 2
                    b_arr = b_arr // 2

                placeholders.append(
                    testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
                )
                placeholders.append(
                    testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr)
                )

                tens.extend(placeholders)

            return tens
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name
            )

    @staticmethod
    def tvgConcat(testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None):
        count = len(shapeList) - testGen.args.num_const_inputs_concat
        if count < 1:
            count = 1
        if testGen.args.num_const_inputs_concat == 0:
            count = len(shapeList)

        # Ensure axis is an int
        testArgs[0] = int(testArgs[0])

        shapeList = TosaTensorGen.tgConcatConstInput(
            testGen, shapeList, testArgs[0], error_name
        )

        tens = []
        tens.extend(
            testGen.buildPlaceholderTensors(shapeList[0:count], dtypeList[0:count])
        )
        tens.extend(testGen.buildConstTensors(shapeList[count:], dtypeList[count:]))

        return tens

    @staticmethod
    def tvgLogicalShift(
        testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None
    ):
        pCount, cCount = op["operands"]
        assert (
            pCount == 2 and cCount == 0
        ), "Op.LOGICAL_LEFT_SHIFT or Op.LOGICAL_RIGHT_SHIFT must have 2 placeholders, 0 consts"
        values_arr = testGen.getRandTensor(shapeList[0], dtypeList[0])
        shift_arr = np.int32(testGen.rng.integers(low=0, high=32, size=shapeList[1]))
        placeholders = []
        placeholders.append(
            testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], values_arr)
        )
        placeholders.append(
            testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], shift_arr)
        )

        return placeholders

    @staticmethod
    def tvgEqual(testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None):
        if error_name is None:
            pCount, cCount = op["operands"]
            assert (
                pCount == 2 and cCount == 0
            ), "Op.EQUAL must have 2 placeholders, 0 consts"
            a_arr = testGen.getRandTensor(shapeList[0], dtypeList[0])
            b_arr = testGen.getRandTensor(shapeList[1], dtypeList[1])
            # Using random numbers means that it will be very unlikely that
            # there are any matching (equal) values, therefore force that
            # there are twice the number of matching values as the tensor rank
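            # (e.g. rank 2 forces 4 positions where a == b)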
            for num in range(0, len(shapeList[0]) * 2):
                a_index = []
                b_index = []
                # Choose an index in each axis for the whole shape
                for axis in range(0, len(shapeList[0])):
                    # Index can be up to the largest dimension in both shapes
                    index = np.int32(
                        testGen.rng.integers(
                            0, max(shapeList[0][axis], shapeList[1][axis])
                        )
                    )
                    # Reduce the index down to a shape's dim for broadcasting
                    a_index.append(min(shapeList[0][axis] - 1, index))
                    b_index.append(min(shapeList[1][axis] - 1, index))

                a_arr[tuple(a_index)] = b_arr[tuple(b_index)]

            placeholders = []
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
            )
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr)
            )
            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name
            )

    @staticmethod
    def tvgReduceSum(
        testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None
    ):
        if dtypeList[0] == DType.INT32:
            pCount, cCount = op["operands"]
            assert (
                pCount == 1 and cCount == 0
            ), "Op.REDUCE_SUM must have 1 placeholder, 0 consts"
            # Limit values so that the sum cannot exceed the range of an int32 during
            # summation of any axis
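            # e.g. max(shape) == 20 gives range_val = 2**31 // 20, so any axis
            # sum is bounded by 20 * range_val <= 2**31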
            range_val = int((1 << 31) / max(shapeList[0]))
            values_arr = np.int32(
                testGen.rng.integers(low=-range_val, high=range_val, size=shapeList[0])
            )
            placeholders = []
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], values_arr)
            )
            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name
            )


class TosaArgGen:
    """Argument generators create exhaustive or random lists of attributes for
    operators that take attributes or other parameters.

    The return value is a list of (descriptive_name, [arglist]) tuples where
    the descriptive_name is appended to the test name and the arglist is expanded
    as arguments to the operator build function.
    """

    def __init__(self):
        pass

    @staticmethod
    def agNone(testGen, opName, shapeList, dtype, error_name=None):
        """A trivial argument generator for operators that don't take any
        non-tensor arguments"""
        return [("", [])]

    @staticmethod
    def agAxis(testGen, opName, shapeList, dtype, error_name=None):
        """Build the axis argument for operators that take a single axis"""
        axes = []
        shape = shapeList[0]

        if error_name == ErrorIf.AxisSmallerZero:
            small_axis = testGen.rng.integers(-5, 0)
            axes.append(("axis{}".format(small_axis), [small_axis]))
        elif error_name == ErrorIf.AxisLargerRank:
            large_axis = testGen.rng.integers(len(shape) + 1, len(shape) + 10)
            axes.append(("axis{}".format(large_axis), [large_axis]))
        else:
            for a in range(0, len(shape)):
                axes.append(("axis{}".format(a), [a]))

        return axes

    @staticmethod
    def agConv(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]
        # determine the kernel shape from operator name (e.g. "conv2d_3x3" => [3,3])
        k = [int(x) for x in opName.split("_")[-1].split("x")]

        # Check the rank
        rank = 5 if opName.startswith("conv3d") else 4
        if error_name != ErrorIf.WrongRank:
            assert len(ifm_shape) == rank
            assert len(filter_shape) == rank

        # kernel rank omits batch and channels
        k_rank = rank - 2
        assert len(k) == k_rank

        # Generate comprehensive argument lists
        # - except for named errors, which use specific invalid value(s)
        if error_name == ErrorIf.PadSmallerZero:
            p_vals = [testGen.rng.choice(range(-5, 0))]
        else:
            p_vals = [x for x in range(0, testGen.args.max_conv_padding + 1)]
        paddings = {x for x in itertools.product(*([p_vals] * k_rank * 2))}
        if error_name == ErrorIf.StrideSmallerOne:
            # Can't use stride=0, as it is used to derive output shape, as a divisor
            s_vals = [testGen.rng.choice(range(-5, 0))]
        else:
            s_vals = [x for x in range(1, testGen.args.max_conv_stride + 1)]
        strides = {x for x in itertools.product(*([s_vals] * k_rank))}
        if error_name == ErrorIf.DilationSmallerOne:
            d_vals = [testGen.rng.choice(range(-5, 1))]
        else:
            d_vals = [x for x in range(1, testGen.args.max_conv_dilation + 1)]
        dilations = {x for x in itertools.product(*([d_vals] * k_rank))}

        if not error_name and testGen.args.oversize:
            # add some oversize argument values
            if max(ifm_shape) < 64:
                bigPadding = 9
                paddings.update(
                    {x for x in itertools.product(*([[0, bigPadding]] * (k_rank * 2)))}
                )
            bigStride = 8
            strides.update({x for x in itertools.product(*([[1, bigStride]] * k_rank))})
            bigDilation = 7
            dilations.update(
                {x for x in itertools.product(*([[1, bigDilation]] * k_rank))}
            )

        # There are too many parameter combinations, so generate them sparsely,
        # very sparse for negative tests
        sparsity_factor = 2 if error_name else 100
        sparsity = len(paddings) * len(strides) * len(dilations) // sparsity_factor + 1
        # If there are only a small number of tests, just select them all
        if sparsity < 13:
            sparsity = 1
        # To get a variety of parameter combinations sparsity should not be a
        # multiple of 2, 3 or 5
        while sparsity % 2 == 0 or sparsity % 3 == 0 or sparsity % 5 == 0:
            sparsity += 1
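        # e.g. 81 paddings x 4 strides x 4 dilations gives
        # sparsity = 1296 // 100 + 1 = 13, which survives the adjustments
        # above, so roughly every 13th combination is emitted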

        n = 0
        for s in sorted(list(strides)):
            for p in sorted(list(paddings)):
                for d in sorted(list(dilations)):
                    if (
                        n % sparsity == 0
                        # padding must not exceed the kernel size ?
                        # and p[0] < k[0] and p[1] < k[0]
                        # and p[2] < k[1] and p[3] < k[1]
                        # and (k_rank < 3 or (p[4] < k[2] and p[5] < k[2]))
                        # the padded shape must exceed the kernel size
                        and (ifm_shape[1] + p[0] + p[1]) > k[0]
                        and (ifm_shape[2] + p[2] + p[3]) > k[1]
                        and (k_rank < 3 or ((ifm_shape[3] + p[4] + p[5]) > k[2]))
                        # the padded shape must exceed the dilation
                        and (ifm_shape[1] + p[0] + p[1]) > d[0]
                        and (ifm_shape[2] + p[2] + p[3]) > d[1]
                        and (k_rank < 3 or ((ifm_shape[3] + p[4] + p[5]) > d[2]))
                    ):
                        arg_list.append(
                            (
                                "st{}_pad{}_dilat{}".format(
                                    "".join([str(x) for x in s]),
                                    "".join([str(x) for x in p]),
                                    "".join([str(x) for x in d]),
                                ),
                                [s, p, d],
                            )
                        )
                    n += 1

        return arg_list

    @staticmethod
    def agTransposeConv2D(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]

        # Must be rank 4
        if error_name != ErrorIf.WrongRank:
            assert len(ifm_shape) == 4
            assert len(filter_shape) == 4

        # Generate comprehensive argument lists
        # - except for named errors, which use specific invalid value(s)
        if error_name == ErrorIf.PadSmallerZero:
            p_vals = [testGen.rng.choice(range(-5, 0))]
        else:
            p_vals = [x for x in range(0, testGen.args.max_conv_padding + 1)]
        paddings = {x for x in itertools.product(*([p_vals] * 2))}
        if error_name == ErrorIf.StrideSmallerOne:
            # Can't use stride=0, as it is used to derive output shape, as a divisor
            s_vals = [testGen.rng.choice(range(-5, 0))]
        else:
            s_vals = [x for x in range(1, testGen.args.max_conv_stride + 1)]
        strides = {x for x in itertools.product(*([s_vals] * 2))}
        if error_name == ErrorIf.DilationSmallerOne:
            d_vals = [testGen.rng.choice(range(-5, 1))]
        else:
            d_vals = [x for x in range(1, testGen.args.max_conv_dilation + 1)]
        dilations = {x for x in itertools.product(*([d_vals] * 2))}

        if not error_name:
            # add some oversize argument values
            if max(ifm_shape) < 64:
                bigPadding = 9
                paddings.update(
                    {x for x in itertools.product(*([[0, bigPadding]] * 2))}
                )
            bigStride = 8
            strides.update({x for x in itertools.product(*([[1, bigStride]] * 2))})
            bigDilation = 7
            dilations.update({x for x in itertools.product(*([[1, bigDilation]] * 2))})

        # There are too many parameter combinations, so generate them sparsely,
        # very sparse for negative tests
        sparsity_factor = 2 if error_name else 100
        sparsity = len(paddings) * len(strides) * len(dilations) // sparsity_factor + 1
        # If there are only a small number of tests, just select them all
        if sparsity < 13:
            sparsity = 1
        # To get a variety of parameter combinations sparsity should not be a
        # multiple of 2, 3 or 5
        while sparsity % 2 == 0 or sparsity % 3 == 0 or sparsity % 5 == 0:
            sparsity += 1

        n = 0
        for s in sorted(list(strides)):
            for p in sorted(list(paddings)):
                for d in sorted(list(dilations)):
                    if n % sparsity == 0:
                        # Determine the output shape
                        oh = (
                            ifm_shape[1]
                            - filter_shape[1]
                            - (filter_shape[1] - 1) * (d[0] - 1)
                            + 2 * p[0]
                        ) // s[0] + 1
                        ow = (
                            ifm_shape[2]
                            - filter_shape[2]
                            - (filter_shape[2] - 1) * (d[1] - 1)
                            + 2 * p[1]
                        ) // s[1] + 1
                        os = [ifm_shape[0], oh, ow, filter_shape[0]]
                        arg_list.append(
                            (
                                "st{}_pad{}_dilat{}_os{}".format(
                                    "".join([str(x) for x in s]),
                                    "".join([str(x) for x in p]),
                                    "".join([str(x) for x in d]),
                                    "x".join([str(x) for x in os]),
                                ),
                                [s, p, d, os],
                            )
                        )
                    n += 1

        return arg_list

    @staticmethod
    def agPad(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []
        rank = len(shapeList[0])

        # Exhaustively test combinations of padding on each side of each dimension
        # - the range of padding values is defined by pad_min and pad_max
        # - for padding >9, the name format needs to be more distinctive
        pad_min, pad_max = 0, 1
        pad_values = [x for x in range(pad_min, pad_max + 1)]
        if error_name == ErrorIf.PadSmallerZero:
            pad_values = [x for x in range(-2, 0)]
        axis_pad_values = [x for x in itertools.product(pad_values, pad_values)]
        shape_pad_values = itertools.product(*([axis_pad_values] * rank))
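        # e.g. pad values [0, 1] give 4 (before, after) pairs per axis and
        # therefore 4**rank total padding combinations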

        if dtype in [DType.BOOL, DType.INT8, DType.INT16, DType.INT32]:
            pad_const_int = testGen.getRandNumberDType(dtype)
            pad_const_fp = 0
        elif dtype == DType.FLOAT:
            pad_const_int = 0
            pad_const_fp = testGen.getRandNumberDType(dtype)
        else:
            return []

        for paddings in shape_pad_values:
            name = "pad"
            for r in range(rank):
                before, after = paddings[r]
                name = f"{name}{before}{after}"
            arg_list.append((name, [np.array(paddings), pad_const_int, pad_const_fp]))

        return arg_list

    @staticmethod
    def agPooling(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        shape = shapeList[0]
        if error_name != ErrorIf.WrongRank:
            assert len(shape) == 4

        # Generate comprehensive argument lists
        p_vals = [x for x in range(0, testGen.args.max_pooling_padding + 1)]
        paddings = {x for x in itertools.product(*([p_vals] * 4))}
        s_vals = [x for x in range(1, testGen.args.max_pooling_stride + 1)]
        strides = {x for x in itertools.product(*([s_vals] * 2))}
        k_vals = [x for x in range(2, testGen.args.max_pooling_kernel + 1)]
        kernels = {x for x in itertools.product(*([k_vals] * 2))}

        if testGen.args.oversize:
            # add some oversize argument values
            bigStride = 7
            strides.update({x for x in itertools.product(*([[1, bigStride]] * 2))})
            bigKernel = 6
            kernels.update({x for x in itertools.product(*([[2, bigKernel]] * 2))})
            if max(shape) < 64:
                # padding must be less than the kernel size
                bigPadding = bigKernel - 1
                paddings.update(
                    {x for x in itertools.product(*([[0, bigPadding]] * 4))}
                )

        # There are too many parameter combinations, so generate them sparsely,
        # very sparse for negative tests
        sparsity_factor = 2 if error_name else 500
        sparsity = len(paddings) * len(strides) * len(kernels) // sparsity_factor + 1

        n = 0
        for s in sorted(list(strides)):
            for p in sorted(list(paddings)):
                for k in sorted(list(kernels)):
                    if error_name in [
                        ErrorIf.StrideSmallerOne,
                        ErrorIf.KernelSmallerOne,
                        ErrorIf.PadSmallerZero,
                        ErrorIf.PadLargerEqualKernel,
                    ]:
                        sNew, pNew, kNew = TosaErrorIfArgGen.eiPoolingErrorIf(
                            testGen, error_name, s, p, k
                        )
                        if None not in [sNew, pNew, kNew] and n % sparsity == 0:
                            arg_list.append(
                                (
                                    "st{}_kern{}_pad{}".format(
                                        "".join([str(x) for x in sNew]),
                                        "".join([str(x) for x in kNew]),
                                        "".join([str(x) for x in pNew]),
                                    ),
                                    [sNew, pNew, kNew],
                                )
                            )
                    elif (
                        n % sparsity == 0
                        # padding must not exceed the kernel size
                        and p[0] < k[0]
                        and p[1] < k[0]
                        and p[2] < k[1]
                        and p[3] < k[1]
                        # the padded shape must exceed the kernel size
                        and (shape[1] + p[0] + p[1]) > k[0]
                        and (shape[2] + p[2] + p[3]) > k[1]
                    ):
                        arg_list.append(
                            (
                                "st{}_kern{}_pad{}".format(
                                    "".join([str(x) for x in s]),
                                    "".join([str(x) for x in k]),
                                    "".join([str(x) for x in p]),
                                ),
                                [s, p, k],
                            )
                        )
                    n += 1

        return arg_list

    @staticmethod
    def agCast(testGen, opName, shapeList, inDtype, error_name=None):
        arg_list = []

        # Enumerate the output types here
        if error_name == ErrorIf.WrongOutputType:
            dtypeList = TosaErrorIfArgGen.eiCastErrorIf(testGen, inDtype)
        elif inDtype == DType.INT8:
            dtypeList = [DType.BOOL, DType.INT16, DType.INT32, DType.FLOAT]
        elif inDtype == DType.INT16:
            dtypeList = [DType.BOOL, DType.INT8, DType.INT32, DType.FLOAT]
        elif inDtype == DType.INT32:
            dtypeList = [DType.BOOL, DType.INT8, DType.INT16, DType.FLOAT]
        elif inDtype == DType.BOOL:
            dtypeList = [DType.INT8, DType.INT16, DType.INT32]
        elif inDtype == DType.FLOAT:
            dtypeList = [DType.INT8, DType.INT16, DType.INT32]
        elif error_name == ErrorIf.WrongInputType:
            # Pick some potentially correct output type for incorrect input type
            dtypeList = [DType.BOOL, DType.INT8, DType.INT16, DType.FLOAT]
        else:
            raise Exception("Unexpected input dtype: {}".format(inDtype))

        for dtype in dtypeList:
            arg_list.append(("out{}".format(DTypeNames[dtype]), [dtype]))

        return arg_list

1335 def agRescale(testGen, opName, shapeList, inDtype, error_name=None):
1336 arg_list = []
1337
1338 # Enumerate the output types here
1339 for dtype in [DType.UINT8, DType.INT8, DType.INT16, DType.INT32]:
1340 if (
1341 dtype in [DType.UINT8, DType.INT8]
1342 and error_name == ErrorIf.OutputZeroPointNotZero
1343 ):
1344 continue
1345 if (
1346 inDtype == DType.UINT8
1347 and dtype != DType.INT8
1348 and error_name != ErrorIf.WrongOutputType
1349 ):
1350 # The only output dtype for UINT8 is INT8, skip all other combinations
1351 continue
1352 if (
1353 inDtype != DType.INT8
1354 and dtype == DType.UINT8
1355 and error_name != ErrorIf.WrongOutputType
1356 ):
1357 # The only input dtype for UINT8 is INT8, skip all other combinations
1358 continue
1359 if (
1360 error_name == ErrorIf.WrongOutputType
1361 and not TosaErrorIfArgGen.eiRescaleWrongOutputType(inDtype, dtype)
1362 ):
1363 continue
1364
1365 for scale32 in [False, True]:
1366 if error_name == ErrorIf.ScaleTrue and not scale32:
1367 continue
1368 elif error_name == ErrorIf.ScaleNotTrue and scale32:
1369 continue
1370 for double_round in [False, True]:
1371 if error_name == ErrorIf.ScaleNotTrue and not double_round:
1372 continue
1373 for per_channel in [False, True]:
1374
1375 if (
1376 inDtype == DType.INT48
1377 and scale32
1378 and error_name != ErrorIf.ScaleTrue
1379 ):
1380 # Illegal condition. Must be scale32=False
1381 continue
1382 if (
1383 double_round
1384 and not scale32
1385 and error_name != ErrorIf.ScaleNotTrue
1386 ):
1387 # Illegal condition. ERROR_IF(!scale32 && double_round)
1388 continue
1389
1390 arg_list.append(
1391 (
1392 "out{}_sc{}_dr{}_pc{}".format(
1393 DTypeNames[dtype],
1394 int(scale32),
1395 int(double_round),
1396 int(per_channel),
1397 ),
1398 [dtype, scale32, double_round, per_channel],
1399 )
1400 )
1401
1402 return arg_list
1403
    @staticmethod
    def agMul(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        if dtype is DType.INT32:
            for p in range(testGen.args.num_rand_permutations):

                shift = testGen.randInt(0, 32)

                arg_list.append(("perm{}_shift{}".format(p, shift), [shift]))
        else:
            arg_list.append(("perm0_shift0", [0]))

        return arg_list

    @staticmethod
    def agArithmeticRightShift(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        arg_list.append(("roundTrue", [True]))
        arg_list.append(("roundFalse", [False]))

        return arg_list

    # Helper function for reshape. Returns the factors of val that are no
    # larger than sqrt(val), searching upwards from start.
    @staticmethod
    def getFactors(val, start=1):
        factors = []

        for i in range(start, int(np.sqrt(val)) + 1):
            if (val % i) == 0:
                factors.append(i)

        return factors
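
    # e.g. getFactors(12) == [1, 2, 3] - only factors up to sqrt(val) are found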

    @staticmethod
    def agReshape(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        origShape = shapeList[0]

        totalElements = 1
        for s in origShape:
            totalElements *= s

        # This code is NOT fast. Fortunately, the numbers are fairly small.
        factors = TosaArgGen.getFactors(totalElements)

        for p in range(testGen.args.num_rand_permutations):
            newRank = testGen.randInt(1, 7)
            if len(factors) < newRank:
                continue

            found = True
            # escape_counter breaks the while loop if it continues for too long
            escape_counter = 0
            while found:
                newShape = []
                # Generate newShape ensuring it isn't a duplicate
                remainingElements = totalElements
                shuffledFactors = testGen.rng.permutation(factors)
                for i in range(1, newRank):
                    # pick rank - 1 factors
                    newShape.append(shuffledFactors[0])
                    remainingElements = remainingElements // shuffledFactors[0]
                    shuffledFactors = testGen.rng.permutation(
                        TosaArgGen.getFactors(remainingElements)
                    )
                newShape.append(remainingElements)

                # Toss in a -1 sometimes
                minusOne = testGen.randInt(0, newRank * 4)
                if minusOne < newRank:
                    newShape[minusOne] = -1

                # Check for duplicates
                found = False
                for name, other_shape in arg_list:
                    if other_shape[0] == newShape:
                        found = True
                        break

                escape_counter += 1
                if escape_counter >= 100:
                    break

            if not found:
                arg_list.append(("perm{}_rank{}".format(p, newRank), [newShape]))

        return arg_list

    @staticmethod
    def agTranspose(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        ifm_shape = shapeList[0]

        if error_name == ErrorIf.IndexOutsideBounds:
            incorrect_large_index = range(len(ifm_shape) + 1, 2 * len(ifm_shape) + 1)
            incorrect_small_index = range(-len(ifm_shape), 0)
            permutations = [p for p in itertools.permutations(incorrect_large_index)]
            permutations.extend(
                [p for p in itertools.permutations(incorrect_small_index)]
            )
        elif error_name == ErrorIf.IndexUsedTwice:
            # Create list with a duplicated index
            perm_range = list(range(len(ifm_shape)))
            index_choice = testGen.rng.choice(range(len(perm_range)))
            perm_range[(index_choice + 1) % len(perm_range)] = perm_range[index_choice]
            permutations = [p for p in itertools.permutations(perm_range)]

        else:
            # Get all permutations
            permutations = [p for p in itertools.permutations(range(len(ifm_shape)))]

        # Limit to possible permutations from shape dimension or argument setting
        limit = min(len(permutations), testGen.args.num_rand_permutations)

        # Get random permutation generator that uses all permutations
        random_permutations = testGen.rng.permutation(permutations)

        # Create list of required amount of permutations
        arg_list = [
            ("perm{}".format(p), [random_permutations[p].tolist()])
            for p in range(limit)
        ]
        return arg_list

    @staticmethod
    def agSlice(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        ifm_shape = shapeList[0]
        rank = len(ifm_shape)

        for p in range(testGen.args.num_rand_permutations):
            start = []
            size = []

            valid = True

            for i in range(rank):
                if ifm_shape[i] > 1:
                    start.append(testGen.randInt(0, ifm_shape[i]))
                    size.append(testGen.randInt(0, ifm_shape[i] - start[i]))

                    # Invalid slice size?
                    if size[i] == 0:
                        valid = False
                else:
                    start.append(0)
                    size.append(1)

            if valid:
                # If an ERROR_IF test is required then incorrect start and size
                # values will be returned
                start, size = TosaErrorIfArgGen.eiSliceErrorIf(
                    testGen, error_name, ifm_shape, start, size
                )
                arg_list.append(("perm{}".format(p), [start, size]))
        return arg_list

    @staticmethod
    def agTile(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        ifm_shape = shapeList[0]
        rank = len(ifm_shape)

        for p in range(testGen.args.num_rand_permutations):

            # Pick a few random, but small multiple values
            # because otherwise this has a tendency to generate
            # enormous tensors
            multiples = []
            for i in range(rank):
                if ifm_shape[i] > 1000:
                    # Multiple of 1 if ifm_shape dimension is large to reduce
                    # tensor size
                    multiples.append(1)
                elif max(ifm_shape) > 1000:
                    multiples.append(2)
                else:
                    multiples.append(testGen.randInt(1, 4))
            arg_list.append(("perm{}".format(p), [multiples]))

        return arg_list

    @staticmethod
    def agResize(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        ifm_shape = shapeList[0]
        for mode in [ResizeMode.NEAREST, ResizeMode.BILINEAR]:

            # Exclude illegal {mode, type} configurations. Pick legal output types
            if mode == ResizeMode.NEAREST and dtype == DType.INT8:
                outputDTypeList = [DType.INT8]
            elif mode == ResizeMode.NEAREST and dtype == DType.INT16:
                outputDTypeList = [DType.INT16]
            elif mode == ResizeMode.BILINEAR and dtype == DType.INT8:
                outputDTypeList = [DType.INT32]
            elif mode == ResizeMode.BILINEAR and dtype == DType.INT16:
                outputDTypeList = [DType.INT48]
            elif dtype == DType.FLOAT:
                outputDTypeList = [DType.FLOAT]
            elif error_name == ErrorIf.WrongInputType:
                # If an incorrect input type is used then we set a 'correct'
                # output type to avoid other errors
                outputDTypeList = [DType.INT8, DType.INT16, DType.INT32]
            else:
                continue

            for outputDType in outputDTypeList:
                for perm in range(testGen.args.num_rand_permutations):
                    # Randomly generate legal output dimensions and shift
                    # and then compute the stride and offset based on them.
                    # An output_dim of 1 will cause offset to exceed the allowed
                    # range, so a minimum value of 2 is produced below
                    output_dims = [testGen.randInt(1) + 1, testGen.randInt(1) + 1]
                    while (float(ifm_shape[1]) / float(output_dims[0])) >= 16:
                        output_dims[0] += 1
                    while (float(ifm_shape[2]) / float(output_dims[1])) >= 16:
                        output_dims[1] += 1

                    in_center_h = (ifm_shape[1] - 1) / 2.0
                    in_center_w = (ifm_shape[2] - 1) / 2.0
                    out_center_h = (output_dims[0] - 1) / 2.0
                    out_center_w = (output_dims[1] - 1) / 2.0

                    fp_stride_y = float(ifm_shape[1]) / float(output_dims[0])
                    fp_stride_x = float(ifm_shape[2]) / float(output_dims[1])
                    fp_offset_y = in_center_h - fp_stride_y * out_center_h
                    fp_offset_x = in_center_w - fp_stride_x * out_center_w
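                    # The offsets keep the sampling grids aligned: the input
                    # centre maps onto the output centre, since
                    # offset = in_centre - stride * out_centre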

                    if outputDType == DType.FLOAT:
                        float_op = True
                        arg_str = (
                            "mode{}_shift{}_odim{}x{}_out{}"
                            "_st{:.2f}x{:.2f}_off{:.2f}x{:.2f}"
                        )
                        shift = 0
                        stride = [0, 0]
                        offset = [0, 0]
                        stride_fp = [fp_stride_y, fp_stride_x]
                        offset_fp = [fp_offset_y, fp_offset_x]

                    else:
                        float_op = False
                        arg_str = "mode{}_shift{}_odim{}x{}_out{}_st{}x{}_off{}x{}"
                        shift = testGen.randInt(1, 12)
                        # Now search for a shift value (1 to 11) that will produce
                        # a valid and predictable resize operation
                        count = 0
                        while count < 12:
                            unit = float(1 << shift)
                            stride_y = int(round(fp_stride_y * unit))
                            stride_x = int(round(fp_stride_x * unit))
                            offset_y = int(round(fp_offset_y * unit))
                            offset_x = int(round(fp_offset_x * unit))

                            if (
                                stride_y <= 0
                                or stride_x <= 0
                                or stride_y >= (16 << shift)
                                or stride_x >= (16 << shift)
                                or offset_y >= (16 << shift)
                                or offset_x >= (16 << shift)
                                or offset_y <= (-16 << shift)
                                or offset_x <= (-16 << shift)
                            ):
                                # Change the shift value and check again
                                count += 1
                                shift = (shift % 11) + 1
                                continue

                            def RESIZE_REQUIRE_CALC(
                                length_in, length_out, stride, offset, shift
                            ):
                                # Perform the pseudo loop to look for out of bounds
                                for pos in range(0, length_out):
                                    a = pos * stride + offset
                                    ia = a >> shift
                                    ia0 = max(ia, 0)
                                    ia1 = min(ia + 1, length_in - 1)
                                    if ia0 > ia1:
                                        # Found a problem value
                                        break
                                return ia0, ia1

                            iy0, iy1 = RESIZE_REQUIRE_CALC(
                                ifm_shape[1], output_dims[0], stride_y, offset_y, shift
                            )
                            ix0, ix1 = RESIZE_REQUIRE_CALC(
                                ifm_shape[2], output_dims[1], stride_x, offset_x, shift
                            )
                            if ix0 > ix1 or iy0 > iy1:
                                # Change the shift value and check again
                                count += 1
                                shift = (shift % 11) + 1
                                continue
                            break

                        if count >= 12:
                            # Couldn't find a good set of values for this test, skip it
                            continue

                        stride = [stride_y, stride_x]
                        offset = [offset_y, offset_x]

                        stride_fp = [0.0, 0.0]
                        offset_fp = [0.0, 0.0]

                    # Common for all data types
                    if error_name is not None:
                        (
                            shift,
                            stride,
                            stride_fp,
                            offset,
                            offset_fp,
                            outputDTypeNew,
                        ) = TosaErrorIfArgGen.eiResizeErrorIf(
                            testGen,
                            error_name,
                            mode,
                            dtype,
                            shapeList,
                            outputDType,
                            shift,
                            stride,
                            stride_fp,
                            offset,
                            offset_fp,
                        )
                    else:
                        outputDTypeNew = outputDType

                    arg_list.append(
                        (
                            arg_str.format(
                                "N" if mode == ResizeMode.NEAREST else "B",
                                shift,
                                output_dims[0],
                                output_dims[1],
                                testGen.typeStr(outputDTypeNew),
                                stride_fp[0] if float_op else stride[0],
                                stride_fp[1] if float_op else stride[1],
                                offset_fp[0] if float_op else offset[0],
                                offset_fp[1] if float_op else offset[1],
                            ),
                            [
                                mode,
                                stride,
                                offset,
                                shift,
                                stride_fp,
                                offset_fp,
                                output_dims,
                                dtype,
                                outputDTypeNew,
                            ],
                        )
                    )

        return arg_list

    @staticmethod
    def agTable(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        if dtype == DType.INT8:
            table = np.int32(
                testGen.rng.integers(low=-128, high=128, size=[256])
            ).tolist()
        else:  # INT16
            table = np.int32(
                testGen.rng.integers(low=-32768, high=32768, size=[513])
            ).tolist()

        arg_list.append(
            (
                "",
                [table],
            )
        )
        return arg_list

    @staticmethod
    def agCondIf(testGen, opName, shapeList, dtype, error_name=None):
        # CondIf generates the condition values here.
        # Convert to tensors in the build function, along with the
        # then and else blocks
        arg_list = []

        for c in [False, True]:
            arg_list.append(("cond{}".format(int(c)), [c]))

        return arg_list

    @staticmethod
    def agWhileLoop(testGen, opName, shapeList, dtype, error_name=None):
        # While loop: 0 iterations, 1, more than 1
        arg_list = []

        for iterations in [0, 1, 4]:
            arg_list.append(("iter{}".format(iterations), [iterations]))

        return arg_list