# Copyright (c) 2021-2022, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import itertools
import math

import numpy as np
import serializer.tosa_serializer as ts
from generator.tosa_error_if import ErrorIf
from generator.tosa_error_if import TosaErrorIfArgGen
from serializer.tosa_serializer import DTypeNames
from tosa.DType import DType
from tosa.Op import Op
from tosa.ResizeMode import ResizeMode

# DTypeNames, DType, Op and ResizeMode are convenience aliases for the
# flatc-generated types, which should be enums but aren't


class TosaQuantGen:
    """QuantizedInfo random generator helper functions.

    Specify with 'qgen': in the operator definition.
    """

    def __init__(self):
        pass

    @staticmethod
    def getQinfo(testGen, dtype, error_name=None):

        if dtype == DType.INT8:
            return testGen.randInt(-128, 128)
        elif dtype == DType.UINT8:
            return testGen.randInt(0, 256)
        elif error_name in [
            ErrorIf.InputZeroPointNotZero,
            ErrorIf.WeightZeroPointNotZero,
            ErrorIf.OutputZeroPointNotZero,
        ]:
            zero_point = testGen.randInt(-128, 128)
            if zero_point == 0:
                zero_point = 1
            return zero_point
        return 0

    @staticmethod
    def qgUnary(testGen, op, dtype, error_name=None):
        qinfo = ts.TosaSerializerQuantInfo()
        if error_name == ErrorIf.InputZeroPointNotZero:
            qinfo.UnaryQuantInfo(
                TosaQuantGen.getQinfo(testGen, dtype, error_name),
                TosaQuantGen.getQinfo(testGen, dtype),
            )
        elif error_name == ErrorIf.OutputZeroPointNotZero:
            qinfo.UnaryQuantInfo(
                TosaQuantGen.getQinfo(testGen, dtype),
                TosaQuantGen.getQinfo(testGen, dtype, error_name),
            )
        else:
            qinfo.UnaryQuantInfo(
                TosaQuantGen.getQinfo(testGen, dtype),
                TosaQuantGen.getQinfo(testGen, dtype),
            )
        return qinfo

    @staticmethod
    def qgConv(testGen, op, dtype_or_dtypeList, error_name=None):
        qinfo = ts.TosaSerializerQuantInfo()
        if isinstance(dtype_or_dtypeList, list):
            # a list of [input, weights, accumulator] dtypes
            dtypeList = dtype_or_dtypeList
        else:
            # an int, [input, weights, accumulator] dtypes are the same
            dtypeList = [dtype_or_dtypeList] * 3

        if error_name == ErrorIf.InputZeroPointNotZero:
            input_zp = TosaQuantGen.getQinfo(testGen, dtypeList[0], error_name)
            weights_zp = TosaQuantGen.getQinfo(testGen, dtypeList[1])
        elif error_name == ErrorIf.WeightZeroPointNotZero:
            input_zp = TosaQuantGen.getQinfo(testGen, dtypeList[0])
            weights_zp = TosaQuantGen.getQinfo(testGen, dtypeList[1], error_name)
        else:
            input_zp = TosaQuantGen.getQinfo(testGen, dtypeList[0])
            weights_zp = TosaQuantGen.getQinfo(testGen, dtypeList[1])

        qinfo.ConvQuantInfo(input_zp, weights_zp)
        return qinfo

    @staticmethod
    def qgMatmul(testGen, op, dtype, error_name=None):
        qinfo = ts.TosaSerializerQuantInfo()
        if error_name == ErrorIf.InputZeroPointNotZero:
            qinfo.MatMulQuantInfo(
                TosaQuantGen.getQinfo(testGen, dtype, error_name),
                TosaQuantGen.getQinfo(testGen, dtype, error_name),
            )
        else:
            qinfo.MatMulQuantInfo(
                TosaQuantGen.getQinfo(testGen, dtype),
                TosaQuantGen.getQinfo(testGen, dtype),
            )
        return qinfo

    @staticmethod
    def qgPad(testGen, op, dtype, error_name=None):
        qinfo = ts.TosaSerializerQuantInfo()
        if error_name == ErrorIf.InputZeroPointNotZero:
            qinfo.PadQuantInfo(TosaQuantGen.getQinfo(testGen, dtype, error_name))
        else:
            qinfo.PadQuantInfo(TosaQuantGen.getQinfo(testGen, dtype))
        return qinfo

    @staticmethod
    def computeMultiplierAndShift(scaleFp, scale32):
        # Derived from computeMultiplierAndShiftTosaScale32
        # Provide a floating-point scaling factor and the scale32 parameter
        # to compute the multiplier and shift

        if scale32:
            scaleBits = 31
        else:
            scaleBits = 15

        m, shift = math.frexp(scaleFp)

        if scaleFp < 0.0:
            m = -m

        multiplier = round(m * (1 << scaleBits))
        assert multiplier <= (1 << scaleBits)

        if multiplier == (1 << scaleBits):
            multiplier = multiplier // 2
            shift = shift + 1

        shift = (-shift) + scaleBits
        # print('scalefp {} scaleBits {} m {} mult {} shift {}'.format(
        #     scaleFp, scaleBits, m, multiplier, shift))

        # Adjust multiplier such that shift is in allowed value range.
        if shift == 0:
            multiplier = multiplier // 4
            shift = shift + 2
        elif shift == 1:
            multiplier = multiplier // 2
            shift = shift + 1
        elif shift == 63:
            multiplier = multiplier * 2
            shift = shift - 1

        assert multiplier <= (1 << scaleBits)
        assert shift >= 2 and shift <= 62

        return multiplier, shift

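    # Worked example (editorial note, not executed by the generator):
    # computeMultiplierAndShift(1.0, True) uses math.frexp(1.0) == (0.5, 1), so
    # multiplier = round(0.5 * (1 << 31)) == 1 << 30 and shift = -1 + 31 == 30;
    # applying (value * multiplier) >> shift then reproduces the unit scale
    # exactly, e.g. (7 * (1 << 30)) >> 30 == 7.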

class TosaTensorGen:
    """Tensor generators create a shape list for the placeholder and const tensor
    data operands for the operator.

    The actual random data is generated separately for each test.
    """

    def __init__(self):
        pass

    @staticmethod
    def tgBasic(testGen, opName, rank, error_name=None):
        pl, const = opName["operands"]
        shape = testGen.makeShape(rank)

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            shape = TosaErrorIfArgGen.eiRestrictDimensions(shape)

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

            if error_name == ErrorIf.RankMismatch:
                if rank == 1 and i != 1:
                    shape = testGen.makeShape(rank + testGen.rng.choice([1, 2, 3]))
                elif i != 1:
                    shape = testGen.makeShape(rank + testGen.rng.choice([-1, 1]))

        return shape_list

    @staticmethod
    def tgNHWC(testGen, opName, rank, error_name=None):
        pl, const = opName["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 4

        shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            shape[0] = (shape[0] % testGen.args.max_batch_size) + 1

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name and error_name != ErrorIf.MaxDimExceeded:
            shape = TosaErrorIfArgGen.eiRestrictDimensions(shape)

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgScatter(testGen, opName, rank, error_name=None):
        pl, const = opName["operands"]

        assert pl == 2
        assert const == 0
        if error_name != ErrorIf.WrongRank:
            assert rank == 3

        values_in_shape = testGen.makeShape(rank)

        # ignore max batch size if target shape is set
        if testGen.args.max_batch_size and not testGen.args.target_shapes:
            values_in_shape[0] = (values_in_shape[0] % testGen.args.max_batch_size) + 1

        W = testGen.randInt(
            testGen.args.tensor_shape_range[0], testGen.args.tensor_shape_range[1]
        )
        # Constrict W if one dimension is too large to keep tensor size reasonable
        if max(values_in_shape) > 5000:
            W = testGen.randInt(0, 16)

        input_shape = [values_in_shape[0], W, values_in_shape[2]]

        shape_list = []
        shape_list.append(values_in_shape.copy())
        shape_list.append(input_shape.copy())

        return shape_list

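    # Illustrative note (editorial, not executed): for rank 3, tgScatter
    # returns a pair of shapes such as [[N, K, C], [N, W, C]] - the second
    # shape reuses the batch N and channels C of values_in but substitutes an
    # independently chosen width W.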
    @staticmethod
    def tgBroadcastFuzz(testGen, op, rank, error_name=None):
        shape = testGen.makeShape(rank)

        pl, const = op["operands"]

        shape_list = []

        # Choose one of the inputs to broadcast
        # Note: Simplifies OutputShaper code if we don't change first shape for errors
        bcast_idx = testGen.randInt(0 if error_name is None else 1, pl + const)
        for i in range(pl + const):
            shape_bcast = shape.copy()

            # If this is the chosen input, pick a random index to broadcast
            if i == bcast_idx:
                fuzz_idx = testGen.randInt(0, rank)
                if error_name == ErrorIf.DimensionMismatch:
                    shape_bcast[fuzz_idx] += 1
                elif error_name == ErrorIf.RankMismatch:
                    # Add one rank to the shape (or more for rank of 1)
                    extra_ranks = testGen.rng.choice([1, 2, 3]) if rank == 1 else 1
                    shape_bcast = np.concatenate(
                        (shape_bcast, testGen.makeShape(extra_ranks))
                    )
                    if rank != 1:
                        # Either keep the extra rank, or remove it
                        new_len = testGen.rng.choice([-2, len(shape_bcast)])
                        shape_bcast = shape_bcast[:new_len]
                else:
                    shape_bcast[fuzz_idx] = 1

            shape_list.append(shape_bcast)

        return shape_list

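    # Illustrative note (editorial, not executed): with shape [2, 3, 4] and
    # fuzz_idx 1, the broadcast input becomes [2, 1, 4], which broadcasts
    # against [2, 3, 4]; with error_name DimensionMismatch it would instead
    # become the incompatible [2, 4, 4].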
    @staticmethod
    def tgConv2D(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 4

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            ifm_shape = TosaErrorIfArgGen.eiRestrictDimensions(
                ifm_shape, max_dim=24, max_items=10000
            )

        # Get the filter height/width from the operator parameters
        filter_hw = op["filter"]

        # Generate a random OFM depth
        ofm_depth = testGen.makeShape(1)[0]

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        # The bias is OC
        bias_shape = np.asarray([ofm_depth])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgConv3D(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 5

        # IFM dimensions are NDHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            ifm_shape = TosaErrorIfArgGen.eiRestrictDimensions(
                ifm_shape, max_dim=24, max_items=10000
            )

        # Get the filter depth/height/width from the operator parameters
        filter_dhw = op["filter"]

        # Generate a random OFM channel
        ofm_channel = testGen.makeShape(1)[0]

        # The filter dimensions are ODHWI
        filter_shape = np.asarray(
            [ofm_channel, filter_dhw[0], filter_dhw[1], filter_dhw[2], ifm_shape[4]]
        )

        # The bias is OC
        bias_shape = np.asarray([ofm_channel])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgTransposeConv2D(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 4

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            ifm_shape = TosaErrorIfArgGen.eiRestrictDimensions(
                ifm_shape, max_dim=24, max_items=10000
            )

        # Get the filter height/width from the operator parameters
        filter_hw = op["filter"]

        # Generate a random OFM depth
        ofm_depth = testGen.makeShape(1)[0]

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        # The bias is OC
        bias_shape = np.asarray([ofm_depth])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgDepthwiseConv2D(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 4
        assert pl == 1 and const == 2

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            ifm_shape = TosaErrorIfArgGen.eiRestrictDimensions(
                ifm_shape, max_dim=24, max_items=10000
            )

        # Get the filter height/width from the operator parameters
        # Filter is KH, KW, C, M
        filter_hw = op["filter"]

        # Generate a random OFM depth, but don't let it get too big because
        # the output depth is M * C
        filter_m = (
            testGen.makeShape(1)[0] % (testGen.args.tensor_shape_range[1] // 4)
        ) + 1

        # The filter dimensions are HWCM
        filter_shape = np.asarray([filter_hw[0], filter_hw[1], ifm_shape[3], filter_m])

        # The bias is M * C
        bias_shape = np.asarray([ifm_shape[3] * filter_m])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgFullyConnected(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 2

        input_shape = testGen.makeShape(rank)

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            input_shape = TosaErrorIfArgGen.eiRestrictDimensions(input_shape)

        filter_oc = testGen.rng.integers(
            low=testGen.args.tensor_shape_range[0],
            high=testGen.args.tensor_shape_range[1],
            size=1,
        )[0]
        filter_shape = np.asarray([filter_oc, input_shape[1]])

        bias_shape = np.asarray([filter_oc])

        return [input_shape, filter_shape, bias_shape]

    @staticmethod
    def tgMatmul(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 3
        assert pl == 2 and const == 0

        a_shape = testGen.makeShape(rank)

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            a_shape = TosaErrorIfArgGen.eiRestrictDimensions(a_shape)

        # Get a random number for b_oc even if the target shape is defined
        b_oc = np.int32(
            testGen.rng.integers(
                low=testGen.args.tensor_shape_range[0],
                high=testGen.args.tensor_shape_range[1],
                size=1,
            )
        )[0]
        # If N or H is large, let b_oc be 1 to reduce the output tensor size
        if max(a_shape) > 1000:
            b_oc = 1

        b_shape = np.asarray([a_shape[0], a_shape[2], b_oc])
        return [a_shape, b_shape]

    @staticmethod
    def tgConcat(testGen, opName, rank, error_name=None):
        pl, const = opName["operands"]
        shape = testGen.makeShape(rank)

        # Create extra tensors to concat.
        # Take into account the value of pl when getting the maximum number of concats
        num_tensors = testGen.randInt(0, 4)
        shape_list = []
        for i in range(pl + const + num_tensors):
            if error_name == ErrorIf.ConcatInputRankMismatch and i != 0:
                remove = testGen.rng.choice([True, False])
                wrongShape = shape.copy()

                if remove and len(shape) > 1:
                    wrongShape = wrongShape[1:]
                else:
                    wrongShape = list(wrongShape)
                    wrongShape.append(testGen.rng.integers(1, 10))

                shape_list.append(wrongShape)
            else:
                shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgConcatConstInput(testGen, shapeList, axis, error_name=None):
        if error_name in [
            ErrorIf.AxisSmallerZero,
            ErrorIf.AxisLargerRank,
            ErrorIf.ConcatInputRankMismatch,
        ]:
            return shapeList

        # Split the concat shape along the axis to allow for multiple const inputs
        # without making too many large tensors
        if len(shapeList) == 2 or shapeList[0][axis] < len(shapeList):
            # If the axis can't be split we still need to invalidate other dimensions
            if error_name == ErrorIf.ConcatInputDimMismatch:
                for shape in shapeList[1:]:
                    # Negative test shapeLists are created individually for each test,
                    # so there is no need to copy the shape before altering it.
                    shape[(axis + 1) % len(shape)] += testGen.rng.integers(5, 10)
            return shapeList

        # Create a copy of the shape we are going to split (so we don't alter shapeList)
        shape = shapeList[0].copy()
        # Add the original shape as the first input
        new_shapeList = [shape.copy()]
        length_on_axis = shape[axis]
        remaining_length = length_on_axis
        for i in range(len(shapeList) - 2):
            # Calculate the split on the axis and the remaining value
            split_shape_val = int(shape[axis] / 2)
            remaining_length = remaining_length - split_shape_val

            # Append the new shape, and set the remaining shape
            shape[axis] = split_shape_val
            new_shapeList.append(shape.copy())

            # invalidate dimensions
            if error_name == ErrorIf.ConcatInputDimMismatch:
                shape[(axis + 1) % len(shape)] += testGen.rng.integers(5, 10)
            else:
                shape[axis] = remaining_length

            if i == len(shapeList) - 3:
                new_shapeList.append(shape.copy())

        return new_shapeList

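    # Illustrative note (editorial, not executed): with shapeList
    # [[8, 4], [8, 4], [8, 4]] and axis 0, the first shape is kept as [8, 4]
    # and the remaining inputs become the split pair [4, 4] and [4, 4], which
    # together span the original axis length of 8 while keeping total tensor
    # size down.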

class TosaTensorValuesGen:
    """Tensor Value generators create the random data for each test."""

    def __init__(self):
        pass

    @staticmethod
    def tvgDefault(testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None):
        pCount, cCount = op["operands"]

        tens = []
        tens.extend(
            testGen.buildPlaceholderTensors(shapeList[0:pCount], dtypeList[0:pCount])
        )
        tens.extend(testGen.buildConstTensors(shapeList[pCount:], dtypeList[pCount:]))

        return tens

    @staticmethod
    def tvgNegate(testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None):
        if dtypeList[0] != DType.FLOAT and error_name is None:
            pCount, cCount = op["operands"]
            assert (
                pCount == 1 and cCount == 0
            ), "Op.NEGATE must have 1 placeholder, 0 consts"
            # Must create tensors with values within negatable ranges
            if dtypeList[0] == DType.INT8:
                # Must be within int8, adjustable by input_zp, and still within
                # int8 after negation
                # For use: qinfo.ints[0][1] = input_zp, qinfo.ints[1][1] = output_zp
                max_val = min(127, 127 + qinfo.ints[0][1])
                min_val = max(-127, -127 + qinfo.ints[0][1])
            elif dtypeList[0] == DType.INT16:
                max_val = 32767
                min_val = -max_val
            else:
                assert (
                    dtypeList[0] == DType.INT32
                ), "Op.NEGATE found with unsupported input type"
                max_val = (1 << 31) - 1
                min_val = -max_val
            arr = np.int32(
                testGen.rng.integers(low=min_val, high=(max_val + 1), size=shapeList[0])
            )
            placeholders = []
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], arr)
            )
            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name
            )

    @staticmethod
    def tvgAddSub(testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None):
        if dtypeList[0] == DType.INT32 and error_name is None:
            # Make sure the operation does not cause value saturation - where
            # the number wraps due to the limited number of bits to store the answer
            pCount, cCount = op["operands"]
            assert (
                pCount == 2 and cCount == 0
            ), "Op.ADD / Op.SUB must have 2 placeholders, 0 consts"
            placeholders = []
            add = op["op"] == Op.ADD
            a_arr = testGen.getRandTensor(shapeList[0], dtypeList[0])
            b_arr = testGen.getRandTensor(shapeList[1], dtypeList[1])
            if add:
                res_arr = np.add(a_arr, b_arr, dtype=np.int64)
            else:
                res_arr = np.subtract(a_arr, b_arr, dtype=np.int64)

            # Work out the saturation limits
            max_i32 = (1 << 31) - 1
            min_i32 = -(1 << 31)
            max_arr = np.full(shapeList[1], max_i32)
            min_arr = np.full(shapeList[1], min_i32)

            # Find how much the values exceed the maximum/minimum
            sat_max_arr = np.maximum(res_arr - max_arr, 0)
            sat_min_arr = np.minimum(res_arr - min_arr, 0)

            if not add:
                # Swap and negate the saturation values, as we need to perform
                # the opposite operation
                sat_max_arr, sat_min_arr = -sat_min_arr, -sat_max_arr

            # Create a new array of unsaturated values by clipping values as needed
            b_unsat_arr = b_arr
            if (sat_max_arr != 0).any():
                # Clip values that cause saturation
                b_unsat_arr = np.subtract(b_unsat_arr, sat_max_arr, dtype=np.int32)
                # Reduce axes in the unsaturated tensor to match the original tensor
                for axis, dim in enumerate(b_arr.shape):
                    if dim != b_unsat_arr.shape[axis]:
                        assert (
                            dim == 1
                        ), "Op.ADD / SUB dimension must be 1 or matching to be broadcastable"
                        b_unsat_arr = np.amin(b_unsat_arr, axis=axis, keepdims=True)

            if (sat_min_arr != 0).any():
                # Clip values that cause saturation
                b_unsat_arr = np.subtract(b_unsat_arr, sat_min_arr, dtype=np.int32)
                # Reduce axes in the unsaturated tensor to match the original tensor
                for axis, dim in enumerate(b_arr.shape):
                    if dim != b_unsat_arr.shape[axis]:
                        assert (
                            dim == 1
                        ), "Op.ADD / SUB dimension must be 1 or matching to be broadcastable"
                        b_unsat_arr = np.amax(b_unsat_arr, axis=axis, keepdims=True)

            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
            )
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], b_unsat_arr)
            )

            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name
            )

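    # Illustrative note (editorial, not executed): for ADD with a == 2**31 - 1
    # and b == 5, res_arr is 2**31 + 4, so sat_max_arr is 5 and b is clipped to
    # 0; the regenerated sum 2**31 - 1 then fits in int32 without wrapping.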
    @staticmethod
    def tvgCondIfWhileLoop(
        testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None
    ):
        if dtypeList[0] in (
            DType.INT32,
            DType.INT16,
            DType.INT8,
        ):
            # Limit input tensors with cond_if_binary or while_loop to stop
            # saturation of add/sub ops with int32 and to keep all logical shift
            # values between 0 and 31 for int16 or int8
            pCount, cCount = op["operands"]
            pRemain = pCount
            placeholders = []
            for idx, shape in enumerate(shapeList[:]):
                if dtypeList[0] == DType.INT32:
                    arr = testGen.getRandTensor(shapeList[idx], DType.INT16)
                else:
                    arr = np.int32(
                        testGen.rng.integers(low=0, high=32, size=shapeList[idx])
                    )
                if pRemain > 0:
                    placeholders.append(
                        testGen.ser.addPlaceholder(shape, dtypeList[idx], arr)
                    )
                    pRemain -= 1
                else:
                    placeholders.append(
                        testGen.ser.addConst(shape, dtypeList[idx], arr)
                    )

            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name
            )

    @staticmethod
    def tvgArithmeticRightShift(
        testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None
    ):
        pCount, cCount = op["operands"]
        # Force value of operand[1] to be within [0, num_bits]
        assert (
            pCount == 2 and cCount == 0
        ), "Op.ArithmeticRightShift must have 2 placeholders, 0 consts"

        placeholders = []
        for idx, shape in enumerate(shapeList[:]):
            if idx == 1:
                if dtypeList[idx] == DType.INT8:
                    arr = np.int32(testGen.rng.integers(low=0, high=8, size=shape))
                elif dtypeList[idx] == DType.INT16:
                    arr = np.int32(testGen.rng.integers(low=0, high=16, size=shape))
                elif dtypeList[idx] == DType.INT32:
                    arr = np.int32(testGen.rng.integers(low=0, high=32, size=shape))
                elif error_name == ErrorIf.WrongInputType:
                    arr = np.int32(testGen.rng.integers(low=0, high=8, size=shape))
                else:
                    raise Exception("OpArithmeticRightShift: invalid input dtype")
            else:
                arr = testGen.getRandTensor(shape, dtypeList[idx])
            placeholders.append(testGen.ser.addPlaceholder(shape, dtypeList[idx], arr))

        return placeholders

    @staticmethod
    def tvgSelect(testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None):
        # Set datatype of condition tensor to boolean
        dtypeList[0] = DType.BOOL

        return TosaTensorValuesGen.tvgDefault(
            testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name
        )

    @staticmethod
    def tvgIntDiv(testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None):
        if error_name is None:
            pCount, cCount = op["operands"]
            assert (
                pCount == 2 and cCount == 0
            ), "Op.INTDIV must have 2 placeholders, 0 consts"

            placeholders = []

            # Two invalid cases for Op.INTDIV:
            # 1. divisor == 0
            # 2. dividend == -(1<<31) and divisor == -1
            while True:
                dividend_arr = testGen.getRandTensor(shapeList[0], dtypeList[0])
                divisor_arr = testGen.getRandTensor(shapeList[1], dtypeList[1])

                if (divisor_arr == 0).any():
                    continue

                if (dividend_arr == -(2**31)).any() and (divisor_arr == -1).any():
                    continue

                break

            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], dividend_arr)
            )
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], divisor_arr)
            )

            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name
            )

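    # Illustrative note (editorial, not executed): the rejection loop guards
    # against the two undefined INTDIV inputs; e.g. dividend -(2**31) with
    # divisor -1 would produce 2**31, which is not representable in int32, so
    # both tensors are simply regenerated.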
    @staticmethod
    def tvgMul(testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None):
        if error_name is None:
            pCount, cCount = op["operands"]
            assert (
                pCount == 2 and cCount == 0
            ), "Op.MUL must have 2 placeholders, 0 consts"

            tens = []
            if dtypeList[0] == DType.FLOAT:
                tens.extend(testGen.buildPlaceholderTensors(shapeList[:], dtypeList[:]))
            else:
                placeholders = []

                # Make sure the multiply result fits in int32 range
                shift = testArgs[0]
                if dtypeList[0] == DType.INT8:
                    num_bits = 8
                elif dtypeList[0] == DType.INT16:
                    num_bits = 16
                elif dtypeList[0] == DType.INT32:
                    num_bits = 32
                elif error_name == ErrorIf.WrongInputType:
                    num_bits = 8
                else:
                    raise Exception("OpMul: invalid input dtype")

                for idx, shape in enumerate(shapeList[:]):
                    low = -(2 ** (num_bits - 1))
                    high = (2 ** (num_bits - 1)) - 1

                    a_arr = np.int32(
                        testGen.rng.integers(low=low, high=high, size=shapeList[0])
                    )
                    b_arr = np.int32(
                        testGen.rng.integers(low=low, high=high, size=shapeList[1])
                    )

                i = 0
                while True:

                    a_arr_64 = a_arr.astype(np.int64)
                    b_arr_64 = b_arr.astype(np.int64)

                    if shift > 0:
                        rounding = 1 << (shift - 1)
                        result_arr = ((a_arr_64 * b_arr_64) + rounding) >> shift
                    else:
                        result_arr = a_arr_64 * b_arr_64

                    if (result_arr > -(2**31)).all() and (
                        result_arr <= ((2**31) - 1)
                    ).all():
                        break

                    i = i + 1
                    a_arr = a_arr // 2
                    b_arr = b_arr // 2

                placeholders.append(
                    testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
                )
                placeholders.append(
                    testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr)
                )

                tens.extend(placeholders)

            return tens
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name
            )

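    # Illustrative note (editorial, not executed): if shift == 4, the checked
    # product is rounded and scaled as ((a * b) + 8) >> 4; whenever any element
    # would leave the int32 range, both operands are halved and the check
    # repeats.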
    @staticmethod
    def tvgConcat(testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None):
        count = len(shapeList) - testGen.args.num_const_inputs_concat
        if count < 1:
            count = 1
        if testGen.args.num_const_inputs_concat == 0:
            count = len(shapeList)

        # Ensure axis is an int
        testArgs[0] = int(testArgs[0])

        shapeList = TosaTensorGen.tgConcatConstInput(
            testGen, shapeList, testArgs[0], error_name
        )

        tens = []
        tens.extend(
            testGen.buildPlaceholderTensors(shapeList[0:count], dtypeList[0:count])
        )
        tens.extend(testGen.buildConstTensors(shapeList[count:], dtypeList[count:]))

        return tens

    @staticmethod
    def tvgLogicalShift(
        testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None
    ):
        pCount, cCount = op["operands"]
        assert (
            pCount == 2 and cCount == 0
        ), "Op.LOGICAL_LEFT_SHIFT or Op.LOGICAL_RIGHT_SHIFT must have 2 placeholders, 0 consts"
        values_arr = testGen.getRandTensor(shapeList[0], dtypeList[0])
        shift_arr = np.int32(testGen.rng.integers(low=0, high=32, size=shapeList[1]))
        placeholders = []
        placeholders.append(
            testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], values_arr)
        )
        placeholders.append(
            testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], shift_arr)
        )

        return placeholders

    @staticmethod
    def tvgEqual(testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None):
        if error_name is None:
            pCount, cCount = op["operands"]
            assert (
                pCount == 2 and cCount == 0
            ), "Op.EQUAL must have 2 placeholders, 0 consts"
            a_arr = testGen.getRandTensor(shapeList[0], dtypeList[0])
            b_arr = testGen.getRandTensor(shapeList[1], dtypeList[1])
            # With purely random numbers it is very unlikely that any values
            # match, so force in twice as many matching (equal) values as the
            # tensor rank
            for num in range(0, len(shapeList[0]) * 2):
                a_index = []
                b_index = []
                # Choose an index in each axis for the whole shape
                for axis in range(0, len(shapeList[0])):
                    # Index can be up to the largest dimension in both shapes
                    index = np.int32(
                        testGen.rng.integers(
                            0, max(shapeList[0][axis], shapeList[1][axis])
                        )
                    )
                    # Reduce the index down to a shape's dim for broadcasting
                    a_index.append(min(shapeList[0][axis] - 1, index))
                    b_index.append(min(shapeList[1][axis] - 1, index))

                a_arr[tuple(a_index)] = b_arr[tuple(b_index)]

            placeholders = []
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
            )
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr)
            )
            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name
            )

    @staticmethod
    def tvgReduceSum(
        testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None
    ):
        if dtypeList[0] == DType.INT32:
            pCount, cCount = op["operands"]
            assert (
                pCount == 1 and cCount == 0
            ), "Op.REDUCE_SUM must have 1 placeholder, 0 consts"
            # Limit values so that the sum cannot exceed the range of an int32 during
            # summation of any axis
            range_val = int((1 << 31) / max(shapeList[0]))
            values_arr = np.int32(
                testGen.rng.integers(low=-range_val, high=range_val, size=shapeList[0])
            )
            placeholders = []
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], values_arr)
            )
            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name
            )

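    # Illustrative note (editorial, not executed): for a shape of [4, 5],
    # range_val is int(2**31 / 5) == 429496729, so any axis sum has magnitude
    # at most 5 * 429496729 == 2147483645 < 2**31 and cannot overflow int32.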

class TosaArgGen:
    """Argument generators create exhaustive or random lists of attributes for
    operators that take attributes or other parameters.

    The return value is a list of (descriptive_name, [arglist]) tuples where
    the descriptive_name is appended to the test name and the arglist is expanded
    as arguments to the operator build function.
    """

    def __init__(self):
        pass

    @staticmethod
    def agNone(testGen, opName, shapeList, dtype, error_name=None):
        """A trivial argument generator for operators that don't take any
        non-tensor arguments"""
        return [("", [])]

    @staticmethod
    def agAxis(testGen, opName, shapeList, dtype, error_name=None):
        """Build the axis argument for operators that take a single axis"""
        axes = []
        shape = shapeList[0]

        if error_name == ErrorIf.AxisSmallerZero:
            small_axis = testGen.rng.integers(-5, 0)
            axes.append(("axis{}".format(small_axis), [small_axis]))
        elif error_name == ErrorIf.AxisLargerRank:
            large_axis = testGen.rng.integers(len(shape) + 1, len(shape) + 10)
            axes.append(("axis{}".format(large_axis), [large_axis]))
        else:
            for a in range(0, len(shape)):
                axes.append(("axis{}".format(a), [a]))

        return axes

    @staticmethod
    def agConv(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]
        # determine the kernel shape from the operator name (e.g. "conv2d_3x3" => [3, 3])
        k = [int(x) for x in opName.split("_")[-1].split("x")]

        # Check the rank
        rank = 5 if opName.startswith("conv3d") else 4
        if error_name != ErrorIf.WrongRank:
            assert len(ifm_shape) == rank
            assert len(filter_shape) == rank

        # kernel rank omits batch and channels
        k_rank = rank - 2
        assert len(k) == k_rank

        # Generate comprehensive argument lists
        # - except for named errors, which use specific invalid value(s)
        if error_name == ErrorIf.PadSmallerZero:
            p_vals = [testGen.rng.choice(range(-5, 0))]
        else:
            p_vals = [x for x in range(0, testGen.args.max_conv_padding + 1)]
        paddings = {x for x in itertools.product(*([p_vals] * k_rank * 2))}
        if error_name == ErrorIf.StrideSmallerOne:
            # Can't use stride=0, as it is used as a divisor to derive the output shape
            s_vals = [testGen.rng.choice(range(-5, 0))]
        else:
            # Stride must be greater than 1 to force a non-integer error
            startStride = 1 if error_name != ErrorIf.PoolingOutputShapeNonInteger else 2
            s_vals = [x for x in range(startStride, testGen.args.max_conv_stride + 1)]
        strides = {x for x in itertools.product(*([s_vals] * k_rank))}
        if error_name == ErrorIf.DilationSmallerOne:
            d_vals = [testGen.rng.choice(range(-5, 1))]
        else:
            d_vals = [x for x in range(1, testGen.args.max_conv_dilation + 1)]
        dilations = {x for x in itertools.product(*([d_vals] * k_rank))}

        if not error_name and testGen.args.oversize:
            # add some oversize argument values
            if max(ifm_shape) < 64:
                bigPadding = 9
                paddings.update(
                    {x for x in itertools.product(*([[0, bigPadding]] * (k_rank * 2)))}
                )
            bigStride = 8
            strides.update({x for x in itertools.product(*([[1, bigStride]] * k_rank))})
            bigDilation = 7
            dilations.update(
                {x for x in itertools.product(*([[1, bigDilation]] * k_rank))}
            )

        # There are too many parameter combinations, so generate them sparsely,
        # very sparsely for negative tests
        sparsity_factor = 2 if error_name else 120
        sparsity = len(paddings) * len(strides) * len(dilations) // sparsity_factor + 1
        # If there are only a small number of tests, just select them all
        if sparsity < 13:
            sparsity = 1
        # To get a variety of parameter combinations sparsity should not be a
        # multiple of 2, 3 or 5
        while sparsity % 2 == 0 or sparsity % 3 == 0 or sparsity % 5 == 0:
            sparsity += 1

        n = 0
        for s in sorted(list(strides)):
            for p in sorted(list(paddings)):
                for d in sorted(list(dilations)):
                    if (
                        n % sparsity == 0
                        # padding must not exceed the kernel size ?
                        # and p[0] < k[0] and p[1] < k[0]
                        # and p[2] < k[1] and p[3] < k[1]
                        # and (k_rank < 3 or (p[4] < k[2] and p[5] < k[2]))
                        # the padded shape must exceed the kernel size
                        and (ifm_shape[1] + p[0] + p[1]) > k[0]
                        and (ifm_shape[2] + p[2] + p[3]) > k[1]
                        and (k_rank < 3 or ((ifm_shape[3] + p[4] + p[5]) > k[2]))
                        # the padded shape must exceed the dilation
                        and (ifm_shape[1] + p[0] + p[1]) > d[0]
                        and (ifm_shape[2] + p[2] + p[3]) > d[1]
                        and (k_rank < 3 or ((ifm_shape[3] + p[4] + p[5]) > d[2]))
                    ):
                        remainders = []
                        for index in range(k_rank):
                            pad_offset = index * 2
                            remainders.append(
                                (
                                    ifm_shape[index + 1]
                                    - 1
                                    + p[pad_offset]
                                    + p[pad_offset + 1]
                                    - (k[index] - 1) * d[index]
                                )
                                % s[index]
                            )
                        if (
                            # the parameters must produce integer exact output
                            error_name != ErrorIf.ConvOutputShapeNonInteger
                            and max(remainders) == 0
                        ) or (
                            error_name == ErrorIf.ConvOutputShapeNonInteger
                            and max(remainders) > 0
                        ):
                            arg_list.append(
                                (
                                    "st{}_pad{}_dilat{}".format(
                                        "".join([str(x) for x in s]),
                                        "".join([str(x) for x in p]),
                                        "".join([str(x) for x in d]),
                                    ),
                                    [s, p, d],
                                )
                            )
                    n += 1

        return arg_list

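    # Illustrative note (editorial, not executed): a generated conv argument
    # name looks like "st11_pad0000_dilat11", i.e. the stride, padding and
    # dilation tuples joined digit by digit; the matching arglist entry is
    # [s, p, d], e.g. [(1, 1), (0, 0, 0, 0), (1, 1)] for a 2D convolution.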
    @staticmethod
    def agTransposeConv2D(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]

        # Must be rank 4
        if error_name != ErrorIf.WrongRank:
            assert len(ifm_shape) == 4
            assert len(filter_shape) == 4

        # Generate comprehensive argument lists
        # - except for named errors, which use specific invalid value(s)
        if error_name == ErrorIf.PadSmallerZero:
            p_vals = [testGen.rng.choice(range(-5, 0))]
        else:
            p_vals = [x for x in range(0, testGen.args.max_conv_padding + 1)]
        paddings = {x for x in itertools.product(*([p_vals] * 4))}
        if error_name == ErrorIf.StrideSmallerOne:
            # Can't use stride=0, as it is used as a divisor to derive the output shape
            s_vals = [testGen.rng.choice(range(-5, 0))]
        else:
            s_vals = [x for x in range(1, testGen.args.max_conv_stride + 1)]
        strides = {x for x in itertools.product(*([s_vals] * 2))}
        # Dilation is not supported by the specification for transpose conv2d
        # TODO: Remove this completely when the schema has been updated
        d_vals = [1]
        dilations = {x for x in itertools.product(*([d_vals] * 2))}

        if not error_name:
            # add some oversize argument values
            if max(ifm_shape) < 64:
                bigPadding = 9
                paddings.update(
                    {x for x in itertools.product(*([[0, bigPadding]] * 4))}
                )
            bigStride = 8
            strides.update({x for x in itertools.product(*([[1, bigStride]] * 2))})

        # There are too many parameter combinations, so generate them sparsely,
        # very sparsely for negative tests
        sparsity_factor = 2 if error_name else 10
        sparsity = len(paddings) * len(strides) * len(dilations) // sparsity_factor + 1
        # If there are only a small number of tests, just select them all
        if sparsity < 13:
            sparsity = 1
        # To get a variety of parameter combinations sparsity should not be a
        # multiple of 2, 3 or 5
        while sparsity % 2 == 0 or sparsity % 3 == 0 or sparsity % 5 == 0:
            sparsity += 1

        n = 0
        for s in sorted(list(strides)):
            for p in sorted(list(paddings)):
                for d in sorted(list(dilations)):
                    if n % sparsity == 0:
                        # Determine the output shape
                        oh = (ifm_shape[1] - 1) * s[0] - p[0] - p[1] + filter_shape[1]
                        ow = (ifm_shape[2] - 1) * s[1] - p[2] - p[3] + filter_shape[2]
                        os = [ifm_shape[0], oh, ow, filter_shape[0]]
                        arg_list.append(
                            (
                                "st{}_pad{}_dilat{}_os{}".format(
                                    "".join([str(x) for x in s]),
                                    "".join([str(x) for x in p]),
                                    "".join([str(x) for x in d]),
                                    "x".join([str(x) for x in os]),
                                ),
                                [s, p, d, os],
                            )
                        )
                    n += 1

        return arg_list

    @staticmethod
    def agPad(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []
        rank = len(shapeList[0])

        # Exhaustively test combinations of padding on each side of each dimension
        # - the range of padding values is defined by pad_min and pad_max
        # - for padding >9, the name format needs to be more distinctive
        pad_min, pad_max = 0, 1
        pad_values = [x for x in range(pad_min, pad_max + 1)]
        if error_name == ErrorIf.PadSmallerZero:
            pad_values = [x for x in range(-2, 0)]
        axis_pad_values = [x for x in itertools.product(pad_values, pad_values)]
        shape_pad_values = itertools.product(*([axis_pad_values] * rank))

        if dtype in [DType.BOOL, DType.INT8, DType.INT16, DType.INT32]:
            pad_const_int = testGen.getRandNumberDType(dtype)
            pad_const_fp = 0
        elif dtype == DType.FLOAT:
            pad_const_int = 0
            pad_const_fp = testGen.getRandNumberDType(dtype)
        else:
            return []

        for paddings in shape_pad_values:
            name = "pad"
            for r in range(rank):
                before, after = paddings[r]
                name = f"{name}{before}{after}"
            arg_list.append((name, [np.array(paddings), pad_const_int, pad_const_fp]))

        return arg_list

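    # Illustrative note (editorial, not executed): for a rank-2 input with
    # paddings ((0, 1), (1, 0)) the generated test name is "pad0110", and the
    # argument list carries np.array(((0, 1), (1, 0))) plus the int and fp pad
    # constants.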
    @staticmethod
    def agPooling(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        shape = shapeList[0]
        if error_name != ErrorIf.WrongRank:
            assert len(shape) == 4

        # Generate comprehensive argument lists
        p_vals = [x for x in range(0, testGen.args.max_pooling_padding + 1)]
        paddings = {x for x in itertools.product(*([p_vals] * 4))}
        # Stride must be greater than 1 to force a non-integer error
        startStride = 1 if error_name != ErrorIf.PoolingOutputShapeNonInteger else 2
        s_vals = [x for x in range(startStride, testGen.args.max_pooling_stride + 1)]
        strides = {x for x in itertools.product(*([s_vals] * 2))}
        k_vals = [x for x in range(2, testGen.args.max_pooling_kernel + 1)]
        kernels = {x for x in itertools.product(*([k_vals] * 2))}

        if testGen.args.oversize:
            # add some oversize argument values
            bigStride = 7
            strides.update(
                {x for x in itertools.product(*([[startStride, bigStride]] * 2))}
            )
            bigKernel = 9
            kernels.update({x for x in itertools.product(*([[2, bigKernel]] * 2))})
            if max(shape) < 64:
                # padding must be less than the kernel size
                bigPadding = bigKernel - 1
                paddings.update(
                    {x for x in itertools.product(*([[0, bigPadding]] * 4))}
                )

        # There are too many parameter combinations, so generate them sparsely,
        # very sparsely for negative tests
        sparsity_factor = 2 if error_name else 500
        sparsity = len(paddings) * len(strides) * len(kernels) // sparsity_factor + 1

        n = 0
        for s in sorted(list(strides)):
            for p in sorted(list(paddings)):
                for k in sorted(list(kernels)):
                    if error_name in [
                        ErrorIf.StrideSmallerOne,
                        ErrorIf.KernelSmallerOne,
                        ErrorIf.PadSmallerZero,
                        ErrorIf.PadLargerEqualKernel,
                    ]:
                        sNew, pNew, kNew = TosaErrorIfArgGen.eiPoolingErrorIf(
                            testGen, error_name, s, p, k
                        )
                        if None not in [sNew, pNew, kNew] and n % sparsity == 0:
                            arg_list.append(
                                (
                                    "st{}_kern{}_pad{}".format(
                                        "".join([str(x) for x in sNew]),
                                        "".join([str(x) for x in kNew]),
                                        "".join([str(x) for x in pNew]),
                                    ),
                                    [sNew, pNew, kNew],
                                )
                            )
                    elif (
                        n % sparsity == 0
                        # padding must not exceed the kernel size
                        and p[0] < k[0]
                        and p[1] < k[0]
                        and p[2] < k[1]
                        and p[3] < k[1]
                        # the padded shape must exceed the kernel size
                        and (shape[1] + p[0] + p[1]) > k[0]
                        and (shape[2] + p[2] + p[3]) > k[1]
                    ):
                        remainder_h = (shape[1] + p[0] + p[1] - k[0]) % s[0]
                        remainder_w = (shape[2] + p[2] + p[3] - k[1]) % s[1]
                        if (
                            # the parameters must produce integer exact output
                            error_name != ErrorIf.PoolingOutputShapeNonInteger
                            and remainder_h == 0
                            and remainder_w == 0
                        ) or (
                            error_name == ErrorIf.PoolingOutputShapeNonInteger
                            and (remainder_h != 0 or remainder_w != 0)
                        ):
                            arg_list.append(
                                (
                                    "st{}_kern{}_pad{}".format(
                                        "".join([str(x) for x in s]),
                                        "".join([str(x) for x in k]),
                                        "".join([str(x) for x in p]),
                                    ),
                                    [s, p, k],
                                )
                            )
                    n += 1

        return arg_list

    @staticmethod
    def agCast(testGen, opName, shapeList, inDtype, error_name=None):
        arg_list = []

        # Enumerate the output types here
        if error_name == ErrorIf.WrongOutputType:
            dtypeList = TosaErrorIfArgGen.eiCastErrorIf(testGen, inDtype)
        elif inDtype == DType.INT8:
            dtypeList = [DType.BOOL, DType.INT16, DType.INT32, DType.FLOAT]
        elif inDtype == DType.INT16:
            dtypeList = [DType.BOOL, DType.INT8, DType.INT32, DType.FLOAT]
        elif inDtype == DType.INT32:
            dtypeList = [DType.BOOL, DType.INT8, DType.INT16, DType.FLOAT]
        elif inDtype == DType.BOOL:
            dtypeList = [DType.INT8, DType.INT16, DType.INT32]
        elif inDtype == DType.FLOAT:
            dtypeList = [DType.INT8, DType.INT16, DType.INT32]
        elif error_name == ErrorIf.WrongInputType:
            # Pick some potentially correct output type for incorrect input type
            dtypeList = [DType.BOOL, DType.INT8, DType.INT16, DType.FLOAT]
        else:
            raise Exception("Unexpected input dtype: {}".format(inDtype))

        for dtype in dtypeList:
            arg_list.append(("out{}".format(DTypeNames[dtype]), [dtype]))

        return arg_list

    @staticmethod
    def agRescale(testGen, opName, shapeList, inDtype, error_name=None):
        arg_list = []

        # Enumerate the output types here
        for dtype in [DType.UINT8, DType.INT8, DType.INT16, DType.INT32]:
            if (
                dtype in [DType.UINT8, DType.INT8]
                and error_name == ErrorIf.OutputZeroPointNotZero
            ):
                continue
            if (
                inDtype == DType.UINT8
                and dtype != DType.INT8
                and error_name != ErrorIf.WrongOutputType
            ):
                # The only output dtype for UINT8 is INT8, skip all other combinations
                continue
            if (
                inDtype != DType.INT8
                and dtype == DType.UINT8
                and error_name != ErrorIf.WrongOutputType
            ):
                # The only input dtype for UINT8 is INT8, skip all other combinations
                continue
            if (
                error_name == ErrorIf.WrongOutputType
                and not TosaErrorIfArgGen.eiRescaleWrongOutputType(inDtype, dtype)
            ):
                continue

            for scale32 in [False, True]:
                if error_name == ErrorIf.ScaleTrue and not scale32:
                    continue
                elif error_name == ErrorIf.ScaleNotTrue and scale32:
                    continue
                for double_round in [False, True]:
                    if error_name == ErrorIf.ScaleNotTrue and not double_round:
                        continue
                    for per_channel in [False, True]:

                        if (
                            inDtype == DType.INT48
                            and scale32
                            and error_name != ErrorIf.ScaleTrue
                        ):
                            # Illegal condition.  Must be scale32=False
                            continue
                        if (
                            double_round
                            and not scale32
                            and error_name != ErrorIf.ScaleNotTrue
                        ):
                            # Illegal condition.  ERROR_IF(!scale32 && double_round)
                            continue

                        arg_list.append(
                            (
                                "out{}_sc{}_dr{}_pc{}".format(
                                    DTypeNames[dtype],
                                    int(scale32),
                                    int(double_round),
                                    int(per_channel),
                                ),
                                [dtype, scale32, double_round, per_channel],
                            )
                        )

        return arg_list

    @staticmethod
    def agMul(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        if dtype is DType.INT32:
            for p in range(testGen.args.num_rand_permutations):

                shift = testGen.randInt(0, 32)

                arg_list.append(("perm{}_shift{}".format(p, shift), [shift]))
        else:
            arg_list.append(("perm0_shift0", [0]))

        return arg_list

    @staticmethod
    def agArithmeticRightShift(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        arg_list.append(("roundTrue", [True]))
        arg_list.append(("roundFalse", [False]))

        return arg_list

    # Helper function for reshape.  Gets some factors of a larger number.
    @staticmethod
    def getFactors(val, start=1):
        factors = []

        for i in range(start, int(np.sqrt(val)) + 1):
            if (val % i) == 0:
                factors.append(i)

        return factors

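    # Illustrative note (editorial, not executed): getFactors only walks up to
    # sqrt(val), so getFactors(24) returns [1, 2, 3, 4]; agReshape below
    # recovers the larger cofactors implicitly via its running
    # remainingElements value.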
    @staticmethod
    def agReshape(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        origShape = shapeList[0]

        totalElements = 1
        for s in origShape:
            totalElements *= s

        # This code is NOT fast.  Fortunately, the numbers are fairly small.
        factors = TosaArgGen.getFactors(totalElements)

        for p in range(testGen.args.num_rand_permutations):
            newRank = testGen.randInt(1, 7)
            if len(factors) < newRank:
                continue

            found = True
            # escape_counter breaks the while loop if it continues for too long
            escape_counter = 0
            while found:
                newShape = []
                # Generate newShape ensuring it isn't a duplicate
                remainingElements = totalElements
                shuffledFactors = testGen.rng.permutation(factors)
                for i in range(1, newRank):
                    # pick rank-1 factors
                    newShape.append(shuffledFactors[0])
                    remainingElements = remainingElements // shuffledFactors[0]
                    shuffledFactors = testGen.rng.permutation(
                        TosaArgGen.getFactors(remainingElements)
                    )
                newShape.append(remainingElements)

                # Toss in a -1 sometimes
                minusOne = testGen.randInt(0, newRank * 4)
                if minusOne < newRank:
                    newShape[minusOne] = -1

                # Check for duplicates
                found = False
                for name, other_shape in arg_list:
                    if other_shape[0] == newShape:
                        found = True
                        break

                escape_counter += 1
                if escape_counter >= 100:
                    break

            if not found:
                arg_list.append(("perm{}_rank{}".format(p, newRank), [newShape]))

        return arg_list

    @staticmethod
    def agTranspose(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        ifm_shape = shapeList[0]

        if error_name == ErrorIf.IndexOutsideBounds:
            incorrect_large_index = range(len(ifm_shape) + 1, 2 * len(ifm_shape) + 1)
            incorrect_small_index = range(-len(ifm_shape), 0)
            permutations = [p for p in itertools.permutations(incorrect_large_index)]
            permutations.extend(
                [p for p in itertools.permutations(incorrect_small_index)]
            )
        elif error_name == ErrorIf.IndexUsedTwice:
            # Create a list with a duplicated index
            perm_range = list(range(len(ifm_shape)))
            index_choice = testGen.rng.choice(range(len(perm_range)))
            perm_range[(index_choice + 1) % len(perm_range)] = perm_range[index_choice]
            permutations = [p for p in itertools.permutations(perm_range)]

        else:
            # Get all permutations
            permutations = [p for p in itertools.permutations(range(len(ifm_shape)))]

        # Limit to possible permutations from shape dimension or argument setting
        limit = min(len(permutations), testGen.args.num_rand_permutations)

        # Get random permutation generator that uses all permutations
        random_permutations = testGen.rng.permutation(permutations)

        # Create list of required amount of permutations
        arg_list = [
            ("perm{}".format(p), [random_permutations[p].tolist()])
            for p in range(limit)
        ]
        return arg_list

    @staticmethod
    def agSlice(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        ifm_shape = shapeList[0]
        rank = len(ifm_shape)

        for p in range(testGen.args.num_rand_permutations):
            start = []
            size = []

            valid = True

            for i in range(rank):
                if ifm_shape[i] > 1:
                    start.append(testGen.randInt(0, ifm_shape[i]))
                    size.append(testGen.randInt(0, ifm_shape[i] - start[i]))

                    # Invalid slice size?
                    if size[i] == 0:
                        valid = False
                else:
                    start.append(0)
                    size.append(1)

            if valid:
                # If an ERROR_IF test is required then an incorrect start, size
                # will be returned
                start, size = TosaErrorIfArgGen.eiSliceErrorIf(
                    testGen, error_name, ifm_shape, start, size
                )
                arg_list.append(("perm{}".format(p), [start, size]))
        return arg_list

    @staticmethod
    def agTile(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        ifm_shape = shapeList[0]
        rank = len(ifm_shape)

        for p in range(testGen.args.num_rand_permutations):

            # Pick a few random but small multiple values, because otherwise
            # this has a tendency to generate enormous tensors
            multiples = []
            for i in range(rank):
                if ifm_shape[i] > 1000:
                    # Use a multiple of 1 if the ifm_shape dimension is large,
                    # to reduce the tensor size
                    multiples.append(1)
                elif max(ifm_shape) > 1000:
                    multiples.append(2)
                else:
                    multiples.append(testGen.randInt(1, 4))
            arg_list.append(("perm{}".format(p), [multiples]))

        return arg_list

    @staticmethod
    def agResize(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        ifm_shape = shapeList[0]
        for mode in [ResizeMode.NEAREST, ResizeMode.BILINEAR]:

            # Exclude illegal {mode, type} configurations.  Pick legal output types
            if mode == ResizeMode.NEAREST and dtype == DType.INT8:
                outputDTypeList = [DType.INT8]
            elif mode == ResizeMode.NEAREST and dtype == DType.INT16:
                outputDTypeList = [DType.INT16]
            elif mode == ResizeMode.BILINEAR and dtype == DType.INT8:
                outputDTypeList = [DType.INT32]
            elif mode == ResizeMode.BILINEAR and dtype == DType.INT16:
                outputDTypeList = [DType.INT48]
            elif dtype == DType.FLOAT:
                outputDTypeList = [DType.FLOAT]
            elif error_name == ErrorIf.WrongInputType:
                # If an incorrect input type is used then we set a 'correct'
                # output type to avoid other errors
                outputDTypeList = [DType.INT8, DType.INT16, DType.INT32]
            else:
                continue

            for outputDType in outputDTypeList:
                for perm in range(testGen.args.num_rand_permutations):
                    # Randomly generate legal output dimensions and shift,
                    # then compute the stride and offset based on them.
                    # An output_dim of 1 would cause offset to exceed the
                    # allowed range, so a minimum value of 2 is produced below
                    output_dims = [testGen.randInt(1) + 1, testGen.randInt(1) + 1]
                    while (float(ifm_shape[1]) / float(output_dims[0])) >= 16:
                        output_dims[0] += 1
                    while (float(ifm_shape[2]) / float(output_dims[1])) >= 16:
                        output_dims[1] += 1

                    in_center_h = (ifm_shape[1] - 1) / 2.0
                    in_center_w = (ifm_shape[2] - 1) / 2.0
                    out_center_h = (output_dims[0] - 1) / 2.0
                    out_center_w = (output_dims[1] - 1) / 2.0

                    fp_stride_y = float(ifm_shape[1]) / float(output_dims[0])
                    fp_stride_x = float(ifm_shape[2]) / float(output_dims[1])
                    fp_offset_y = in_center_h - fp_stride_y * out_center_h
                    fp_offset_x = in_center_w - fp_stride_x * out_center_w

                    if outputDType == DType.FLOAT:
                        float_op = True
                        arg_str = (
                            "mode{}_shift{}_odim{}x{}_out{}"
                            "_st{:.2f}x{:.2f}_off{:.2f}x{:.2f}"
                        )
                        shift = 0
                        stride = [0, 0]
                        offset = [0, 0]
                        stride_fp = [fp_stride_y, fp_stride_x]
                        offset_fp = [fp_offset_y, fp_offset_x]

                    else:
                        float_op = False
                        arg_str = "mode{}_shift{}_odim{}x{}_out{}_st{}x{}_off{}x{}"
                        shift = testGen.randInt(1, 12)
                        # Now search for a shift value (1 to 11) that will produce
                        # a valid and predictable resize operation
                        count = 0
                        while count < 12:
                            unit = float(1 << shift)
                            stride_y = int(round(fp_stride_y * unit))
                            stride_x = int(round(fp_stride_x * unit))
                            offset_y = int(round(fp_offset_y * unit))
                            offset_x = int(round(fp_offset_x * unit))

                            if (
                                stride_y <= 0
                                or stride_x <= 0
                                or stride_y >= (16 << shift)
                                or stride_x >= (16 << shift)
                                or offset_y >= (16 << shift)
                                or offset_x >= (16 << shift)
                                or offset_y <= (-16 << shift)
                                or offset_x <= (-16 << shift)
                            ):
                                # Change the shift value and check again
                                count += 1
                                shift = (shift % 11) + 1
                                continue

                            def RESIZE_REQUIRE_CALC(
                                length_in, length_out, stride, offset, shift
                            ):
                                # Perform the pseudo loop to look for out of bounds
                                for pos in range(0, length_out):
                                    a = pos * stride + offset
                                    ia = a >> shift
                                    ia0 = max(ia, 0)
                                    ia1 = min(ia + 1, length_in - 1)
                                    if ia0 > ia1:
                                        # Found a problem value
                                        break
                                return ia0, ia1

                            iy0, iy1 = RESIZE_REQUIRE_CALC(
                                ifm_shape[1], output_dims[0], stride_y, offset_y, shift
                            )
                            ix0, ix1 = RESIZE_REQUIRE_CALC(
                                ifm_shape[2], output_dims[1], stride_x, offset_x, shift
                            )
                            if ix0 > ix1 or iy0 > iy1:
                                # Change the shift value and check again
                                count += 1
                                shift = (shift % 11) + 1
                                continue
                            break

                        if count >= 12:
                            # Couldn't find a good set of values for this test, skip it
                            continue

                        stride = [stride_y, stride_x]
                        offset = [offset_y, offset_x]

                        stride_fp = [0.0, 0.0]
                        offset_fp = [0.0, 0.0]

                    # Common for all data types
                    if error_name is not None:
                        (
                            shift,
                            stride,
                            stride_fp,
                            offset,
                            offset_fp,
                            outputDTypeNew,
                        ) = TosaErrorIfArgGen.eiResizeErrorIf(
                            testGen,
                            error_name,
                            mode,
                            dtype,
                            shapeList,
                            outputDType,
                            shift,
                            stride,
                            stride_fp,
                            offset,
                            offset_fp,
                        )
                    else:
                        outputDTypeNew = outputDType

                    arg_list.append(
                        (
                            arg_str.format(
                                "N" if mode == ResizeMode.NEAREST else "B",
                                shift,
                                output_dims[0],
                                output_dims[1],
                                testGen.typeStr(outputDTypeNew),
                                stride_fp[0] if float_op else stride[0],
                                stride_fp[1] if float_op else stride[1],
                                offset_fp[0] if float_op else offset[0],
                                offset_fp[1] if float_op else offset[1],
                            ),
                            [
                                mode,
                                stride,
                                offset,
                                shift,
                                stride_fp,
                                offset_fp,
                                output_dims,
                                dtype,
                                outputDTypeNew,
                            ],
                        )
                    )

        return arg_list

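    # Illustrative note (editorial, not executed): the integer path encodes
    # stride/offset in fixed point with `shift` fractional bits; e.g. with
    # shift == 10 a floating-point stride of 2.0 becomes
    # int(round(2.0 * (1 << 10))) == 2048, and the pseudo loop replays the
    # resize indexing to confirm every output position reads inside the input.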
    @staticmethod
    def agTable(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        if dtype == DType.INT8:
            table = np.int32(
                testGen.rng.integers(low=-128, high=128, size=[256])
            ).tolist()
        else:  # INT16
            table = np.int32(
                testGen.rng.integers(low=-32768, high=32768, size=[513])
            ).tolist()

        arg_list.append(
            (
                "",
                [table],
            )
        )
        return arg_list

    @staticmethod
    def agCondIf(testGen, opName, shapeList, dtype, error_name=None):
        # CondIf generates the condition values here.
        # Convert to tensors in the build function, along with the
        # then and else blocks
        arg_list = []

        for c in [False, True]:
            arg_list.append(("cond{}".format(int(c)), [c]))

        return arg_list

    @staticmethod
    def agWhileLoop(testGen, opName, shapeList, dtype, error_name=None):
        # While loop: 0 iterations, 1, more than 1
        arg_list = []

        for iterations in [0, 1, 4]:
            arg_list.append(("iter{}".format(iterations), [iterations]))

        return arg_list