# Copyright (c) 2021-2022, ARM Limited.
# SPDX-License-Identifier: Apache-2.0
import itertools
import math

import numpy as np
import serializer.tosa_serializer as ts
from generator.tosa_error_if import ErrorIf
from generator.tosa_error_if import TosaErrorIfArgGen
from serializer.tosa_serializer import DTypeNames
from tosa.DType import DType
from tosa.Op import Op
from tosa.ResizeMode import ResizeMode

# DTypeNames, DType, Op and ResizeMode are convenience aliases for the
# flatc-generated types, which should be enums but aren't


class TosaQuantGen:
    """QuantizedInfo random generator helper functions.

    Specify with 'qgen': in the operator definition.
    """

    def __init__(self):
        pass

    @staticmethod
    def getQinfo(testGen, dtype, error_name=None):

        if dtype == DType.INT8:
            return testGen.randInt(-128, 128)
        elif dtype == DType.UINT8:
            return testGen.randInt(0, 256)
        elif error_name in [
            ErrorIf.InputZeroPointNotZero,
            ErrorIf.WeightZeroPointNotZero,
            ErrorIf.OutputZeroPointNotZero,
        ]:
            zero_point = testGen.randInt(-128, 128)
            if zero_point == 0:
                zero_point = 1
            return zero_point
        return 0

    @staticmethod
    def qgUnary(testGen, op, dtype, error_name=None):
        qinfo = ts.TosaSerializerQuantInfo()
        if error_name == ErrorIf.InputZeroPointNotZero:
            qinfo.UnaryQuantInfo(
                TosaQuantGen.getQinfo(testGen, dtype, error_name),
                TosaQuantGen.getQinfo(testGen, dtype),
            )
        elif error_name == ErrorIf.OutputZeroPointNotZero:
            qinfo.UnaryQuantInfo(
                TosaQuantGen.getQinfo(testGen, dtype),
                TosaQuantGen.getQinfo(testGen, dtype, error_name),
            )
        else:
            qinfo.UnaryQuantInfo(
                TosaQuantGen.getQinfo(testGen, dtype),
                TosaQuantGen.getQinfo(testGen, dtype),
            )
        return qinfo

    @staticmethod
    def qgConv(testGen, op, dtype_or_dtypeList, error_name=None):
        qinfo = ts.TosaSerializerQuantInfo()
        if isinstance(dtype_or_dtypeList, list):
            # a list of [input, weights, accumulator] dtypes
            dtypeList = dtype_or_dtypeList
        else:
            # an int, [input, weights, accumulator] dtypes are the same
            dtypeList = [dtype_or_dtypeList] * 3

        if error_name == ErrorIf.InputZeroPointNotZero:
            input_zp = TosaQuantGen.getQinfo(testGen, dtypeList[0], error_name)
            weights_zp = TosaQuantGen.getQinfo(testGen, dtypeList[1])
        elif error_name == ErrorIf.WeightZeroPointNotZero:
            input_zp = TosaQuantGen.getQinfo(testGen, dtypeList[0])
            weights_zp = TosaQuantGen.getQinfo(testGen, dtypeList[1], error_name)
        else:
            input_zp = TosaQuantGen.getQinfo(testGen, dtypeList[0])
            weights_zp = TosaQuantGen.getQinfo(testGen, dtypeList[1])

        qinfo.ConvQuantInfo(input_zp, weights_zp)
        return qinfo

    @staticmethod
    def qgMatmul(testGen, op, dtype, error_name=None):
        qinfo = ts.TosaSerializerQuantInfo()
        if error_name == ErrorIf.InputZeroPointNotZero:
            qinfo.MatMulQuantInfo(
                TosaQuantGen.getQinfo(testGen, dtype, error_name),
                TosaQuantGen.getQinfo(testGen, dtype, error_name),
            )
        else:
            qinfo.MatMulQuantInfo(
                TosaQuantGen.getQinfo(testGen, dtype),
                TosaQuantGen.getQinfo(testGen, dtype),
            )
        return qinfo

    @staticmethod
    def qgPad(testGen, op, dtype, error_name=None):
        qinfo = ts.TosaSerializerQuantInfo()
        if error_name == ErrorIf.InputZeroPointNotZero:
            qinfo.PadQuantInfo(TosaQuantGen.getQinfo(testGen, dtype, error_name))
        else:
            qinfo.PadQuantInfo(TosaQuantGen.getQinfo(testGen, dtype))
        return qinfo

    @staticmethod
    def computeMultiplierAndShift(scaleFp, scale32):
        # Derived from computeMultiplierAndShiftTosaScale32
        # Provide a floating-point scaling factor and the scale32 parameter
        # to compute the multiplier and shift

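        # Worked example (illustrative values, assuming scale32=True):
        # scaleFp=0.5 gives math.frexp(0.5) == (0.5, 0), so
        # multiplier = round(0.5 * (1 << 31)) = 1 << 30 and
        # shift = -0 + 31 = 31, i.e. (x * multiplier) >> shift ~= x * 0.5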
        if scale32:
            scaleBits = 31
        else:
            scaleBits = 15

        m, shift = math.frexp(scaleFp)

        if scaleFp < 0.0:
            m = -m

        multiplier = round(m * (1 << scaleBits))
        assert multiplier <= (1 << scaleBits)

        if multiplier == (1 << scaleBits):
            multiplier = multiplier // 2
            shift = shift + 1

        shift = (-shift) + scaleBits
        # print('scalefp {} scaleBits {} m {} mult {} shift {}'.format(
        # scaleFp, scaleBits, m, multiplier, shift))

        # Adjust multiplier such that shift is in allowed value range.
        if shift == 0:
            multiplier = multiplier // 4
            shift = shift + 2
        elif shift == 1:
            multiplier = multiplier // 2
            shift = shift + 1
        elif shift == 63:
            multiplier = multiplier * 2
            shift = shift - 1

        assert multiplier <= (1 << scaleBits)
        assert shift >= 2 and shift <= 62

        return multiplier, shift


class TosaTensorGen:
    """Tensor generators create a shape list for the placeholder and const tensor
    data operands for the operator.

    The actual random data is generated separately for each test.
    """
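    # For example (illustrative values): an elementwise op declaring
    # "operands": (1, 1) at rank 3 could get back [[2, 5, 7], [2, 5, 7]]
    # from tgBasic - one shape per placeholder/const operand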

    def __init__(self):
        pass

    @staticmethod
    def tgBasic(testGen, opName, rank, error_name=None):
        pl, const = opName["operands"]
        shape = testGen.makeShape(rank)

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            shape = TosaErrorIfArgGen.eiRestrictDimensions(shape)

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

            if error_name == ErrorIf.RankMismatch:
                if rank == 1 and i != 1:
                    shape = testGen.makeShape(rank + testGen.rng.choice([1, 2, 3]))
                elif i != 1:
                    shape = testGen.makeShape(rank + testGen.rng.choice([-1, 1]))

        return shape_list

    @staticmethod
    def tgNHWC(testGen, opName, rank, error_name=None):
        pl, const = opName["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 4

        shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            shape[0] = (shape[0] % testGen.args.max_batch_size) + 1

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name and error_name != ErrorIf.MaxDimExceeded:
            shape = TosaErrorIfArgGen.eiRestrictDimensions(shape)

        shape_list = []
        for i in range(pl + const):
            shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgScatter(testGen, opName, rank, error_name=None):
        pl, const = opName["operands"]

        assert pl == 2
        assert const == 0
        if error_name != ErrorIf.WrongRank:
            assert rank == 3

        values_in_shape = testGen.makeShape(rank)

        # Ignore max batch size if target shape is set
        if testGen.args.max_batch_size and not testGen.args.target_shapes:
            values_in_shape[0] = (values_in_shape[0] % testGen.args.max_batch_size) + 1

        W = testGen.randInt(
            testGen.args.tensor_shape_range[0], testGen.args.tensor_shape_range[1]
        )
        # Constrict W if one dimension is too large to keep tensor size reasonable
        if max(values_in_shape) > 5000:
            W = testGen.randInt(0, 16)

        input_shape = [values_in_shape[0], W, values_in_shape[2]]
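        # (Assumed layout per the TOSA gather/scatter operands: values_in is
        # [N, K, C] and the second tensor generated here is [N, W, C])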

        shape_list = []
        shape_list.append(values_in_shape.copy())
        shape_list.append(input_shape.copy())

        return shape_list

    @staticmethod
    def tgBroadcastFuzz(testGen, op, rank, error_name=None):
        shape = testGen.makeShape(rank)

        pl, const = op["operands"]

        shape_list = []

        # Choose one of the inputs to broadcast
        # Note: Simplifies OutputShaper code if we don't change first shape for errors
        bcast_idx = testGen.randInt(0 if error_name is None else 1, pl + const)
        for i in range(pl + const):
            shape_bcast = shape.copy()

            # If this is the chosen input, pick a random index to broadcast
            if i == bcast_idx:
                fuzz_idx = testGen.randInt(0, rank)
                if error_name == ErrorIf.DimensionMismatch:
                    shape_bcast[fuzz_idx] += 1
                elif error_name == ErrorIf.RankMismatch:
                    # Add one rank to the shape (or more for rank of 1)
                    extra_ranks = testGen.rng.choice([1, 2, 3]) if rank == 1 else 1
                    shape_bcast = np.concatenate(
                        (shape_bcast, testGen.makeShape(extra_ranks))
                    )
                    if rank != 1:
                        # Either keep the extra rank, or remove it
                        new_len = testGen.rng.choice([-2, len(shape_bcast)])
                        shape_bcast = shape_bcast[:new_len]
                else:
                    shape_bcast[fuzz_idx] = 1

            shape_list.append(shape_bcast)

        return shape_list
    @staticmethod
    def tgConv2D(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 4

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            ifm_shape = TosaErrorIfArgGen.eiRestrictDimensions(
                ifm_shape, max_dim=24, max_items=10000
            )

        # Get the filter height/width from the operator parameters
        filter_hw = op["filter"]

        # Generate a random OFM depth
        ofm_depth = testGen.makeShape(1)[0]

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        # The bias is OC
        bias_shape = np.asarray([ofm_depth])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgConv3D(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 5

        # IFM dimensions are NDHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            ifm_shape = TosaErrorIfArgGen.eiRestrictDimensions(
                ifm_shape, max_dim=24, max_items=10000
            )

        # Get the filter depth/height/width from the operator parameters
        filter_dhw = op["filter"]

        # Generate a random OFM channel
        ofm_channel = testGen.makeShape(1)[0]

        # The filter dimensions are ODHWI
        filter_shape = np.asarray(
            [ofm_channel, filter_dhw[0], filter_dhw[1], filter_dhw[2], ifm_shape[4]]
        )

        # The bias is OC
        bias_shape = np.asarray([ofm_channel])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgTransposeConv2D(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 4

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            ifm_shape = TosaErrorIfArgGen.eiRestrictDimensions(
                ifm_shape, max_dim=24, max_items=10000
            )

        # Get the filter height/width from the operator parameters
        filter_hw = op["filter"]

        # Generate a random OFM depth
        ofm_depth = testGen.makeShape(1)[0]

        # The filter dimensions are OHWI
        filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])

        # The bias is OC
        bias_shape = np.asarray([ofm_depth])

        return [ifm_shape, filter_shape, bias_shape]

    @staticmethod
    def tgDepthwiseConv2D(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 4
        assert pl == 1 and const == 2

        # IFM dimensions are NHWC
        ifm_shape = testGen.makeShape(rank)

        # Constrict the batch size?
        if testGen.args.max_batch_size:
            ifm_shape[0] = (ifm_shape[0] % testGen.args.max_batch_size) + 1

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            ifm_shape = TosaErrorIfArgGen.eiRestrictDimensions(
                ifm_shape, max_dim=24, max_items=10000
            )

        # Get the filter height/width from the operator parameters
        # Filter is KH, KW, C, M
        filter_hw = op["filter"]

        # Generate a random OFM depth, but don't let it get too big because
        # the output depth is M * C
        filter_m = (
            testGen.makeShape(1)[0] % (testGen.args.tensor_shape_range[1] // 4)
        ) + 1

        # The filter dimensions are HWCM
        filter_shape = np.asarray([filter_hw[0], filter_hw[1], ifm_shape[3], filter_m])

        # The bias is M * C
        bias_shape = np.asarray([ifm_shape[3] * filter_m])

        return [ifm_shape, filter_shape, bias_shape]
    @staticmethod
    def tgFullyConnected(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 2

        input_shape = testGen.makeShape(rank)

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            input_shape = TosaErrorIfArgGen.eiRestrictDimensions(input_shape)

        filter_oc = testGen.rng.integers(
            low=testGen.args.tensor_shape_range[0],
            high=testGen.args.tensor_shape_range[1],
            size=1,
        )[0]
        filter_shape = np.asarray([filter_oc, input_shape[1]])

        bias_shape = np.asarray([filter_oc])

        return [input_shape, filter_shape, bias_shape]

    @staticmethod
    def tgMatmul(testGen, op, rank, error_name=None):
        pl, const = op["operands"]

        if error_name != ErrorIf.WrongRank:
            assert rank == 3
        assert pl == 2 and const == 0

        a_shape = testGen.makeShape(rank)

        # Constrict the overall size of the shape when creating ERROR_IF tests
        if error_name:
            a_shape = TosaErrorIfArgGen.eiRestrictDimensions(a_shape)

        # Get a random number for b_oc even if target shape is defined
        b_oc = np.int32(
            testGen.rng.integers(
                low=testGen.args.tensor_shape_range[0],
                high=testGen.args.tensor_shape_range[1],
                size=1,
            )
        )[0]
        # If N or H is large let b_oc be 1 to reduce output tensor size
        if max(a_shape) > 1000:
            b_oc = 1

        b_shape = np.asarray([a_shape[0], a_shape[2], b_oc])
        return [a_shape, b_shape]

    @staticmethod
    def tgConcat(testGen, opName, rank, error_name=None):
        pl, const = opName["operands"]
        shape = testGen.makeShape(rank)

        # Create extra tensors to concat.
        # Take into account value of pl when getting maximum number of concats
        num_tensors = testGen.randInt(0, 4)
        shape_list = []
        for i in range(pl + const + num_tensors):
            if error_name == ErrorIf.ConcatInputRankMismatch and i != 0:
                remove = testGen.rng.choice([True, False])
                wrongShape = shape.copy()

                if remove and len(shape) > 1:
                    wrongShape = wrongShape[1:]
                else:
                    wrongShape = list(wrongShape)
                    wrongShape.append(testGen.rng.integers(1, 10))

                shape_list.append(wrongShape)
            else:
                shape_list.append(shape.copy())

        return shape_list

    @staticmethod
    def tgConcatConstInput(testGen, shapeList, axis, error_name=None):
        if error_name in [
            ErrorIf.AxisSmallerZero,
            ErrorIf.AxisLargerRank,
            ErrorIf.ConcatInputRankMismatch,
        ]:
            return shapeList

        # Split concat shape along axis to allow for multiple const inputs
        # without making too many large tensors
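        # Worked example (illustrative values): four incoming [2, 12, 3]
        # shapes with axis=1 become [[2, 12, 3], [2, 6, 3], [2, 3, 3],
        # [2, 3, 3]] - the first input keeps the full axis length and each
        # subsequent split halves what remains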
        if len(shapeList) == 2 or shapeList[0][axis] < len(shapeList):
            # If axis can't be split we still need to invalidate other dimensions
            if error_name == ErrorIf.ConcatInputDimMismatch:
                for shape in shapeList[1:]:
                    # Negative test shapeLists are created individually for each test,
                    # so no need to copy the shape before altering it.
                    shape[(axis + 1) % len(shape)] += testGen.rng.integers(5, 10)
            return shapeList

        # Create copy of shape we are going to split (so we don't alter shapeList)
        shape = shapeList[0].copy()
        # Add original shape as first input
        new_shapeList = [shape.copy()]
        length_on_axis = shape[axis]
        remaining_length = length_on_axis
        for i in range(len(shapeList) - 2):
            # Calculate split on axis and remaining value
            split_shape_val = int(shape[axis] / 2)
            remaining_length = remaining_length - split_shape_val

            # Append new shape, and set remaining shape
            shape[axis] = split_shape_val
            new_shapeList.append(shape.copy())

            # Invalidate dimensions
            if error_name == ErrorIf.ConcatInputDimMismatch:
                shape[(axis + 1) % len(shape)] += testGen.rng.integers(5, 10)
            else:
                shape[axis] = remaining_length

            if i == len(shapeList) - 3:
                new_shapeList.append(shape.copy())

        return new_shapeList


class TosaTensorValuesGen:
    """Tensor Value generators create the random data for each test."""

    def __init__(self):
        pass

    @staticmethod
    def tvgDefault(testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None):
        pCount, cCount = op["operands"]

        tens = []
        tens.extend(
            testGen.buildPlaceholderTensors(shapeList[0:pCount], dtypeList[0:pCount])
        )
        tens.extend(testGen.buildConstTensors(shapeList[pCount:], dtypeList[pCount:]))

        return tens

    @staticmethod
    def tvgNegate(testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None):
        if dtypeList[0] == DType.INT32 and error_name is None:
            pCount, cCount = op["operands"]
            assert (
                pCount == 1 and cCount == 0
            ), "Op.NEGATE must have 1 placeholder, 0 consts"
            # Must create tensors with values within accumulator (int32) negatable
            # range
            max_val = (1 << 31) - 1
            min_val = -max_val
            arr = np.int32(
                testGen.rng.integers(low=min_val, high=(max_val + 1), size=shapeList[0])
            )
            placeholders = []
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], arr)
            )
            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name
            )

    @staticmethod
    def tvgAddSub(testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None):
        if dtypeList[0] == DType.INT32 and error_name is None:
            # Make sure the operation does not cause value saturation - where
            # the number wraps due to limited number of bits to store the answer
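            # Worked example (illustrative values) for ADD: a=2000000000 and
            # b=500000000 sum to 2500000000, exceeding max_i32 (2147483647) by
            # 352516353; subtracting that from b gives b_unsat=147483647, and
            # a + b_unsat == 2147483647 lands exactly on the limit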
            pCount, cCount = op["operands"]
            assert (
                pCount == 2 and cCount == 0
            ), "Op.ADD / Op.SUB must have 2 placeholders, 0 consts"
            placeholders = []
            add = op["op"] == Op.ADD
            a_arr = testGen.getRandTensor(shapeList[0], dtypeList[0])
            b_arr = testGen.getRandTensor(shapeList[1], dtypeList[1])
            if add:
                res_arr = np.add(a_arr, b_arr, dtype=np.int64)
            else:
                res_arr = np.subtract(a_arr, b_arr, dtype=np.int64)

            # Work out the saturation limits
            max_i32 = (1 << 31) - 1
            min_i32 = -(1 << 31)
            max_arr = np.full(shapeList[1], max_i32)
            min_arr = np.full(shapeList[1], min_i32)

            # Find how much values exceed the maximum/minimums
            sat_max_arr = np.maximum(res_arr - max_arr, 0)
            sat_min_arr = np.minimum(res_arr - min_arr, 0)

            if not add:
                # Swap saturation values and negate values as we need to perform opposite operations
                sat_max_arr, sat_min_arr = -sat_min_arr, -sat_max_arr

            # Create new array of unsaturated values by clipping values as needed
            b_unsat_arr = b_arr
            if (sat_max_arr != 0).any():
                # Clip values that cause saturation
                b_unsat_arr = np.subtract(b_unsat_arr, sat_max_arr, dtype=np.int32)
                # Reduce axes in unsaturated tensor to match original tensor
                for axis, dim in enumerate(b_arr.shape):
                    if dim != b_unsat_arr.shape[axis]:
                        assert (
                            dim == 1
                        ), "Op.ADD / SUB dimension must be 1 or matching to be broadcastable"
                        b_unsat_arr = np.amin(b_unsat_arr, axis=axis, keepdims=True)

            if (sat_min_arr != 0).any():
                # Clip values that cause saturation
                b_unsat_arr = np.subtract(b_unsat_arr, sat_min_arr, dtype=np.int32)
                # Reduce axes in unsaturated tensor to match original tensor
                for axis, dim in enumerate(b_arr.shape):
                    if dim != b_unsat_arr.shape[axis]:
                        assert (
                            dim == 1
                        ), "Op.ADD / SUB dimension must be 1 or matching to be broadcastable"
                        b_unsat_arr = np.amax(b_unsat_arr, axis=axis, keepdims=True)

            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
            )
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], b_unsat_arr)
            )

            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name
            )

    @staticmethod
    def tvgCondIfWhileLoop(
        testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None
    ):
        if dtypeList[0] in (
            DType.INT32,
            DType.INT16,
            DType.INT8,
        ):
            # Limit input tensors with cond_if_binary or while_loop to stop
            # saturation of add/sub ops with int32 and keep all logical shift
            # values between 0 and 31 for int16 or int8
            pCount, cCount = op["operands"]
            pRemain = pCount
            placeholders = []
            for idx, shape in enumerate(shapeList[:]):
                if dtypeList[0] == DType.INT32:
                    arr = testGen.getRandTensor(shapeList[idx], DType.INT16)
                else:
                    arr = np.int32(
                        testGen.rng.integers(low=0, high=32, size=shapeList[idx])
                    )
                if pRemain > 0:
                    placeholders.append(
                        testGen.ser.addPlaceholder(shape, dtypeList[idx], arr)
                    )
                    pRemain -= 1
                else:
                    placeholders.append(
                        testGen.ser.addConst(shape, dtypeList[idx], arr)
                    )

            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name
            )
    @staticmethod
    def tvgArithmeticRightShift(
        testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None
    ):
        pCount, cCount = op["operands"]
        # Force value of operand[1] to be within [0, num_bits]
        assert (
            pCount == 2 and cCount == 0
        ), "Op.ArithmeticRightShift must have 2 placeholders, 0 consts"

        placeholders = []
        for idx, shape in enumerate(shapeList[:]):
            if idx == 1:
                if dtypeList[idx] == DType.INT8:
                    arr = np.int32(testGen.rng.integers(low=0, high=8, size=shape))
                elif dtypeList[idx] == DType.INT16:
                    arr = np.int32(testGen.rng.integers(low=0, high=16, size=shape))
                elif dtypeList[idx] == DType.INT32:
                    arr = np.int32(testGen.rng.integers(low=0, high=32, size=shape))
                elif error_name == ErrorIf.WrongInputType:
                    arr = np.int32(testGen.rng.integers(low=0, high=8, size=shape))
                else:
                    raise Exception("OpArithmeticRightShift: invalid input dtype")
            else:
                arr = testGen.getRandTensor(shape, dtypeList[idx])
            placeholders.append(testGen.ser.addPlaceholder(shape, dtypeList[idx], arr))

        return placeholders

    @staticmethod
    def tvgSelect(testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None):
        # Set datatype of condition tensor to boolean
        dtypeList[0] = DType.BOOL

        return TosaTensorValuesGen.tvgDefault(
            testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name
        )

    @staticmethod
    def tvgIntDiv(testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None):
        if error_name is None:
            pCount, cCount = op["operands"]
            assert (
                pCount == 2 and cCount == 0
            ), "Op.INTDIV must have 2 placeholders, 0 consts"

            placeholders = []

            # Two invalid cases for Op.INTDIV:
            # 1. divisor == 0
            # 2. dividend == -(1<<31) and divisor == -1
            while True:
                dividend_arr = testGen.getRandTensor(shapeList[0], dtypeList[0])
                divisor_arr = testGen.getRandTensor(shapeList[1], dtypeList[1])

                if (divisor_arr == 0).any():
                    continue

                if (dividend_arr == -(2**31)).any() and (divisor_arr == -1).any():
                    continue

                break

            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], dividend_arr)
            )
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], divisor_arr)
            )

            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name
            )

    @staticmethod
    def tvgMul(testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None):
        if error_name is None:
            pCount, cCount = op["operands"]
            assert (
                pCount == 2 and cCount == 0
            ), "Op.MUL must have 2 placeholders, 0 consts"

            tens = []
            if dtypeList[0] == DType.FLOAT:
                tens.extend(testGen.buildPlaceholderTensors(shapeList[:], dtypeList[:]))
            else:
                placeholders = []

                # Make sure the multiply result fits in int32 range
                shift = testArgs[0]
                if dtypeList[0] == DType.INT8:
                    num_bits = 8
                elif dtypeList[0] == DType.INT16:
                    num_bits = 16
                elif dtypeList[0] == DType.INT32:
                    num_bits = 32
                elif error_name == ErrorIf.WrongInputType:
                    num_bits = 8
                else:
                    raise Exception("OpMul: invalid input dtype")

                for idx, shape in enumerate(shapeList[:]):
                    low = -(2 ** (num_bits - 1))
                    high = (2 ** (num_bits - 1)) - 1

                    a_arr = np.int32(
                        testGen.rng.integers(low=low, high=high, size=shapeList[0])
                    )
                    b_arr = np.int32(
                        testGen.rng.integers(low=low, high=high, size=shapeList[1])
                    )

                i = 0
                while True:

                    a_arr_64 = a_arr.astype(np.int64)
                    b_arr_64 = b_arr.astype(np.int64)

                    if shift > 0:
                        rounding = 1 << (shift - 1)
                        result_arr = ((a_arr_64 * b_arr_64) + rounding) >> shift
                    else:
                        result_arr = a_arr_64 * b_arr_64

                    if (result_arr > -(2**31)).all() and (
                        result_arr <= ((2**31) - 1)
                    ).all():
                        break

                    i = i + 1
                    a_arr = a_arr // 2
                    b_arr = b_arr // 2

                placeholders.append(
                    testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
                )
                placeholders.append(
                    testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr)
                )

                tens.extend(placeholders)

            return tens
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name
            )
    @staticmethod
    def tvgConcat(testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None):
        count = len(shapeList) - testGen.args.num_const_inputs_concat
        if count < 1:
            count = 1
        if testGen.args.num_const_inputs_concat == 0:
            count = len(shapeList)

        # Ensure axis is an int
        testArgs[0] = int(testArgs[0])

        shapeList = TosaTensorGen.tgConcatConstInput(
            testGen, shapeList, testArgs[0], error_name
        )

        tens = []
        tens.extend(
            testGen.buildPlaceholderTensors(shapeList[0:count], dtypeList[0:count])
        )
        tens.extend(testGen.buildConstTensors(shapeList[count:], dtypeList[count:]))

        return tens

    @staticmethod
    def tvgLogicalShift(
        testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None
    ):
        pCount, cCount = op["operands"]
        assert (
            pCount == 2 and cCount == 0
        ), "Op.LOGICAL_LEFT_SHIFT or Op.LOGICAL_RIGHT_SHIFT must have 2 placeholders, 0 consts"
        values_arr = testGen.getRandTensor(shapeList[0], dtypeList[0])
        shift_arr = np.int32(testGen.rng.integers(low=0, high=32, size=shapeList[1]))
        placeholders = []
        placeholders.append(
            testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], values_arr)
        )
        placeholders.append(
            testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], shift_arr)
        )

        return placeholders

    @staticmethod
    def tvgEqual(testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None):
        if error_name is None:
            pCount, cCount = op["operands"]
            assert (
                pCount == 2 and cCount == 0
            ), "Op.EQUAL must have 2 placeholders, 0 consts"
            a_arr = testGen.getRandTensor(shapeList[0], dtypeList[0])
            b_arr = testGen.getRandTensor(shapeList[1], dtypeList[1])
            # Using random numbers means that it will be very unlikely that
            # there are any matching (equal) values, therefore force that
            # there are twice the number of matching values as the tensor rank
            for num in range(0, len(shapeList[0]) * 2):
                a_index = []
                b_index = []
                # Choose an index in each axis for the whole shape
                for axis in range(0, len(shapeList[0])):
                    # Index can be up to the largest dimension in both shapes
                    index = np.int32(
                        testGen.rng.integers(
                            0, max(shapeList[0][axis], shapeList[1][axis])
                        )
                    )
                    # Reduce the index down to a shape's dim for broadcasting
                    a_index.append(min(shapeList[0][axis] - 1, index))
                    b_index.append(min(shapeList[1][axis] - 1, index))

                a_arr[tuple(a_index)] = b_arr[tuple(b_index)]

            placeholders = []
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], a_arr)
            )
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[1], dtypeList[1], b_arr)
            )
            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name
            )

    @staticmethod
    def tvgReduceSum(
        testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name=None
    ):
        if dtypeList[0] == DType.INT32:
            pCount, cCount = op["operands"]
            assert (
                pCount == 1 and cCount == 0
            ), "Op.REDUCE_SUM must have 1 placeholder, 0 consts"
            # Limit values so that the sum cannot exceed the range of an int32 during
            # summation of any axis
            range_val = int((1 << 31) / max(shapeList[0]))
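            # e.g. (illustrative): a [4, 50] input gives
            # range_val = int(2**31 / 50) = 42949672, so a 50-element axis
            # sums to at most 50 * 42949672 < 2**31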
            values_arr = np.int32(
                testGen.rng.integers(low=-range_val, high=range_val, size=shapeList[0])
            )
            placeholders = []
            placeholders.append(
                testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], values_arr)
            )
            return placeholders
        else:
            return TosaTensorValuesGen.tvgDefault(
                testGen, op, dtypeList, shapeList, testArgs, qinfo, error_name
            )


class TosaArgGen:
    """Argument generators create exhaustive or random lists of attributes for
    operators that take attributes or other parameters.

    The return value is a list of (descriptive_name, [arglist]) tuples where
    the descriptive_name is appended to the test name and the arglist is expanded
    as arguments to the operator build function.
    """
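    # For example (illustrative): agAxis on a rank-2 shape returns
    # [("axis0", [0]), ("axis1", [1])] - "axis0" is appended to the test
    # name and [0] is expanded as the op build function's axis argument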

    def __init__(self):
        pass

    @staticmethod
    def agNone(testGen, opName, shapeList, dtype, error_name=None):
        """A trivial argument generator for operators that don't take any
        non-tensor arguments"""
        return [("", [])]

    @staticmethod
    def agAxis(testGen, opName, shapeList, dtype, error_name=None):
        """Build the axis argument for operators that take a single axis"""
        axes = []
        shape = shapeList[0]

        if error_name == ErrorIf.AxisSmallerZero:
            small_axis = testGen.rng.integers(-5, 0)
            axes.append(("axis{}".format(small_axis), [small_axis]))
        elif error_name == ErrorIf.AxisLargerRank:
            large_axis = testGen.rng.integers(len(shape) + 1, len(shape) + 10)
            axes.append(("axis{}".format(large_axis), [large_axis]))
        else:
            for a in range(0, len(shape)):
                axes.append(("axis{}".format(a), [a]))

        return axes

    @staticmethod
    def agConv(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]
        # determine the kernel shape from operator name (e.g. "conv2d_3x3" => [3,3])
        k = [int(x) for x in opName.split("_")[-1].split("x")]

        # Check the rank
        rank = 5 if opName.startswith("conv3d") else 4
        if error_name != ErrorIf.WrongRank:
            assert len(ifm_shape) == rank
            assert len(filter_shape) == rank

        # kernel rank omits batch and channels
        k_rank = rank - 2
        assert len(k) == k_rank

        # Generate comprehensive argument lists
        # - except for named errors, which use specific invalid value(s)
        if error_name == ErrorIf.PadSmallerZero:
            p_vals = [testGen.rng.choice(range(-5, 0))]
        else:
            p_vals = [x for x in range(0, testGen.args.max_conv_padding + 1)]
        paddings = {x for x in itertools.product(*([p_vals] * k_rank * 2))}
        if error_name == ErrorIf.StrideSmallerOne:
            # Can't use stride=0, as it is used to derive output shape, as a divisor
            s_vals = [testGen.rng.choice(range(-5, 0))]
        else:
            # Stride must be greater than 1 to force the non-integer error
            startStride = 1 if error_name != ErrorIf.ConvOutputShapeNonInteger else 2
            s_vals = [x for x in range(startStride, testGen.args.max_conv_stride + 1)]
        strides = {x for x in itertools.product(*([s_vals] * k_rank))}
        if error_name == ErrorIf.DilationSmallerOne:
            d_vals = [testGen.rng.choice(range(-5, 1))]
        else:
            d_vals = [x for x in range(1, testGen.args.max_conv_dilation + 1)]
        dilations = {x for x in itertools.product(*([d_vals] * k_rank))}

        if not error_name and testGen.args.oversize:
            # add some oversize argument values
            if max(ifm_shape) < 64:
                bigPadding = 9
                paddings.update(
                    {x for x in itertools.product(*([[0, bigPadding]] * (k_rank * 2)))}
                )
            bigStride = 8
            strides.update({x for x in itertools.product(*([[1, bigStride]] * k_rank))})
            bigDilation = 7
            dilations.update(
                {x for x in itertools.product(*([[1, bigDilation]] * k_rank))}
            )

        # There are too many parameter combinations, so generate them sparsely,
        # very sparse for negative tests
        sparsity_factor = 2 if error_name else 120
        sparsity = len(paddings) * len(strides) * len(dilations) // sparsity_factor + 1
        # If there are only a small number of tests, just select them all
        if sparsity < 13:
            sparsity = 1
        # To get a variety of parameter combinations sparsity should not be a
        # multiple of 2, 3 or 5
        while sparsity % 2 == 0 or sparsity % 3 == 0 or sparsity % 5 == 0:
            sparsity += 1
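        # e.g. (illustrative): 600 combinations give 600 // 120 + 1 = 6,
        # bumped to 7, so every 7th combination is taken and successive picks
        # cycle through differing stride/padding/dilation values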

        n = 0
        for s in sorted(list(strides)):
            for p in sorted(list(paddings)):
                for d in sorted(list(dilations)):
                    if (
                        n % sparsity == 0
                        # padding must not exceed the kernel size ?
                        # and p[0] < k[0] and p[1] < k[0]
                        # and p[2] < k[1] and p[3] < k[1]
                        # and (k_rank < 3 or (p[4] < k[2] and p[5] < k[2]))
                        # the padded shape must exceed the kernel size
                        and (ifm_shape[1] + p[0] + p[1]) > k[0]
                        and (ifm_shape[2] + p[2] + p[3]) > k[1]
                        and (k_rank < 3 or ((ifm_shape[3] + p[4] + p[5]) > k[2]))
                        # the padded shape must exceed the dilation
                        and (ifm_shape[1] + p[0] + p[1]) > d[0]
                        and (ifm_shape[2] + p[2] + p[3]) > d[1]
                        and (k_rank < 3 or ((ifm_shape[3] + p[4] + p[5]) > d[2]))
                    ):
                        remainders = []
                        for index in range(k_rank):
                            pad_offset = index * 2
                            remainders.append(
                                (
                                    ifm_shape[index + 1]
                                    - 1
                                    + p[pad_offset]
                                    + p[pad_offset + 1]
                                    - (k[index] - 1) * d[index]
                                )
                                % s[index]
                            )
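                        # A zero remainder means the convolution output size,
                        # (in + pads - (kernel - 1) * dilation - 1) / stride + 1,
                        # is an exact integer in that dimension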
                        if (
                            # the parameters must produce integer exact output
                            error_name != ErrorIf.ConvOutputShapeNonInteger
                            and max(remainders) == 0
                        ) or (
                            error_name == ErrorIf.ConvOutputShapeNonInteger
                            and max(remainders) > 0
                        ):
                            arg_list.append(
                                (
                                    "st{}_pad{}_dilat{}".format(
                                        "".join([str(x) for x in s]),
                                        "".join([str(x) for x in p]),
                                        "".join([str(x) for x in d]),
                                    ),
                                    [s, p, d],
                                )
                            )
                    n += 1

        return arg_list

    @staticmethod
    def agTransposeConv2D(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        ifm_shape = shapeList[0]
        filter_shape = shapeList[1]

        # Must be rank 4
        if error_name != ErrorIf.WrongRank:
            assert len(ifm_shape) == 4
            assert len(filter_shape) == 4

        # Generate comprehensive argument lists
        # - except for named errors, which use specific invalid value(s)
        if error_name == ErrorIf.PadSmallerZero:
            p_vals = [testGen.rng.choice(range(-5, 0))]
        else:
            p_vals = [x for x in range(0, testGen.args.max_conv_padding + 1)]
        paddings = {x for x in itertools.product(*([p_vals] * 4))}
        if error_name == ErrorIf.StrideSmallerOne:
            # Can't use stride=0, as it is used to derive output shape, as a divisor
            s_vals = [testGen.rng.choice(range(-5, 0))]
        else:
            s_vals = [x for x in range(1, testGen.args.max_conv_stride + 1)]
        strides = {x for x in itertools.product(*([s_vals] * 2))}

        if not error_name and testGen.args.oversize:
            # add some oversize argument values
            if max(ifm_shape) < 64:
                bigPadding = 9
                paddings.update(
                    {x for x in itertools.product(*([[0, bigPadding]] * 4))}
                )
            bigStride = 8
            strides.update({x for x in itertools.product(*([[1, bigStride]] * 2))})

        # There are too many parameter combinations, so generate them sparsely,
        # very sparse for negative tests
        sparsity_factor = 2 if error_name else 10
        sparsity = len(paddings) * len(strides) // sparsity_factor + 1
        # If there are only a small number of tests, just select them all
        if sparsity < 13:
            sparsity = 1
        # To get a variety of parameter combinations sparsity should not be a
        # multiple of 2, 3 or 5
        while sparsity % 2 == 0 or sparsity % 3 == 0 or sparsity % 5 == 0:
            sparsity += 1

        n = 0
        for s in sorted(list(strides)):
            for p in sorted(list(paddings)):
                if n % sparsity == 0:
                    # Determine the output shape
                    oh = (ifm_shape[1] - 1) * s[0] - p[0] - p[1] + filter_shape[1]
                    ow = (ifm_shape[2] - 1) * s[1] - p[2] - p[3] + filter_shape[2]
                    os = [ifm_shape[0], oh, ow, filter_shape[0]]
                    arg_list.append(
                        (
                            "st{}_pad{}_os{}".format(
                                "".join([str(x) for x in s]),
                                "".join([str(x) for x in p]),
                                "x".join([str(x) for x in os]),
                            ),
                            [s, p, os],
                        )
                    )
                n += 1

        return arg_list

    @staticmethod
    def agPad(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []
        rank = len(shapeList[0])

        # Exhaustively test combinations of padding on each side of each dimension
        # - the range of padding values is defined by pad_min and pad_max
        # - for padding >9, the name format needs to be more distinctive
        pad_min, pad_max = 0, 1
        pad_values = [x for x in range(pad_min, pad_max + 1)]
        if error_name == ErrorIf.PadSmallerZero:
            pad_values = [x for x in range(-2, 0)]
        axis_pad_values = [x for x in itertools.product(pad_values, pad_values)]
        shape_pad_values = itertools.product(*([axis_pad_values] * rank))
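        # e.g. (illustrative): rank 2 with pad values 0..1 yields 16 tests
        # named "pad0000" through "pad1111", one per before/after combination
        # on each dimension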

        if dtype in [DType.BOOL, DType.INT8, DType.INT16, DType.INT32]:
            pad_const_int = testGen.getRandNumberDType(dtype)
            pad_const_fp = 0
        elif dtype == DType.FLOAT:
            pad_const_int = 0
            pad_const_fp = testGen.getRandNumberDType(dtype)
        else:
            return []

        for paddings in shape_pad_values:
            name = "pad"
            for r in range(rank):
                before, after = paddings[r]
                name = f"{name}{before}{after}"
            arg_list.append((name, [np.array(paddings), pad_const_int, pad_const_fp]))

        return arg_list

    @staticmethod
    def agPooling(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        shape = shapeList[0]
        if error_name != ErrorIf.WrongRank:
            assert len(shape) == 4

        # Generate comprehensive argument lists
        p_vals = [x for x in range(0, testGen.args.max_pooling_padding + 1)]
        paddings = {x for x in itertools.product(*([p_vals] * 4))}
        # Stride must be greater than 1 to force the non-integer error
        startStride = 1 if error_name != ErrorIf.PoolingOutputShapeNonInteger else 2
        s_vals = [x for x in range(startStride, testGen.args.max_pooling_stride + 1)]
        strides = {x for x in itertools.product(*([s_vals] * 2))}
        k_vals = [x for x in range(2, testGen.args.max_pooling_kernel + 1)]
        kernels = {x for x in itertools.product(*([k_vals] * 2))}

        if testGen.args.oversize:
            # add some oversize argument values
            bigStride = 7
            strides.update(
                {x for x in itertools.product(*([[startStride, bigStride]] * 2))}
            )
            bigKernel = 9
            kernels.update({x for x in itertools.product(*([[2, bigKernel]] * 2))})
            if max(shape) < 64:
                # padding must be less than the kernel size
                bigPadding = bigKernel - 1
                paddings.update(
                    {x for x in itertools.product(*([[0, bigPadding]] * 4))}
                )

        # There are too many parameter combinations, so generate them sparsely,
        # very sparse for negative tests
        sparsity_factor = 2 if error_name else 500
        sparsity = len(paddings) * len(strides) * len(kernels) // sparsity_factor + 1

        n = 0
        for s in sorted(list(strides)):
            for p in sorted(list(paddings)):
                for k in sorted(list(kernels)):
                    if error_name in [
                        ErrorIf.StrideSmallerOne,
                        ErrorIf.KernelSmallerOne,
                        ErrorIf.PadSmallerZero,
                        ErrorIf.PadLargerEqualKernel,
                    ]:
                        sNew, pNew, kNew = TosaErrorIfArgGen.eiPoolingErrorIf(
                            testGen, error_name, s, p, k
                        )
                        if None not in [sNew, pNew, kNew] and n % sparsity == 0:
                            arg_list.append(
                                (
                                    "st{}_kern{}_pad{}".format(
                                        "".join([str(x) for x in sNew]),
                                        "".join([str(x) for x in kNew]),
                                        "".join([str(x) for x in pNew]),
                                    ),
                                    [sNew, pNew, kNew],
                                )
                            )
                    elif (
                        n % sparsity == 0
                        # padding must not exceed the kernel size
                        and p[0] < k[0]
                        and p[1] < k[0]
                        and p[2] < k[1]
                        and p[3] < k[1]
                        # the padded shape must exceed the kernel size
                        and (shape[1] + p[0] + p[1]) > k[0]
                        and (shape[2] + p[2] + p[3]) > k[1]
                    ):
                        remainder_h = (shape[1] + p[0] + p[1] - k[0]) % s[0]
                        remainder_w = (shape[2] + p[2] + p[3] - k[1]) % s[1]
                        if (
                            # the parameters must produce integer exact output
                            error_name != ErrorIf.PoolingOutputShapeNonInteger
                            and remainder_h == 0
                            and remainder_w == 0
                        ) or (
                            error_name == ErrorIf.PoolingOutputShapeNonInteger
                            and (remainder_h != 0 or remainder_w != 0)
                        ):
                            arg_list.append(
                                (
                                    "st{}_kern{}_pad{}".format(
                                        "".join([str(x) for x in s]),
                                        "".join([str(x) for x in k]),
                                        "".join([str(x) for x in p]),
                                    ),
                                    [s, p, k],
                                )
                            )
                    n += 1

        return arg_list

    @staticmethod
    def agCast(testGen, opName, shapeList, inDtype, error_name=None):
        arg_list = []

        # Enumerate the output types here
        if error_name == ErrorIf.WrongOutputType:
            dtypeList = TosaErrorIfArgGen.eiCastErrorIf(testGen, inDtype)
        elif inDtype == DType.INT8:
            dtypeList = [DType.BOOL, DType.INT16, DType.INT32, DType.FLOAT]
        elif inDtype == DType.INT16:
            dtypeList = [DType.BOOL, DType.INT8, DType.INT32, DType.FLOAT]
        elif inDtype == DType.INT32:
            dtypeList = [DType.BOOL, DType.INT8, DType.INT16, DType.FLOAT]
        elif inDtype == DType.BOOL:
            dtypeList = [DType.INT8, DType.INT16, DType.INT32]
        elif inDtype == DType.FLOAT:
            dtypeList = [DType.INT8, DType.INT16, DType.INT32]
        elif error_name == ErrorIf.WrongInputType:
            # Pick some potentially correct output type for incorrect input type
            dtypeList = [DType.BOOL, DType.INT8, DType.INT16, DType.FLOAT]
        else:
            raise Exception("Unexpected input dtype: {}".format(inDtype))

        for dtype in dtypeList:
            arg_list.append(("out{}".format(DTypeNames[dtype]), [dtype]))

        return arg_list

    @staticmethod
    def agRescale(testGen, opName, shapeList, inDtype, error_name=None):
        arg_list = []

        # Enumerate the output types here
        for outDtype in [
            DType.UINT8,
            DType.INT8,
            DType.INT16,
            DType.INT32,
            DType.UINT16,
        ]:
            if (
                outDtype in [DType.UINT8, DType.INT8, DType.UINT16]
                and error_name == ErrorIf.OutputZeroPointNotZero
            ):
                continue
            if (
                outDtype != DType.UINT16
                and error_name == ErrorIf.U16OutputZeroPointNotValid
            ) or (
                inDtype != DType.UINT16
                and error_name == ErrorIf.U16InputZeroPointNotValid
            ):
                # ErrorIfs only valid with UINT16
                continue
            if (
                inDtype == DType.UINT8
                and outDtype not in [DType.INT8, DType.INT16]
                and error_name != ErrorIf.WrongOutputType
            ):
                # The only output dtypes for UINT8 are INT8/INT16, skip all others
                continue
            if (
                inDtype not in [DType.INT8, DType.INT16]
                and outDtype == DType.UINT8
                and error_name != ErrorIf.WrongOutputType
            ):
                # The only input dtypes for UINT8 are INT8/INT16, skip all others
                continue
            if (
                inDtype == DType.UINT16
                and outDtype != DType.INT16
                and error_name != ErrorIf.WrongOutputType
            ):
                # The only output dtype for UINT16 is INT16, skip all others
                continue
            if (
                inDtype != DType.INT16
                and outDtype == DType.UINT16
                and error_name != ErrorIf.WrongOutputType
            ):
                # The only input dtype for UINT16 is INT16, skip all others
                continue
            if (
                error_name == ErrorIf.WrongOutputType
                and not TosaErrorIfArgGen.eiRescaleWrongOutputType(inDtype, outDtype)
            ):
                continue

            for scale32 in [False, True]:
                if error_name == ErrorIf.ScaleTrue and not scale32:
                    continue
                elif error_name == ErrorIf.ScaleNotTrue and scale32:
                    continue
                for double_round in [False, True]:
                    if error_name == ErrorIf.ScaleNotTrue and not double_round:
                        continue
                    for per_channel in [False, True]:

                        if (
                            inDtype == DType.INT48
                            and scale32
                            and error_name != ErrorIf.ScaleTrue
                        ):
                            # Illegal condition. Must be scale32=False
                            continue
                        if (
                            double_round
                            and not scale32
                            and error_name != ErrorIf.ScaleNotTrue
                        ):
                            # Illegal condition. ERROR_IF(!scale32 && double_round)
                            continue

                        arg_list.append(
                            (
                                "out{}_sc{}_dr{}_pc{}".format(
                                    DTypeNames[outDtype],
                                    int(scale32),
                                    int(double_round),
                                    int(per_channel),
                                ),
                                [outDtype, scale32, double_round, per_channel],
                            )
                        )

        return arg_list

    @staticmethod
    def agMul(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        if dtype is DType.INT32:
            for p in range(testGen.args.num_rand_permutations):

                shift = testGen.randInt(0, 32)

                arg_list.append(("perm{}_shift{}".format(p, shift), [shift]))
        else:
            arg_list.append(("perm0_shift0", [0]))

        return arg_list

    @staticmethod
    def agArithmeticRightShift(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        arg_list.append(("roundTrue", [True]))
        arg_list.append(("roundFalse", [False]))

        return arg_list

    # Helper function for reshape. Gets some factors of a larger number.
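    # e.g. (illustrative): getFactors(12) returns [1, 2, 3] - only factors up
    # to sqrt(val) are collected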
    @staticmethod
    def getFactors(val, start=1):
        factors = []

        for i in range(start, int(np.sqrt(val)) + 1):
            if (val % i) == 0:
                factors.append(i)

        return factors

    @staticmethod
    def agReshape(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        origShape = shapeList[0]

        totalElements = 1
        for s in origShape:
            totalElements *= s

        # This code is NOT fast. Fortunately, the numbers are fairly small.
        factors = TosaArgGen.getFactors(totalElements)

        for p in range(testGen.args.num_rand_permutations):
            newRank = testGen.randInt(1, 7)
            if len(factors) < newRank:
                continue

            found = True
            # escape_counter breaks while loop if it continues on for too long
            escape_counter = 0
            while found:
                newShape = []
                # Generate newShape ensuring it isn't a duplicate
                remainingElements = totalElements
                shuffledFactors = testGen.rng.permutation(factors)
                for i in range(1, newRank):
                    # pick rank-1 factors
                    newShape.append(shuffledFactors[0])
                    remainingElements = remainingElements // shuffledFactors[0]
                    shuffledFactors = testGen.rng.permutation(
                        TosaArgGen.getFactors(remainingElements)
                    )
                newShape.append(remainingElements)

                # Check for duplicates
                found = False
                for name, other_shape in arg_list:
                    if other_shape[0] == newShape:
                        found = True
                        break

                escape_counter += 1
                if escape_counter >= 100:
                    break

            if not found:
                arg_list.append(("perm{}_rank{}".format(p, newRank), [newShape]))

        return arg_list

    @staticmethod
    def agTranspose(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        ifm_shape = shapeList[0]

        if error_name == ErrorIf.IndexOutsideBounds:
            incorrect_large_index = range(len(ifm_shape) + 1, 2 * len(ifm_shape) + 1)
            incorrect_small_index = range(-len(ifm_shape), 0)
            permutations = [p for p in itertools.permutations(incorrect_large_index)]
            permutations.extend(
                [p for p in itertools.permutations(incorrect_small_index)]
            )
        elif error_name == ErrorIf.IndexUsedTwice:
            # Create list with a duplicated index
            perm_range = list(range(len(ifm_shape)))
            index_choice = testGen.rng.choice(range(len(perm_range)))
            perm_range[(index_choice + 1) % len(perm_range)] = perm_range[index_choice]
            permutations = [p for p in itertools.permutations(perm_range)]

        else:
            # Get all permutations
            permutations = [p for p in itertools.permutations(range(len(ifm_shape)))]

        # Limit to possible permutations from shape dimension or argument setting
        limit = min(len(permutations), testGen.args.num_rand_permutations)

        # Get random permutation generator that uses all permutations
        random_permutations = testGen.rng.permutation(permutations)

        # Create list of required amount of permutations
        arg_list = [
            ("perm{}".format(p), [random_permutations[p].tolist()])
            for p in range(limit)
        ]
        return arg_list

    @staticmethod
    def agSlice(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        ifm_shape = shapeList[0]
        rank = len(ifm_shape)

        for p in range(testGen.args.num_rand_permutations):
            start = []
            size = []

            valid = True

            for i in range(rank):
                if ifm_shape[i] > 1:
                    start.append(testGen.randInt(0, ifm_shape[i]))
                    size.append(testGen.randInt(0, ifm_shape[i] - start[i]))

                    # Invalid slice size?
                    if size[i] == 0:
                        valid = False
                else:
                    start.append(0)
                    size.append(1)

            if valid:
                # If ERROR_IF test required then incorrect start, size will be returned
                start, size = TosaErrorIfArgGen.eiSliceErrorIf(
                    testGen, error_name, ifm_shape, start, size
                )
                arg_list.append(("perm{}".format(p), [start, size]))
        return arg_list

    @staticmethod
    def agTile(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        ifm_shape = shapeList[0]
        rank = len(ifm_shape)

        for p in range(testGen.args.num_rand_permutations):

            # Pick a few random, but small multiple values
            # because otherwise this has a tendency to generate
            # enormous tensors
            multiples = []
            for i in range(rank):
                if ifm_shape[i] > 1000:
                    # Multiple of 1 if ifm_shape dimension is large to reduce
                    # tensor size
                    multiples.append(1)
                elif max(ifm_shape) > 1000:
                    multiples.append(2)
                else:
                    multiples.append(testGen.randInt(1, 4))
            arg_list.append(("perm{}".format(p), [multiples]))

        return arg_list

    @staticmethod
    def agResize(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        ifm_shape = shapeList[0]
        for mode in [ResizeMode.NEAREST, ResizeMode.BILINEAR]:

            # Exclude illegal {mode, type} configurations. Pick legal output types
            if mode == ResizeMode.NEAREST and dtype == DType.INT8:
                outputDTypeList = [DType.INT8]
            elif mode == ResizeMode.NEAREST and dtype == DType.INT16:
                outputDTypeList = [DType.INT16]
            elif mode == ResizeMode.BILINEAR and dtype == DType.INT8:
                outputDTypeList = [DType.INT32]
            elif mode == ResizeMode.BILINEAR and dtype == DType.INT16:
                outputDTypeList = [DType.INT48]
            elif dtype == DType.FLOAT:
                outputDTypeList = [DType.FLOAT]
            elif error_name == ErrorIf.WrongInputType:
                # If an incorrect input type is used then we set a 'correct'
                # output type to avoid other errors
                outputDTypeList = [DType.INT8, DType.INT16, DType.INT32]
            else:
                continue

            for outputDType in outputDTypeList:
                for perm in range(testGen.args.num_rand_permutations):
                    # Randomly generate legal output dimensions and shift
                    # and then compute the stride and offset based on them.
                    # An output_dim of 1 will cause offset to exceed the allowed
                    # range, so a minimum value of 2 is produced below
                    output_dims = [testGen.randInt(1) + 1, testGen.randInt(1) + 1]
                    while (float(ifm_shape[1]) / float(output_dims[0])) >= 16:
                        output_dims[0] += 1
                    while (float(ifm_shape[2]) / float(output_dims[1])) >= 16:
                        output_dims[1] += 1

                    in_center_h = (ifm_shape[1] - 1) / 2.0
                    in_center_w = (ifm_shape[2] - 1) / 2.0
                    out_center_h = (output_dims[0] - 1) / 2.0
                    out_center_w = (output_dims[1] - 1) / 2.0

                    fp_stride_y = float(ifm_shape[1]) / float(output_dims[0])
                    fp_stride_x = float(ifm_shape[2]) / float(output_dims[1])
                    fp_offset_y = in_center_h - fp_stride_y * out_center_h
                    fp_offset_x = in_center_w - fp_stride_x * out_center_w
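                    # e.g. (illustrative): a 16x16 input resized to 8x8 gives
                    # fp_stride 2.0; with shift=10 the fixed-point stride below
                    # becomes round(2.0 * (1 << 10)) = 2048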

                    if outputDType == DType.FLOAT:
                        float_op = True
                        arg_str = (
                            "mode{}_shift{}_odim{}x{}_out{}"
                            "_st{:.2f}x{:.2f}_off{:.2f}x{:.2f}"
                        )
                        shift = 0
                        stride = [0, 0]
                        offset = [0, 0]
                        stride_fp = [fp_stride_y, fp_stride_x]
                        offset_fp = [fp_offset_y, fp_offset_x]

                    else:
                        float_op = False
                        arg_str = "mode{}_shift{}_odim{}x{}_out{}_st{}x{}_off{}x{}"
                        shift = testGen.randInt(1, 12)
                        # Now search for a shift value (1 to 11) that will produce
                        # a valid and predictable resize operation
                        count = 0
                        while count < 12:
                            unit = float(1 << shift)
                            stride_y = int(round(fp_stride_y * unit))
                            stride_x = int(round(fp_stride_x * unit))
                            offset_y = int(round(fp_offset_y * unit))
                            offset_x = int(round(fp_offset_x * unit))

                            if (
                                stride_y <= 0
                                or stride_x <= 0
                                or stride_y >= (16 << shift)
                                or stride_x >= (16 << shift)
                                or offset_y >= (16 << shift)
                                or offset_x >= (16 << shift)
                                or offset_y <= (-16 << shift)
                                or offset_x <= (-16 << shift)
                            ):
                                # Change the shift value and check again
                                count += 1
                                shift = (shift % 11) + 1
                                continue

                            def RESIZE_REQUIRE_CALC(
                                length_in, length_out, stride, offset, shift
                            ):
                                # Perform the pseudo loop to look for out of bounds
                                for pos in range(0, length_out):
                                    a = pos * stride + offset
                                    ia = a >> shift
                                    ia0 = max(ia, 0)
                                    ia1 = min(ia + 1, length_in - 1)
                                    if ia0 > ia1:
                                        # Found a problem value
                                        break
                                return ia0, ia1

                            iy0, iy1 = RESIZE_REQUIRE_CALC(
                                ifm_shape[1], output_dims[0], stride_y, offset_y, shift
                            )
                            ix0, ix1 = RESIZE_REQUIRE_CALC(
                                ifm_shape[2], output_dims[1], stride_x, offset_x, shift
                            )
                            if ix0 > ix1 or iy0 > iy1:
                                # Change the shift value and check again
                                count += 1
                                shift = (shift % 11) + 1
                                continue
                            break

                        if count >= 12:
                            # Couldn't find a good set of values for this test, skip it
                            continue

                        stride = [stride_y, stride_x]
                        offset = [offset_y, offset_x]

                        stride_fp = [0.0, 0.0]
                        offset_fp = [0.0, 0.0]

                    # Common for all data types
                    if error_name is not None:
                        (
                            shift,
                            stride,
                            stride_fp,
                            offset,
                            offset_fp,
                            outputDTypeNew,
                        ) = TosaErrorIfArgGen.eiResizeErrorIf(
                            testGen,
                            error_name,
                            mode,
                            dtype,
                            shapeList,
                            outputDType,
                            shift,
                            stride,
                            stride_fp,
                            offset,
                            offset_fp,
                        )
                    else:
                        outputDTypeNew = outputDType

                    arg_list.append(
                        (
                            arg_str.format(
                                "N" if mode == ResizeMode.NEAREST else "B",
                                shift,
                                output_dims[0],
                                output_dims[1],
                                testGen.typeStr(outputDTypeNew),
                                stride_fp[0] if float_op else stride[0],
                                stride_fp[1] if float_op else stride[1],
                                offset_fp[0] if float_op else offset[0],
                                offset_fp[1] if float_op else offset[1],
                            ),
                            [
                                mode,
                                stride,
                                offset,
                                shift,
                                stride_fp,
                                offset_fp,
                                output_dims,
                                dtype,
                                outputDTypeNew,
                            ],
                        )
                    )

        return arg_list

    @staticmethod
    def agTable(testGen, opName, shapeList, dtype, error_name=None):
        arg_list = []

        if dtype == DType.INT8:
            table = np.int32(
                testGen.rng.integers(low=-128, high=128, size=[256])
            ).tolist()
        else:  # INT16
            table = np.int32(
                testGen.rng.integers(low=-32768, high=32768, size=[513])
            ).tolist()
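        # (Table sizes follow the TOSA TABLE operator: a 256-entry table for
        # int8 inputs and a 513-entry table for int16 inputs)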

        arg_list.append(
            (
                "",
                [table],
            )
        )
        return arg_list

    @staticmethod
    def agCondIf(testGen, opName, shapeList, dtype, error_name=None):
        # CondIf generates the condition values here.
        # Convert to tensors in the build function, along with the
        # then and else blocks
        arg_list = []

        for c in [False, True]:
            arg_list.append(("cond{}".format(int(c)), [c]))

        return arg_list

    @staticmethod
    def agWhileLoop(testGen, opName, shapeList, dtype, error_name=None):
        # While loop: 0 iterations, 1, more than 1
        arg_list = []

        for iter in [0, 1, 4]:
            arg_list.append(("iter{}".format(iter), [iter]))

        return arg_list