Update build_tests to support different random generators

All generator functions now take an RNG argument to allow the use of
different random number generators, rather than relying on the global
RNG. The default behaviour is unchanged: the global RNG is used.
Add a stable random generation mode:
* shape RNG based on operator, rank and data type
* arguments RNG based on operator, shape and data type
* build operands and data RNG based on operator, shape, data type and
  arguments
Add optional stable RNG test generation to the conformance generator

Signed-off-by: Jeremy Johnson <jeremy.johnson@arm.com>
Change-Id: I5ee4ff85575a81177fd74ed1617e946bfa3a0769
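
In stable mode each stage derives its RNG from the test properties
listed above rather than from one global seed. A minimal sketch of the
idea (the stable_rng helper below is hypothetical, for illustration
only, not the implementation in this patch):

    import hashlib
    import numpy as np

    def stable_rng(*parts):
        # Hypothetical: hash test properties (e.g. operator, rank, dtype)
        # into a reproducible seed so reruns generate identical tests.
        key = "|".join(str(p) for p in parts).encode("utf-8")
        seed = int.from_bytes(hashlib.sha256(key).digest()[:8], "little")
        return np.random.default_rng(seed)

    shape_rng = stable_rng("conv2d", 4, "fp32")   # op, rank, dtype
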
diff --git a/verif/conformance/tosa_main_profile_ops_info.json b/verif/conformance/tosa_main_profile_ops_info.json
index 63a2a9c..fbf5a82 100644
--- a/verif/conformance/tosa_main_profile_ops_info.json
+++ b/verif/conformance/tosa_main_profile_ops_info.json
@@ -723,7 +723,7 @@
         "profile": [
             "tosa-mi"
         ],
-        "support_for": [ "lazy_data_gen", "generator_select" ],
+        "support_for": [ "lazy_data_gen", "generator_select", "stable_random_gen" ],
         "gen_filter": "^conv2d",
         "generation": {
             "standard": {
@@ -754,7 +754,7 @@
                         "--target-shape",
                         "1,65537,1,3",
                         "--target-shape",
-                        "1,2,65531,2",
+                        "1,2,65530,2",
                         "--tensor-dim-range",
                         "1,16",
                         "--max-conv-dilation",
@@ -1881,7 +1881,7 @@
         "profile": [
             "tosa-mi"
         ],
-        "support_for": [ "lazy_data_gen", "generator_select" ],
+        "support_for": [ "lazy_data_gen", "generator_select", "stable_random_gen" ],
         "generation": {
             "standard": {
                 "generator_args": [
diff --git a/verif/conformance/tosa_verif_conformance_generator.py b/verif/conformance/tosa_verif_conformance_generator.py
index 7c82f31..5402c21 100644
--- a/verif/conformance/tosa_verif_conformance_generator.py
+++ b/verif/conformance/tosa_verif_conformance_generator.py
@@ -138,6 +138,8 @@
 
     if "lazy_data_gen" in supports and args.lazy_data_generation:
         build_cmd_base.append("--lazy-data-generation")
+    if "stable_random_gen" in supports and not args.global_random_generation:
+        build_cmd_base.append("--stable-random-generation")
 
     if "generator_select" in supports:
         if selector_info is None:
@@ -545,6 +547,11 @@
         help="Type of tests produced (default is both)",
     )
     parser.add_argument(
+        "--global-random-generation",
+        action="store_true",
+        help="Disable stable random generation of tests that support this mode",
+    )
+    parser.add_argument(
         "--lazy-data-generation",
         action="store_true",
         help="Enable lazy data generation (only for tosa-mi)",
diff --git a/verif/generator/tosa_arg_gen.py b/verif/generator/tosa_arg_gen.py
index a2ef5bf..83487a1 100644
--- a/verif/generator/tosa_arg_gen.py
+++ b/verif/generator/tosa_arg_gen.py
@@ -30,48 +30,48 @@
         pass
 
     @staticmethod
-    def getZeroPoint(testGen, dtype, error_name=None):
+    def getZeroPoint(rng, zeropoint, dtype, error_name=None):
 
         if dtype == DType.INT8:
-            if testGen.args.zeropoint is not None:
-                return min(127, max(-128, testGen.args.zeropoint))
-            return testGen.randInt(-128, 128)
+            if zeropoint is not None:
+                return min(127, max(-128, zeropoint))
+            return rng.randInt(-128, 128)
         elif dtype == DType.UINT8:
-            if testGen.args.zeropoint is not None:
-                return min(255, max(0, testGen.args.zeropoint))
-            return testGen.randInt(0, 256)
+            if zeropoint is not None:
+                return min(255, max(0, zeropoint))
+            return rng.randInt(0, 256)
         elif error_name in [
             ErrorIf.InputZeroPointNotZero,
             ErrorIf.WeightZeroPointNotZero,
             ErrorIf.OutputZeroPointNotZero,
         ]:
-            zero_point = testGen.randInt(-128, 128)
+            zero_point = rng.randInt(-128, 128)
             if zero_point == 0:
                 zero_point = 1
             return zero_point
         return 0
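
Call sites migrate from passing the whole testGen to passing the RNG
and the user-supplied zero point explicitly. A sketch of the assumed
before/after at a caller, where the fixed zero point comes from
testGen.args:

    # Before: reads testGen.args.zeropoint and testGen's RNG internally
    zp = TosaQuantGen.getZeroPoint(testGen, dtype)
    # After: both dependencies are passed in explicitly
    zp = TosaQuantGen.getZeroPoint(rng, testGen.args.zeropoint, dtype)
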
 
     @staticmethod
-    def qgUnary(testGen, op, dtype, error_name=None):
+    def qgUnary(rng, zeropoint, op, dtype, error_name=None):
         if error_name == ErrorIf.InputZeroPointNotZero:
             qinfo = [
-                TosaQuantGen.getZeroPoint(testGen, dtype, error_name),
-                TosaQuantGen.getZeroPoint(testGen, dtype),
+                TosaQuantGen.getZeroPoint(rng, zeropoint, dtype, error_name),
+                TosaQuantGen.getZeroPoint(rng, zeropoint, dtype),
             ]
         elif error_name == ErrorIf.OutputZeroPointNotZero:
             qinfo = [
-                TosaQuantGen.getZeroPoint(testGen, dtype),
-                TosaQuantGen.getZeroPoint(testGen, dtype, error_name),
+                TosaQuantGen.getZeroPoint(rng, zeropoint, dtype),
+                TosaQuantGen.getZeroPoint(rng, zeropoint, dtype, error_name),
             ]
         else:
             qinfo = [
-                TosaQuantGen.getZeroPoint(testGen, dtype),
-                TosaQuantGen.getZeroPoint(testGen, dtype),
+                TosaQuantGen.getZeroPoint(rng, zeropoint, dtype),
+                TosaQuantGen.getZeroPoint(rng, zeropoint, dtype),
             ]
         return qinfo
 
     @staticmethod
-    def qgConv(testGen, op, dtype_or_dtypeList, error_name=None):
+    def qgConv(rng, zeropoint, op, dtype_or_dtypeList, error_name=None):
         if isinstance(dtype_or_dtypeList, list):
             # a list of [input, weights, accumulator] dtypes
             dtypeList = dtype_or_dtypeList
@@ -81,32 +81,32 @@
 
         if error_name == ErrorIf.InputZeroPointNotZero:
             qinfo = [
-                TosaQuantGen.getZeroPoint(testGen, dtypeList[0], error_name),
-                TosaQuantGen.getZeroPoint(testGen, dtypeList[1]),
+                TosaQuantGen.getZeroPoint(rng, zeropoint, dtypeList[0], error_name),
+                TosaQuantGen.getZeroPoint(rng, zeropoint, dtypeList[1]),
             ]
         elif error_name == ErrorIf.WeightZeroPointNotZero:
             qinfo = [
-                TosaQuantGen.getZeroPoint(testGen, dtypeList[0]),
-                TosaQuantGen.getZeroPoint(testGen, dtypeList[1], error_name),
+                TosaQuantGen.getZeroPoint(rng, zeropoint, dtypeList[0]),
+                TosaQuantGen.getZeroPoint(rng, zeropoint, dtypeList[1], error_name),
             ]
         else:
             qinfo = [
-                TosaQuantGen.getZeroPoint(testGen, dtypeList[0]),
-                TosaQuantGen.getZeroPoint(testGen, dtypeList[1]),
+                TosaQuantGen.getZeroPoint(rng, zeropoint, dtypeList[0]),
+                TosaQuantGen.getZeroPoint(rng, zeropoint, dtypeList[1]),
             ]
         return qinfo
 
     @staticmethod
-    def qgMatmul(testGen, op, dtype, error_name=None):
+    def qgMatmul(rng, zeropoint, op, dtype, error_name=None):
         if error_name == ErrorIf.InputZeroPointNotZero:
             qinfo = [
-                TosaQuantGen.getZeroPoint(testGen, dtype, error_name),
-                TosaQuantGen.getZeroPoint(testGen, dtype, error_name),
+                TosaQuantGen.getZeroPoint(rng, zeropoint, dtype, error_name),
+                TosaQuantGen.getZeroPoint(rng, zeropoint, dtype, error_name),
             ]
         else:
             qinfo = [
-                TosaQuantGen.getZeroPoint(testGen, dtype),
-                TosaQuantGen.getZeroPoint(testGen, dtype),
+                TosaQuantGen.getZeroPoint(rng, zeropoint, dtype),
+                TosaQuantGen.getZeroPoint(rng, zeropoint, dtype),
             ]
         return qinfo
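
The rng parameter threaded through the generators below is assumed to
wrap a numpy Generator and expose the calls this patch makes (randInt,
integers, choice, permutation, randTensor, dTypeRange, plus a seed
attribute). A minimal sketch of that assumed interface, simplified to
integer data:

    import numpy as np

    class RNG:
        # Hypothetical stand-in for the framework's RNG wrapper.
        def __init__(self, seed):
            self.seed = seed
            self._rng = np.random.default_rng(seed)

        def randInt(self, low=0, high=256):
            # Random scalar integer in [low, high)
            return int(self._rng.integers(low=low, high=high))

        def integers(self, low, high=None, size=None):
            return self._rng.integers(low=low, high=high, size=size)

        def choice(self, a):
            return self._rng.choice(a)

        def permutation(self, x):
            return self._rng.permutation(x)

        def dTypeRange(self, dtype, high_inclusive=False):
            # Simplified to int8; the real version handles all dtypes
            low, high = -128, 127
            return (low, high) if high_inclusive else (low, high + 1)

        def randTensor(self, shape, dtype, data_range=None):
            # Simplified integer-only version of the real helper
            if data_range is None:
                data_range = self.dTypeRange(dtype)
            return np.int32(
                self._rng.integers(data_range[0], data_range[1], size=shape)
            )
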
 
@@ -166,9 +166,9 @@
         pass
 
     @staticmethod
-    def tgBasic(testGen, opName, rank, error_name=None):
-        pl, const = opName["operands"]
-        shape = testGen.makeShape(rank)
+    def tgBasic(testGen, rng, op, rank, error_name=None):
+        pl, const = op["operands"]
+        shape = testGen.makeShape(rng, rank)
 
         # Constrict the overall size of the shape when creating ERROR_IF tests
         if error_name:
@@ -181,20 +181,20 @@
             # Generates an input rank mismatch for operators with more than one input
             if error_name == ErrorIf.RankMismatch:
                 if rank == 1 and i != 1:
-                    shape = testGen.makeShape(rank + testGen.rng.choice([1, 2, 3]))
+                    shape = testGen.makeShape(rng, rank + rng.choice([1, 2, 3]))
                 elif i != 1:
-                    shape = testGen.makeShape(rank + testGen.rng.choice([-1, 1]))
+                    shape = testGen.makeShape(rng, rank + rng.choice([-1, 1]))
 
         return shape_list
 
     @staticmethod
-    def tgNHWC(testGen, opName, rank, error_name=None):
-        pl, const = opName["operands"]
+    def tgNHWC(testGen, rng, op, rank, error_name=None):
+        pl, const = op["operands"]
 
         if error_name != ErrorIf.WrongRank:
             assert rank == 4
 
-        shape = testGen.makeShape(rank)
+        shape = testGen.makeShape(rng, rank)
         shape = testGen.constrictBatchSize(shape)
 
         # Constrict the overall size of the shape when creating ERROR_IF tests
@@ -208,7 +208,7 @@
         return shape_list
 
     @staticmethod
-    def tgGather(testGen, opName, rank, error_name=None):
+    def tgGather(testGen, rng, opName, rank, error_name=None):
         pl, const = opName["operands"]
 
         assert pl == 2
@@ -216,18 +216,18 @@
         if error_name != ErrorIf.WrongRank:
             assert rank == 3
 
-        values_shape = testGen.makeShape(rank)
+        values_shape = testGen.makeShape(rng, rank)
         values_shape = testGen.constrictBatchSize(values_shape)
 
         N = values_shape[0]
-        W = testGen.makeDimension()
+        W = testGen.makeDimension(rng)
         indices_shape = [N, W]
 
         shape_list = [values_shape, indices_shape]
         return shape_list
 
     @staticmethod
-    def tgScatter(testGen, opName, rank, error_name=None):
+    def tgScatter(testGen, rng, opName, rank, error_name=None):
         pl, const = opName["operands"]
 
         assert pl == 3
@@ -235,7 +235,7 @@
         if error_name != ErrorIf.WrongRank:
             assert rank == 3
 
-        values_in_shape = testGen.makeShape(rank)
+        values_in_shape = testGen.makeShape(rng, rank)
         values_in_shape = testGen.constrictBatchSize(values_in_shape)
 
         N = values_in_shape[0]
@@ -246,7 +246,7 @@
         # once (having a W greater than K means that you have to repeat a K index)
         W_min = min(testGen.args.tensor_shape_range[0], K)
         W_max = min(testGen.args.tensor_shape_range[1], K)
-        W = testGen.randInt(W_min, W_max) if W_min < W_max else W_min
+        W = rng.randInt(W_min, W_max) if W_min < W_max else W_min
 
         input_shape = [N, W, C]
 
@@ -258,14 +258,14 @@
         return shape_list
 
     @staticmethod
-    def _get_broadcast_shapes(testGen, num_shapes, rank, error_name=None):
-        shape = testGen.makeShape(rank)
+    def _get_broadcast_shapes(testGen, rng, num_shapes, rank, error_name=None):
+        shape = testGen.makeShape(rng, rank)
         shape_list = []
 
         # Choose one of the inputs to broadcast
         # Note: Simplifies OutputShaper code if we don't change first shape for errors
-        bcast_idx = testGen.randInt(0 if error_name is None else 1, num_shapes)
-        fuzz_idx = testGen.randInt(0, rank)
+        bcast_idx = rng.randInt(0 if error_name is None else 1, num_shapes)
+        fuzz_idx = rng.randInt(0, rank)
 
         for i in range(num_shapes):
             shape_bcast = shape.copy()
@@ -278,13 +278,13 @@
             if i == bcast_idx:
                 if error_name == ErrorIf.RankMismatch:
                     # Add one rank to the shape (or more for rank of 1)
-                    extra_ranks = testGen.rng.choice([1, 2, 3]) if rank == 1 else 1
+                    extra_ranks = rng.choice([1, 2, 3]) if rank == 1 else 1
                     shape_bcast = np.concatenate(
-                        (shape_bcast, testGen.makeShape(extra_ranks))
+                        (shape_bcast, testGen.makeShape(rng, extra_ranks))
                     )
                     if rank != 1:
                         # Either keep the extra rank, or remove it
-                        new_len = testGen.rng.choice([-2, len(shape_bcast)])
+                        new_len = rng.choice([-2, len(shape_bcast)])
                         shape_bcast = shape_bcast[:new_len]
                 elif error_name == ErrorIf.BroadcastShapesMismatch:
                     shape_bcast[fuzz_idx] += 2
@@ -296,30 +296,32 @@
         return shape_list
 
     @staticmethod
-    def tgBroadcastFuzz(testGen, op, rank, error_name=None):
+    def tgBroadcastFuzz(testGen, rng, op, rank, error_name=None):
         pl, const = op["operands"]
         num_shapes = pl + const
         return TosaTensorGen._get_broadcast_shapes(
-            testGen, num_shapes, rank, error_name
+            testGen, rng, num_shapes, rank, error_name
         )
 
     @staticmethod
-    def tgMul(testGen, op, rank, error_name=None):
+    def tgMul(testGen, rng, op, rank, error_name=None):
         # Get broadcast shapes for the first 2 inputs as the 3rd is shift
-        shape_list = TosaTensorGen._get_broadcast_shapes(testGen, 2, rank, error_name)
+        shape_list = TosaTensorGen._get_broadcast_shapes(
+            testGen, rng, 2, rank, error_name
+        )
         # Add a single dimension tensor for shift
         shape_list.append([1])
         return shape_list
 
     @staticmethod
-    def tgConv2D(testGen, op, rank, error_name=None):
+    def tgConv2D(testGen, rng, op, rank, error_name=None):
         pl, const = op["operands"]
 
         if error_name != ErrorIf.WrongRank:
             assert rank == 4
 
         # IFM dimensions are NHWC
-        ifm_shape = testGen.makeShape(rank)
+        ifm_shape = testGen.makeShape(rng, rank)
         ifm_shape = testGen.constrictBatchSize(ifm_shape)
 
         # Constrict the overall size of the shape when creating ERROR_IF tests
@@ -332,7 +334,7 @@
         filter_hw = op["filter"]
 
         # Generate a random OFM depth
-        ofm_depth = testGen.makeDimension()
+        ofm_depth = testGen.makeDimension(rng)
 
         # The filter dimensions are OHWI
         filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])
@@ -343,14 +345,14 @@
         return [ifm_shape, filter_shape, bias_shape]
 
     @staticmethod
-    def tgConv3D(testGen, op, rank, error_name=None):
+    def tgConv3D(testGen, rng, op, rank, error_name=None):
         pl, const = op["operands"]
 
         if error_name != ErrorIf.WrongRank:
             assert rank == 5
 
         # IFM dimensions are NDHWC
-        ifm_shape = testGen.makeShape(rank)
+        ifm_shape = testGen.makeShape(rng, rank)
         ifm_shape = testGen.constrictBatchSize(ifm_shape)
 
         # Constrict the overall size of the shape when creating ERROR_IF tests
@@ -363,7 +365,7 @@
         filter_dhw = op["filter"]
 
         # Generate a random OFM channel
-        ofm_channel = testGen.makeDimension()
+        ofm_channel = testGen.makeDimension(rng)
 
         # The filter dimensions are ODHWI
         filter_shape = np.asarray(
@@ -376,14 +378,14 @@
         return [ifm_shape, filter_shape, bias_shape]
 
     @staticmethod
-    def tgTransposeConv2D(testGen, op, rank, error_name=None):
+    def tgTransposeConv2D(testGen, rng, op, rank, error_name=None):
         pl, const = op["operands"]
 
         if error_name != ErrorIf.WrongRank:
             assert rank == 4
 
         # IFM dimensions are NHWC
-        ifm_shape = testGen.makeShape(rank)
+        ifm_shape = testGen.makeShape(rng, rank)
         ifm_shape = testGen.constrictBatchSize(ifm_shape)
 
         # Constrict the overall size of the shape when creating ERROR_IF tests
@@ -396,7 +398,7 @@
         filter_hw = op["filter"]
 
         # Generate a random OFM depth
-        ofm_depth = testGen.makeDimension()
+        ofm_depth = testGen.makeDimension(rng)
 
         # The filter dimensions are OHWI
         filter_shape = np.asarray([ofm_depth, filter_hw[0], filter_hw[1], ifm_shape[3]])
@@ -407,7 +409,7 @@
         return [ifm_shape, filter_shape, bias_shape]
 
     @staticmethod
-    def tgDepthwiseConv2D(testGen, op, rank, error_name=None):
+    def tgDepthwiseConv2D(testGen, rng, op, rank, error_name=None):
         pl, const = op["operands"]
 
         if error_name != ErrorIf.WrongRank:
@@ -415,7 +417,7 @@
         assert pl == 1 and const == 2
 
         # IFM dimensions are NHWC
-        ifm_shape = testGen.makeShape(rank)
+        ifm_shape = testGen.makeShape(rng, rank)
         ifm_shape = testGen.constrictBatchSize(ifm_shape)
 
         # Constrict the overall size of the shape when creating ERROR_IF tests
@@ -431,7 +433,7 @@
         # Generate a random OFM depth, but don't let it get too big because
         # the output depth is M * C
         filter_m = (
-            testGen.makeDimension() % (testGen.args.tensor_shape_range[1] // 4)
+            testGen.makeDimension(rng) % (testGen.args.tensor_shape_range[1] // 4)
         ) + 1
 
         # The filter dimensions are HWCM
@@ -443,7 +445,7 @@
         return [ifm_shape, filter_shape, bias_shape]
 
     @staticmethod
-    def tgFFT2d(testGen, op, rank, error_name=None):
+    def tgFFT2d(testGen, rng, op, rank, error_name=None):
         pl, const = op["operands"]
 
         if error_name != ErrorIf.WrongRank:
@@ -451,7 +453,7 @@
         assert pl == 2 and const == 0
 
         # IFM dimensions are NHW
-        ifm_shape = testGen.makeShape(rank)
+        ifm_shape = testGen.makeShape(rng, rank)
 
         # Select nearest lower power of two from input height and width
         ifm_shape[1] = 2 ** int(math.log(ifm_shape[1], 2))
@@ -466,7 +468,7 @@
             inc_h = 2 if ifm_shape[1] == 1 else 1
             inc_w = 2 if ifm_shape[2] == 1 else 1
             inc_choices = [(inc_h, 0), (0, inc_w), (inc_h, inc_w)]
-            selected_inc = testGen.rng.choice(inc_choices)
+            selected_inc = rng.choice(inc_choices)
             ifm_shape[1] += selected_inc[0]
             ifm_shape[2] += selected_inc[1]
 
@@ -474,15 +476,15 @@
 
         ifm_shapes = [ifm_shape.copy(), ifm_shape.copy()]
         if error_name == ErrorIf.FFTInputShapeMismatch:
-            modify_shape = testGen.rng.choice([0, 1])
+            modify_shape = rng.choice([0, 1])
             # Only modify kernel (H, W)
-            modify_dim = testGen.rng.choice([1, 2])
+            modify_dim = rng.choice([1, 2])
             ifm_shapes[modify_shape][modify_dim] *= 2
 
         return [ifm_shapes[0], ifm_shapes[1]]
 
     @staticmethod
-    def tgRFFT2d(testGen, op, rank, error_name=None):
+    def tgRFFT2d(testGen, rng, op, rank, error_name=None):
         pl, const = op["operands"]
 
         if error_name != ErrorIf.WrongRank:
@@ -490,7 +492,7 @@
         assert pl == 1 and const == 0
 
         # IFM dimensions are NHW
-        ifm_shape = testGen.makeShape(rank)
+        ifm_shape = testGen.makeShape(rng, rank)
 
         # Select nearest lower power of two from input height and width
         ifm_shape[1] = 2 ** int(math.log(ifm_shape[1], 2))
@@ -506,7 +508,7 @@
             inc_h = 2 if ifm_shape[1] == 1 else 1
             inc_w = 2 if ifm_shape[2] == 1 else 1
             inc_choices = [(inc_h, 0), (0, inc_w), (inc_h, inc_w)]
-            selected_inc = testGen.rng.choice(inc_choices)
+            selected_inc = rng.choice(inc_choices)
             ifm_shape[1] += selected_inc[0]
             ifm_shape[2] += selected_inc[1]
 
@@ -515,19 +517,19 @@
         return [ifm_shape]
 
     @staticmethod
-    def tgFullyConnected(testGen, op, rank, error_name=None):
+    def tgFullyConnected(testGen, rng, op, rank, error_name=None):
         pl, const = op["operands"]
 
         if error_name != ErrorIf.WrongRank:
             assert rank == 2
 
-        input_shape = testGen.makeShape(rank)
+        input_shape = testGen.makeShape(rng, rank)
 
         # Constrict the overall size of the shape when creating ERROR_IF tests
         if error_name:
             input_shape = TosaErrorIfArgGen.eiRestrictDimensions(input_shape)
 
-        filter_oc = testGen.rng.integers(
+        filter_oc = rng.integers(
             low=testGen.args.tensor_shape_range[0],
             high=testGen.args.tensor_shape_range[1],
             size=1,
@@ -539,14 +541,14 @@
         return [input_shape, filter_shape, bias_shape]
 
     @staticmethod
-    def tgMatmul(testGen, op, rank, error_name=None):
+    def tgMatmul(testGen, rng, op, rank, error_name=None):
         pl, const = op["operands"]
 
         if error_name != ErrorIf.WrongRank:
             assert rank == 3
         assert pl == 2 and const == 0
 
-        a_shape = testGen.makeShape(rank)
+        a_shape = testGen.makeShape(rng, rank)
 
         # Constrict the overall size of the shape when creating ERROR_IF tests
         if error_name:
@@ -554,7 +556,7 @@
 
         # Get a random number for b_oc even if target shape is defined
         b_oc = np.int32(
-            testGen.rng.integers(
+            rng.integers(
                 low=testGen.args.tensor_shape_range[0],
                 high=testGen.args.tensor_shape_range[1],
                 size=1,
@@ -568,24 +570,24 @@
         return [a_shape, b_shape]
 
     @staticmethod
-    def tgConcat(testGen, opName, rank, error_name=None):
-        pl, const = opName["operands"]
-        shape = testGen.makeShape(rank)
+    def tgConcat(testGen, rng, op, rank, error_name=None):
+        pl, const = op["operands"]
+        shape = testGen.makeShape(rng, rank)
 
         # Create extra tensors to concat.
         # Take into account value of pl when getting maximum number of concats
-        num_tensors = testGen.randInt(0, 4)
+        num_tensors = rng.randInt(0, 4)
         shape_list = []
         for i in range(pl + const + num_tensors):
             if error_name == ErrorIf.ConcatInputRankMismatch and i != 0:
-                remove = testGen.rng.choice([True, False])
+                remove = rng.choice([True, False])
                 wrongShape = shape.copy()
 
                 if remove and len(shape) > 1:
                     wrongShape = wrongShape[1:]
                 else:
                     wrongShape = list(wrongShape)
-                    wrongShape.append(testGen.rng.integers(1, 10))
+                    wrongShape.append(rng.integers(1, 10))
 
                 shape_list.append(wrongShape)
             else:
@@ -594,7 +596,7 @@
         return shape_list
 
     @staticmethod
-    def tgConcatConstInput(testGen, shapeList, axis, error_name=None):
+    def tgConcatConstInput(rng, shapeList, axis, error_name=None):
         if error_name in [
             ErrorIf.AxisSmallerZero,
             ErrorIf.AxisLargerRank,
@@ -610,7 +612,7 @@
                 for shape in shapeList[1:]:
                     # Negative test shapeLists are created individually for each test,
                     # so no need to copy the shape before altering it.
-                    shape[(axis + 1) % len(shape)] += testGen.rng.integers(5, 10)
+                    shape[(axis + 1) % len(shape)] += rng.integers(5, 10)
             return shapeList
 
         # Create copy of shape we are going to split (so we don't alter shapeList)
@@ -630,7 +632,7 @@
 
             # invalidate dimensions
             if error_name == ErrorIf.ConcatInputDimMismatch:
-                shape[(axis + 1) % len(shape)] += testGen.rng.integers(5, 10)
+                shape[(axis + 1) % len(shape)] += rng.integers(5, 10)
             else:
                 shape[axis] = remaining_length
 
@@ -672,12 +674,12 @@
     }
 
     @staticmethod
-    def _get_data_range(testGen, dtype, highValueLookup, lowValueLookup=None):
+    def _get_data_range(rng, dtype, highValueLookup, lowValueLookup=None):
         # Return a tuple of (low,high) data range values for the given data
         # type using a combination of per operator table limits, data limits
         # and user supplied ranges for FP numbers
         if dtype in highValueLookup:
-            type_range = testGen.getDTypeRange(dtype, high_inclusive=True)
+            type_range = rng.dTypeRange(dtype, high_inclusive=True)
             high_val = highValueLookup[dtype]
             if lowValueLookup is not None and dtype in lowValueLookup:
                 low_val = lowValueLookup[dtype]
@@ -703,7 +705,7 @@
 
     @staticmethod
     def tvgLazyGenDefault(
-        testGen, opName, dtypeList, shapeList, argsDict, error_name=None
+        testGen, rng, opName, dtypeList, shapeList, argsDict, error_name=None
     ):
         # Variable inputs versus constants
         pCount, cCount = testGen.TOSA_OP_LIST[opName]["operands"]
@@ -742,8 +744,8 @@
                 ):
                     # Change from inclusive to exclusive range
                     data_range = (data_range[0], data_range[1] + 1)
-                # Ignore lazy data gen option and create data array using any range limits
 
+                # Ignore lazy data gen option and create data array using any range limits
                 if "fixed_data" in argsDict and argsDict["fixed_data"][idx] is not None:
                     if dtype == DType.SHAPE:
                         arr = np.int64(argsDict["fixed_data"][idx])
@@ -756,7 +758,7 @@
                     else:
                         assert False, "Unsupported fixed_data type"
                 else:
-                    arr = testGen.getRandTensor(shape, dtype, data_range)
+                    arr = rng.randTensor(shape, dtype, data_range)
                 if roundMode:
                     arr = np.round(arr)
                 if idx < pCount:
@@ -802,8 +804,7 @@
                     info["data"] = [int(i) for i in argsDict["fixed_data"][idx]]
                     tens_meta["fixed_data_info"] = info
                 else:
-                    # TODO - generate seed for this generator based on test
-                    info["rng_seed"] = 42
+                    info["rng_seed"] = rng.seed
 
                     data_range = None
                     if "data_range_list" in argsDict:
@@ -814,9 +815,7 @@
                         data_range = argsDict["data_range"]
 
                     if data_range is None:
-                        data_range = testGen.getDTypeRange(
-                            dtypeList[idx], high_inclusive=True
-                        )
+                        data_range = rng.dTypeRange(dtypeList[idx], high_inclusive=True)
                     info["range"] = [str(v) for v in data_range]
                     tens_meta["pseudo_random_info"] = info
             elif dg_type == gtu.DataGenType.DOT_PRODUCT:
@@ -836,7 +835,7 @@
             elif dg_type == gtu.DataGenType.FULL_RANGE:
                 info = {}
                 info["start_val"] = int(
-                    testGen.randInt(0, gtu.DTYPE_ATTRIBUTES[dtypeList[idx]]["fullset"])
+                    rng.randInt(0, gtu.DTYPE_ATTRIBUTES[dtypeList[idx]]["fullset"])
                 )
                 tens_meta["full_range_info"] = info
             else:
@@ -883,7 +882,9 @@
         return TosaTensorValuesGen.TVGInfo(tens_ser_list, tens_data)
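
Recording the real per-test seed here (instead of the previous
hardcoded 42) is what lets a lazy data generator rebuild the same
tensor later from the metadata alone. Conceptually, assuming the
metadata also carries the tensor shape:

    import numpy as np

    def regenerate(tens_meta):
        # Hypothetical reconstruction from pseudo_random_info
        info = tens_meta["pseudo_random_info"]
        rng = np.random.default_rng(info["rng_seed"])
        low, high = (float(v) for v in info["range"])
        return rng.uniform(low, high, size=tens_meta["shape"])
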
 
     @staticmethod
-    def tvgNegate(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
+    def tvgNegate(
+        testGen, rng, opName, dtypeList, shapeList, argsDict, error_name=None
+    ):
         if dtypeList[0] == DType.INT32 and error_name is None:
             # Integer test
             op = testGen.TOSA_OP_LIST[opName]
@@ -896,7 +897,7 @@
             max_val = (1 << 31) - 1
             min_val = -max_val
             arr = np.int32(
-                testGen.rng.integers(low=min_val, high=(max_val + 1), size=shapeList[0])
+                rng.integers(low=min_val, high=(max_val + 1), size=shapeList[0])
             )
             tens_ser_list = []
             tens_ser_list.append(
@@ -906,7 +907,7 @@
         else:
             # ERROR_IF or floating point test
             return TosaTensorValuesGen.tvgLazyGenDefault(
-                testGen, opName, dtypeList, shapeList, argsDict, error_name
+                testGen, rng, opName, dtypeList, shapeList, argsDict, error_name
             )
 
     # Set the ADD/SUB data range to half the largest value to avoid infinities
@@ -917,7 +918,9 @@
     }
 
     @staticmethod
-    def tvgAddSub(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
+    def tvgAddSub(
+        testGen, rng, opName, dtypeList, shapeList, argsDict, error_name=None
+    ):
         if dtypeList[0] in (DType.INT32, DType.SHAPE) and error_name is None:
             # Make sure the integer operation does not cause value saturation - where
             # the number wraps due to limited number of bits to store the answer
@@ -929,8 +932,8 @@
             tens_ser_list = []
             add = op["op"] in (Op.ADD, Op.ADD_SHAPE)
             data_range = testGen.args.tensor_shape_range
-            a_arr = testGen.getRandTensor(shapeList[0], dtypeList[0], data_range)
-            b_arr = testGen.getRandTensor(shapeList[1], dtypeList[1], data_range)
+            a_arr = rng.randTensor(shapeList[0], dtypeList[0], data_range)
+            b_arr = rng.randTensor(shapeList[1], dtypeList[1], data_range)
             if add:
                 res_arr = np.add(a_arr, b_arr, dtype=np.int64)
             else:
@@ -985,18 +988,18 @@
         else:
             # ERROR_IF or floating point test
             data_range = TosaTensorValuesGen._get_data_range(
-                testGen, dtypeList[0], TosaTensorValuesGen.TVG_FLOAT_HIGH_VALUE_ADDSUB
+                rng, dtypeList[0], TosaTensorValuesGen.TVG_FLOAT_HIGH_VALUE_ADDSUB
             )
             if data_range:
                 argsDict["data_range"] = data_range
 
             return TosaTensorValuesGen.tvgLazyGenDefault(
-                testGen, opName, dtypeList, shapeList, argsDict, error_name
+                testGen, rng, opName, dtypeList, shapeList, argsDict, error_name
             )
 
     @staticmethod
     def tvgCondIfWhileLoop(
-        testGen, opName, dtypeList, shapeList, argsDict, error_name=None
+        testGen, rng, opName, dtypeList, shapeList, argsDict, error_name=None
     ):
         if dtypeList[0] in (
             DType.INT32,
@@ -1012,11 +1015,9 @@
             tens_ser_list = []
             for idx, shape in enumerate(shapeList[:]):
                 if dtypeList[0] == DType.INT32:
-                    arr = testGen.getRandTensor(shapeList[idx], DType.INT16)
+                    arr = rng.randTensor(shapeList[idx], DType.INT16)
                 else:
-                    arr = np.int32(
-                        testGen.rng.integers(low=0, high=32, size=shapeList[idx])
-                    )
+                    arr = np.int32(rng.integers(low=0, high=32, size=shapeList[idx]))
                 if pRemain > 0:
                     tens_ser_list.append(
                         testGen.ser.addPlaceholder(shape, dtypeList[idx], arr)
@@ -1030,12 +1031,12 @@
             return TosaTensorValuesGen.TVGInfo(tens_ser_list, None)
         else:
             return TosaTensorValuesGen.tvgLazyGenDefault(
-                testGen, opName, dtypeList, shapeList, argsDict, error_name
+                testGen, rng, opName, dtypeList, shapeList, argsDict, error_name
             )
 
     @staticmethod
     def tvgArithmeticRightShift(
-        testGen, opName, dtypeList, shapeList, argsDict, error_name=None
+        testGen, rng, opName, dtypeList, shapeList, argsDict, error_name=None
     ):
         op = testGen.TOSA_OP_LIST[opName]
         pCount, cCount = op["operands"]
@@ -1048,34 +1049,38 @@
         for idx, shape in enumerate(shapeList[:]):
             if idx == 1:
                 if dtypeList[idx] == DType.INT8:
-                    arr = np.int32(testGen.rng.integers(low=0, high=8, size=shape))
+                    arr = np.int32(rng.integers(low=0, high=8, size=shape))
                 elif dtypeList[idx] == DType.INT16:
-                    arr = np.int32(testGen.rng.integers(low=0, high=16, size=shape))
+                    arr = np.int32(rng.integers(low=0, high=16, size=shape))
                 elif dtypeList[idx] == DType.INT32:
-                    arr = np.int32(testGen.rng.integers(low=0, high=32, size=shape))
+                    arr = np.int32(rng.integers(low=0, high=32, size=shape))
                 elif error_name == ErrorIf.WrongInputType:
-                    arr = np.int32(testGen.rng.integers(low=0, high=8, size=shape))
+                    arr = np.int32(rng.integers(low=0, high=8, size=shape))
                 else:
                     raise Exception("OpArithmeticRightShift: invalid input dtype")
             else:
-                arr = testGen.getRandTensor(shape, dtypeList[idx])
+                arr = rng.randTensor(shape, dtypeList[idx])
             tens_ser_list.append(testGen.ser.addPlaceholder(shape, dtypeList[idx], arr))
 
         return TosaTensorValuesGen.TVGInfo(tens_ser_list, None)
 
     @staticmethod
-    def tvgReshape(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
+    def tvgReshape(
+        testGen, rng, opName, dtypeList, shapeList, argsDict, error_name=None
+    ):
         dtypeList[1] = DType.SHAPE
         shapeList[1] = [len(argsDict["new_shape"])]
         # Create a new list for the pre-generated data in argsDict["fixed_data"]
         argsDict["fixed_data"] = [None, argsDict["new_shape"]]
 
         return TosaTensorValuesGen.tvgLazyGenDefault(
-            testGen, opName, dtypeList, shapeList, argsDict, error_name
+            testGen, rng, opName, dtypeList, shapeList, argsDict, error_name
         )
 
     @staticmethod
-    def tvgRescale(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
+    def tvgRescale(
+        testGen, rng, opName, dtypeList, shapeList, argsDict, error_name=None
+    ):
         scale32 = argsDict["scale"]
         multiplier_arr = argsDict["multiplier"]
         shift_arr = argsDict["shift"]
@@ -1091,11 +1096,11 @@
         argsDict["fixed_data"] = [None, multiplier_arr, shift_arr]
 
         return TosaTensorValuesGen.tvgLazyGenDefault(
-            testGen, opName, dtypeList, shapeList, argsDict, error_name
+            testGen, rng, opName, dtypeList, shapeList, argsDict, error_name
         )
 
     @staticmethod
-    def tvgPad(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
+    def tvgPad(testGen, rng, opName, dtypeList, shapeList, argsDict, error_name=None):
         # argsDict["pad"] is 2D array, need to flatten it to get list of values
         pad_values = argsDict["pad"].flatten()
         dtypeList[1] = DType.SHAPE
@@ -1104,11 +1109,11 @@
         argsDict["fixed_data"] = [None, pad_values]
 
         return TosaTensorValuesGen.tvgLazyGenDefault(
-            testGen, opName, dtypeList, shapeList, argsDict, error_name
+            testGen, rng, opName, dtypeList, shapeList, argsDict, error_name
         )
 
     @staticmethod
-    def tvgSlice(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
+    def tvgSlice(testGen, rng, opName, dtypeList, shapeList, argsDict, error_name=None):
         dtypeList[1] = DType.SHAPE
         shapeList[1] = [len(argsDict["start"])]
         dtypeList[2] = DType.SHAPE
@@ -1117,30 +1122,34 @@
         argsDict["fixed_data"] = [None, argsDict["start"], argsDict["size"]]
 
         return TosaTensorValuesGen.tvgLazyGenDefault(
-            testGen, opName, dtypeList, shapeList, argsDict, error_name
+            testGen, rng, opName, dtypeList, shapeList, argsDict, error_name
         )
 
     @staticmethod
-    def tvgTile(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
+    def tvgTile(testGen, rng, opName, dtypeList, shapeList, argsDict, error_name=None):
         dtypeList[1] = DType.SHAPE
         shapeList[1] = [len(argsDict["multiples"])]
         argsDict["fixed_data"] = [None, argsDict["multiples"]]
 
         return TosaTensorValuesGen.tvgLazyGenDefault(
-            testGen, opName, dtypeList, shapeList, argsDict, error_name
+            testGen, rng, opName, dtypeList, shapeList, argsDict, error_name
         )
 
     @staticmethod
-    def tvgSelect(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
+    def tvgSelect(
+        testGen, rng, opName, dtypeList, shapeList, argsDict, error_name=None
+    ):
         # Set datatype of condition tensor to boolean
         dtypeList[0] = DType.BOOL
 
         return TosaTensorValuesGen.tvgLazyGenDefault(
-            testGen, opName, dtypeList, shapeList, argsDict, error_name
+            testGen, rng, opName, dtypeList, shapeList, argsDict, error_name
         )
 
     @staticmethod
-    def tvgIntDiv(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
+    def tvgIntDiv(
+        testGen, rng, opName, dtypeList, shapeList, argsDict, error_name=None
+    ):
         if error_name is None:
             op = testGen.TOSA_OP_LIST[opName]
             pCount, cCount = op["operands"]
@@ -1154,8 +1163,8 @@
             # 1. divisor == 0
             # 2. dividend == -(1<<31) and divisor == -1
             while True:
-                dividend_arr = testGen.getRandTensor(shapeList[0], dtypeList[0])
-                divisor_arr = testGen.getRandTensor(shapeList[1], dtypeList[1])
+                dividend_arr = rng.randTensor(shapeList[0], dtypeList[0])
+                divisor_arr = rng.randTensor(shapeList[1], dtypeList[1])
 
                 if (divisor_arr == 0).any():
                     continue
@@ -1175,7 +1184,7 @@
             return TosaTensorValuesGen.TVGInfo(tens_ser_list, None)
         else:
             return TosaTensorValuesGen.tvgLazyGenDefault(
-                testGen, opName, dtypeList, shapeList, argsDict, error_name
+                testGen, rng, opName, dtypeList, shapeList, argsDict, error_name
             )
 
     # Set the MUL data range to the square root of the largest value
@@ -1187,7 +1196,7 @@
     }
 
     @staticmethod
-    def tvgMul(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
+    def tvgMul(testGen, rng, opName, dtypeList, shapeList, argsDict, error_name=None):
         if error_name is not None or dtypeList[0] in (
             DType.FP16,
             DType.BF16,
@@ -1195,7 +1204,7 @@
         ):
             # ERROR_IF or floating point test
             data_range = TosaTensorValuesGen._get_data_range(
-                testGen, dtypeList[0], TosaTensorValuesGen.TVG_FLOAT_HIGH_VALUE_MUL
+                rng, dtypeList[0], TosaTensorValuesGen.TVG_FLOAT_HIGH_VALUE_MUL
             )
             if data_range:
                 argsDict["data_range"] = data_range
@@ -1208,10 +1217,9 @@
                 argsDict["fixed_data"] = [None, None, [argsDict["shift"]]]
 
             return TosaTensorValuesGen.tvgLazyGenDefault(
-                testGen, opName, dtypeList, shapeList, argsDict, error_name
+                testGen, rng, opName, dtypeList, shapeList, argsDict, error_name
             )
         else:
-            # Integer test
             op = testGen.TOSA_OP_LIST[opName]
             pCount, cCount = op["operands"]
 
@@ -1231,7 +1239,9 @@
             elif error_name == ErrorIf.WrongInputType:
                 num_bits = 8
             else:
-                raise Exception("OpMul: invalid input dtype")
+                raise Exception(
+                    f"OpMul: invalid input dtype {gtu.DTYPE_ATTRIBUTES[dtypeList[0]]['str']}"
+                )
 
             for idx, shape in enumerate(shapeList[:]):
                 if dtypeList[idx] == DType.SHAPE:
@@ -1241,12 +1251,8 @@
                     low = -(2 ** (num_bits - 1))
                     high = (2 ** (num_bits - 1)) - 1
 
-                a_arr = np.int32(
-                    testGen.rng.integers(low=low, high=high, size=shapeList[0])
-                )
-                b_arr = np.int32(
-                    testGen.rng.integers(low=low, high=high, size=shapeList[1])
-                )
+                a_arr = np.int32(rng.integers(low=low, high=high, size=shapeList[0]))
+                b_arr = np.int32(rng.integers(low=low, high=high, size=shapeList[1]))
 
             i = 0
             while True:
@@ -1292,7 +1298,9 @@
             return TosaTensorValuesGen.TVGInfo(tens_ser_list, None)
 
     @staticmethod
-    def tvgConcat(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
+    def tvgConcat(
+        testGen, rng, opName, dtypeList, shapeList, argsDict, error_name=None
+    ):
         count = len(shapeList) - testGen.args.num_const_inputs_concat
         if count < 1:
             count = 1
@@ -1302,12 +1310,10 @@
         op = testGen.TOSA_OP_LIST[opName]
         if op["op"] == Op.CONCAT_SHAPE:
             # Set the axis to 0
-            shapeList = TosaTensorGen.tgConcatConstInput(
-                testGen, shapeList, 0, error_name
-            )
+            shapeList = TosaTensorGen.tgConcatConstInput(rng, shapeList, 0, error_name)
         else:
             shapeList = TosaTensorGen.tgConcatConstInput(
-                testGen, shapeList, argsDict["axis"], error_name
+                rng, shapeList, argsDict["axis"], error_name
             )
 
         # Override default pCount/cCount for operator
@@ -1315,20 +1321,20 @@
         argsDict["c_count"] = len(shapeList) - count
 
         return TosaTensorValuesGen.tvgLazyGenDefault(
-            testGen, opName, dtypeList, shapeList, argsDict, error_name
+            testGen, rng, opName, dtypeList, shapeList, argsDict, error_name
         )
 
     @staticmethod
     def tvgLogicalShift(
-        testGen, opName, dtypeList, shapeList, argsDict, error_name=None
+        testGen, rng, opName, dtypeList, shapeList, argsDict, error_name=None
     ):
         op = testGen.TOSA_OP_LIST[opName]
         pCount, cCount = op["operands"]
         assert (
             pCount == 2 and cCount == 0
         ), "Op.LOGICAL_LEFT_SHIFT or Op.LOGICAL_RIGHT_SHIFT must have 2 placeholders, 0 consts"
-        values_arr = testGen.getRandTensor(shapeList[0], dtypeList[0])
-        shift_arr = np.int32(testGen.rng.integers(low=0, high=32, size=shapeList[1]))
+        values_arr = rng.randTensor(shapeList[0], dtypeList[0])
+        shift_arr = np.int32(rng.integers(low=0, high=32, size=shapeList[1]))
         tens_ser_list = []
         tens_ser_list.append(
             testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], values_arr)
@@ -1340,7 +1346,7 @@
         return TosaTensorValuesGen.TVGInfo(tens_ser_list, None)
 
     @staticmethod
-    def tvgEqual(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
+    def tvgEqual(testGen, rng, opName, dtypeList, shapeList, argsDict, error_name=None):
         if error_name is None and not gtu.dtypeIsSupportedByCompliance(dtypeList[0]):
             # Integer
             op = testGen.TOSA_OP_LIST[opName]
@@ -1349,8 +1355,8 @@
                 pCount == 2 and cCount == 0
             ), "Op.EQUAL must have 2 placeholders, 0 consts"
 
-            a_arr = testGen.getRandTensor(shapeList[0], dtypeList[0])
-            b_arr = testGen.getRandTensor(shapeList[1], dtypeList[1])
+            a_arr = rng.randTensor(shapeList[0], dtypeList[0])
+            b_arr = rng.randTensor(shapeList[1], dtypeList[1])
 
             # Using random numbers means that it will be very unlikely that
             # there are any matching (equal) values, therefore force that
@@ -1362,9 +1368,7 @@
                 for axis in range(0, len(shapeList[0])):
                     # Index can be up to the largest dimension in both shapes
                     index = np.int32(
-                        testGen.rng.integers(
-                            0, max(shapeList[0][axis], shapeList[1][axis])
-                        )
+                        rng.integers(0, max(shapeList[0][axis], shapeList[1][axis]))
                     )
                     # Reduce the index down to a shape's dim for broadcasting
                     a_index.append(min(shapeList[0][axis] - 1, index))
@@ -1383,11 +1387,13 @@
         else:
             # ERROR_IF or floating point test
             return TosaTensorValuesGen.tvgLazyGenDefault(
-                testGen, opName, dtypeList, shapeList, argsDict, error_name
+                testGen, rng, opName, dtypeList, shapeList, argsDict, error_name
             )
 
     @staticmethod
-    def tvgReduceSum(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
+    def tvgReduceSum(
+        testGen, rng, opName, dtypeList, shapeList, argsDict, error_name=None
+    ):
         dtype = dtypeList[0]
         if dtype == DType.INT32:
             op = testGen.TOSA_OP_LIST[opName]
@@ -1399,7 +1405,7 @@
             # summation of any axis
             range_val = int((1 << 31) / max(shapeList[0]))
             values_arr = np.int32(
-                testGen.rng.integers(low=-range_val, high=range_val, size=shapeList[0])
+                rng.integers(low=-range_val, high=range_val, size=shapeList[0])
             )
             tens_ser_list = []
             tens_ser_list.append(
@@ -1419,18 +1425,18 @@
                     / max(shapeList[0])
                 }
                 data_range = TosaTensorValuesGen._get_data_range(
-                    testGen, dtype, highval_lookup
+                    rng, dtype, highval_lookup
                 )
                 assert data_range is not None
                 argsDict["data_range"] = data_range
 
             return TosaTensorValuesGen.tvgLazyGenDefault(
-                testGen, opName, dtypeList, shapeList, argsDict, error_name
+                testGen, rng, opName, dtypeList, shapeList, argsDict, error_name
             )
 
     @staticmethod
     def tvgReduceProduct(
-        testGen, opName, dtypeList, shapeList, argsDict, error_name=None
+        testGen, rng, opName, dtypeList, shapeList, argsDict, error_name=None
     ):
         dtype = dtypeList[0]
         if error_name is None:
@@ -1442,20 +1448,20 @@
                     1 / max(shapeList[0]),
                 )
             }
-            data_range = TosaTensorValuesGen._get_data_range(
-                testGen, dtype, highval_lookup
-            )
+            data_range = TosaTensorValuesGen._get_data_range(rng, dtype, highval_lookup)
             assert data_range is not None
             argsDict["data_range"] = data_range
 
         return TosaTensorValuesGen.tvgLazyGenDefault(
-            testGen, opName, dtypeList, shapeList, argsDict, error_name
+            testGen, rng, opName, dtypeList, shapeList, argsDict, error_name
         )
 
     @staticmethod
-    def tvgResize(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
+    def tvgResize(
+        testGen, rng, opName, dtypeList, shapeList, argsDict, error_name=None
+    ):
         data_range = TosaTensorValuesGen._get_data_range(
-            testGen,
+            rng,
             dtypeList[0],
             TosaTensorValuesGen.TVG_FLOAT_HIGH_VALUE,
         )
@@ -1476,7 +1482,7 @@
         argsDict["fixed_data"] = [None, scale_values, offset_values, border_values]
 
         return TosaTensorValuesGen.tvgLazyGenDefault(
-            testGen, opName, dtypeList, shapeList, argsDict, error_name
+            testGen, rng, opName, dtypeList, shapeList, argsDict, error_name
         )
 
     # Set the POW exponent high data range
@@ -1537,10 +1543,10 @@
     }
 
     @staticmethod
-    def tvgPow(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
+    def tvgPow(testGen, rng, opName, dtypeList, shapeList, argsDict, error_name=None):
         if error_name is not None:
             return TosaTensorValuesGen.tvgLazyGenDefault(
-                testGen, opName, dtypeList, shapeList, argsDict, error_name
+                testGen, rng, opName, dtypeList, shapeList, argsDict, error_name
             )
         dtype = dtypeList[0]
         # Different ranges for POW
@@ -1548,25 +1554,25 @@
         if test_set == 0:
             # Positive base with fractional exponent
             base_range = TosaTensorValuesGen._get_data_range(
-                testGen,
+                rng,
                 dtype,
                 TosaTensorValuesGen.TVG_FLOAT_HIGH_VALUE_POW_BASE,
                 TosaTensorValuesGen.TVG_FLOAT_LOW_VALUE_POW_BASE,
             )
             exp_range = TosaTensorValuesGen._get_data_range(
-                testGen, dtype, TosaTensorValuesGen.TVG_FLOAT_HIGH_VALUE_POW_EXP
+                rng, dtype, TosaTensorValuesGen.TVG_FLOAT_HIGH_VALUE_POW_EXP
             )
             exp_round = False
         else:
             # Integer exponent
             exp_range = TosaTensorValuesGen._get_data_range(
-                testGen, dtype, TosaTensorValuesGen.TVG_FLOAT_HIGH_VALUE_POW_EXP
+                rng, dtype, TosaTensorValuesGen.TVG_FLOAT_HIGH_VALUE_POW_EXP
             )
             exp_round = True
             if test_set == 1:
                 # Positive base
                 base_range = TosaTensorValuesGen._get_data_range(
-                    testGen,
+                    rng,
                     dtype,
                     TosaTensorValuesGen.TVG_FLOAT_HIGH_VALUE_POW_BASE,
                     TosaTensorValuesGen.TVG_FLOAT_LOW_VALUE_POW_BASE,
@@ -1576,7 +1582,7 @@
                 # Negative base
                 # Supply new look up tables with negative values
                 base_range = TosaTensorValuesGen._get_data_range(
-                    testGen,
+                    rng,
                     dtype,
                     {dtype: -TosaTensorValuesGen.TVG_FLOAT_LOW_VALUE_POW_BASE[dtype]},
                     {dtype: -TosaTensorValuesGen.TVG_FLOAT_HIGH_VALUE_POW_BASE[dtype]},
@@ -1593,15 +1599,17 @@
         )
         argsDict["data_range_list"] = data_range_list
         return TosaTensorValuesGen.tvgLazyGenDefault(
-            testGen, opName, dtypeList, shapeList, argsDict, error_name
+            testGen, rng, opName, dtypeList, shapeList, argsDict, error_name
         )
 
     @staticmethod
-    def tvgLogRsqrt(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
+    def tvgLogRsqrt(
+        testGen, rng, opName, dtypeList, shapeList, argsDict, error_name=None
+    ):
         # LOG & RSQRT data range from lowest expressible positive number to
         # largest to avoid NaNs
         data_range = TosaTensorValuesGen._get_data_range(
-            testGen,
+            rng,
             dtypeList[0],
             TosaTensorValuesGen.TVG_FLOAT_HIGH_VALUE,
             TosaTensorValuesGen.TVG_FLOAT_LOW_VALUE,
@@ -1610,7 +1618,7 @@
             argsDict["data_range"] = data_range
 
         return TosaTensorValuesGen.tvgLazyGenDefault(
-            testGen, opName, dtypeList, shapeList, argsDict, error_name
+            testGen, rng, opName, dtypeList, shapeList, argsDict, error_name
         )
 
     # Set the EXP data range to the log of the largest to smallest values
@@ -1627,9 +1635,9 @@
     }
 
     @staticmethod
-    def tvgExp(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
+    def tvgExp(testGen, rng, opName, dtypeList, shapeList, argsDict, error_name=None):
         data_range = TosaTensorValuesGen._get_data_range(
-            testGen,
+            rng,
             dtypeList[0],
             TosaTensorValuesGen.TVG_FLOAT_HIGH_VALUE_EXP,
             TosaTensorValuesGen.TVG_FLOAT_LOW_VALUE_EXP,
@@ -1638,12 +1646,12 @@
             argsDict["data_range"] = data_range
 
         return TosaTensorValuesGen.tvgLazyGenDefault(
-            testGen, opName, dtypeList, shapeList, argsDict, error_name
+            testGen, rng, opName, dtypeList, shapeList, argsDict, error_name
         )
 
     @staticmethod
     def tvgFullyConnected(
-        testGen, opName, dtypeList, shapeList, argsDict, error_name=None
+        testGen, rng, opName, dtypeList, shapeList, argsDict, error_name=None
     ):
         dtype = dtypeList[0]
         if (
@@ -1658,26 +1666,24 @@
             highval_lookup = {
                 dtype: math.pow(TosaTensorValuesGen.TVG_FLOAT_HIGH_VALUE[dtype], 1 / IC)
             }
-            data_range = TosaTensorValuesGen._get_data_range(
-                testGen, dtype, highval_lookup
-            )
+            data_range = TosaTensorValuesGen._get_data_range(rng, dtype, highval_lookup)
             assert data_range is not None
             argsDict["data_range"] = data_range
 
         return TosaTensorValuesGen.tvgLazyGenDefault(
-            testGen, opName, dtypeList, shapeList, argsDict, error_name
+            testGen, rng, opName, dtypeList, shapeList, argsDict, error_name
         )
 
     @staticmethod
-    def tvgCast(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
+    def tvgCast(testGen, rng, opName, dtypeList, shapeList, argsDict, error_name=None):
         in_dtype = dtypeList[0]
         out_dtype = argsDict["out_type"]
         # Create look up to limit input tensor to output type maximums to avoid
         # FP infinities and saturation of integers
-        out_range = testGen.getDTypeRange(out_dtype, high_inclusive=True)
+        out_range = rng.dTypeRange(out_dtype, high_inclusive=True)
         highval_lookup = {in_dtype: out_range[1]}
         data_range = TosaTensorValuesGen._get_data_range(
-            testGen,
+            rng,
             in_dtype,
             highval_lookup,
         )
@@ -1686,11 +1692,13 @@
         argsDict["data_range"] = data_range
 
         return TosaTensorValuesGen.tvgLazyGenDefault(
-            testGen, opName, dtypeList, shapeList, argsDict, error_name
+            testGen, rng, opName, dtypeList, shapeList, argsDict, error_name
         )
 
     @staticmethod
-    def tvgGather(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
+    def tvgGather(
+        testGen, rng, opName, dtypeList, shapeList, argsDict, error_name=None
+    ):
         K = shapeList[0][1]
 
         # Fix the type of the indices tensor
@@ -1709,11 +1717,11 @@
             for idx, shape in enumerate(shapeList):
                 dtype = dtypeList[idx]
                 if idx != 1:
-                    arr = testGen.getRandTensor(shape, dtype)
+                    arr = rng.randTensor(shape, dtype)
                     tens_ser_list.append(testGen.ser.addPlaceholder(shape, dtype, arr))
                 else:
                     # Limit data range of indices tensor upto K (exclusive)
-                    arr = testGen.getRandTensor(shape, dtype, (0, K))
+                    arr = rng.randTensor(shape, dtype, (0, K))
                     # To match old functionality - create indices as CONST
                     tens_ser_list.append(testGen.ser.addConst(shape, dtype, arr))
 
@@ -1729,11 +1737,13 @@
             argsDict["data_range_list"] = data_range_list
 
             return TosaTensorValuesGen.tvgLazyGenDefault(
-                testGen, opName, dtypeList, shapeList, argsDict, error_name
+                testGen, rng, opName, dtypeList, shapeList, argsDict, error_name
             )
 
     @staticmethod
-    def tvgScatter(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
+    def tvgScatter(
+        testGen, rng, opName, dtypeList, shapeList, argsDict, error_name=None
+    ):
         K = shapeList[0][1]
         W = shapeList[2][1]
 
@@ -1760,7 +1770,7 @@
             for idx, shape in enumerate(shapeList):
                 dtype = dtypeList[idx]
                 if idx != 1:
-                    arr = testGen.getRandTensor(shape, dtype)
+                    arr = rng.randTensor(shape, dtype)
                     tens_ser_list.append(testGen.ser.addPlaceholder(shape, dtype, arr))
                 else:
                     # Create the indices array
@@ -1769,7 +1779,7 @@
                     for n in range(shape[0]):
                         # Get a shuffled list of output indices (0 to K-1) and
                         # limit length to W
-                        arr.append(testGen.rng.permutation(K)[:W])
+                        arr.append(rng.permutation(K)[:W])
                     indices_arr = np.array(arr, dtype=np.int32)  # (N, W)
                     # To match old functionality - create indices as CONST
                     tens_ser_list.append(
@@ -1789,7 +1799,7 @@
             argsDict["data_range_list"] = data_range_list
 
             return TosaTensorValuesGen.tvgLazyGenDefault(
-                testGen, opName, dtypeList, shapeList, argsDict, error_name
+                testGen, rng, opName, dtypeList, shapeList, argsDict, error_name
             )
 
 
@@ -1881,7 +1891,7 @@
         return new_arg_list
 
     @staticmethod
-    def agNone(testGen, opName, shapeList, dtype, error_name=None):
+    def agNone(testGen, rng, opName, shapeList, dtype, error_name=None):
         """A trivial argument generator for operators that don't take any
         non-tensor arguments"""
         arg_list = TosaArgGen._add_data_generators(
@@ -1896,7 +1906,7 @@
         return arg_list
 
     @staticmethod
-    def agPow(testGen, opName, shapeList, dtype, error_name=None):
+    def agPow(testGen, rng, opName, shapeList, dtype, error_name=None):
         """Pow operator needs different test sets to cover random numbers
         without creating NaNs or Infs"""
         arg_list = TosaArgGen._add_data_generators(
@@ -1911,17 +1921,17 @@
         return arg_list
 
     @staticmethod
-    def agAxis(testGen, opName, shapeList, dtype, error_name=None):
+    def agAxis(testGen, rng, opName, shapeList, dtype, error_name=None):
         """Build the axis argument for operators that take a single axis"""
         arg_list = []
         shape = shapeList[0]
 
         if error_name == ErrorIf.AxisSmallerZero:
             # Set too small axis
-            axes = [testGen.rng.integers(-5, 0)]
+            axes = [rng.integers(-5, 0)]
         elif error_name == ErrorIf.AxisLargerRank:
             # Set too large axis
-            axes = [testGen.rng.integers(len(shape) + 1, len(shape) + 10)]
+            axes = [rng.integers(len(shape) + 1, len(shape) + 10)]
         else:
             # Create tests for each dimension
             axes = range(0, len(shape))
@@ -1967,7 +1977,7 @@
         return sparsity
 
     @staticmethod
-    def agConv(testGen, opName, shapeList, dtypes, error_name=None):
+    def agConv(testGen, rng, opName, shapeList, dtypes, error_name=None):
         # Used by CONV2D, CONV3D and DEPTHWISE_CONV2D
         arg_list = []
 
@@ -2005,13 +2015,13 @@
             # Generate comprehensive argument lists
             # - except for named errors, which use specific invalid value(s)
             if error_name == ErrorIf.PadSmallerZero:
-                p_vals = [testGen.rng.choice(range(-5, 0))]
+                p_vals = [rng.choice(range(-5, 0))]
             else:
                 p_vals = [x for x in range(0, testGen.args.max_conv_padding + 1)]
             paddings = {x for x in itertools.product(*([p_vals] * k_rank * 2))}
             if error_name == ErrorIf.StrideSmallerOne:
                 # Can't use stride=0, as it is used to derive output shape, as a divisor
-                s_vals = [testGen.rng.choice(range(-5, 0))]
+                s_vals = [rng.choice(range(-5, 0))]
             else:
                 # Stride must be greater than 1 to force non-integer error
                 startStride = (
@@ -2022,7 +2032,7 @@
                 ]
             strides = {x for x in itertools.product(*([s_vals] * k_rank))}
             if error_name == ErrorIf.DilationSmallerOne:
-                d_vals = [testGen.rng.choice(range(-5, 1))]
+                d_vals = [rng.choice(range(-5, 1))]
             else:
                 d_vals = [x for x in range(1, testGen.args.max_conv_dilation + 1)]
             dilations = {x for x in itertools.product(*([d_vals] * k_rank))}
@@ -2195,13 +2205,13 @@
         return arg_list
 
     @staticmethod
-    def agFullyConnected(testGen, opName, shapeList, dtypes, error_name=None):
+    def agFullyConnected(testGen, rng, opName, shapeList, dtypes, error_name=None):
 
         assert isinstance(dtypes, (list, tuple)), f"{dtypes} unexpected"
         input_dtype = dtypes[0]
 
         if error_name == ErrorIf.WrongOutputType:
-            accum_dtype = gtu.get_wrong_output_type(opName, testGen.rng, input_dtype)
+            accum_dtype = gtu.get_wrong_output_type(opName, rng, input_dtype)
         elif error_name == ErrorIf.WrongInputType:
             # Pick some potentially correct output dtype if input type is incorrect
             accum_dtype = DType.INT32
@@ -2230,7 +2240,7 @@
         return arg_list
 
     @staticmethod
-    def agMatMul(testGen, opName, shapeList, dtype, error_name=None):
+    def agMatMul(testGen, rng, opName, shapeList, dtype, error_name=None):
         # Get valid accumulate type(s)
         if dtype == DType.INT8:
             accum_dtypes = [DType.INT32]
@@ -2249,7 +2259,7 @@
 
         if error_name == ErrorIf.WrongOutputType:
             # Get incorrect output dtype for ErrorIf case
-            accum_dtypes = [gtu.get_wrong_output_type(opName, testGen.rng, dtype)]
+            accum_dtypes = [gtu.get_wrong_output_type(opName, rng, dtype)]
         elif error_name == ErrorIf.WrongInputType:
             # Pick some potentially correct output dtype if input type is incorrect
             accum_dtypes = [DType.INT32]
@@ -2283,7 +2293,7 @@
         return arg_list
 
     @staticmethod
-    def agTransposeConv2D(testGen, opName, shapeList, dtypes, error_name=None):
+    def agTransposeConv2D(testGen, rng, opName, shapeList, dtypes, error_name=None):
         arg_list = []
 
         if testGen.args.level8k and error_name is not None:
@@ -2310,9 +2320,7 @@
             smallest_padding_size = -min(k_shape[0], k_shape[1]) + 1
             if error_name == ErrorIf.PadLargerEqualKernel:
                 max_filter_size = -max(k_shape[0], k_shape[1])
-                p_vals = [
-                    testGen.rng.choice(range(max_filter_size - 10, max_filter_size))
-                ]
+                p_vals = [rng.choice(range(max_filter_size - 10, max_filter_size))]
             else:
                 p_vals = [
                     x
@@ -2323,7 +2331,7 @@
             paddings = {x for x in itertools.product(*([p_vals] * 4))}
             if error_name == ErrorIf.StrideSmallerOne:
                 # Can't use stride=0, as it is used to derive output shape, as a divisor
-                s_vals = [testGen.rng.choice(range(-5, 0))]
+                s_vals = [rng.choice(range(-5, 0))]
             else:
                 s_vals = [x for x in range(1, testGen.args.max_conv_stride + 1)]
             strides = {x for x in itertools.product(*([s_vals] * 2))}
@@ -2440,7 +2448,7 @@
         return arg_list
 
     @staticmethod
-    def agPad(testGen, opName, shapeList, dtype, error_name=None):
+    def agPad(testGen, rng, opName, shapeList, dtype, error_name=None):
         rank = len(shapeList[0])
 
         # Exhaustively test combinations of padding on each side of each dimension
@@ -2454,11 +2462,11 @@
         shape_pad_values = itertools.product(*([axis_pad_values] * rank))
 
         if dtype in [DType.BOOL, DType.INT8, DType.INT16, DType.INT32]:
-            pad_const_int = testGen.getRandNumberDType(dtype)
+            pad_const_int = rng.randNumberDType(dtype)
             pad_const_fp = 0
         elif gtu.dtypeIsFloat(dtype):
             pad_const_int = 0
-            pad_const_fp = testGen.getRandNumberDType(dtype)
+            pad_const_fp = rng.randNumberDType(dtype)
         else:
             return []
 
@@ -2516,7 +2524,7 @@
         return arg_list
 
     @staticmethod
-    def agPooling(testGen, opName, shapeList, dtype, error_name=None):
+    def agPooling(testGen, rng, opName, shapeList, dtype, error_name=None):
         arg_list = []
 
         shape = shapeList[0]
@@ -2658,7 +2666,7 @@
                             ErrorIf.PadLargerEqualKernel,
                         ]:
                             sNew, pNew, kNew = TosaErrorIfArgGen.eiPoolingErrorIf(
-                                testGen, error_name, s, p, k
+                                rng, error_name, s, p, k
                             )
                             if None not in [sNew, pNew, kNew] and n % sparsity == 0:
                                 arg_list.append(
@@ -2722,12 +2730,12 @@
         return arg_list
 
     @staticmethod
-    def agCast(testGen, opName, shapeList, inDtype, error_name=None):
+    def agCast(testGen, rng, opName, shapeList, inDtype, error_name=None):
         arg_list = []
 
         # Enumerate the output types here
         if error_name == ErrorIf.WrongOutputType:
-            dtypeList = TosaErrorIfArgGen.eiCastErrorIf(testGen, inDtype)
+            dtypeList = TosaErrorIfArgGen.eiCastErrorIf(inDtype)
         elif inDtype == DType.INT8:
             dtypeList = [
                 DType.BOOL,
@@ -2811,7 +2819,7 @@
         return arg_list
 
     @staticmethod
-    def agRescale(testGen, opName, shapeList, inDtype, error_name=None):
+    def agRescale(testGen, rng, opName, shapeList, inDtype, error_name=None):
         arg_list = []
 
         # Enumerate the output types here
@@ -2906,7 +2914,7 @@
                         # Calculate scale based on:
                         # scale = a *(2^output_width)/(2^input_width))
 
-                        a = np.float32(testGen.rng.random(size=[nc]))
+                        a = np.float32(rng.random(size=[nc]))
                         scale_arr = a * np.float32(
                             (1 << out_type_width) / (1 << in_type_width)
                         )
@@ -2965,13 +2973,13 @@
         return arg_list
 
     @staticmethod
-    def agMul(testGen, opName, shapeList, dtype, error_name=None):
+    def agMul(testGen, rng, opName, shapeList, dtype, error_name=None):
         arg_list = []
 
         if dtype is DType.INT32:
             for p in range(testGen.args.num_rand_permutations):
 
-                shift = testGen.randInt(0, 32)
+                shift = rng.randInt(0, 32)
                 arg_list.append(("perm{}_shift{}".format(p, shift), {"shift": shift}))
         else:
             arg_list.append(("perm0_shift0", {"shift": 0}))
@@ -2988,7 +2996,7 @@
         return arg_list
 
     @staticmethod
-    def agArithmeticRightShift(testGen, opName, shapeList, dtype, error_name=None):
+    def agArithmeticRightShift(testGen, rng, opName, shapeList, dtype, error_name=None):
         arg_list = []
 
         for round in (True, False):
@@ -3009,7 +3017,7 @@
         return arg_list
 
     @staticmethod
-    def agFFT2d(testGen, opName, shapeList, dtype, error_name=None):
+    def agFFT2d(testGen, rng, opName, shapeList, dtype, error_name=None):
         arg_list = []
 
         shape = shapeList[0]
@@ -3037,7 +3045,7 @@
         return arg_list
 
     @staticmethod
-    def agRFFT2d(testGen, opName, shapeList, dtype, error_name=None):
+    def agRFFT2d(testGen, rng, opName, shapeList, dtype, error_name=None):
         arg_list = []
 
         shape = shapeList[0]
@@ -3074,7 +3082,7 @@
         return factors
 
     @staticmethod
-    def agReshape(testGen, opName, shapeList, dtype, error_name=None):
+    def agReshape(testGen, rng, opName, shapeList, dtype, error_name=None):
         arg_list = []
 
         origShape = shapeList[0]
@@ -3085,7 +3093,7 @@
         # This code is NOT fast.  Fortunately, the numbers are fairly small.
         for p in range(testGen.args.num_rand_permutations):
             # Rank from 1 to TOSA_TENSOR_MAX_RANK
-            newRank = testGen.randInt(1, (testGen.TOSA_TENSOR_MAX_RANK + 1))
+            newRank = rng.randInt(1, (testGen.TOSA_TENSOR_MAX_RANK + 1))
             if len(factors) < newRank:
                 continue
 
@@ -3095,12 +3103,12 @@
                 # Generate the new shape of the chosen new rank
                 newShape = []
                 remainingElements = totalElements
-                shuffledFactors = testGen.rng.permutation(factors)
+                shuffledFactors = rng.permutation(factors)
                 for i in range(1, newRank):
                     # pick rank-1 factors
                     newShape.append(shuffledFactors[0])
                     remainingElements = remainingElements // shuffledFactors[0]
-                    shuffledFactors = testGen.rng.permutation(
+                    shuffledFactors = rng.permutation(
                         TosaArgGen.getFactors(remainingElements)
                     )
                 newShape.append(remainingElements)
@@ -3136,7 +3144,7 @@
         return arg_list
 
     @staticmethod
-    def agTranspose(testGen, opName, shapeList, dtype, error_name=None):
+    def agTranspose(testGen, rng, opName, shapeList, dtype, error_name=None):
         arg_list = []
 
         ifm_shape = shapeList[0]
@@ -3151,7 +3159,7 @@
         elif error_name == ErrorIf.IndexUsedTwice:
             # Create list with a duplicated index
             perm_range = list(range(len(ifm_shape)))
-            index_choice = testGen.rng.choice(range(len(perm_range)))
+            index_choice = rng.choice(range(len(perm_range)))
             perm_range[(index_choice + 1) % len(perm_range)] = perm_range[index_choice]
             permutations = [p for p in itertools.permutations(perm_range)]
 
@@ -3163,7 +3171,7 @@
         limit = min(len(permutations), testGen.args.num_rand_permutations)
 
         # Get random permutation generator that uses all permutations
-        random_permutations = testGen.rng.permutation(permutations)
+        random_permutations = rng.permutation(permutations)
 
         # Create list of required amount of permutations
         arg_list = [
@@ -3183,7 +3191,7 @@
         return arg_list
 
     @staticmethod
-    def agSlice(testGen, opName, shapeList, dtype, error_name=None):
+    def agSlice(testGen, rng, opName, shapeList, dtype, error_name=None):
         arg_list = []
 
         ifm_shape = shapeList[0]
@@ -3197,8 +3205,8 @@
 
             for i in range(rank):
                 if ifm_shape[i] > 1:
-                    start.append(testGen.randInt(0, ifm_shape[i]))
-                    size.append(testGen.randInt(0, ifm_shape[i] - start[i]))
+                    start.append(rng.randInt(0, ifm_shape[i]))
+                    size.append(rng.randInt(0, ifm_shape[i] - start[i]))
 
                     # Invalid slice size?
                     if size[i] == 0:
@@ -3210,7 +3218,7 @@
             if valid:
                 # If ERROR_IF test required then incorrect start, size will be returned
                 start, size = TosaErrorIfArgGen.eiSliceErrorIf(
-                    testGen, error_name, ifm_shape, start, size
+                    rng, error_name, ifm_shape, start, size
                 )
                 arg_list.append(("perm{}".format(p), {"start": start, "size": size}))
         # Now add data generator types
@@ -3226,7 +3234,7 @@
         return arg_list
 
     @staticmethod
-    def agTile(testGen, opName, shapeList, dtype, error_name=None):
+    def agTile(testGen, rng, opName, shapeList, dtype, error_name=None):
         arg_list = []
 
         ifm_shape = shapeList[0]
@@ -3246,7 +3254,7 @@
                 elif max(ifm_shape) > 1000:
                     multiples.append(2)
                 else:
-                    multiples.append(testGen.randInt(1, 4))
+                    multiples.append(rng.randInt(1, 4))
             arg_list.append(("perm{}".format(p), {"multiples": multiples}))
 
         # Now add data generator types
@@ -3262,15 +3270,15 @@
         return arg_list
 
     @staticmethod
-    def agResize(testGen, opName, shapeList, dtype, error_name=None):
+    def agResize(testGen, rng, opName, shapeList, dtype, error_name=None):
         arg_list = []
         ifm_shape = shapeList[0]
 
         def get_aspect_ratio_resize_params():
             common_aspect_ratios = ((3, 2), (16, 9), (4, 3))
-            aspect_ratio = testGen.rng.choice(common_aspect_ratios)
-            invert = testGen.rng.choice((False, True))
-            letterbox = testGen.rng.choice((False, True))
+            aspect_ratio = rng.choice(common_aspect_ratios)
+            invert = rng.choice((False, True))
+            letterbox = rng.choice((False, True))
 
             scale_y_n = aspect_ratio[0] if invert else aspect_ratio[1]
             scale_x_n = aspect_ratio[1] if invert else aspect_ratio[0]
@@ -3279,13 +3287,13 @@
 
             if letterbox:
                 max_border = scale_y_n
-                border_y = testGen.randInt(low=0, high=max_border)
+                border_y = rng.randInt(low=0, high=max_border)
                 border_x = 0
             else:
                 # Pillarboxing
                 border_y = 0
                 max_border = scale_x_n
-                border_x = testGen.randInt(low=0, high=max_border)
+                border_x = rng.randInt(low=0, high=max_border)
 
             scale = (scale_y_n, scale_y_d, scale_x_n, scale_x_d)
             offset = (offset_y, offset_x)
@@ -3296,13 +3304,13 @@
         def get_upscale_downscale_params():
             valid_params = False
             while not valid_params:
-                upscale = testGen.rng.choice((False, True))
+                upscale = rng.choice((False, True))
 
                 # True if sampling begins from (0,0). Otherwise (-0.5,-0.5)
-                origin_sampling = testGen.rng.choice((False, True))
+                origin_sampling = rng.choice((False, True))
 
                 if upscale:
-                    shift = testGen.randInt(low=1, high=4)
+                    shift = rng.randInt(low=1, high=4)
                     scale_x_d = scale_y_d = 1
                     scale_x_n = scale_y_n = (
                         1 << shift if origin_sampling else 2 << shift
@@ -3328,16 +3336,16 @@
                     if not valid_scale_y_ds:
                         scale_y_d = 1
                     else:
-                        scale_y_d = testGen.rng.choice(valid_scale_y_ds)
+                        scale_y_d = rng.choice(valid_scale_y_ds)
 
                     if not valid_scale_x_ds:
                         scale_x_d = 1
                     else:
-                        scale_x_d = testGen.rng.choice(valid_scale_x_ds)
+                        scale_x_d = rng.choice(valid_scale_x_ds)
 
                     border_x = border_y = 0
-                    offset_y = testGen.randInt(0, 16 * scale_y_n)
-                    offset_x = testGen.randInt(0, 16 * scale_x_n)
+                    offset_y = rng.randInt(0, 16 * scale_y_n)
+                    offset_x = rng.randInt(0, 16 * scale_x_n)
                 valid_params = True
 
             scale = (scale_y_n, scale_y_d, scale_x_n, scale_x_d)
@@ -3356,11 +3364,11 @@
                 return scale_d
 
             # Scale
-            scale_y_n = testGen.randInt(low=1, high=(1 << 11))
-            scale_x_n = testGen.randInt(low=1, high=(1 << 11))
+            scale_y_n = rng.randInt(low=1, high=(1 << 11))
+            scale_x_n = rng.randInt(low=1, high=(1 << 11))
 
-            scale_y_d = testGen.randInt(low=1, high=(16 * scale_y_n))
-            scale_x_d = testGen.randInt(low=1, high=(16 * scale_x_n))
+            scale_y_d = rng.randInt(low=1, high=(16 * scale_y_n))
+            scale_x_d = rng.randInt(low=1, high=(16 * scale_x_n))
 
             scale_y_d = fix_scale_to_max_scale(
                 scale_y_n, scale_y_d, testGen.TOSA_8K_LEVEL_MAX_SCALE
@@ -3370,10 +3378,10 @@
             )
 
             # Offsets and border within the scale
-            offset_y = testGen.randInt(low=-scale_y_n, high=(16 * scale_y_n))
-            offset_x = testGen.randInt(low=-scale_x_n, high=(16 * scale_x_n))
-            border_y = testGen.randInt(low=(-16 * scale_y_n), high=scale_y_n)
-            border_x = testGen.randInt(low=(-16 * scale_x_n), high=scale_x_n)
+            offset_y = rng.randInt(low=-scale_y_n, high=(16 * scale_y_n))
+            offset_x = rng.randInt(low=-scale_x_n, high=(16 * scale_x_n))
+            border_y = rng.randInt(low=(-16 * scale_y_n), high=scale_y_n)
+            border_x = rng.randInt(low=(-16 * scale_x_n), high=scale_x_n)
 
             scale = (scale_y_n, scale_y_d, scale_x_n, scale_x_d)
             offset = (offset_y, offset_x)
@@ -3382,24 +3390,24 @@
 
         def get_level_8k_params():
             # Create 64x scale - 64/1 to 2048/32
-            scale_d = testGen.randInt(
+            scale_d = rng.randInt(
                 low=1, high=(1 << 11) / testGen.TOSA_8K_LEVEL_MAX_SCALE
             )
             scale_n = scale_d * testGen.TOSA_8K_LEVEL_MAX_SCALE
             # Create half to fifth scaling
-            scale_d_alt = testGen.randInt(low=2, high=6)
+            scale_d_alt = rng.randInt(low=2, high=6)
             scale_n_alt = 1
-            switch = testGen.rng.choice((False, True))
+            switch = rng.choice((False, True))
             if switch:
                 scale = (scale_n_alt, scale_d_alt, scale_n, scale_d)
             else:
                 scale = (scale_n, scale_d, scale_n_alt, scale_d_alt)
 
-            offset_y = testGen.rng.choice((-scale[0], 0, (16 * scale[0]) - 1))
-            offset_x = testGen.rng.choice((-scale[2], 0, (16 * scale[2]) - 1))
+            offset_y = rng.choice((-scale[0], 0, (16 * scale[0]) - 1))
+            offset_x = rng.choice((-scale[2], 0, (16 * scale[2]) - 1))
             offset = (offset_y, offset_x)
-            border_y = testGen.rng.choice((-16 * scale[0], 0, scale[0] - 1))
-            border_x = testGen.rng.choice((-16 * scale[2], 0, scale[2] - 1))
+            border_y = rng.choice((-16 * scale[0], 0, scale[0] - 1))
+            border_x = rng.choice((-16 * scale[2], 0, scale[2] - 1))
             border = (border_y, border_x)
             return scale, offset, border
 
@@ -3437,7 +3445,7 @@
                 while perm < testGen.args.num_rand_permutations:
                     # Random choice of type of params we are testing
                     if not testGen.args.level8k:
-                        _rnd_param_fn = testGen.rng.choice(
+                        _rnd_param_fn = rng.choice(
                             (
                                 get_rand_params,
                                 get_upscale_downscale_params,
@@ -3541,7 +3549,7 @@
                             border,
                             outputDTypeNew,
                         ) = TosaErrorIfArgGen.eiResizeErrorIf(
-                            testGen,
+                            rng,
                             error_name,
                             mode,
                             dtype,
@@ -3596,17 +3604,13 @@
         return arg_list
 
     @staticmethod
-    def agTable(testGen, opName, shapeList, dtype, error_name=None):
+    def agTable(testGen, rng, opName, shapeList, dtype, error_name=None):
         arg_list = []
 
         if dtype == DType.INT8:
-            table = np.int32(
-                testGen.rng.integers(low=-128, high=128, size=[256])
-            ).tolist()
+            table = np.int32(rng.integers(low=-128, high=128, size=[256])).tolist()
         else:  # INT16
-            table = np.int32(
-                testGen.rng.integers(low=-32768, high=32768, size=[513])
-            ).tolist()
+            table = np.int32(rng.integers(low=-32768, high=32768, size=[513])).tolist()
             # Make sure all slopes are within REQUIRE min/max 16-bit int
             for idx in range(len(table) - 1):
                 slope = table[idx + 1] - table[idx]
@@ -3635,7 +3639,7 @@
         # Return list of tuples: (arg_str, args_dict)
         return arg_list
 
-    def agCondIf(testGen, opName, shapeList, dtype, error_name=None):
+    def agCondIf(testGen, rng, opName, shapeList, dtype, error_name=None):
         # CondIf generates the condition values here.
         # Convert to tensors in the build function, along with the
         # then and else blocks
@@ -3656,7 +3660,7 @@
         # Return list of tuples: (arg_str, args_dict)
         return arg_list
 
-    def agWhileLoop(testGen, opName, shapeList, dtype, error_name=None):
+    def agWhileLoop(testGen, rng, opName, shapeList, dtype, error_name=None):
         # While loop: 0 iterations, 1, more than 1
         arg_list = []
 
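
Taken together, every TosaArgGen and TosaTensorValuesGen entry point above now
receives its random generator explicitly instead of reaching into testGen.rng,
so a per-test stable generator can be swapped in without touching global state.
A minimal sketch of the new calling convention (the op name, shape and dtype
are illustrative, and testGen is assumed to be an initialised TosaTestGen):

    from generator.tosa_arg_gen import TosaArgGen
    from generator.tosa_random_gen import TosaRandomGenerator
    from tosa.DType import DType

    rng = TosaRandomGenerator(seed=42)
    # Same generator entry point as before, with the RNG threaded through
    arg_list = TosaArgGen.agAxis(testGen, rng, "reduce_sum", [[10, 5]], DType.FP32)
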
diff --git a/verif/generator/tosa_error_if.py b/verif/generator/tosa_error_if.py
index 3972edd..e557f06 100644
--- a/verif/generator/tosa_error_if.py
+++ b/verif/generator/tosa_error_if.py
@@ -94,7 +94,7 @@
 class TosaErrorIfArgGen:
     @staticmethod
     def eiResizeErrorIf(
-        testGen,
+        rng,
         error_name,
         mode,
         dtype,
@@ -105,28 +105,28 @@
         border,
     ):
         if error_name == ErrorIf.ScaleSmallerEqualZero:
-            index = testGen.randInt(low=0, high=4)
-            scale[index] = testGen.rng.choice([-2, -1, 0])
+            index = rng.randInt(low=0, high=4)
+            scale[index] = rng.choice([-2, -1, 0])
         elif error_name == ErrorIf.ScaleNLargerMax:
-            index = testGen.rng.choice([0, 2])
-            scale[index] = (1 << 11) + testGen.rng.choice([1, 2, 3])
+            index = rng.choice([0, 2])
+            scale[index] = (1 << 11) + rng.choice([1, 2, 3])
         elif error_name == ErrorIf.ScaleDLargerMax:
-            index = testGen.rng.choice([1, 3])
-            scale[index] = 16 * scale[index - 1] + testGen.rng.choice([0, 1, 2])
+            index = rng.choice([1, 3])
+            scale[index] = 16 * scale[index - 1] + rng.choice([0, 1, 2])
 
         if error_name == ErrorIf.OffsetLargerEqualMax:
-            index = testGen.rng.choice([0, 1])
-            offset[index] = 16 * scale[index * 2] + testGen.rng.choice([0, 1, 2])
+            index = rng.choice([0, 1])
+            offset[index] = 16 * scale[index * 2] + rng.choice([0, 1, 2])
         elif error_name == ErrorIf.OffsetSmallerMin:
-            index = testGen.rng.choice([0, 1])
-            offset[index] = -scale[index * 2] - testGen.rng.choice([1, 2, 3])
+            index = rng.choice([0, 1])
+            offset[index] = -scale[index * 2] - rng.choice([1, 2, 3])
 
         if error_name == ErrorIf.BorderLargerEqualMax:
-            index = testGen.rng.choice([0, 1])
-            border[index] = scale[index * 2] + testGen.rng.choice([0, 1, 2])
+            index = rng.choice([0, 1])
+            border[index] = scale[index * 2] + rng.choice([0, 1, 2])
         elif error_name == ErrorIf.BorderSmallerMin:
-            index = testGen.rng.choice([0, 1])
-            border[index] = -16 * scale[index * 2] - testGen.rng.choice([1, 2, 3])
+            index = rng.choice([0, 1])
+            border[index] = -16 * scale[index * 2] - rng.choice([1, 2, 3])
 
         if error_name == ErrorIf.WrongOutputType:
             if mode == ResizeMode.NEAREST and dtype == DType.INT8:
@@ -192,12 +192,12 @@
                     DType.INT48,
                     DType.FP16,
                 )
-            outputDType = testGen.rng.choice(a=incorrect_types)
+            outputDType = rng.choice(a=incorrect_types)
 
         return scale, offset, border, outputDType
 
     @staticmethod
-    def eiPoolingErrorIf(testGen, error_name, stride, pad, kernel):
+    def eiPoolingErrorIf(rng, error_name, stride, pad, kernel):
         if (
             error_name == ErrorIf.StrideSmallerOne
             # padding must not exceed the kernel size
@@ -207,30 +207,30 @@
             and pad[3] < kernel[1]
         ):
             wrongStride = (
-                testGen.rng.choice([0, -1, -2, -3]),
-                testGen.rng.choice([0, -1, -2, -3]),
+                rng.choice([0, -1, -2, -3]),
+                rng.choice([0, -1, -2, -3]),
             )
             return wrongStride, pad, kernel
         elif error_name == ErrorIf.PadSmallerZero:
             wrongPad = (
-                testGen.rng.choice([-1, -2, -3]),
-                testGen.rng.choice([-1, -2, -3]),
-                testGen.rng.choice([-1, -2, -3]),
-                testGen.rng.choice([-1, -2, -3]),
+                rng.choice([-1, -2, -3]),
+                rng.choice([-1, -2, -3]),
+                rng.choice([-1, -2, -3]),
+                rng.choice([-1, -2, -3]),
             )
             return stride, wrongPad, kernel
         elif error_name == ErrorIf.KernelSmallerOne:
             wrongKernel = (
-                testGen.rng.choice([0, -1, -2, -3]),
-                testGen.rng.choice([0, -1, -2, -3]),
+                rng.choice([0, -1, -2, -3]),
+                rng.choice([0, -1, -2, -3]),
             )
             return stride, pad, wrongKernel
         elif error_name == ErrorIf.PadLargerEqualKernel:
             wrongPad = (
-                testGen.rng.choice([kernel[0], kernel[0] + 1, kernel[0] + 2]),
-                testGen.rng.choice([kernel[0], kernel[0] + 1, kernel[0] + 2]),
-                testGen.rng.choice([kernel[1], kernel[1] + 1, kernel[1] + 2]),
-                testGen.rng.choice([kernel[1], kernel[1] + 1, kernel[1] + 2]),
+                rng.choice([kernel[0], kernel[0] + 1, kernel[0] + 2]),
+                rng.choice([kernel[0], kernel[0] + 1, kernel[0] + 2]),
+                rng.choice([kernel[1], kernel[1] + 1, kernel[1] + 2]),
+                rng.choice([kernel[1], kernel[1] + 1, kernel[1] + 2]),
             )
             return stride, wrongPad, kernel
         else:
@@ -265,16 +265,16 @@
         return False
 
     @staticmethod
-    def eiInvalidateInputOutputList(testGen, error_name, input_list, output_list):
+    def eiInvalidateInputOutputList(rng, error_name, input_list, output_list):
         # Mess up input/output tensors for ERROR_IF checks
         if error_name == "WrongInputList":
-            add_input = testGen.rng.choice([True, False])
+            add_input = rng.choice([True, False])
             if add_input:
                 input_list.append("eiDummyInput")
             else:
                 input_list = input_list[:-1]
         elif error_name == "WrongOutputList":
-            add_output = testGen.rng.choice([True, False])
+            add_output = rng.choice([True, False])
             if add_output:
                 output_list.append("eiDummyOutput")
             else:
@@ -291,25 +291,25 @@
             new_shape = [max(d - 1, 1) for d in new_shape]
         return new_shape
 
-    def eiSliceErrorIf(testGen, error_name, input_shape, start, size):
+    def eiSliceErrorIf(rng, error_name, input_shape, start, size):
         if error_name == ErrorIf.StartSmallerZero:
             newStart = []
             for i in range(len(input_shape)):
-                newStart.append(testGen.rng.choice([-3, -2, -1]))
+                newStart.append(rng.choice([-3, -2, -1]))
             return newStart, size
         elif error_name == ErrorIf.SizeSmallerEqualZero:
             newSize = []
             for i in range(len(input_shape)):
-                newSize.append(testGen.rng.choice([-3, -2, -1, 0]))
+                newSize.append(rng.choice([-3, -2, -1, 0]))
             return start, newSize
         elif error_name == ErrorIf.StartSizeOutsideBounds:
             newStart, newSize = [], []
             for i in range(len(input_shape)):
                 newStart.append(input_shape[i] - 1)
-                newSize.append(testGen.rng.choice([2, 3, 4]))
+                newSize.append(rng.choice([2, 3, 4]))
             return newStart, newSize
         elif error_name == ErrorIf.InputSizeStartLengthMismatch:
-            remove = testGen.rng.choice([True, False])
+            remove = rng.choice([True, False])
 
             # Get an empty tensor when diminishing dimension on 1-d tensor.
             if len(start) == 1 or len(size) == 1:
@@ -328,9 +328,7 @@
             return start, size
 
     @staticmethod
-    def eiCastErrorIf(testGen, input_dtype):
-        # if input_dtype in [DType.BOOL, DType.FP32]:
-        #    outputDType = [DType.BOOL, DType.INT48, DType.FP32]
+    def eiCastErrorIf(input_dtype):
         if input_dtype in [DType.BOOL]:
             outputDType = [
                 DType.BOOL,
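
With testGen dropped from their signatures, the ERROR_IF helpers now depend
only on a plain random generator, which also makes them easy to exercise in
isolation. A sketch, assuming ErrorIf is exported by this module:

    from generator.tosa_error_if import ErrorIf, TosaErrorIfArgGen
    from generator.tosa_random_gen import TosaRandomGenerator

    rng = TosaRandomGenerator(seed=0)
    # Derive a deliberately invalid (zero or negative) stride for a pooling test
    stride, pad, kernel = TosaErrorIfArgGen.eiPoolingErrorIf(
        rng, ErrorIf.StrideSmallerOne, (1, 1), (0, 0, 0, 0), (2, 2)
    )
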
diff --git a/verif/generator/tosa_random_gen.py b/verif/generator/tosa_random_gen.py
new file mode 100644
index 0000000..ae8ae5c
--- /dev/null
+++ b/verif/generator/tosa_random_gen.py
@@ -0,0 +1,174 @@
+# Copyright (c) 2024, ARM Limited.
+# SPDX-License-Identifier: Apache-2.0
+import hashlib
+import logging
+
+import generator.tosa_utils as gtu
+import numpy as np
+from tosa.DType import DType
+
+logging.basicConfig()
+logger = logging.getLogger("tosa_verif_build_tests")
+
+
+class TosaRandomGenerator(np.random.Generator):
+    """Equivalent to numpy.default_rng, with support for TOSA data types"""
+
+    def __init__(self, seed, restrict_range_by_type={}):
+        """Create random generator with TOSA type support.
+
+        seed: integer seed
+        restrict_range_by_type: see TosaHashRandomGenerator.__init__()
+        """
+        self._restrict_range_by_type = restrict_range_by_type
+        self._seed = int(seed)
+        self._bitgen = np.random.PCG64(self._seed)
+        super().__init__(self._bitgen)
+
+    @property
+    def seed(self):
+        return self._seed
+
+    @property
+    def hexSeed(self):
+        return hex(self._seed)
+
+    def dTypeRange(self, dtype, high_inclusive=False):
+        """Returns range tuple for given dtype.
+
+        dtype: DType
+        high_inclusive: True for inclusive high values
+        Returns: dtype value range boundaries tuple (low, high)
+            The high boundary is excluded in the range unless high_inclusive is True
+        """
+        if dtype in self._restrict_range_by_type:
+            rng = self._restrict_range_by_type[dtype]
+        elif dtype == DType.BOOL:
+            rng = (0, 2)
+        elif dtype == DType.UINT8:
+            rng = (0, 256)
+        elif dtype == DType.UINT16:
+            rng = (0, 65536)
+        elif dtype == DType.INT4:
+            # TOSA specific INT4 weight range from -7 to 7
+            rng = (-7, 8)
+        elif dtype == DType.INT8:
+            rng = (-128, 128)
+        elif dtype == DType.INT16:
+            rng = (-32768, 32768)
+        elif dtype == DType.INT32:
+            rng = (-(1 << 31), (1 << 31))
+        elif dtype == DType.INT48:
+            rng = (-(1 << 47), (1 << 47))
+        else:
+            # Float types and SHAPE must be supplied via _restrict_range_by_type
+            raise Exception("Unsupported dtype: {}".format(dtype))
+
+        if dtype in (DType.FP16, DType.BF16, DType.FP32, DType.FP8E4M3, DType.FP8E5M2):
+            # Floating point - range is always inclusive
+            return rng
+        else:
+            # Integer
+            if not high_inclusive:
+                # Exclusive high: low <= range < high
+                return rng
+            else:
+                # Inclusive range: low <= range <= high
+                return (rng[0], rng[1] - 1)
+
+    def randInt(self, low=0, high=256):
+        return np.int32(self.integers(low=low, high=high, size=1))[0]
+
+    def randNumberDType(self, dtype):
+        low, high = self.dTypeRange(dtype)
+
+        if dtype == DType.FP32:
+            return np.float32(self.uniform(low=low, high=high))
+        elif dtype == DType.FP16:
+            return np.float16(self.uniform(low=low, high=high))
+        elif dtype == DType.BF16:
+            rand_f32 = np.float32(self.uniform(low=low, high=high))
+            return gtu.vect_f32_to_bf16(rand_f32)
+        elif dtype == DType.FP8E4M3:
+            rand_f32 = np.float32(self.uniform(low=low, high=high))
+            return gtu.vect_f32_to_fp8e4m3(rand_f32)
+        elif dtype == DType.FP8E5M2:
+            rand_f32 = np.float32(self.uniform(low=low, high=high))
+            return gtu.vect_f32_to_fp8e5m2(rand_f32)
+        elif dtype == DType.BOOL:
+            return self.choice([False, True])
+        elif dtype == DType.INT48 or dtype == DType.SHAPE:
+            # Special size
+            return np.int64(self.integers(low, high, size=1))[0]
+
+        return np.int32(self.integers(low, high, size=1))[0]
+
+    def randTensor(self, shape, dtype, data_range=None):
+        if data_range is None:
+            low, high = self.dTypeRange(dtype)
+        else:
+            low, high = data_range
+
+        if dtype == DType.BOOL:
+            return np.bool_(self.choice(a=[False, True], size=shape))
+        elif dtype == DType.INT4:
+            return np.int8(self.integers(low=low, high=high, size=shape))
+        elif dtype == DType.INT8:
+            return np.int8(self.integers(low=low, high=high, size=shape))
+        elif dtype == DType.UINT8:
+            return np.uint8(self.integers(low=low, high=high, size=shape))
+        elif dtype == DType.INT16:
+            return np.int16(self.integers(low=low, high=high, size=shape))
+        elif dtype == DType.UINT16:
+            return np.uint16(self.integers(low=low, high=high, size=shape))
+        elif dtype in (DType.INT48, DType.SHAPE):
+            return np.int64(self.integers(low=low, high=high, size=shape))
+        elif dtype in (
+            DType.FP16,
+            DType.BF16,
+            DType.FP32,
+            DType.FP8E4M3,
+            DType.FP8E5M2,
+        ):
+            f_tensor = self.uniform(low=low, high=high, size=shape)
+
+            if dtype == DType.FP16:
+                return np.float16(f_tensor)
+            else:
+                f32_tensor = np.float32(f_tensor)
+                if dtype == DType.BF16:
+                    # Floor the last 16 bits of each f32 value
+                    return np.float32(gtu.vect_f32_to_bf16(f32_tensor))
+                elif dtype == DType.FP8E4M3:
+                    return np.float32(gtu.vect_f32_to_fp8e4m3(f32_tensor))
+                elif dtype == DType.FP8E5M2:
+                    return np.float32(gtu.vect_f32_to_fp8e5m2(f32_tensor))
+                else:
+                    return f32_tensor
+        else:
+            # All other integer types
+            return np.int32(self.integers(low=low, high=high, size=shape))
+
+
+class TosaHashRandomGenerator(TosaRandomGenerator):
+    """Hash seeded TOSA random number generator."""
+
+    def __init__(self, seed, seed_list, restrict_range_by_type={}):
+        """Create TOSA random generator seeding it with a hashable list.
+
+        seed: integer starting seed
+        seed_list: list of hashable items to add to starting seed
+        restrict_range_by_type: dictionary of DTypes with (low, high) range tuples
+            This must contain entries for SHAPE and all Floating Point data types.
+            NOTE: For integer types, the high value must be the exclusive upper bound
+        """
+        # Convert seed_list to strings
+        seed_strings_list = [str(s) for s in seed_list]
+        # Create a single string and create hash
+        self._seed_string = "__".join(seed_strings_list)
+        self._hash = hashlib.md5(bytes(self._seed_string, "utf-8"))
+        # Add the hash value to the given seed
+        seed += int(self._hash.hexdigest(), 16)
+
+        logger.debug(f"Seed={seed} Seed string={self._seed_string}")
+        super().__init__(seed, restrict_range_by_type)
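
The stable mode hinges on TosaHashRandomGenerator: the same starting seed plus
the same hashable context always reproduces the same stream, regardless of how
many other tests were generated beforehand. A minimal sketch
(restrict_range_by_type is omitted because only integer draws are made here,
and the seed-list contents are illustrative):

    from generator.tosa_random_gen import TosaHashRandomGenerator

    # Same seed and same (op, shape, dtype) context -> same hash-derived seed
    rng_a = TosaHashRandomGenerator(42, ["conv2d", [1, 8, 8, 3], "int8"])
    rng_b = TosaHashRandomGenerator(42, ["conv2d", [1, 8, 8, 3], "int8"])
    # Identical streams, independent of generation order elsewhere in the run
    assert rng_a.randInt(0, 100) == rng_b.randInt(0, 100)
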
diff --git a/verif/generator/tosa_test_gen.py b/verif/generator/tosa_test_gen.py
index 3173906..7702753 100644
--- a/verif/generator/tosa_test_gen.py
+++ b/verif/generator/tosa_test_gen.py
@@ -20,6 +20,8 @@
 from generator.tosa_error_if import TosaErrorIfArgGen
 from generator.tosa_error_if import TosaErrorValidator
 from generator.tosa_error_if import TosaInvalidValidator
+from generator.tosa_random_gen import TosaHashRandomGenerator
+from generator.tosa_random_gen import TosaRandomGenerator
 from schemavalidation.schemavalidation import TestDescSchemaValidator
 from tosa.DType import DType
 from tosa.Op import Op
@@ -50,10 +52,10 @@
         self.basePath = args.output_dir
         self.random_seed = args.random_seed
         self.ser = None
-        self.rng = np.random.default_rng(self.random_seed)
         self.createDynamicOpLists()
         self.initOpListDefaults()
         self.quantGen = TosaQuantGen()
+        self.global_rng = None
         # Force makeShape to do a specific starting shape
         self.targetted_shape = None
         # JSON schema validation
@@ -80,12 +82,18 @@
                 vals.append(v)
             return tuple(sorted(vals))
 
-        self.random_float_range = {}
+        self.random_dtype_range = {
+            DType.SHAPE: tuple(self.args.tensor_shape_range[0:2])
+        }
         for dtype in (DType.FP32, DType.FP16, DType.BF16, DType.FP8E4M3, DType.FP8E5M2):
-            self.random_float_range[dtype] = convertFPRange(
+            self.random_dtype_range[dtype] = convertFPRange(
                 args.tensor_fp_value_range,
                 TosaTensorValuesGen.TVG_FLOAT_HIGH_VALUE[dtype],
             )
+        self.resetGlobalRNG()
+
+    def resetGlobalRNG(self):
+        self.global_rng = TosaRandomGenerator(self.random_seed, self.random_dtype_range)
 
     def createSerializer(self, opName, testPath):
         self.testPath = os.path.join(opName, testPath)
@@ -148,93 +156,7 @@
         with path_desc.open("w") as fd:
             json.dump(desc, fd, indent=1)
 
-    def resetRNG(self, seed=None):
-        if seed is None:
-            seed = self.random_seed + 1
-        self.rng = np.random.default_rng(seed)
-
-    def getDTypeRange(self, dtype, high_inclusive=False):
-        # Returns dtype value range boundaries (low, high)
-        # The high boundary is excluded in the range
-        # unless high_inclusive is True
-        if dtype in (DType.FP32, DType.FP16, DType.BF16, DType.FP8E4M3, DType.FP8E5M2):
-            return self.random_float_range[dtype]
-        elif dtype == DType.BOOL:
-            rng = (0, 2)
-        elif dtype == DType.UINT8:
-            rng = (0, 256)
-        elif dtype == DType.UINT16:
-            rng = (0, 65536)
-        elif dtype == DType.INT4:
-            # TOSA specific INT4 weight range from -7 to 7
-            rng = (-7, 8)
-        elif dtype == DType.INT8:
-            rng = (-128, 128)
-        elif dtype == DType.INT16:
-            rng = (-32768, 32768)
-        elif dtype == DType.INT32:
-            rng = (-(1 << 31), (1 << 31))
-        elif dtype == DType.SHAPE:
-            rng = tuple(self.args.tensor_shape_range[0:2])
-        elif dtype == DType.INT48:
-            rng = (-(1 << 47), (1 << 47))
-        else:
-            raise Exception("Unknown dtype: {}".format(dtype))
-
-        if not high_inclusive:
-            # Exclusive high: low <= range < high
-            return rng
-        else:
-            # Inclusive range: low <= range <= high
-            return (rng[0], rng[1] - 1)
-
-    def getRandTensor(self, shape, dtype, data_range=None):
-        if data_range is None:
-            low, high = self.getDTypeRange(dtype)
-        else:
-            low, high = data_range
-
-        if dtype == DType.BOOL:
-            return np.bool_(self.rng.choice(a=[False, True], size=shape))
-        elif dtype == DType.INT4:
-            return np.int8(self.rng.integers(low=low, high=high, size=shape))
-        elif dtype == DType.INT8:
-            return np.int8(self.rng.integers(low=low, high=high, size=shape))
-        elif dtype == DType.UINT8:
-            return np.uint8(self.rng.integers(low=low, high=high, size=shape))
-        elif dtype == DType.INT16:
-            return np.int16(self.rng.integers(low=low, high=high, size=shape))
-        elif dtype == DType.UINT16:
-            return np.uint16(self.rng.integers(low=low, high=high, size=shape))
-        elif dtype in (DType.INT48, DType.SHAPE):
-            return np.int64(self.rng.integers(low=low, high=high, size=shape))
-        elif dtype in (
-            DType.FP16,
-            DType.BF16,
-            DType.FP32,
-            DType.FP8E4M3,
-            DType.FP8E5M2,
-        ):
-            f_tensor = self.rng.uniform(low=low, high=high, size=shape)
-
-            if dtype == DType.FP16:
-                return np.float16(f_tensor)
-            else:
-                f32_tensor = np.float32(f_tensor)
-                if dtype == DType.BF16:
-                    # Floor the last 16 bits of each f32 value
-                    return np.float32(gtu.vect_f32_to_bf16(f32_tensor))
-                elif dtype == DType.FP8E4M3:
-                    return np.float32(gtu.vect_f32_to_fp8e4m3(f32_tensor))
-                elif dtype == DType.FP8E5M2:
-                    return np.float32(gtu.vect_f32_to_fp8e5m2(f32_tensor))
-                else:
-                    return f32_tensor
-        else:
-            # All other integer types
-            return np.int32(self.rng.integers(low=low, high=high, size=shape))
-
-    def buildPlaceholderTensors(self, shape_list, dtype_list):
+    def buildPlaceholderTensors(self, rng, shape_list, dtype_list):
         placeholders = []
 
         assert len(shape_list) == len(dtype_list)
@@ -242,12 +164,12 @@
         arr = None
         for idx, shape in enumerate(shape_list):
             if not self.args.lazy_data_gen:
-                arr = self.getRandTensor(shape, dtype_list[idx])
+                arr = rng.randTensor(shape, dtype_list[idx])
             placeholders.append(self.ser.addPlaceholder(shape, dtype_list[idx], arr))
 
         return placeholders
 
-    def buildConstTensors(self, shape_list, dtype_list):
+    def buildConstTensors(self, rng, shape_list, dtype_list):
         consts = []
 
         assert len(shape_list) == len(dtype_list)
@@ -255,16 +177,16 @@
         arr = None
         for idx, shape in enumerate(shape_list):
             if not self.args.lazy_data_gen:
-                arr = self.getRandTensor(shape, dtype_list[idx])
+                arr = rng.randTensor(shape, dtype_list[idx])
             consts.append(self.ser.addConst(shape, dtype_list[idx], arr))
 
         return consts
 
-    def makeShape(self, rank):
+    def makeShape(self, rng, rank):
         if self.targetted_shape:
             return np.int32(self.targetted_shape)
         return np.int32(
-            self.rng.integers(
+            rng.integers(
                 low=self.args.tensor_shape_range[0],
                 high=self.args.tensor_shape_range[1],
                 size=rank,
@@ -274,33 +196,6 @@
     def setTargetShape(self, shape):
         self.targetted_shape = shape
 
-    def randInt(self, low=0, high=256):
-        return np.int32(self.rng.integers(low=low, high=high, size=1))[0]
-
-    def getRandNumberDType(self, dtype):
-        low, high = self.getDTypeRange(dtype)
-
-        if dtype == DType.FP32:
-            return np.float32(self.rng.uniform(low=low, high=high))
-        elif dtype == DType.FP16:
-            return np.float16(self.rng.uniform(low=low, high=high))
-        elif dtype == DType.BF16:
-            rand_f32 = np.float32(self.rng.uniform(low=low, high=high))
-            return gtu.vect_f32_to_bf16(rand_f32)
-        elif dtype == DType.FP8E4M3:
-            rand_f32 = np.float32(self.rng.uniform(low=low, high=high))
-            return gtu.vect_f32_to_fp8e4m3(rand_f32)
-        elif dtype == DType.FP8E5M2:
-            rand_f32 = np.float32(self.rng.uniform(low=low, high=high))
-            return gtu.vect_f32_to_fp8e5m2(rand_f32)
-        elif dtype == DType.BOOL:
-            return self.rng.choice([False, True])
-        elif dtype == DType.INT48 or dtype == DType.SHAPE:
-            # Special size
-            return np.int64(self.rng.integers(low, high, size=1))[0]
-
-        return np.int32(self.rng.integers(low, high, size=1))[0]
-
     def shapeStr(self, shape):
 
         sStr = []
@@ -330,8 +225,8 @@
             shape[0] = min(shape[0], self.args.max_batch_size)
         return shape
 
-    def makeDimension(self):
-        return self.randInt(
+    def makeDimension(self, rng):
+        return rng.randInt(
             low=self.args.tensor_shape_range[0], high=self.args.tensor_shape_range[1]
         )
 
@@ -445,11 +340,18 @@
                 return compliance
 
     def build_unary(
-        self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+        self,
+        rng,
+        op,
+        inputs,
+        args_dict,
+        validator_fcns=None,
+        error_name=None,
+        qinfo=None,
     ):
         assert len(inputs) == 1
         a = inputs[0]
-        result_tensor = OutputShaper.unaryOp(self.ser, self.rng, a, error_name)
+        result_tensor = OutputShaper.unaryOp(self.ser, rng, a, error_name)
 
         assert not isinstance(op, int)
 
@@ -457,8 +359,10 @@
         if error_name == ErrorIf.WrongOutputType:
             if result_tensor.dtype not in [DType.INT8, DType.UINT8]:
                 qinfo = [
-                    TosaQuantGen.getZeroPoint(self, a.dtype),
-                    TosaQuantGen.getZeroPoint(self, result_tensor.dtype),
+                    TosaQuantGen.getZeroPoint(rng, self.args.zeropoint, a.dtype),
+                    TosaQuantGen.getZeroPoint(
+                        rng, self.args.zeropoint, result_tensor.dtype
+                    ),
                 ]
 
         # Invalidate Input/Output list for error if checks.
@@ -467,7 +371,7 @@
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -498,13 +402,11 @@
         return TosaTestGen.BuildInfo(result_tensor, compliance)
 
     def build_binary_broadcast(
-        self, op, inputs, args_dict, validator_fcns, error_name=None, qinfo=None
+        self, rng, op, inputs, args_dict, validator_fcns, error_name=None, qinfo=None
     ):
         assert len(inputs) == 2
         a, b = inputs
-        result_tensor = OutputShaper.binaryBroadcastOp(
-            self.ser, self.rng, a, b, error_name
-        )
+        result_tensor = OutputShaper.binaryBroadcastOp(self.ser, rng, a, b, error_name)
 
         # Invalidate Input/Output list for error if checks.
         input_list = [a.name, b.name]
@@ -512,7 +414,7 @@
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -539,20 +441,20 @@
 
         return TosaTestGen.BuildInfo(result_tensor, compliance)
 
-    def build_binary_nonbroadcast(self, op, a, b, validator_fcns=None, error_name=None):
-        result_tens = OutputShaper.binaryNonBroadcastOp(self.ser, a, b)
-        self.ser.addOperator(op["op"], [a.name, b.name], [result_tens.name])
-        return result_tens
-
     def build_arithmetic_right_shift(
-        self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+        self,
+        rng,
+        op,
+        inputs,
+        args_dict,
+        validator_fcns=None,
+        error_name=None,
+        qinfo=None,
     ):
         assert len(inputs) == 2
         a, b = inputs
         round = args_dict["round"]
-        result_tensor = OutputShaper.binaryBroadcastOp(
-            self.ser, self.rng, a, b, error_name
-        )
+        result_tensor = OutputShaper.binaryBroadcastOp(self.ser, rng, a, b, error_name)
 
         # Invalidate Input/Output list for error if checks.
         input_list = [a.name, b.name]
@@ -560,7 +462,7 @@
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -591,15 +493,20 @@
         return TosaTestGen.BuildInfo(result_tensor, compliance)
 
     def build_mul(
-        self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+        self,
+        rng,
+        op,
+        inputs,
+        args_dict,
+        validator_fcns=None,
+        error_name=None,
+        qinfo=None,
     ):
         # Note that mul is binary operator but it has a shift value tensor
         assert len(inputs) == 3
         a, b, s = inputs
 
-        result_tensor = OutputShaper.binaryBroadcastOp(
-            self.ser, self.rng, a, b, error_name
-        )
+        result_tensor = OutputShaper.binaryBroadcastOp(self.ser, rng, a, b, error_name)
 
         # Special for multiply: Force the result to INT32 for INT types
         if a.dtype not in (DType.FP16, DType.BF16, DType.FP32):
@@ -607,7 +514,7 @@
 
         if error_name == ErrorIf.WrongOutputType:
             all_dtypes = [DType.INT8, DType.INT16, DType.INT48]
-            outputDType = self.rng.choice(all_dtypes)
+            outputDType = rng.choice(all_dtypes)
             result_tensor.setDtype(outputDType)
 
         # Invalidate Input/Output list for error if checks.
@@ -616,7 +523,7 @@
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -644,12 +551,19 @@
         return TosaTestGen.BuildInfo(result_tensor, compliance)
 
     def build_table(
-        self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+        self,
+        rng,
+        op,
+        inputs,
+        args_dict,
+        validator_fcns=None,
+        error_name=None,
+        qinfo=None,
     ):
         assert len(inputs) == 1
         a = inputs[0]
         table = args_dict["table"]
-        result_tensor = OutputShaper.tableOp(self.ser, self.rng, a, error_name)
+        result_tensor = OutputShaper.tableOp(self.ser, rng, a, error_name)
 
         attr = ts.TosaSerializerAttribute()
         attr.TableAttribute(table)
@@ -660,7 +574,7 @@
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -687,14 +601,19 @@
         return TosaTestGen.BuildInfo(result_tensor, compliance)
 
     def build_select(
-        self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+        self,
+        rng,
+        op,
+        inputs,
+        args_dict,
+        validator_fcns=None,
+        error_name=None,
+        qinfo=None,
     ):
         assert len(inputs) == 3
         cond, a, b = inputs
 
-        result_tensor = OutputShaper.selectOp(
-            self.ser, self.rng, cond, a, b, error_name
-        )
+        result_tensor = OutputShaper.selectOp(self.ser, rng, cond, a, b, error_name)
 
         # Invalidate Input/Output list for error if checks.
         input_list = [cond.name, a.name, b.name]
@@ -702,7 +621,7 @@
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -735,14 +654,19 @@
         return TosaTestGen.BuildInfo(result_tensor, compliance)
 
     def build_comparison(
-        self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+        self,
+        rng,
+        op,
+        inputs,
+        args_dict,
+        validator_fcns=None,
+        error_name=None,
+        qinfo=None,
     ):
         assert len(inputs) == 2
         a, b = inputs
 
-        result_tensor = OutputShaper.binaryComparisonOp(
-            self.ser, self.rng, a, b, error_name
-        )
+        result_tensor = OutputShaper.binaryComparisonOp(self.ser, rng, a, b, error_name)
 
         # Invalidate Input/Output list for error if checks.
         input_list = [a.name, b.name]
@@ -750,7 +674,7 @@
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -783,12 +707,12 @@
         return TosaTestGen.BuildInfo(result_tensor, compliance)
 
     def build_argmax(
-        self, op, inputs, args_dict, validator_fcns, error_name, qinfo=None
+        self, rng, op, inputs, args_dict, validator_fcns, error_name, qinfo=None
     ):
         assert len(inputs) == 1
         a = inputs[0]
         axis = args_dict["axis"]
-        result_tensor = OutputShaper.argmaxOp(self.ser, self.rng, a, axis, error_name)
+        result_tensor = OutputShaper.argmaxOp(self.ser, rng, a, axis, error_name)
 
         # Invalidate Input/Output list for error if checks.
         input_list = [a.name]
@@ -796,7 +720,7 @@
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -828,6 +752,7 @@
 
     def build_pool2d(
         self,
+        rng,
         op,
         inputs,
         args_dict,
@@ -846,15 +771,17 @@
         kernel = args_dict["kernel"]
 
         result_tensor = OutputShaper.pool2dOp(
-            self.ser, self.rng, input, kernel, stride, pad, error_name
+            self.ser, rng, input, kernel, stride, pad, error_name
         )
 
         # Ensure new output type has correct qinfo
         if error_name == ErrorIf.WrongInputType:
             if input.dtype not in [DType.INT8, DType.UINT8]:
                 qinfo = [
-                    TosaQuantGen.getZeroPoint(self, input.dtype),
-                    TosaQuantGen.getZeroPoint(self, result_tensor.dtype),
+                    TosaQuantGen.getZeroPoint(rng, self.args.zeropoint, input.dtype),
+                    TosaQuantGen.getZeroPoint(
+                        rng, self.args.zeropoint, result_tensor.dtype
+                    ),
                 ]
 
         # Invalidate Input/Output list for error if checks.
@@ -863,7 +790,7 @@
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -903,6 +830,7 @@
 
     def build_conv2d(
         self,
+        rng,
         op,
         inputs,
         args_dict,
@@ -920,7 +848,7 @@
         assert len(padding) == 4
         result_tensor = OutputShaper.conv2dOp(
             self.ser,
-            self.rng,
+            rng,
             ifm,
             filter,
             accum_dtype,
@@ -936,8 +864,10 @@
             DType.UINT8,
         ):
             qinfo = [
-                TosaQuantGen.getZeroPoint(self, ifm.dtype),
-                TosaQuantGen.getZeroPoint(self, result_tensor.dtype),
+                TosaQuantGen.getZeroPoint(rng, self.args.zeropoint, ifm.dtype),
+                TosaQuantGen.getZeroPoint(
+                    rng, self.args.zeropoint, result_tensor.dtype
+                ),
             ]
 
         # Invalidate Input/Output list for error_if checks.
@@ -945,7 +875,7 @@
         output_list = [result_tensor.name]
         num_operands = sum(op["operands"])
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -985,6 +915,7 @@
 
     def build_conv3d(
         self,
+        rng,
         op,
         inputs,
         args_dict,
@@ -1002,7 +933,7 @@
         assert len(padding) == 6
         result_tensor = OutputShaper.conv3dOp(
             self.ser,
-            self.rng,
+            rng,
             ifm,
             filter,
             accum_dtype,
@@ -1018,8 +949,10 @@
             DType.UINT8,
         ):
             qinfo = [
-                TosaQuantGen.getZeroPoint(self, ifm.dtype),
-                TosaQuantGen.getZeroPoint(self, result_tensor.dtype),
+                TosaQuantGen.getZeroPoint(rng, self.args.zeropoint, ifm.dtype),
+                TosaQuantGen.getZeroPoint(
+                    rng, self.args.zeropoint, result_tensor.dtype
+                ),
             ]
 
         # Invalidate Input/Output list for error_if checks.
@@ -1027,7 +960,7 @@
         output_list = [result_tensor.name]
         num_operands = sum(op["operands"])
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -1067,6 +1000,7 @@
 
     def build_transpose_conv2d(
         self,
+        rng,
         op,
         inputs,
         args_dict,
@@ -1083,7 +1017,7 @@
 
         assert len(out_pad) == 4
         result_tensor = OutputShaper.transposeConv2DOp(
-            self.ser, self.rng, ifm, output_shape, accum_dtype, error_name
+            self.ser, rng, ifm, output_shape, accum_dtype, error_name
         )
 
         # Ensure new output type has correct qinfo
@@ -1092,8 +1026,10 @@
             DType.UINT8,
         ):
             qinfo = [
-                TosaQuantGen.getZeroPoint(self, ifm.dtype),
-                TosaQuantGen.getZeroPoint(self, result_tensor.dtype),
+                TosaQuantGen.getZeroPoint(rng, self.args.zeropoint, ifm.dtype),
+                TosaQuantGen.getZeroPoint(
+                    rng, self.args.zeropoint, result_tensor.dtype
+                ),
             ]
 
         # Invalidate Input/Output list for error_if checks.
@@ -1101,7 +1037,7 @@
         output_list = [result_tensor.name]
         num_operands = sum(op["operands"])
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -1142,6 +1078,7 @@
 
     def build_depthwise_conv2d(
         self,
+        rng,
         op,
         inputs,
         args_dict,
@@ -1158,7 +1095,7 @@
 
         result_tensor = OutputShaper.depthwiseConv2dOp(
             self.ser,
-            self.rng,
+            rng,
             ifm,
             filter,
             accum_dtype,
@@ -1174,8 +1111,10 @@
             DType.UINT8,
         ):
             qinfo = [
-                TosaQuantGen.getZeroPoint(self, ifm.dtype),
-                TosaQuantGen.getZeroPoint(self, result_tensor.dtype),
+                TosaQuantGen.getZeroPoint(rng, self.args.zeropoint, ifm.dtype),
+                TosaQuantGen.getZeroPoint(
+                    rng, self.args.zeropoint, result_tensor.dtype
+                ),
             ]
 
         # Invalidate Input/Output list for error_if checks.
@@ -1183,7 +1122,7 @@
         output_list = [result_tensor.name]
         num_operands = sum(op["operands"])
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -1223,6 +1162,7 @@
 
     def build_fully_connected(
         self,
+        rng,
         op,
         inputs,
         args_dict,
@@ -1235,7 +1175,7 @@
         accum_dtype = args_dict["acc_type"]
 
         result_tensor = OutputShaper.fullyConnectedOp(
-            self.ser, self.rng, ifm, filter, accum_dtype, error_name
+            self.ser, rng, ifm, filter, accum_dtype, error_name
         )
 
         # Invalidate Input/Output list for error if checks.
@@ -1244,7 +1184,7 @@
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -1278,13 +1218,20 @@
         return TosaTestGen.BuildInfo(result_tensor, compliance)
 
     def build_matmul(
-        self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+        self,
+        rng,
+        op,
+        inputs,
+        args_dict,
+        validator_fcns=None,
+        error_name=None,
+        qinfo=None,
     ):
         assert len(inputs) == 2
         a, b = inputs
         accum_dtype = args_dict["acc_type"]
         result_tensor = OutputShaper.matmulOp(
-            self.ser, self.rng, a, b, accum_dtype, error_name
+            self.ser, rng, a, b, accum_dtype, error_name
         )
 
         # Invalidate Input/Output list for error if checks.
@@ -1293,7 +1240,7 @@
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -1328,12 +1275,12 @@
         return TosaTestGen.BuildInfo(result_tensor, compliance)
 
     def build_reduce(
-        self, op, inputs, args_dict, validator_fcns, error_name=None, qinfo=None
+        self, rng, op, inputs, args_dict, validator_fcns, error_name=None, qinfo=None
     ):
         assert len(inputs) == 1
         a = inputs[0]
         axis = args_dict["axis"]
-        result_tensor = OutputShaper.reduceOp(self.ser, self.rng, a, axis, error_name)
+        result_tensor = OutputShaper.reduceOp(self.ser, rng, a, axis, error_name)
 
         # Invalidate Input/Output list for error if checks.
         input_list = [a.name]
@@ -1341,7 +1288,7 @@
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -1377,19 +1324,26 @@
         return TosaTestGen.BuildInfo(result_tensor, compliance)
 
     def build_clamp(
-        self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+        self,
+        rng,
+        op,
+        inputs,
+        args_dict,
+        validator_fcns=None,
+        error_name=None,
+        qinfo=None,
     ):
         assert len(inputs) == 1
         a = inputs[0]
 
-        result_tensor = OutputShaper.unaryOp(self.ser, self.rng, a, error_name)
+        result_tensor = OutputShaper.unaryOp(self.ser, rng, a, error_name)
 
-        v = [self.getRandNumberDType(a.dtype), self.getRandNumberDType(a.dtype)]
+        v = [rng.randNumberDType(a.dtype), rng.randNumberDType(a.dtype)]
 
         if error_name == ErrorIf.MaxSmallerMin:
             # Make sure the numbers are different to invoke this error
             while v[0] == v[1]:
-                v = [self.getRandNumberDType(a.dtype), self.getRandNumberDType(a.dtype)]
+                v = [rng.randNumberDType(a.dtype), rng.randNumberDType(a.dtype)]
             max_val = min(v)
             min_val = max(v)
         else:
@@ -1402,7 +1356,7 @@
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -1449,29 +1403,20 @@
 
         return TosaTestGen.BuildInfo(result_tensor, compliance)
 
-    def build_leaky_relu(self, op, a, validator_fcns=None, error_name=None):
-        result_tens = OutputShaper.unaryOp(self.ser, self.rng, a, error_name)
-        attr = ts.TosaSerializerAttribute()
-
-        attr.LeakyReluAttribute(self.getRandNumberDType(DType.FP32))
-
-        self.ser.addOperator(op["op"], [a.name], [result_tens.name], attr)
-        return result_tens
-
-    # Needs an additional type/input
-    def build_prelu(self, op, a, validator_fcns=None, error_name=None):
-        result_tens = OutputShaper.unaryOp(self.ser, self.rng, a, error_name)
-
-        self.ser.addOperator(op["op"], [a.name], [result_tens.name])
-        return result_tens
-
     def build_activation(
-        self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+        self,
+        rng,
+        op,
+        inputs,
+        args_dict,
+        validator_fcns=None,
+        error_name=None,
+        qinfo=None,
     ):
         assert len(inputs) == 1
         a = inputs[0]
 
-        result_tensor = OutputShaper.unaryOp(self.ser, self.rng, a, error_name)
+        result_tensor = OutputShaper.unaryOp(self.ser, rng, a, error_name)
 
         # Invalidate Input/Output list for error if checks.
         input_list = [a.name]
@@ -1479,7 +1424,7 @@
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -1507,7 +1452,14 @@
         return TosaTestGen.BuildInfo(result_tensor, compliance)
 
     def build_concat(
-        self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+        self,
+        rng,
+        op,
+        inputs,
+        args_dict,
+        validator_fcns=None,
+        error_name=None,
+        qinfo=None,
     ):
         if op["op"] == Op.CONCAT_SHAPE:
             axis = 0
@@ -1517,7 +1469,7 @@
             assert type(axis) == int
 
         result_tensor = OutputShaper.concatOp(
-            self.ser, self.rng, axis, inputs, error_name=error_name
+            self.ser, rng, axis, inputs, error_name=error_name
         )
 
         input_tensor_names = []
@@ -1530,7 +1482,7 @@
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -1567,6 +1519,7 @@
 
     def build_pad(
         self,
+        rng,
         op,
         inputs,
         args_dict,
@@ -1581,7 +1534,7 @@
         pad_const_int = args_dict["pad_const_int"]
         pad_const_float = args_dict["pad_const_fp"]
 
-        result_tensor = OutputShaper.padOp(self.ser, self.rng, a, padding, error_name)
+        result_tensor = OutputShaper.padOp(self.ser, rng, a, padding, error_name)
 
         # get pad_const_val_as_bytes from either pad_const_float or pad_const_int
         if gtu.dtypeIsFloat(a.dtype):
@@ -1598,7 +1551,7 @@
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -1630,6 +1583,7 @@
 
     def build_dim(
         self,
+        rng,
         op,
         inputs,
         args_dict,
@@ -1640,7 +1594,7 @@
         assert len(inputs) == 1
         a = inputs[0]
         axis = args_dict["axis"]
-        result_tensor = OutputShaper.dimOp(self.ser, self.rng, a, axis, error_name)
+        result_tensor = OutputShaper.dimOp(self.ser, rng, a, axis, error_name)
 
         # Invalidate Input/Output list for error if checks.
         input_list = [a.name]
@@ -1648,7 +1602,7 @@
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -1675,15 +1629,20 @@
         return TosaTestGen.BuildInfo(result_tensor, None)
 
     def build_reshape(
-        self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+        self,
+        rng,
+        op,
+        inputs,
+        args_dict,
+        validator_fcns=None,
+        error_name=None,
+        qinfo=None,
     ):
         assert len(inputs) == 2
         a = inputs[0]
         shape = inputs[1]
         shape_attr = args_dict["new_shape"]
-        result_tensor = OutputShaper.reshapeOp(
-            self.ser, self.rng, a, shape_attr, error_name
-        )
+        result_tensor = OutputShaper.reshapeOp(self.ser, rng, a, shape_attr, error_name)
 
         # Invalidate Input/Output list for error if checks.
         input_list = [a.name, shape.name]
@@ -1691,7 +1650,7 @@
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -1719,12 +1678,19 @@
         return TosaTestGen.BuildInfo(result_tensor, compliance)
 
     def build_reverse(
-        self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+        self,
+        rng,
+        op,
+        inputs,
+        args_dict,
+        validator_fcns=None,
+        error_name=None,
+        qinfo=None,
     ):
         assert len(inputs) == 1
         a = inputs[0]
         axis = args_dict["axis"]
-        result_tensor = OutputShaper.unaryOp(self.ser, self.rng, a, error_name)
+        result_tensor = OutputShaper.unaryOp(self.ser, rng, a, error_name)
 
         # Invalidate Input/Output list for error if checks.
         input_list = [a.name]
@@ -1732,7 +1698,7 @@
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -1759,15 +1725,20 @@
         return TosaTestGen.BuildInfo(result_tensor, None)
 
     def build_transpose(
-        self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+        self,
+        rng,
+        op,
+        inputs,
+        args_dict,
+        validator_fcns=None,
+        error_name=None,
+        qinfo=None,
     ):
         assert len(inputs) == 1
         a = inputs[0]
         perms = args_dict["perms"]
 
-        result_tensor = OutputShaper.transposeOp(
-            self.ser, self.rng, a, perms, error_name
-        )
+        result_tensor = OutputShaper.transposeOp(self.ser, rng, a, perms, error_name)
 
         attr = ts.TosaSerializerAttribute()
         attr.TransposeAttribute(perms)
@@ -1778,7 +1749,7 @@
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -1808,7 +1779,14 @@
         return TosaTestGen.BuildInfo(result_tensor, compliance)
 
     def build_slice(
-        self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+        self,
+        rng,
+        op,
+        inputs,
+        args_dict,
+        validator_fcns=None,
+        error_name=None,
+        qinfo=None,
     ):
         assert len(inputs) == 3
         a, start_var, size_var = inputs
@@ -1816,7 +1794,7 @@
         size_const = args_dict["size"]
 
         result_tensor = OutputShaper.sliceOp(
-            self.ser, self.rng, a, start_const, size_const, error_name
+            self.ser, rng, a, start_const, size_const, error_name
         )
 
         # Invalidate Input/Output list for error if checks.
@@ -1825,7 +1803,7 @@
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -1856,14 +1834,21 @@
         return TosaTestGen.BuildInfo(result_tensor, compliance)
 
     def build_tile(
-        self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+        self,
+        rng,
+        op,
+        inputs,
+        args_dict,
+        validator_fcns=None,
+        error_name=None,
+        qinfo=None,
     ):
         assert len(inputs) == 2
         a = inputs[0]
         multiples = inputs[1]
         multiples_attr = args_dict["multiples"]
         result_tensor = OutputShaper.tileOp(
-            self.ser, self.rng, a, multiples_attr, error_name
+            self.ser, rng, a, multiples_attr, error_name
         )
 
         # Invalidate Input/Output list for error if checks.
@@ -1872,7 +1857,7 @@
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -1901,13 +1886,20 @@
         return TosaTestGen.BuildInfo(result_tensor, compliance)
 
     def build_gather(
-        self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+        self,
+        rng,
+        op,
+        inputs,
+        args_dict,
+        validator_fcns=None,
+        error_name=None,
+        qinfo=None,
     ):
         assert len(inputs) == 2
         values, indices = inputs
 
         result_tensor = OutputShaper.gatherOp(
-            self.ser, self.rng, values, indices, error_name
+            self.ser, rng, values, indices, error_name
         )
 
         # Invalidate Input/Output list for error if checks.
@@ -1916,7 +1908,7 @@
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -1944,12 +1936,19 @@
         return TosaTestGen.BuildInfo(result_tensor, compliance)
 
     def build_scatter(
-        self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+        self,
+        rng,
+        op,
+        inputs,
+        args_dict,
+        validator_fcns=None,
+        error_name=None,
+        qinfo=None,
     ):
         assert len(inputs) == 3
         values_in, indices, input = inputs
         result_tensor = OutputShaper.scatterOp(
-            self.ser, self.rng, values_in, indices, input, error_name
+            self.ser, rng, values_in, indices, input, error_name
         )
 
         # Invalidate Input/Output list for error if checks.
@@ -1958,7 +1957,7 @@
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -1987,6 +1986,7 @@
 
     def build_resize(
         self,
+        rng,
         op,
         inputs,
         args_dict,
@@ -2008,7 +2008,7 @@
 
         result_tensor = OutputShaper.resizeOp(
             self.ser,
-            self.rng,
+            rng,
             input,
             mode,
             scale,
@@ -2030,7 +2030,7 @@
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -2064,16 +2064,15 @@
 
         return TosaTestGen.BuildInfo(result_tensor, compliance)
 
-    def build_identityn(self, op, val, val2, validator_fcns=None, error_name=None):
-        result_tens = OutputShaper.unaryOp(self.ser, self.rng, val, error_name)
-        result_tens2 = OutputShaper.unaryOp(self.ser, self.rng, val2, error_name)
-        self.ser.addOperator(
-            op, [val.name, val2.name], [result_tens.name, result_tens2.name]
-        )
-        return result_tens
-
     def build_const(
-        self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+        self,
+        rng,
+        op,
+        inputs,
+        args_dict,
+        validator_fcns=None,
+        error_name=None,
+        qinfo=None,
     ):
         assert len(inputs) == 1
         val = inputs[0]
@@ -2087,14 +2086,21 @@
 
     # Type Conversion
     def build_cast(
-        self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+        self,
+        rng,
+        op,
+        inputs,
+        args_dict,
+        validator_fcns=None,
+        error_name=None,
+        qinfo=None,
     ):
         assert len(inputs) == 1
         val = inputs[0]
         out_dtype = args_dict["out_type"]
 
         result_tensor = OutputShaper.typeConversionOp(
-            self.ser, self.rng, val, out_dtype, error_name
+            self.ser, rng, val, out_dtype, error_name
         )
 
         # Invalidate Input/Output list for error if checks.
@@ -2103,7 +2109,7 @@
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -2132,6 +2138,7 @@
 
     def build_rescale(
         self,
+        rng,
         op,
         inputs,
         args_dict,
@@ -2151,7 +2158,7 @@
         multiplier_arr = args_dict["multiplier"]
 
         result_tensor = OutputShaper.typeConversionOp(
-            self.ser, self.rng, val, out_dtype, error_name
+            self.ser, rng, val, out_dtype, error_name
         )
 
         if per_channel:
@@ -2166,46 +2173,46 @@
         output_unsigned = False
 
         if val.dtype == DType.INT8:
-            input_zp = self.randInt(-128, 128)
+            input_zp = rng.randInt(-128, 128)
             in_type_width += 1
         elif val.dtype == DType.UINT8:
-            input_zp = self.randInt(0, 256)
+            input_zp = rng.randInt(0, 256)
             in_type_width += 1
             input_unsigned = True
         elif error_name in [
             ErrorIf.InputZeroPointNotZero,
             ErrorIf.U16InputZeroPointNotValid,
         ]:
-            input_zp = self.randInt(-128, 128)
+            input_zp = rng.randInt(-128, 128)
             if input_zp == 0:
-                input_zp = input_zp + self.rng.integers(1, 10)
+                input_zp = input_zp + rng.integers(1, 10)
             in_type_width += 1
         elif val.dtype == DType.UINT16:
             # Must come after ErrorIf.U16InputZeroPointNotValid check
-            input_zp = self.rng.choice([0, 32768])
+            input_zp = rng.choice([0, 32768])
             in_type_width += 1
             input_unsigned = True
         else:
             input_zp = 0
 
         if out_dtype == DType.INT8:
-            output_zp = self.randInt(-128, 128)
+            output_zp = rng.randInt(-128, 128)
             out_type_width += 1
         elif out_dtype == DType.UINT8:
-            output_zp = self.randInt(0, 256)
+            output_zp = rng.randInt(0, 256)
             out_type_width += 1
             output_unsigned = True
         elif error_name in [
             ErrorIf.OutputZeroPointNotZero,
             ErrorIf.U16OutputZeroPointNotValid,
         ]:
-            output_zp = self.randInt(-128, 128)
+            output_zp = rng.randInt(-128, 128)
             if output_zp == 0:
-                output_zp = output_zp + self.rng.integers(1, 10)
+                output_zp = output_zp + rng.integers(1, 10)
             out_type_width += 1
         elif out_dtype == DType.UINT16:
             # Must come after ErrorIf.U16OutputZeroPointNotValid check
-            output_zp = self.rng.choice([0, 32768])
+            output_zp = rng.choice([0, 32768])
             out_type_width += 1
             output_unsigned = True
         else:
@@ -2255,7 +2262,7 @@
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_list, output_list
+            rng, error_name, input_list, output_list
         )
 
         qinfo = (input_zp, output_zp)
@@ -2296,13 +2303,13 @@
 
         return TosaTestGen.BuildInfo(result_tensor, compliance)
 
-    def _get_condition_tensor(self, op, cond, error_name):
+    def _get_condition_tensor(self, rng, op, cond, error_name):
         if error_name == ErrorIf.CondIfCondNotMatchingBool:
-            cond_type = gtu.get_wrong_output_type(op, self.rng, DType.BOOL)
+            cond_type = gtu.get_wrong_output_type(op, rng, DType.BOOL)
         else:
             cond_type = DType.BOOL
         if error_name == ErrorIf.CondIfCondShapeNotSizeOne:
-            choice = self.rng.choice([1, 2])
+            choice = rng.choice([1, 2])
             if choice == 1:
                 cond_shape = [2]
             else:
@@ -2315,6 +2322,7 @@
 
     def build_cond_if_const(
         self,
+        rng,
         op,
         inputs,
         args_dict,
@@ -2331,7 +2339,7 @@
         cond = args_dict["condition"]
 
         # Condition tensor
-        cond_tens = self._get_condition_tensor(op, cond, error_name)
+        cond_tens = self._get_condition_tensor(rng, op, cond, error_name)
 
         # Make then/else tensors
         out_shape = then_tens.shape
@@ -2346,14 +2354,14 @@
             incorrect_shape = deepcopy(then_tens.shape)
             for i in range(len(incorrect_shape)):
                 incorrect_shape[i] += (
-                    self.rng.choice([-3, -2, 2, 3])
+                    rng.choice([-3, -2, 2, 3])
                     if incorrect_shape[i] > 3
-                    else self.rng.choice([1, 2, 4])
+                    else rng.choice([1, 2, 4])
                 )
-            incorrect_arr = np.int32(self.rng.integers(0, 256, size=incorrect_shape))
+            incorrect_arr = np.int32(rng.integers(0, 256, size=incorrect_shape))
 
-        then_arr = np.int32(self.rng.integers(0, 256, size=out_shape))
-        else_arr = np.int32(self.rng.integers(0, 256, size=out_shape))
+        then_arr = np.int32(rng.integers(0, 256, size=out_shape))
+        else_arr = np.int32(rng.integers(0, 256, size=out_shape))
 
         # And the result tensor based on any of the outputs
         result_tensor = self.ser.addOutput(out_shape, dtype)
@@ -2400,6 +2408,7 @@
 
     def build_cond_if_binary(
         self,
+        rng,
         op,
         inputs,
         args_dict,
@@ -2415,7 +2424,7 @@
         cond = args_dict["condition"]
 
         # Condition tensor
-        cond_tens = self._get_condition_tensor(op, cond, error_name)
+        cond_tens = self._get_condition_tensor(rng, op, cond, error_name)
 
         result_tensor = self.ser.addOutput(a.shape, a.dtype)
 
@@ -2433,7 +2442,7 @@
         ]:
             incorrect_shape = a.shape.copy()
             for i in range(len(incorrect_shape)):
-                incorrect_shape[i] += self.rng.choice([-3, -2, 2, 3])
+                incorrect_shape[i] += rng.choice([-3, -2, 2, 3])
             incorrect_block_input = deepcopy(a)
             incorrect_block_input.shape = incorrect_shape
 
@@ -2503,7 +2512,14 @@
         return TosaTestGen.BuildInfo(result_tensor, compliance)
 
     def build_while_loop(
-        self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+        self,
+        rng,
+        op,
+        inputs,
+        args_dict,
+        validator_fcns=None,
+        error_name=None,
+        qinfo=None,
     ):
         assert len(inputs) == 1
         a = inputs[0]
@@ -2528,7 +2544,7 @@
         if error_name == ErrorIf.InputListOutputListMismatch:
             incorrect_acc = deepcopy(acc)
             for i in range(len(incorrect_acc.shape)):
-                incorrect_acc.shape[i] += self.rng.choice([-3, -2, 2, 3])
+                incorrect_acc.shape[i] += rng.choice([-3, -2, 2, 3])
             acc_out = self.ser.addIntermediate(incorrect_acc.shape, acc.dtype)
         else:
             acc_out = self.ser.addIntermediate(acc.shape, acc.dtype)
@@ -2549,13 +2565,13 @@
         ]:
             incorrect_iter = deepcopy(iter)
             for i in range(len(incorrect_iter.shape)):
-                incorrect_iter.shape[i] += self.rng.choice([-3, -2, 2, 3])
+                incorrect_iter.shape[i] += rng.choice([-3, -2, 2, 3])
             if len(incorrect_iter.shape) == 0:
-                incorrect_iter.shape.append(self.rng.choice([-3, -2, 2, 3]))
+                incorrect_iter.shape.append(rng.choice([-3, -2, 2, 3]))
 
             incorrect_acc = deepcopy(acc)
             for i in range(len(incorrect_acc.shape)):
-                incorrect_acc.shape[i] += self.rng.choice([-3, -2, 2, 3])
+                incorrect_acc.shape[i] += rng.choice([-3, -2, 2, 3])
 
         # COND block (input: iter, output: cond_tens )
         self.ser.addBasicBlock(cond_block)
@@ -2571,11 +2587,11 @@
         zero_tens = self.ser.addConst([], DType.INT32, [np.int32(0)])
 
         if error_name == ErrorIf.CondGraphOutputNotMatchingBool:
-            cond_type = self.rng.choice([DType.INT8, DType.INT32, DType.FP32])
+            cond_type = rng.choice([DType.INT8, DType.INT32, DType.FP32])
         else:
             cond_type = DType.BOOL
         if error_name == ErrorIf.CondGraphOutputShapeNotSizeOne:
-            choice = self.rng.choice([1, 2])
+            choice = rng.choice([1, 2])
             if choice == 1:
                 cond_shape = [3]
             else:
@@ -2635,6 +2651,7 @@
 
     def build_fft2d(
         self,
+        rng,
         op,
         inputs,
         args_dict,
@@ -2646,7 +2663,7 @@
         val1, val2 = inputs
         inverse = args_dict["inverse"]
 
-        results = OutputShaper.fft2dOp(self.ser, self.rng, val1, val2, error_name)
+        results = OutputShaper.fft2dOp(self.ser, rng, val1, val2, error_name)
 
         input_names = [val1.name, val2.name]
         pCount, cCount = op["operands"]
@@ -2657,7 +2674,7 @@
         output_dtypes = [res.dtype for res in results]
 
         input_names, output_names = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_names, output_names
+            rng, error_name, input_names, output_names
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -2699,6 +2716,7 @@
 
     def build_rfft2d(
         self,
+        rng,
         op,
         inputs,
         args_dict,
@@ -2708,7 +2726,7 @@
     ):
         assert len(inputs) == 1
         val = inputs[0]
-        results = OutputShaper.rfft2dOp(self.ser, self.rng, val, error_name)
+        results = OutputShaper.rfft2dOp(self.ser, rng, val, error_name)
 
         input_names = [val.name]
         pCount, cCount = op["operands"]
@@ -2719,7 +2737,7 @@
         output_dtypes = [res.dtype for res in results]
 
         input_names, output_names = TosaErrorIfArgGen.eiInvalidateInputOutputList(
-            self, error_name, input_names, output_names
+            rng, error_name, input_names, output_names
         )
 
         if not TosaErrorValidator.evValidateErrorIfs(
@@ -2755,12 +2773,19 @@
         return TosaTestGen.BuildInfo(results, compliance)
 
     def build_shape_op(
-        self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+        self,
+        rng,
+        op,
+        inputs,
+        args_dict,
+        validator_fcns=None,
+        error_name=None,
+        qinfo=None,
     ):
         assert len(inputs) == 2
         a, b = inputs
 
-        result_tensor = OutputShaper.addShapeOp(self.ser, self.rng, a, b, error_name)
+        result_tensor = OutputShaper.addShapeOp(self.ser, rng, a, b, error_name)
 
         # Invalidate Input/Output list for error if checks.
         input_list = [a.name, b.name]
@@ -2895,8 +2920,9 @@
         except KeyError:
             raise Exception("Cannot find op with name {}".format(opName))
 
-        # Initialize a new random number generator
-        self.rng = np.random.default_rng(self.random_seed)
+        if not self.args.stable_rng:
+            # Initialize a new random number generator per op
+            self.resetGlobalRNG()
 
         _, tgen_fcn, _, agen_fcn = op["build_fcn"]
 
@@ -2933,37 +2959,53 @@
                         if shape is not None and len(shape) != r:
                             continue
                         self.setTargetShape(shape)
-                        shapeList = tgen_fcn(self, op, r, error_name)
+                        typeStr = self.typeStr(t)
+                        if self.args.stable_rng:
+                            shape_rng = TosaHashRandomGenerator(
+                                self.random_seed,
+                                [opName, r, typeStr],
+                                self.random_dtype_range,
+                            )
+                        else:
+                            shape_rng = self.global_rng
+                        shapeList = tgen_fcn(self, shape_rng, op, r, error_name)
 
                         shapeStr = self.shapeStr(shapeList[0])
-                        typeStr = self.typeStr(t)
 
                         # Argument list consists of tuples of the (str, []) string representation and the build function argument list
                         argList = []
                         if agen_fcn:
-                            argList = agen_fcn(self, opName, shapeList, t, error_name)
+                            if self.args.stable_rng:
+                                arg_rng = TosaHashRandomGenerator(
+                                    self.random_seed,
+                                    [opName, shapeStr, typeStr],
+                                    self.random_dtype_range,
+                                )
+                            else:
+                                arg_rng = self.global_rng
+
+                            argList = agen_fcn(
+                                self, arg_rng, opName, shapeList, t, error_name
+                            )
                         else:
                             argList = [("", [])]
 
                         for argStr, args in argList:
+                            # Create the test name string - for example: add_1x2x3_i32
                             if testType == "positive":
-                                if argStr:
-                                    testStr = "{}_{}_{}_{}".format(
-                                        opName, shapeStr, typeStr, argStr
-                                    )
-                                else:
-                                    testStr = "{}_{}_{}".format(
-                                        opName, shapeStr, typeStr
-                                    )
-                            elif testType == "negative":
-                                if argStr:
-                                    testStr = "{}_ERRORIF_{}_{}_{}_{}".format(
-                                        opName, error_name, shapeStr, typeStr, argStr
-                                    )
-                                else:
-                                    testStr = "{}_ERRORIF_{}_{}_{}".format(
-                                        opName, error_name, shapeStr, typeStr
-                                    )
+                                name_parts = [opName, shapeStr, typeStr]
+                            else:
+                                assert testType == "negative"
+                                name_parts = [
+                                    opName,
+                                    "ERRORIF",
+                                    error_name,
+                                    shapeStr,
+                                    typeStr,
+                                ]
+                            if argStr:
+                                name_parts.append(argStr)
+                            testStr = "_".join(name_parts)
 
                             testList.append(
                                 (opName, testStr, t, error_name, shapeList, args)
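TosaHashRandomGenerator's implementation is not part of this patch, so the following is only a rough sketch of the concept under stated assumptions (the class internals, hash scheme, and method bodies here are guesses): the run seed is mixed with per-test identifiers such as [opName, rank, typeStr] to derive a deterministic, test-local stream.

import hashlib
import numpy as np

class StableRandomGenerator:
    # Hypothetical stand-in for TosaHashRandomGenerator: derive a
    # deterministic seed from the run seed plus per-test identifiers,
    # so each (op, rank, type) combination gets its own stream.
    def __init__(self, base_seed, seed_parts, dtype_range=None):
        key = "-".join(str(p) for p in [base_seed, *seed_parts])
        digest = hashlib.sha256(key.encode()).digest()
        self._rng = np.random.default_rng(
            int.from_bytes(digest[:8], "little")
        )
        self.dtype_range = dtype_range

    def randInt(self, low, high):
        # Half-open range, matching numpy's convention.
        return int(self._rng.integers(low, high))

    def choice(self, options):
        return self._rng.choice(options)

    def integers(self, low, high, size=None):
        return self._rng.integers(low, high, size=size)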
@@ -3038,8 +3080,18 @@
 
         # Build the random tensor operands and the test
 
+        # Set the random number generator
+        if self.args.stable_rng:
+            build_rng = TosaHashRandomGenerator(
+                self.random_seed, [testStr], self.random_dtype_range
+            )
+        else:
+            build_rng = self.global_rng
+
         if qgen is not None:
-            qinfo = qgen(self, op, dtype_or_dtypeList, error_name)
+            qinfo = qgen(
+                build_rng, self.args.zeropoint, op, dtype_or_dtypeList, error_name
+            )
         else:
             qinfo = None
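Seeding the build RNG from [testStr] means a test's operand data no longer depends on which other tests were generated first. A toy check using the StableRandomGenerator sketch above:

# Two generators built from the same seed and test name draw
# identical streams, regardless of any other RNG activity.
rng_a = StableRandomGenerator(42, ["add_1x2x3_i32"])
rng_b = StableRandomGenerator(42, ["add_1x2x3_i32"])
assert rng_a.randInt(-128, 128) == rng_b.randInt(-128, 128)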
 
@@ -3053,13 +3105,16 @@
 
         # New interface with args info in dictionary
         assert "dg_type" in argsDict
-        tvgInfo = tvgen_fcn(self, opName, dtypeList, shapeList, argsDict, error_name)
+        tvgInfo = tvgen_fcn(
+            self, build_rng, opName, dtypeList, shapeList, argsDict, error_name
+        )
         if tvgInfo.dataGenDict:
             tensMeta["data_gen"] = tvgInfo.dataGenDict
         tens = tvgInfo.tensorList
 
         result = build_fcn(
             self,
+            build_rng,
             op,
             tens,
             argsDict,
diff --git a/verif/generator/tosa_verif_build_tests.py b/verif/generator/tosa_verif_build_tests.py
index 47c351a..83c06d7 100644
--- a/verif/generator/tosa_verif_build_tests.py
+++ b/verif/generator/tosa_verif_build_tests.py
@@ -80,6 +80,13 @@
         help="Random seed for test generation",
     )
 
+    parser.add_argument(
+        "--stable-random-generation",
+        dest="stable_rng",
+        action="store_true",
+        help="Produces less variation (when the test-generator changes) in the test output using the same options",
+    )
+
     filter_group.add_argument(
         "--filter",
         dest="filter",
@@ -395,7 +402,7 @@
         else:
             # Use the random number generator to shuffle the test list
             # and select the per op tests from it
-            tests = testList.select(ttg.rng)
+            tests = testList.select(ttg.global_rng)
 
         if args.list_tests:
             for test in tests:
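Note that test selection stays on the generator's global RNG, so which tests are picked can still vary; only the content of each generated test is pinned by the stable scheme. A hedged sketch of such a shuffle-and-take selector (select()'s real internals are not shown in this patch, so the function below is an assumption):

import numpy as np

def select_tests(tests, rng, count):
    # Hypothetical selector: deterministically shuffle with the
    # supplied RNG, then keep the first `count` entries.
    order = rng.permutation(len(tests))
    return [tests[i] for i in order[:count]]

rng = np.random.default_rng(42)
print(select_tests(["t0", "t1", "t2", "t3"], rng, 2))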