Main Compliance testing support for ARGMAX, REDUCE_SUM/MAX/MIN

Add extra tests for FP32 REDUCE_SUM that meet MIN_DOT_PRODUCTS.
Also improve the skip information printed during dot product test generation.

Signed-off-by: Jeremy Johnson <jeremy.johnson@arm.com>
Change-Id: Ia8198a9500ddddfc86c5bb84230b9a4edf5ffd50
diff --git a/reference_model/src/generate/generate_dot_product.cc b/reference_model/src/generate/generate_dot_product.cc
index e6815ad..c1934dd 100644
--- a/reference_model/src/generate/generate_dot_product.cc
+++ b/reference_model/src/generate/generate_dot_product.cc
@@ -189,6 +189,48 @@
             return false;
     }
 }
+//---------------------------------------------------------------------------//
+//                              Reduce Sum                                   //
+//---------------------------------------------------------------------------//
+
+bool generateReduceSum(const TosaReference::GenerateConfig& cfg,
+                       TosaReference::IDotProductGenerator& generator,
+                       void* data,
+                       size_t size)
+{
+    if (cfg.dataType != DType::DType_FP32)
+    {
+        WARNING("[Generator][DP][ReduceSum] Only supports FP32.");
+        return false;
+    }
+    if (cfg.inputPos != 0)
+    {
+        WARNING("[Generator][DP][ReduceSum] Invalid input tensor slot position to operator.");
+        return false;
+    }
+    if (cfg.dotProductInfo.axis < 0 || static_cast<size_t>(cfg.dotProductInfo.axis) >= cfg.shape.size())
+    {
+        WARNING("[Generator][DP][ReduceSum] Invalid axis %d.", cfg.dotProductInfo.axis);
+        return false;
+    }
+
+    float* input        = reinterpret_cast<float*>(data);
+    const int64_t T     = TosaReference::numElementsFromShape(cfg.shape);
+    const uint32_t axis = cfg.dotProductInfo.axis;
+
+    for (int64_t t = 0; t < T; ++t)
+    {
+        uint64_t k = t;
+        for (uint32_t d = cfg.shape.size() - 1; d > axis; --d)
+        {
+            k = k / cfg.shape[d];
+        }
+        k = k % cfg.shape[axis];
+
+        input[t] = generator(static_cast<int32_t>(k));
+    }
+    return true;
+}
 }    // namespace
 
 namespace TosaReference
@@ -210,6 +252,8 @@
             return generateMatMul(cfg, *generator, data, size);
         case tosa::Op_CONV2D:
             return generateConv2D(cfg, *generator, data, size);
+        case tosa::Op_REDUCE_SUM:
+            return generateReduceSum(cfg, *generator, data, size);
         default:
             WARNING("[Generator][DP] Unsupported operator.");
             return false;
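
The inner loop in `generateReduceSum` maps each flat element index `t` to its coordinate along the reduction axis, so every element of a given reduction slice drives the dot product generator with the same index `k`. A minimal sketch of that index arithmetic in Python (illustrative only, not part of the patch):

```python
# Illustrative sketch of the generateReduceSum index mapping above (not part
# of the patch). For a row-major tensor, the coordinate along `axis` of flat
# index t is (t // stride(axis)) % shape[axis], where stride(axis) is the
# product of the dimensions after `axis`.
def reduce_axis_index(t: int, shape: list, axis: int) -> int:
    k = t
    for d in range(len(shape) - 1, axis, -1):  # divide out trailing dimensions
        k //= shape[d]
    return k % shape[axis]

# Example matching the new unit test below: shape [5, 3, 7], axis 1.
assert reduce_axis_index(0, [5, 3, 7], 1) == 0   # element (0, 0, 0)
assert reduce_axis_index(7, [5, 3, 7], 1) == 1   # element (0, 1, 0)
assert reduce_axis_index(20, [5, 3, 7], 1) == 2  # element (0, 2, 6)
```
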
diff --git a/reference_model/src/generate/generate_utils.cc b/reference_model/src/generate/generate_utils.cc
index 8ae889a..e410436 100644
--- a/reference_model/src/generate/generate_utils.cc
+++ b/reference_model/src/generate/generate_utils.cc
@@ -39,6 +39,7 @@
                              {
                                  { Op::Op_UNKNOWN, "UNKNOWN" },
                                  { Op::Op_ADD, "ADD" },
+                                 { Op::Op_ARGMAX, "ARGMAX" },
                                  { Op::Op_CONV2D, "CONV2D" },
                                  { Op::Op_MATMUL, "MATMUL" },
                                  { Op::Op_MAXIMUM, "MAXIMUM" },
@@ -46,6 +47,9 @@
                                  { Op::Op_MINIMUM, "MINIMUM" },
                                  { Op::Op_MUL, "MUL" },
                                  { Op::Op_PAD, "PAD" },
+                                 { Op::Op_REDUCE_MAX, "REDUCE_MAX" },
+                                 { Op::Op_REDUCE_MIN, "REDUCE_MIN" },
+                                 { Op::Op_REDUCE_SUM, "REDUCE_SUM" },
                                  { Op::Op_SUB, "SUB" },
                              })
 
diff --git a/reference_model/test/generate_tests.cpp b/reference_model/test/generate_tests.cpp
index 6173372..40dd59f 100644
--- a/reference_model/test/generate_tests.cpp
+++ b/reference_model/test/generate_tests.cpp
@@ -509,4 +509,75 @@
         check_not_output<float>(bufferP0, bufferP1);
     }
 }
+void reduce_sum_test_FP32(const std::string tosaName,
+                          const size_t tosaElements,
+                          const std::string templateJsonCfg,
+                          const std::string setStr,
+                          const std::vector<uint32_t> expected)
+{
+    std::string jsonCfg = templateJsonCfg;
+    update_json_template(jsonCfg, "_SET_", setStr);
+
+    std::vector<float> buffer(tosaElements);
+    REQUIRE(tgd_generate_data(jsonCfg.c_str(), tosaName.c_str(), (void*)buffer.data(), tosaElements * 4));
+    // Choose different generator values to test at positions 6, 7 & 8
+    std::vector<float> mid_three(buffer.begin() + 6, buffer.begin() + 9);
+    check_output<float>(mid_three, expected);
+}
+
+TEST_CASE("positive - FP32 reduce_sum dot product (values 6,7 & 8)")
+{
+    std::string templateJsonCfg = R"({
+        "tensors" : {
+            "input" : {
+                "generator": "DOT_PRODUCT",
+                "data_type": "FP32",
+                "input_type": "VARIABLE",
+                "shape" : [ 5, 3, 7 ],
+                "input_pos": 0,
+                "op" : "REDUCE_SUM",
+                "dot_product_info": {
+                    "s": _SET_,
+                    "ks": 3,
+                    "acc_type": "FP32",
+                    "axis": 1
+                }
+            }
+        }
+    })";
+
+    const std::string tosaName = "input";
+    const size_t tosaElements  = 5 * 3 * 7;
+
+    SUBCASE("reduce_sum, set 0, param 0")
+    {
+        std::vector<uint32_t> expected = { 0x3df2e612, 0x3f59255f, 0x0 };
+        reduce_sum_test_FP32(tosaName, tosaElements, templateJsonCfg, "0", expected);
+    }
+    SUBCASE("reduce_sum, set 1, param 0")
+    {
+        std::vector<uint32_t> expected = { 0x5edaa175, 0x5edb84c1, 0x5ea3c765 };
+        reduce_sum_test_FP32(tosaName, tosaElements, templateJsonCfg, "1", expected);
+    }
+    SUBCASE("reduce_sum, set 2, param 0")
+    {
+        std::vector<uint32_t> expected = { 0x3f800000, 0x3e73f143, 0x3f12cef8 };
+        reduce_sum_test_FP32(tosaName, tosaElements, templateJsonCfg, "2", expected);
+    }
+    SUBCASE("reduce_sum, set 3, param 0")
+    {
+        std::vector<uint32_t> expected = { 0x41800000, 0xbe9f659e, 0xbfaca78c };
+        reduce_sum_test_FP32(tosaName, tosaElements, templateJsonCfg, "3", expected);
+    }
+    SUBCASE("reduce_sum, set 4, param 0")
+    {
+        std::vector<uint32_t> expected = { 0x5e1e6f12, 0x3f000000, 0x3f000000 };
+        reduce_sum_test_FP32(tosaName, tosaElements, templateJsonCfg, "4", expected);
+    }
+    SUBCASE("reduce_sum, set 5, param 0")
+    {
+        std::vector<uint32_t> expected = { 0x5d2790c5, 0xdec3dadc, 0xdea1486e };
+        reduce_sum_test_FP32(tosaName, tosaElements, templateJsonCfg, "5", expected);
+    }
+}
 TEST_SUITE_END();    // generate
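
The `expected` vectors above hold raw IEEE-754 bit patterns of the generated FP32 values, matching how the surrounding test helpers compare results. When updating expected data it can help to decode them; a small illustrative Python snippet (not part of the patch):

```python
import struct

# Decode an FP32 bit pattern such as the expected values in the test above
# (illustrative helper, not part of the patch).
def fp32_from_bits(bits: int) -> float:
    return struct.unpack("<f", struct.pack("<I", bits))[0]

print(fp32_from_bits(0x3F800000))  # 1.0  - first value expected for set 2
print(fp32_from_bits(0x41800000))  # 16.0 - first value expected for set 3
print(fp32_from_bits(0x3F000000))  # 0.5  - repeated value expected for set 4
```
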
diff --git a/verif/conformance/tosa_main_profile_ops_info.json b/verif/conformance/tosa_main_profile_ops_info.json
index 254f5e7..faccf75 100644
--- a/verif/conformance/tosa_main_profile_ops_info.json
+++ b/verif/conformance/tosa_main_profile_ops_info.json
@@ -131,6 +131,7 @@
         "profile": [
             "tosa-mi"
         ],
+        "support_for": [ "lazy_data_gen" ],
         "generation": {
             "standard": {
                 "generator_args": [
@@ -142,7 +143,7 @@
                         "--target-dtype",
                         "bf16",
                         "--fp-values-range",
-                        "-2.0,2.0",
+                        "-max,max",
                         "--tensor-dim-range",
                         "32,64",
                         "--target-rank",
@@ -162,7 +163,7 @@
                         "--target-dtype",
                         "bf16",
                         "--fp-values-range",
-                        "-2.0,2.0",
+                        "-max,max",
                         "--tensor-dim-range",
                         "1,32",
                         "--target-rank",
@@ -174,7 +175,7 @@
                         "--target-dtype",
                         "fp32",
                         "--fp-values-range",
-                        "-2.0,2.0",
+                        "-max,max",
                         "--target-shape",
                         "1,3,65535,1",
                         "--target-shape",
@@ -612,7 +613,7 @@
                         "--target-dtype",
                         "bf16",
                         "--fp-values-range",
-                        "-2.0,2.0",
+                        "-max,max",
                         "--target-shape",
                         "1,34,19,27",
                         "--target-shape",
@@ -625,7 +626,7 @@
                         "--target-dtype",
                         "fp32",
                         "--fp-values-range",
-                        "-2.0,2.0",
+                        "-max,max",
                         "--target-shape",
                         "1,65537,1,3",
                         "--target-shape",
@@ -2378,6 +2379,7 @@
         "profile": [
             "tosa-mi"
         ],
+        "support_for": [ "lazy_data_gen" ],
         "generation": {
             "standard": {
                 "negative_dim_range": "1,10",
@@ -2390,7 +2392,7 @@
                         "--target-dtype",
                         "bf16",
                         "--fp-values-range",
-                        "-2.0,2.0",
+                        "-max,max",
                         "--tensor-dim-range",
                         "1,32"
                     ],
@@ -2398,7 +2400,7 @@
                         "--target-dtype",
                         "fp32",
                         "--fp-values-range",
-                        "-2.0,2.0",
+                        "-max,max",
                         "--target-shape",
                         "1,1,1,65531",
                         "--target-shape",
@@ -2423,6 +2425,7 @@
         "profile": [
             "tosa-mi"
         ],
+        "support_for": [ "lazy_data_gen" ],
         "generation": {
             "standard": {
                 "negative_dim_range": "1,10",
@@ -2435,7 +2438,7 @@
                         "--target-dtype",
                         "bf16",
                         "--fp-values-range",
-                        "-2.0,2.0",
+                        "-max,max",
                         "--tensor-dim-range",
                         "1,32"
                     ],
@@ -2443,7 +2446,7 @@
                         "--target-dtype",
                         "fp32",
                         "--fp-values-range",
-                        "-2.0,2.0",
+                        "-max,max",
                         "--target-shape",
                         "1,1,65531,1",
                         "--target-shape",
@@ -2510,6 +2513,7 @@
         "profile": [
             "tosa-mi"
         ],
+        "support_for": [ "lazy_data_gen" ],
         "generation": {
             "standard": {
                 "generator_args": [
@@ -2521,13 +2525,25 @@
                         "--target-dtype",
                         "bf16",
                         "--fp-values-range",
-                        "-2.0,2.0",
+                        "-max,max",
                         "--tensor-dim-range",
                         "1,37"
                     ],
                     [
                         "--target-dtype",
+                        "fp32",
+                        "--fp-values-range",
+                        "-max,max",
+                        "--target-shape",
+                        "1001",
+                        "--target-shape",
+                        "50,200"
+                    ],
+                    [
+                        "--target-dtype",
                         "bf16",
+                        "--fp-values-range",
+                        "-max,max",
                         "--target-shape",
                         "1,3,65529,1",
                         "--target-shape",
diff --git a/verif/generator/tosa_arg_gen.py b/verif/generator/tosa_arg_gen.py
index 1be243c..4014656 100644
--- a/verif/generator/tosa_arg_gen.py
+++ b/verif/generator/tosa_arg_gen.py
@@ -1078,27 +1078,26 @@
             return TosaTensorValuesGen.TVGInfo(tens_ser_list, None)
 
     @staticmethod
-    def tvgConcat(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
+    def tvgConcat(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
         count = len(shapeList) - testGen.args.num_const_inputs_concat
         if count < 1:
             count = 1
         if testGen.args.num_const_inputs_concat == 0:
             count = len(shapeList)
 
-        # Ensure axis is an int
-        testArgs[0] = int(testArgs[0])
-
         shapeList = TosaTensorGen.tgConcatConstInput(
-            testGen, shapeList, testArgs[0], error_name
+            testGen, shapeList, argsDict["axis"], error_name
         )
 
-        tens = []
-        tens.extend(
+        tens_ser_list = []
+        tens_ser_list.extend(
             testGen.buildPlaceholderTensors(shapeList[0:count], dtypeList[0:count])
         )
-        tens.extend(testGen.buildConstTensors(shapeList[count:], dtypeList[count:]))
+        tens_ser_list.extend(
+            testGen.buildConstTensors(shapeList[count:], dtypeList[count:])
+        )
 
-        return tens
+        return TosaTensorValuesGen.TVGInfo(tens_ser_list, None)
 
     @staticmethod
     def tvgLogicalShift(
@@ -1164,8 +1163,9 @@
             )
 
     @staticmethod
-    def tvgReduceSum(testGen, op, dtypeList, shapeList, testArgs, error_name=None):
+    def tvgReduceSum(testGen, opName, dtypeList, shapeList, argsDict, error_name=None):
         if dtypeList[0] == DType.INT32:
+            op = testGen.TOSA_OP_LIST[opName]
             pCount, cCount = op["operands"]
             assert (
                 pCount == 1 and cCount == 0
@@ -1176,14 +1176,15 @@
             values_arr = np.int32(
                 testGen.rng.integers(low=-range_val, high=range_val, size=shapeList[0])
             )
-            placeholders = []
-            placeholders.append(
+            tens_ser_list = []
+            tens_ser_list.append(
                 testGen.ser.addPlaceholder(shapeList[0], dtypeList[0], values_arr)
             )
-            return placeholders
+            return TosaTensorValuesGen.TVGInfo(tens_ser_list, None)
         else:
-            return TosaTensorValuesGen.tvgDefault(
-                testGen, op, dtypeList, shapeList, testArgs, error_name
+            # ERROR_IF or dot product floating point test
+            return TosaTensorValuesGen.tvgLazyGenDefault(
+                testGen, opName, dtypeList, shapeList, argsDict, error_name
             )
 
 
@@ -1228,12 +1229,18 @@
                     # Extra tests for each dot product test set
                     dot_products = args_dict["dot_products"]
                     if dot_products < testGen.TOSA_MI_DOT_PRODUCT_MIN:
+                        shape_info = (
+                            " ({})".format(testGen.shapeStr(args_dict["shape"]))
+                            if "shape" in args_dict
+                            else ""
+                        )
                         print(
-                            f"Skipping {opName} dot product test as too few calculations {dot_products} < {testGen.TOSA_MI_DOT_PRODUCT_MIN}"
+                            f"Skipping {opName}{shape_info} dot product test as too few calculations {dot_products} < {testGen.TOSA_MI_DOT_PRODUCT_MIN}"
                         )
                         continue
-                    # KS is required by all dot product generators
+                    # KS and acc_type are required by all dot product generators
                     assert "ks" in args_dict
+                    assert "acc_type" in args_dict
 
                     for s in testGen.TOSA_MI_DOT_PRODUCT_TEST_SETS:
                         new_arg_str = f"{arg_str}_s{s}"
@@ -1260,20 +1267,40 @@
     @staticmethod
     def agAxis(testGen, opName, shapeList, dtype, error_name=None):
         """Build the axis argument for operators that take a single axis"""
-        axes = []
+        arg_list = []
         shape = shapeList[0]
 
         if error_name == ErrorIf.AxisSmallerZero:
-            small_axis = testGen.rng.integers(-5, 0)
-            axes.append(("axis{}".format(small_axis), [small_axis]))
+            # Set too small axis
+            axes = [testGen.rng.integers(-5, 0)]
         elif error_name == ErrorIf.AxisLargerRank:
-            large_axis = testGen.rng.integers(len(shape) + 1, len(shape) + 10)
-            axes.append(("axis{}".format(large_axis), [large_axis]))
+            # Set too large axis
+            axes = [testGen.rng.integers(len(shape) + 1, len(shape) + 10)]
         else:
-            for a in range(0, len(shape)):
-                axes.append(("axis{}".format(a), [a]))
+            # Create tests for each dimension
+            axes = range(0, len(shape))
 
-        return axes
+        opid = testGen.TOSA_OP_LIST[opName]["op"]
+
+        for a in axes:
+            args_dict = {"axis": int(a)}
+            if opid == Op.REDUCE_SUM:
+                args_dict["dot_products"] = gtu.product(shape)
+                args_dict["shape"] = shape
+                args_dict["ks"] = int(shape[a]) if a >= 0 and a < len(shape) else 1
+                args_dict["acc_type"] = dtype if dtype != DType.BF16 else DType.FP32
+
+            arg_list.append(("axis{}".format(a), args_dict))
+
+        arg_list = TosaArgGen._add_data_generators(
+            testGen,
+            opName,
+            dtype,
+            arg_list,
+            error_name,
+        )
+        # Return list of tuples: (arg_str, args_dict)
+        return arg_list
 
     @staticmethod
     def _calculate_sparsity(num_tests, sparsity_factor):
@@ -1483,6 +1510,7 @@
                                 "kernel": k_shape,
                                 "ks": k_size,
                                 "dot_products": dots,
+                                "shape": ifm_shape,
                             }
 
                             # Support for larger values than 9 needs different delimiter
@@ -1558,6 +1586,7 @@
             "dot_products": gtu.product(
                 (shapeList[0][0], shapeList[0][1], shapeList[1][2])
             ),
+            "shape": shapeList[0],
         }
 
         # Create arg tuple of string and dict
@@ -1886,7 +1915,7 @@
             else "st{}_kern{}_pad{}"
         )
 
-        def get_arg_list_element(accum, stride, pad, kern, dot_products=0):
+        def get_arg_list_element(accum, stride, pad, kern, dot_products=0, shape=[]):
             # Return tuple containing the formatted argument string and
             # the corresponding argument values in a dictionary
 
@@ -1902,6 +1931,7 @@
                 "pad": pad,
                 "kernel": kern,
                 "dot_products": dot_products,  # Ignored for error tests
+                "shape": shape,
                 "ks": gtu.product(kern),  # avg_pool2d: KS = KX*KY
             }
 
@@ -1926,7 +1956,7 @@
                             )
                             if None not in [sNew, pNew, kNew] and n % sparsity == 0:
                                 arg_list.append(
-                                    get_arg_list_element(a, sNew, pNew, kNew)
+                                    get_arg_list_element(a, sNew, pNew, kNew, shape=shape)
                                 )
                         elif (
                             n % sparsity == 0
@@ -1965,7 +1995,9 @@
                                 dp = gtu.product(
                                     (shape[0], output_h, output_w, shape[3])
                                 )
-                                arg_list.append(get_arg_list_element(a, s, p, k, dp))
+                                arg_list.append(
+                                    get_arg_list_element(a, s, p, k, dp, shape)
+                                )
                         n += 1
 
         # Now add data generator types
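
For REDUCE_SUM, the reworked `agAxis` above now emits one `(arg_str, args_dict)` entry per axis carrying everything the dot product data generator needs. For an FP32 input matching the new unit test (shape `[5, 3, 7]`, axis 1) the entry would look roughly like this (illustrative values only):

```python
# Sketch of the (arg_str, args_dict) entry built by agAxis for REDUCE_SUM on
# an FP32 [5, 3, 7] input with axis 1 (illustrative; enum values shown as
# strings rather than DType members).
arg_entry = (
    "axis1",
    {
        "axis": 1,
        "dot_products": 5 * 3 * 7,  # gtu.product(shape) == 105
        "shape": [5, 3, 7],
        "ks": 3,                    # size of the reduced dimension, shape[axis]
        "acc_type": "FP32",         # DType.FP32 (BF16 inputs accumulate in FP32)
    },
)
```
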
diff --git a/verif/generator/tosa_test_gen.py b/verif/generator/tosa_test_gen.py
index 556a0d8..3180cf5 100644
--- a/verif/generator/tosa_test_gen.py
+++ b/verif/generator/tosa_test_gen.py
@@ -658,12 +658,17 @@
         )
         return result_tens
 
-    def build_argmax(self, op, a, axis, validator_fcns, error_name):
-        result_tens = OutputShaper.argmaxOp(self.ser, self.rng, a, axis, error_name)
+    def build_argmax(
+        self, op, inputs, args_dict, validator_fcns, error_name, qinfo=None
+    ):
+        assert len(inputs) == 1
+        a = inputs[0]
+        axis = args_dict["axis"]
+        result_tensor = OutputShaper.argmaxOp(self.ser, self.rng, a, axis, error_name)
 
         # Invalidate Input/Output list for error if checks.
         input_list = [a.name]
-        output_list = [result_tens.name]
+        output_list = [result_tensor.name]
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
@@ -678,9 +683,9 @@
             axis=axis,
             input_shape=a.shape,
             input_dtype=a.dtype,
-            output_shape=result_tens.shape,
-            output_dtype=result_tens.dtype,
-            result_tensors=[result_tens],
+            output_shape=result_tensor.shape,
+            output_dtype=result_tensor.dtype,
+            result_tensors=[result_tensor],
             input_list=input_list,
             output_list=output_list,
             num_operands=num_operands,
@@ -691,7 +696,11 @@
         attr.AxisAttribute(axis)
 
         self.ser.addOperator(op["op"], input_list, output_list, attr)
-        return result_tens
+
+        compliance = self.tensorComplianceMetaData(
+            op, inputs[0].dtype, args_dict, result_tensor, error_name
+        )
+        return TosaTestGen.BuildInfo(result_tensor, compliance)
 
     def build_pool2d(
         self,
@@ -1173,12 +1182,17 @@
 
         return TosaTestGen.BuildInfo(result_tensor, compliance)
 
-    def build_reduce(self, op, a, axis, validator_fcns, error_name=None):
-        result_tens = OutputShaper.reduceOp(self.ser, self.rng, a, axis, error_name)
+    def build_reduce(
+        self, op, inputs, args_dict, validator_fcns, error_name=None, qinfo=None
+    ):
+        assert len(inputs) == 1
+        a = inputs[0]
+        axis = args_dict["axis"]
+        result_tensor = OutputShaper.reduceOp(self.ser, self.rng, a, axis, error_name)
 
         # Invalidate Input/Output list for error if checks.
         input_list = [a.name]
-        output_list = [result_tens.name]
+        output_list = [result_tensor.name]
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
@@ -1192,10 +1206,10 @@
             op=op,
             axis=axis,
             input_shape=a.shape,
-            output_shape=result_tens.shape,
+            output_shape=result_tensor.shape,
             input_dtype=a.dtype,
-            output_dtype=result_tens.dtype,
-            result_tensors=[result_tens],
+            output_dtype=result_tensor.dtype,
+            result_tensors=[result_tensor],
             input_list=input_list,
             output_list=output_list,
             num_operands=num_operands,
@@ -1206,7 +1220,16 @@
         attr.AxisAttribute(axis)
 
         self.ser.addOperator(op["op"], input_list, output_list, attr)
-        return result_tens
+
+        if op["op"] == Op.REDUCE_PRODUCT:
+            # TODO: Add compliance support!
+            compliance = None
+        else:
+            compliance = self.tensorComplianceMetaData(
+                op, a.dtype, args_dict, result_tensor, error_name
+            )
+
+        return TosaTestGen.BuildInfo(result_tensor, compliance)
 
     def build_clamp(self, op, a, validator_fcns=None, error_name=None):
         result_tens = OutputShaper.unaryOp(self.ser, self.rng, a, error_name)
@@ -1373,25 +1396,24 @@
         self.ser.addOperator(op["op"], input_list, output_list)
         return result_tens
 
-    def build_concat(self, op, *a, validator_fcns=None, error_name=None):
+    def build_concat(
+        self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+    ):
+        axis = args_dict["axis"]
         if error_name != ErrorIf.WrongInputType:
-            assert type(a[-1]) == int
+            assert type(axis) == int
 
-        # To store variable length list of input tensors we need to store axis along with it
-        axis = a[-1]
-        a = a[:-1]
-
-        result_tens = OutputShaper.concatOp(
-            self.ser, self.rng, axis, *a, error_name=error_name
+        result_tensor = OutputShaper.concatOp(
+            self.ser, self.rng, axis, inputs, error_name=error_name
         )
 
         input_tensor_names = []
-        for tensor in a:
+        for tensor in inputs:
             input_tensor_names.append(tensor.name)
 
         # Invalidate Input/Output list for error if checks.
         input_list = input_tensor_names
-        output_list = [result_tens.name]
+        output_list = [result_tensor.name]
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
@@ -1404,12 +1426,12 @@
             error_name,
             op=op,
             axis=axis,
-            input_shape=a[0].shape,
-            output_shape=result_tens.shape,
-            input_dtype=a[0].dtype,
-            output_dtype=result_tens.dtype,
-            inputs=a,
-            result_tensors=[result_tens],
+            input_shape=inputs[0].shape,
+            output_shape=result_tensor.shape,
+            input_dtype=inputs[0].dtype,
+            output_dtype=result_tensor.dtype,
+            inputs=inputs,
+            result_tensors=[result_tensor],
             input_list=input_list,
             output_list=output_list,
             num_operands=num_operands,
@@ -1420,7 +1442,7 @@
         attr.AxisAttribute(axis)
 
         self.ser.addOperator(op["op"], input_list, output_list, attr)
-        return result_tens
+        return TosaTestGen.BuildInfo(result_tensor, None)
 
     def build_pad(
         self,
@@ -1483,17 +1505,20 @@
     def build_dim(
         self,
         op,
-        a,
-        axis,
+        inputs,
+        args_dict,
         validator_fcns=None,
         error_name=None,
         qinfo=None,
     ):
-        result_tens = OutputShaper.dimOp(self.ser, self.rng, a, axis, error_name)
+        assert len(inputs) == 1
+        a = inputs[0]
+        axis = args_dict["axis"]
+        result_tensor = OutputShaper.dimOp(self.ser, self.rng, a, axis, error_name)
 
         # Invalidate Input/Output list for error if checks.
         input_list = [a.name]
-        output_list = [result_tens.name]
+        output_list = [result_tensor.name]
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
@@ -1508,9 +1533,9 @@
             axis=axis,
             input_shape=a.shape,
             input_dtype=a.dtype,
-            output_shape=result_tens.shape,
-            output_dtype=result_tens.dtype,
-            result_tensors=[result_tens],
+            output_shape=result_tensor.shape,
+            output_dtype=result_tensor.dtype,
+            result_tensors=[result_tensor],
             input_list=input_list,
             output_list=output_list,
             num_operands=num_operands,
@@ -1521,7 +1546,7 @@
         attr.AxisAttribute(axis)
 
         self.ser.addOperator(op["op"], input_list, output_list, attr)
-        return result_tens
+        return TosaTestGen.BuildInfo(result_tensor, None)
 
     def build_reshape(self, op, a, newShape, validator_fcns=None, error_name=None):
         result_tens = OutputShaper.reshapeOp(
@@ -1559,12 +1584,17 @@
         self.ser.addOperator(op["op"], input_list, output_list, attr)
         return result_tens
 
-    def build_reverse(self, op, a, axis, validator_fcns=None, error_name=None):
-        result_tens = OutputShaper.unaryOp(self.ser, self.rng, a, error_name)
+    def build_reverse(
+        self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
+    ):
+        assert len(inputs) == 1
+        a = inputs[0]
+        axis = args_dict["axis"]
+        result_tensor = OutputShaper.unaryOp(self.ser, self.rng, a, error_name)
 
         # Invalidate Input/Output list for error if checks.
         input_list = [a.name]
-        output_list = [result_tens.name]
+        output_list = [result_tensor.name]
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
@@ -1578,10 +1608,10 @@
             op=op,
             axis=axis,
             input_shape=a.shape,
-            output_shape=result_tens.shape,
+            output_shape=result_tensor.shape,
             input_dtype=a.dtype,
-            output_dtype=result_tens.dtype,
-            result_tensors=[result_tens],
+            output_dtype=result_tensor.dtype,
+            result_tensors=[result_tensor],
             input_list=input_list,
             output_list=output_list,
             num_operands=num_operands,
@@ -1592,7 +1622,7 @@
         attr.AxisAttribute(axis)
 
         self.ser.addOperator(op["op"], input_list, output_list, attr)
-        return result_tens
+        return TosaTestGen.BuildInfo(result_tensor, None)
 
     def build_transpose(self, op, a, perms, validator_fcns=None, error_name=None):
         result_tens = OutputShaper.transposeOp(self.ser, self.rng, a, perms, error_name)
@@ -2898,7 +2928,7 @@
             "build_fcn": (
                 build_argmax,
                 TosaTensorGen.tgBasic,
-                TosaTensorValuesGen.tvgDefault,
+                TosaTensorValuesGen.tvgLazyGenDefault,
                 TosaArgGen.agAxis,
             ),
             "types": TYPE_NARROW_INT_FP,
@@ -2913,6 +2943,9 @@
                 TosaErrorValidator.evWrongInputList,
                 TosaErrorValidator.evWrongOutputList,
             ),
+            "data_gen": {
+                "fp": (gtu.DataGenType.PSEUDO_RANDOM,),
+            },
         },
         "avg_pool2d": {
             "op": Op.AVG_POOL2D,
@@ -3853,7 +3886,7 @@
             "build_fcn": (
                 build_reduce,
                 TosaTensorGen.tgBasic,
-                TosaTensorValuesGen.tvgDefault,
+                TosaTensorValuesGen.tvgLazyGenDefault,
                 TosaArgGen.agAxis,
             ),
             "types": TYPE_BOOL,
@@ -3875,7 +3908,7 @@
             "build_fcn": (
                 build_reduce,
                 TosaTensorGen.tgBasic,
-                TosaTensorValuesGen.tvgDefault,
+                TosaTensorValuesGen.tvgLazyGenDefault,
                 TosaArgGen.agAxis,
             ),
             "types": TYPE_BOOL,
@@ -3897,7 +3930,7 @@
             "build_fcn": (
                 build_reduce,
                 TosaTensorGen.tgBasic,
-                TosaTensorValuesGen.tvgDefault,
+                TosaTensorValuesGen.tvgLazyGenDefault,
                 TosaArgGen.agAxis,
             ),
             "types": TYPE_INT_FP,
@@ -3911,6 +3944,9 @@
                 TosaErrorValidator.evWrongInputList,
                 TosaErrorValidator.evWrongOutputList,
             ),
+            "data_gen": {
+                "fp": (gtu.DataGenType.PSEUDO_RANDOM,),
+            },
         },
         "reduce_min": {
             "op": Op.REDUCE_MIN,
@@ -3919,7 +3955,7 @@
             "build_fcn": (
                 build_reduce,
                 TosaTensorGen.tgBasic,
-                TosaTensorValuesGen.tvgDefault,
+                TosaTensorValuesGen.tvgLazyGenDefault,
                 TosaArgGen.agAxis,
             ),
             "types": TYPE_INT_FP,
@@ -3933,6 +3969,9 @@
                 TosaErrorValidator.evWrongInputList,
                 TosaErrorValidator.evWrongOutputList,
             ),
+            "data_gen": {
+                "fp": (gtu.DataGenType.PSEUDO_RANDOM,),
+            },
         },
         "reduce_product": {
             "op": Op.REDUCE_PRODUCT,
@@ -3941,7 +3980,7 @@
             "build_fcn": (
                 build_reduce,
                 TosaTensorGen.tgBasic,
-                TosaTensorValuesGen.tvgDefault,
+                TosaTensorValuesGen.tvgLazyGenDefault,
                 TosaArgGen.agAxis,
             ),
             "types": TYPE_FP,
@@ -3977,6 +4016,9 @@
                 TosaErrorValidator.evWrongInputList,
                 TosaErrorValidator.evWrongOutputList,
             ),
+            "data_gen": {
+                "fp": (gtu.DataGenType.DOT_PRODUCT,),
+            },
         },
         # Data layout operators
         "concat": {
@@ -4030,7 +4072,7 @@
             "build_fcn": (
                 build_dim,
                 TosaTensorGen.tgBasic,
-                TosaTensorValuesGen.tvgDefault,
+                TosaTensorValuesGen.tvgLazyGenDefault,
                 TosaArgGen.agAxis,
             ),
             "types": TYPE_FIB,
@@ -4069,7 +4111,7 @@
             "build_fcn": (
                 build_reverse,
                 TosaTensorGen.tgBasic,
-                TosaTensorValuesGen.tvgDefault,
+                TosaTensorValuesGen.tvgLazyGenDefault,
                 TosaArgGen.agAxis,
             ),
             "types": TYPE_FIB,
@@ -4892,9 +4934,9 @@
         return ser.addOutput(output_shape, out_dtype)
 
     @staticmethod
-    def concatOp(ser, rng, axis, *a, error_name=None):
-        input1 = a[0]
-        remaining_inputs = a[1:]
+    def concatOp(ser, rng, axis, inputs, error_name=None):
+        input1 = inputs[0]
+        remaining_inputs = inputs[1:]
 
         # calculate the output shape, if possible, otherwise just use the first input shape
         output_shape = input1.shape.copy()
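
As a recap of the op table changes in this file, ARGMAX, REDUCE_MAX and REDUCE_MIN gain pseudo-random floating-point data generation while REDUCE_SUM gains dot product data generation; REDUCE_PRODUCT compliance remains a TODO in `build_reduce`. Summarised as an illustrative Python mapping (not part of the patch):

```python
# Illustrative recap of the "data_gen" entries added to TOSA_OP_LIST above
# (values shown as strings for brevity; the real entries use gtu.DataGenType).
FP_DATA_GEN_ADDED = {
    "argmax":     ("PSEUDO_RANDOM",),
    "reduce_max": ("PSEUDO_RANDOM",),
    "reduce_min": ("PSEUDO_RANDOM",),
    "reduce_sum": ("DOT_PRODUCT",),   # exercises the new generateReduceSum path
    # "reduce_product": compliance support still TODO in build_reduce
}
```
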