Compliance testing support for MAX_POOL2D & PAD

Added a pseudo random number generator to the generate library.
Enabled MAX_POOL2D and PAD FP32 tests to use the new generator and
compliance checking.
Fixed the verify library exact mode to expect reference data as FP64.
Simplified tosa_verif_build_tests internal interfaces for the new tests.

Signed-off-by: Jeremy Johnson <jeremy.johnson@arm.com>
Change-Id: Icc0ffa924cf38107c3a212efd452c47a650c9d98
diff --git a/reference_model/CMakeLists.txt b/reference_model/CMakeLists.txt
index 5be6f8f..24467c8 100644
--- a/reference_model/CMakeLists.txt
+++ b/reference_model/CMakeLists.txt
@@ -73,6 +73,7 @@
     src/tensor.cc
     src/generate/generate_dot_product_states.cc
     src/generate/generate_dot_product.cc
+    src/generate/generate_pseudo_random.cc
     src/generate/generate_entry.cc
     src/generate/generate_utils.cc
     src/verify/verify_dot_product.cc
@@ -167,6 +168,7 @@
 add_library(tosa_reference_generate_lib SHARED
   src/generate/generate_dot_product_states.cc
   src/generate/generate_dot_product.cc
+  src/generate/generate_pseudo_random.cc
   src/generate/generate_entry.cc
   src/generate/generate_utils.cc
   src/generate/generate_config.cc
diff --git a/reference_model/src/generate/generate_dot_product.cc b/reference_model/src/generate/generate_dot_product.cc
index 1d2325f..cbfac4b 100644
--- a/reference_model/src/generate/generate_dot_product.cc
+++ b/reference_model/src/generate/generate_dot_product.cc
@@ -56,6 +56,11 @@
                     void* data,
                     size_t size)
 {
+    if (cfg.dataType != DType::DType_FP32)
+    {
+        WARNING("[Generator][DP][MatMul] Only supports FP32.");
+        return false;
+    }
     if (cfg.shape.size() != 3)
     {
         WARNING("[Generator][DP][MatMul] Tensor shape expected 3 dimensions.");
diff --git a/reference_model/src/generate/generate_dot_product.h b/reference_model/src/generate/generate_dot_product.h
index 236f577..cd9d4ba 100644
--- a/reference_model/src/generate/generate_dot_product.h
+++ b/reference_model/src/generate/generate_dot_product.h
@@ -37,7 +37,7 @@
 ///
 /// \param cfg Generator related meta-data
 /// \param data Buffer to generate the data to
-/// \param size Size of the buffet
+/// \param size Size of the buffer
 ///
 /// \return True on successful generation
 bool generateDotProduct(const GenerateConfig& cfg, void* data, size_t size);
diff --git a/reference_model/src/generate/generate_entry.cc b/reference_model/src/generate/generate_entry.cc
index e7a0044..741cd79 100644
--- a/reference_model/src/generate/generate_entry.cc
+++ b/reference_model/src/generate/generate_entry.cc
@@ -15,6 +15,7 @@
 #include "generate.h"
 
 #include "generate_dot_product.h"
+#include "generate_pseudo_random.h"
 #include "generate_utils.h"
 
 #include "func_debug.h"
@@ -31,6 +32,10 @@
             return generateDotProduct(cfg, data, size);
             break;
         }
+        case GeneratorType::PseudoRandom: {
+            return generatePseudoRandom(cfg, data, size);
+            break;
+        }
         default: {
             WARNING("[Generator] Unsupported generation mode.");
             break;
diff --git a/reference_model/src/generate/generate_pseudo_random.cc b/reference_model/src/generate/generate_pseudo_random.cc
new file mode 100644
index 0000000..858a4b2
--- /dev/null
+++ b/reference_model/src/generate/generate_pseudo_random.cc
@@ -0,0 +1,103 @@
+// Copyright (c) 2023, ARM Limited.
+//
+//    Licensed under the Apache License, Version 2.0 (the "License");
+//    you may not use this file except in compliance with the License.
+//    You may obtain a copy of the License at
+//
+//         http://www.apache.org/licenses/LICENSE-2.0
+//
+//    Unless required by applicable law or agreed to in writing, software
+//    distributed under the License is distributed on an "AS IS" BASIS,
+//    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//    See the License for the specific language governing permissions and
+//    limitations under the License.
+#include "generate.h"
+#include "generate_utils.h"
+
+#include <array>
+#include <iterator>
+#include <limits>
+#include <numeric>
+#include <random>
+#include <string>
+#include <type_traits>
+#include <vector>
+
+namespace
+{
+
+// Seedable pseudo random generator for floating point data
+template <typename FP>
+class PseudoRandomGeneratorFloat
+{
+public:
+    PseudoRandomGeneratorFloat(uint64_t seed)
+        : _gen(seed)
+    {
+        // Uniform real distribution generates real values in the range [a, b]
+        // and requires that b - a <= std::numeric_limits<FP>::max() so here
+        // we choose some arbitrary values that satisfy that condition.
+        constexpr auto min = std::numeric_limits<FP>::lowest() / 2;
+        constexpr auto max = std::numeric_limits<FP>::max() / 2;
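+        // Rearranged form of: max - min <= std::numeric_limits<FP>::max()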
+        static_assert(max <= std::numeric_limits<FP>::max() + min);
+        _unidis = std::uniform_real_distribution<FP>(min, max);
+
+        // Piecewise Constant distribution
+        const std::array<double, 7> intervals{ min, min + 1000, -1000.0, 0.0, 1000.0, max - 1000, max };
+        const std::array<double, 7> weights{ 1.0, 0.1, 1.0, 2.0, 1.0, 0.1, 1.0 };
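+        // Note: the distribution reads one weight per interval, so only the
+        // first six weights are used here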
+        _pwcdis = std::piecewise_constant_distribution<FP>(intervals.begin(), intervals.end(), weights.begin());
+    }
+
+    FP getRandomUniformFloat()
+    {
+        return _unidis(_gen);
+    }
+
+    FP getRandomPWCFloat()
+    {
+        return _pwcdis(_gen);
+    }
+
+private:
+    std::mt19937 _gen;
+    std::uniform_real_distribution<FP> _unidis;
+    std::piecewise_constant_distribution<FP> _pwcdis;
+};
+
+bool generateFP32(const TosaReference::GenerateConfig& cfg, void* data, size_t size)
+{
+    const TosaReference::PseudoRandomInfo& prinfo = cfg.pseudoRandomInfo;
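+    // Seed a fresh generator per tensor so the generated data is reproducible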
+    PseudoRandomGeneratorFloat<float> generator(prinfo.rngSeed);
+
+    float* a     = reinterpret_cast<float*>(data);
+    const auto T = TosaReference::numElementsFromShape(cfg.shape);
+    for (auto t = 0; t < T; ++t)
+    {
+        a[t] = generator.getRandomPWCFloat();
+    }
+    return true;
+}
+
+}    // namespace
+
+namespace TosaReference
+{
+bool generatePseudoRandom(const GenerateConfig& cfg, void* data, size_t size)
+{
+    // Check we support the operator
+    if (cfg.opType == Op::Op_UNKNOWN)
+    {
+        WARNING("[Generator][PR] Unknown operator.");
+        return false;
+    }
+
+    switch (cfg.dataType)
+    {
+        case DType::DType_FP32:
+            return generateFP32(cfg, data, size);
+        default:
+            WARNING("[Generator][PR] Unsupported type.");
+            return false;
+    }
+}
+}    // namespace TosaReference
diff --git a/reference_model/src/generate/generate_pseudo_random.h b/reference_model/src/generate/generate_pseudo_random.h
new file mode 100644
index 0000000..6796d20
--- /dev/null
+++ b/reference_model/src/generate/generate_pseudo_random.h
@@ -0,0 +1,34 @@
+// Copyright (c) 2023, ARM Limited.
+//
+//    Licensed under the Apache License, Version 2.0 (the "License");
+//    you may not use this file except in compliance with the License.
+//    You may obtain a copy of the License at
+//
+//         http://www.apache.org/licenses/LICENSE-2.0
+//
+//    Unless required by applicable law or agreed to in writing, software
+//    distributed under the License is distributed on an "AS IS" BASIS,
+//    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+//    See the License for the specific language governing permissions and
+//    limitations under the License.
+
+#ifndef GENERATE_PSEUDO_RANDOM_H_
+#define GENERATE_PSEUDO_RANDOM_H_
+
+#include "generate_utils.h"
+
+namespace TosaReference
+{
+
+/// \brief Perform pseudo random based generation
+///
+/// \param cfg Generator related meta-data
+/// \param data Buffer to generate the data to
+/// \param size Size of the buffer
+///
+/// \return True on successful generation
+bool generatePseudoRandom(const GenerateConfig& cfg, void* data, size_t size);
+
+}    // namespace TosaReference
+
+#endif    // GENERATE_PSEUDO_RANDOM_H_
diff --git a/reference_model/src/generate/generate_utils.cc b/reference_model/src/generate/generate_utils.cc
index da16632..bcbf9d7 100644
--- a/reference_model/src/generate/generate_utils.cc
+++ b/reference_model/src/generate/generate_utils.cc
@@ -39,6 +39,8 @@
                              {
                                  { Op::Op_UNKNOWN, "UNKNOWN" },
                                  { Op::Op_MATMUL, "MATMUL" },
+                                 { Op::Op_MAX_POOL2D, "MAX_POOL2D" },
+                                 { Op::Op_PAD, "PAD" },
                              })
 
 }    // namespace tosa
@@ -78,6 +80,11 @@
     }
 }
 
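+// Expects JSON of the form: "pseudo_random_info" : { "rng_seed" : <int64> }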
+void from_json(const nlohmann::json& j, PseudoRandomInfo& pseudoRandomInfo)
+{
+    j.at("rng_seed").get_to(pseudoRandomInfo.rngSeed);
+}
+
 void from_json(const nlohmann::json& j, GenerateConfig& cfg)
 {
     j.at("data_type").get_to(cfg.dataType);
@@ -90,6 +97,10 @@
     {
         j.at("dot_product_info").get_to(cfg.dotProductInfo);
     }
+    if (j.contains("pseudo_random_info"))
+    {
+        j.at("pseudo_random_info").get_to(cfg.pseudoRandomInfo);
+    }
 }
 
 std::optional<GenerateConfig> parseGenerateConfig(const char* json, const char* tensorName)
diff --git a/reference_model/src/generate/generate_utils.h b/reference_model/src/generate/generate_utils.h
index e8e67bb..0239e98 100644
--- a/reference_model/src/generate/generate_utils.h
+++ b/reference_model/src/generate/generate_utils.h
@@ -55,6 +55,15 @@
     std::array<int32_t, 2> kernel;
 };
 
+/// \brief Pseudo random generator meta-data
+struct PseudoRandomInfo
+{
+    PseudoRandomInfo() = default;
+
+    int64_t rngSeed;
+    // TODO: Add range support
+};
+
 /// \brief Generator configuration
 struct GenerateConfig
 {
@@ -65,6 +74,7 @@
     int32_t inputPos;
     tosa::Op opType;
     DotProductInfo dotProductInfo;
+    PseudoRandomInfo pseudoRandomInfo;
 };
 
 /// \brief Parse the generator config when given in JSON form
diff --git a/reference_model/src/verify/verify_exact.cc b/reference_model/src/verify/verify_exact.cc
index 4d6c72f..36b4ec9 100644
--- a/reference_model/src/verify/verify_exact.cc
+++ b/reference_model/src/verify/verify_exact.cc
@@ -16,6 +16,14 @@
 #include "verifiers.h"
 #include <cmath>
 
+namespace
+{
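+// A NaN reference must be matched by a NaN implementation value; otherwise the
+// values must compare equal after the float is promoted to double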
+bool exact_fp32(const double& referenceValue, const float& implementationValue)
+{
+    return std::isnan(referenceValue) ? std::isnan(implementationValue) : (referenceValue == implementationValue);
+}
+}    // namespace
+
 namespace TosaReference
 {
 
@@ -33,15 +41,14 @@
     switch (implementationTensor->data_type)
     {
         case tosa_datatype_fp32_t: {
-            const auto* refData = reinterpret_cast<const float*>(referenceTensor->data);
+            TOSA_REF_REQUIRE(referenceTensor->data_type == tosa_datatype_fp64_t, "[E] Reference tensor is not fp64");
+            const auto* refData = reinterpret_cast<const double*>(referenceTensor->data);
             TOSA_REF_REQUIRE(refData != nullptr, "[E] Missing data for reference");
             const auto* impData = reinterpret_cast<const float*>(implementationTensor->data);
             TOSA_REF_REQUIRE(impData != nullptr, "[E] Missing data for implementation");
-            return std::equal(refData, std::next(refData, elementCount), impData, std::next(impData, elementCount),
-                              [](const auto& referenceValue, const auto& implementationValue) {
-                                  return std::isnan(referenceValue) ? std::isnan(implementationValue)
-                                                                    : (referenceValue == implementationValue);
-                              });
+            return std::equal(refData, std::next(refData, elementCount), impData,
+                              std::next(impData, elementCount), exact_fp32);
         }
         default:
             WARNING("[Verifier][E] Data-type not supported.");
diff --git a/reference_model/test/generate_tests.cpp b/reference_model/test/generate_tests.cpp
index 503ecfe..c24a369 100644
--- a/reference_model/test/generate_tests.cpp
+++ b/reference_model/test/generate_tests.cpp
@@ -56,6 +56,24 @@
     }
 }
 
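+// Compare buffers as raw 32-bit patterns so NaN values compare exactly
+// (assumes T is a 4 byte type such as float)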
+template <typename T>
+void check_output(const std::vector<T>& results, const std::vector<T>& expected)
+{
+    for (size_t idx = 0; idx < expected.size(); ++idx)
+    {
+        check_value(true, *(uint32_t*)&results[idx], *(uint32_t*)&expected[idx], idx);
+    }
+}
+
+template <typename T>
+void check_not_output(const std::vector<T>& results, const std::vector<T>& expected)
+{
+    for (size_t idx = 0; idx < expected.size(); ++idx)
+    {
+        check_value(false, *(uint32_t*)&results[idx], *(uint32_t*)&expected[idx], idx);
+    }
+}
+
 }    // namespace
 
 TEST_SUITE_BEGIN("generate");
@@ -268,4 +286,65 @@
         matmul_test_FP32(tosaName, tosaElements, templateJsonCfg, "5", 1, expected);
     }
 }
+TEST_CASE("positive - pseudo random")
+{
+    std::string templateJsonCfg = R"({
+        "tensors" : {
+            "input0" : {
+                "generator": "PSEUDO_RANDOM",
+                "data_type": "FP32",
+                "input_type": "VARIABLE",
+                "shape" : [ 12, 3 ],
+                "input_pos": 0,
+                "op" : "PAD",
+                "pseudo_random_info": {
+                    "rng_seed": _SEED0_
+                }
+            },
+            "input1" : {
+                "generator": "PSEUDO_RANDOM",
+                "data_type": "FP32",
+                "input_type": "VARIABLE",
+                "shape" : [ 1, 3 ],
+                "input_pos": 1,
+                "op" : "PAD",
+                "pseudo_random_info": {
+                    "rng_seed": _SEED1_
+                }
+            }
+        }
+    })";
+
+    const std::string tosaNameP0 = "input0";
+    const size_t tosaElementsP0  = 12 * 3;
+    const std::string tosaNameP1 = "input1";
+    const size_t tosaElementsP1  = 1 * 3;
+
+    SUBCASE("pad - same rng")
+    {
+        std::string jsonCfg = templateJsonCfg;
+        update_json_template(jsonCfg, "_SEED0_", "0");
+        update_json_template(jsonCfg, "_SEED1_", "0");
+
+        std::vector<float> bufferP0(tosaElementsP0);
+        std::vector<float> bufferP1(tosaElementsP1);
+        REQUIRE(tgd_generate_data(jsonCfg.c_str(), tosaNameP0.c_str(), (void*)bufferP0.data(), tosaElementsP0 * 4));
+        REQUIRE(tgd_generate_data(jsonCfg.c_str(), tosaNameP1.c_str(), (void*)bufferP1.data(), tosaElementsP1 * 4));
+        check_output<float>(bufferP0, bufferP1);
+    }
+
+    SUBCASE("pad - different rng")
+    {
+        std::string jsonCfg = templateJsonCfg;
+        update_json_template(jsonCfg, "_SEED0_", "0");
+        update_json_template(jsonCfg, "_SEED1_", "1000");
+
+        std::vector<float> bufferP0(tosaElementsP0);
+        std::vector<float> bufferP1(tosaElementsP1);
+        REQUIRE(tgd_generate_data(jsonCfg.c_str(), tosaNameP0.c_str(), (void*)bufferP0.data(), tosaElementsP0 * 4));
+        REQUIRE(tgd_generate_data(jsonCfg.c_str(), tosaNameP1.c_str(), (void*)bufferP1.data(), tosaElementsP1 * 4));
+        check_not_output<float>(bufferP0, bufferP1);
+    }
+}
 TEST_SUITE_END();    // generate
diff --git a/reference_model/test/verify_tests.cpp b/reference_model/test/verify_tests.cpp
index 3aa477f..369a8cd 100644
--- a/reference_model/test/verify_tests.cpp
+++ b/reference_model/test/verify_tests.cpp
@@ -75,7 +75,7 @@
 std::enable_if_t<std::is_floating_point_v<FP>, std::add_lvalue_reference_t<std::uniform_real_distribution<FP>>>
     getUniformRealDist()
 {
-    // Uniform real distribution generates real values in the range [a, b)
+    // Uniform real distribution generates real values in the range [a, b]
     // and requires that b - a <= std::numeric_limits<FP>::max() so here
     // we choose some arbitrary values that satisfy that condition.
     constexpr auto min = std::numeric_limits<FP>::lowest() / 2;
@@ -261,13 +261,14 @@
     const auto elementCount = std::accumulate(std::begin(shape), std::end(shape), 1, std::multiplies<>());
 
     // Generate some random floats using the full range of fp32.
-    auto data = generateRandomTensorData<float>(elementCount);
+    auto data_fp32 = generateRandomTensorData<float>(elementCount);
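+    // Exact mode now expects reference data as FP64, so widen a copy for the reference tensor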
+    std::vector<double> data_fp64(data_fp32.begin(), data_fp32.end());
     SUBCASE("same")
     {
         const auto referenceTensor =
-            TosaTensor("out1", tosa_datatype_fp64_t, shape, reinterpret_cast<uint8_t*>(data.data()));
+            TosaTensor("out1", tosa_datatype_fp64_t, shape, reinterpret_cast<uint8_t*>(data_fp64.data()));
         const auto implementationTensor =
-            TosaTensor("out1", tosa_datatype_fp32_t, shape, reinterpret_cast<uint8_t*>(data.data()));
+            TosaTensor("out1", tosa_datatype_fp32_t, shape, reinterpret_cast<uint8_t*>(data_fp32.data()));
         REQUIRE(tvf_verify_data(referenceTensor.cTensor(), nullptr, implementationTensor.cTensor(), jsonCfg.c_str()));
     }
 
@@ -275,16 +276,16 @@
     {
         // Generate some mismatched tensors by setting every other value to an incrementing counter.
         // In theory this could be the same, but the probability is tiny.
-        auto otherData = std::vector<float>(elementCount);
-        std::generate(std::begin(otherData), std::end(otherData), [&, i = 0]() mutable {
+        auto otherData_fp32 = std::vector<float>(elementCount);
+        std::generate(std::begin(otherData_fp32), std::end(otherData_fp32), [&, i = 0]() mutable {
             auto oldIndex = i++;
-            return oldIndex % 2 ? data[oldIndex] : static_cast<float>(oldIndex);
+            return oldIndex % 2 ? data_fp32[oldIndex] : static_cast<float>(oldIndex);
         });
 
         const auto referenceTensor =
-            TosaTensor("out1", tosa_datatype_fp64_t, shape, reinterpret_cast<uint8_t*>(data.data()));
+            TosaTensor("out1", tosa_datatype_fp64_t, shape, reinterpret_cast<uint8_t*>(data_fp64.data()));
         const auto implementationTensor =
-            TosaTensor("out1", tosa_datatype_fp32_t, shape, reinterpret_cast<uint8_t*>(otherData.data()));
+            TosaTensor("out1", tosa_datatype_fp32_t, shape, reinterpret_cast<uint8_t*>(otherData_fp32.data()));
         REQUIRE_FALSE(
             tvf_verify_data(referenceTensor.cTensor(), nullptr, implementationTensor.cTensor(), jsonCfg.c_str()));
     }
diff --git a/verif/conformance/tosa_main_profile_ops_info.json b/verif/conformance/tosa_main_profile_ops_info.json
index 0b6dc79..9c18879 100644
--- a/verif/conformance/tosa_main_profile_ops_info.json
+++ b/verif/conformance/tosa_main_profile_ops_info.json
@@ -1471,6 +1471,7 @@
         "profile": [
             "tosa-mi"
         ],
+        "support_for": [ "lazy_data_gen" ],
         "generation": {
             "standard": {
                 "generator_args": [
@@ -1599,6 +1600,7 @@
         "profile": [
             "tosa-mi"
         ],
+        "support_for": [ "lazy_data_gen" ],
         "generation": {
             "standard": {
                 "generator_args": [
diff --git a/verif/generator/tosa_arg_gen.py b/verif/generator/tosa_arg_gen.py
index 475f062..f7837a0 100644
--- a/verif/generator/tosa_arg_gen.py
+++ b/verif/generator/tosa_arg_gen.py
@@ -635,7 +635,11 @@
         # Variable inputs versus constants
         pCount, cCount = testGen.TOSA_OP_LIST[opName]["operands"]
 
-        if error_name is not None or not gtu.dtypeIsSupportedByCompliance(dtypeList[0]):
+        if (
+            error_name is not None
+            or not gtu.dtypeIsSupportedByCompliance(dtypeList[0])
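+            # avg_pool2d is not yet wired up for compliance data generation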
+            or opName in ("avg_pool2d",)
+        ):
             # Fall back to original path when dealing with unsupported types
 
             # First turn off lazy data gen so we always produce data
@@ -678,7 +682,7 @@
             if dg_type == gtu.DataGenType.PSEUDO_RANDOM:
                 info = {}
                 # TODO - generate seed for this generator based on test
-                info["rng_seed"] = -1
+                info["rng_seed"] = 42
                 info["range"] = [
                     str(v)
                     for v in testGen.getDTypeRange(dtypeList[idx], high_inclusive=True)
@@ -1107,7 +1111,7 @@
         pass
 
     @staticmethod
-    def _add_data_generators(testGen, opName, dtype, arg_list, error_name, **kwargs):
+    def _add_data_generators(testGen, opName, dtype, arg_list, error_name):
         """Add extra tests for each type of data generator for this op."""
         if (
             error_name is None
@@ -1125,32 +1129,28 @@
         # Expand arg list with other data generator types
         new_arg_list = []
         for dg_type in dataGenTypesList:
-            for arg_str, arg_attrs in arg_list:
-                arg_dict = arg_attrs[0]
-                arg_dict["dg_type"] = dg_type
-
+            for arg_str, args_dict in arg_list:
+                args_dict["dg_type"] = dg_type
                 if dg_type == gtu.DataGenType.PSEUDO_RANDOM:
                     # Default test
-                    new_arg_list.append((arg_str, [arg_dict]))
+                    # Copy so a later generator type cannot mutate this entry
+                    new_arg_list.append((arg_str, args_dict.copy()))
 
                 elif dg_type == gtu.DataGenType.DOT_PRODUCT:
                     # Extra tests for each dot product test set
-                    dot_products = kwargs["dot_products"]
+                    dot_products = args_dict["dot_products"]
                     if dot_products < testGen.TOSA_MI_DOT_PRODUCT_MIN:
                         print(
                             f"Skipping {opName} dot product test as too few calculations {dot_products} < {testGen.TOSA_MI_DOT_PRODUCT_MIN}"
                         )
                         continue
-                    arg_dict["ks"] = kwargs["ks"]
-                    for key in gtu.DG_DOT_PRODUCT_OPTIONAL_INFO:
-                        if key in kwargs:
-                            arg_dict[key] = kwargs[key]
+                    # KS is required by all dot product generators
+                    assert "ks" in args_dict
 
                     for s in testGen.TOSA_MI_DOT_PRODUCT_TEST_SETS:
                         new_arg_str = f"{arg_str}_s{s}"
-                        new_arg_dict = arg_dict.copy()
-                        new_arg_dict["s"] = s
-                        new_arg_list.append((new_arg_str, [new_arg_dict]))
+                        new_args_dict = args_dict.copy()
+                        new_args_dict["s"] = s
+                        new_arg_list.append((new_arg_str, new_args_dict))
 
         return new_arg_list
 
@@ -1421,9 +1421,21 @@
             # Pick some potentially correct output dtype if input type is incorrect
             accum_dtypes = [DType.INT32]
 
-        arg_list = [
-            (f"acc{testGen.typeStr(a)}", [{"acc_type": a}]) for a in accum_dtypes
-        ]
+        # Set up compliance info
+        args_dict = {
+            "ks": int(shapeList[0][2]),  # Set KS = C, from input A (N,H,C)
+            # Set dot_products = N*H*W
+            "dot_products": gtu.product(
+                (shapeList[0][0], shapeList[0][1], shapeList[1][2])
+            ),
+        }
+
+        # Create arg tuple of string and dict
+        arg_list = []
+        for a in accum_dtypes:
+            d = args_dict.copy()
+            d["acc_type"] = a
+            arg_list.append((f"acc{testGen.typeStr(a)}", d))
 
         arg_list = TosaArgGen._add_data_generators(
             testGen,
@@ -1431,12 +1443,8 @@
             dtype,
             arg_list,
             error_name,
-            ks=int(shapeList[0][2]),  # Set KS = C, from input A (N,H,C)
-            # Set dot_products = N*H*W
-            dot_products=gtu.product(
-                (shapeList[0][0], shapeList[0][1], shapeList[1][2])
-            ),
         )
+        # Return list of tuples: (arg_str, args_dict)
         return arg_list
 
     @staticmethod
@@ -1574,7 +1582,6 @@
 
     @staticmethod
     def agPad(testGen, opName, shapeList, dtype, error_name=None):
-        arg_list = []
         rank = len(shapeList[0])
 
         # Exhaustively test combinations of padding on each side of each dimension
@@ -1606,6 +1613,8 @@
         else:
             sparsity = 1
 
+        # Build arg list
+        arg_list = []
         for n, paddings in enumerate(list_shape_pad_values):
             paddings = list(paddings)
             args_valid = True
@@ -1625,13 +1634,25 @@
                 for r in range(rank):
                     before, after = paddings[r]
                     name = f"{name}{before}{after}"
-                arg_list.append(
-                    (name, [np.array(paddings), pad_const_int, pad_const_fp])
-                )
+                args_dict = {
+                    "pad": np.array(paddings),
+                    "pad_const_int": pad_const_int,
+                    "pad_const_fp": pad_const_fp,
+                }
+                arg_list.append((name, args_dict))
 
         if error_name == ErrorIf.PadSmallerZero and len(arg_list) == 0:
             warnings.warn(f"No ErrorIf test created for input shape: {shapeList[0]}")
 
+        arg_list = TosaArgGen._add_data_generators(
+            testGen,
+            opName,
+            dtype,
+            arg_list,
+            error_name,
+        )
+
+        # Return list of tuples: (arg_str, args_dict)
         return arg_list
 
     @staticmethod
@@ -1735,9 +1756,9 @@
             else "st{}_kern{}_pad{}"
         )
 
-        def get_arg_list_element(accum, stride, pad, kern):
+        def get_arg_list_element(accum, stride, pad, kern, dot_products=0):
             # Return tuple containing the formatted argument string and
-            # the corresponding argument values
+            # the corresponding argument values in a dictionary
 
             # Support for larger values than 9 needs different delimiter
             delim = "" if max(stride + kern + pad) <= 9 else "x"
@@ -1746,13 +1767,18 @@
                 delim.join([str(x) for x in kern]),
                 delim.join([str(x) for x in pad]),
             ]
-            # Note: different order to string
-            arg_val_elems = [stride, pad, kern]
+            args_dict = {
+                "stride": stride,
+                "pad": pad,
+                "kernel": kern,
+                "dot_products": dot_products,  # Ignored for error tests
+                "ks": gtu.product(kern),  # avg_pool2d: KS = KX*KY
+            }
 
             if accum is not None:
                 arg_str_elems.insert(0, testGen.typeStr(accum))
-                arg_val_elems.insert(0, accum)
-            return (arg_str.format(*arg_str_elems), arg_val_elems)
+                args_dict["acc_type"] = accum
+            return (arg_str.format(*arg_str_elems), args_dict)
 
         n = 0
         for a in accum_dtypes:
@@ -1769,8 +1795,9 @@
                                 testGen, error_name, s, p, k
                             )
                             if None not in [sNew, pNew, kNew] and n % sparsity == 0:
-                                arg_vals = [a, sNew, pNew, kNew]
-                                arg_list.append(get_arg_list_element(*arg_vals))
+                                arg_list.append(
+                                    get_arg_list_element(a, sNew, pNew, kNew)
+                                )
                         elif (
                             n % sparsity == 0
                             # padding must not exceed the kernel size
@@ -1804,10 +1831,23 @@
                                 ):
                                     # Test will consume too much memory - skip it
                                     continue
-                                arg_vals = [a, s, p, k]
-                                arg_list.append(get_arg_list_element(*arg_vals))
+                                # Dot products = N*OH*OW*C
+                                dp = gtu.product(
+                                    (shape[0], output_h, output_w, shape[3])
+                                )
+                                arg_list.append(get_arg_list_element(a, s, p, k, dp))
                         n += 1
 
+        # Now add data generator types
+        arg_list = TosaArgGen._add_data_generators(
+            testGen,
+            opName,
+            dtype,
+            arg_list,
+            error_name,
+        )
+
+        # Return list of tuples: (arg_str, args_dict)
         return arg_list
 
     @staticmethod
diff --git a/verif/generator/tosa_error_if.py b/verif/generator/tosa_error_if.py
index d490cf2..ed1a941 100644
--- a/verif/generator/tosa_error_if.py
+++ b/verif/generator/tosa_error_if.py
@@ -2653,16 +2653,28 @@
 
         args = kwargs["args"]
 
-        # Skip accum_dtype arg (apart from MaxPool2D that doesn't have one)
-        stride_idx, pad_idx = (1, 2) if opName != "max_pool2d" else (0, 1)
+        if isinstance(args, dict):
+            args_dict = args
+        else:
+            # Create args_dict from list elements
+            # TODO - Remove this once all NHWC operators' ag functions have
+            # been converted to args_dict output
+
+            # Skip accum_dtype arg (apart from MaxPool2D that doesn't have one)
+            stride_idx, pad_idx = (1, 2) if opName != "max_pool2d" else (0, 1)
+            args_dict = {"stride": args[stride_idx], "pad": args[pad_idx]}
+            # The same positional slot holds the kernel shape, output shape or
+            # dilations depending on the op, so alias it under all three keys
+            args_dict["kernel"] = args[pad_idx + 1]
+            args_dict["out_shape"] = args[pad_idx + 1]
+            args_dict["dilation"] = args[pad_idx + 1]
 
         # Common info for all ops
-        strides = args[stride_idx]
-        padding = args[pad_idx]
+        strides = args_dict["stride"]
+        padding = args_dict["pad"]
 
         if opName.endswith("pool2d"):
             # avg_pool2d, max_pool2d
-            kernel_shape = args[pad_idx + 1]
+            kernel_shape = args_dict["kernel"]
             h = (
                 input_shape[1] + padding[0] + padding[1] + strides[0] - kernel_shape[0]
             ) // strides[0]
@@ -2674,7 +2686,7 @@
 
         if opName.startswith("transpose_conv2d"):
             # transpose_conv2d
-            output_shape = args[pad_idx + 1]
+            output_shape = args_dict["out_shape"]
             filter_shape = inputShapes[1]
             kernel_shape = filter_shape[1:-1]
 
@@ -2703,7 +2715,7 @@
 
         if "conv2d" in opName or "conv3d" in opName:
             # conv2d, conv3d, depthwise_conv2d
-            dilations = args[pad_idx + 1]
+            dilations = args_dict["dilation"]
             filter_shape = inputShapes[1]
             kernel_shape = (
                 filter_shape[0:2]
diff --git a/verif/generator/tosa_test_gen.py b/verif/generator/tosa_test_gen.py
index 8fcea29..17cbd8f 100644
--- a/verif/generator/tosa_test_gen.py
+++ b/verif/generator/tosa_test_gen.py
@@ -658,15 +658,22 @@
     def build_pool2d(
         self,
         op,
-        input,
-        accum_dtype,
-        stride,
-        pad,
-        kernel,
+        inputs,
+        args_dict,
         validator_fcns=None,
         error_name=None,
         qinfo=None,
     ):
+        assert len(inputs) == 1
+        input = inputs[0]
+        # max_pool has no accum_dtype
+        accum_dtype = args_dict.get("acc_type", DType.UNKNOWN)
+        stride = args_dict["stride"]
+        pad = args_dict["pad"]
+        kernel = args_dict["kernel"]
+
         result_tens = OutputShaper.pool2dOp(
             self.ser, self.rng, input, kernel, stride, pad, error_name
         )
@@ -720,27 +727,28 @@
     def build_maxpool2d(
         self,
         op,
-        input,
-        stride,
-        pad,
-        kernel,
+        inputs,
+        args_dict,
         validator_fcns=None,
         error_name=None,
         qinfo=None,
     ):
-        # Same as build_pool2d but manually sets accum_dtype value
-        # (maxpool has no accum_dtype)
-        return self.build_pool2d(
+        result_tensor = self.build_pool2d(
             op,
-            input,
-            DType.UNKNOWN,
-            stride,
-            pad,
-            kernel,
+            inputs,
+            args_dict,
             validator_fcns,
             error_name,
             qinfo,
         )
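+        # Attach compliance meta data only when the data type supports checking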
+        if gtu.dtypeIsSupportedByCompliance(inputs[0].dtype):
+            compliance = self.tensorComplianceMetaData(
+                op, args_dict, result_tensor, error_name
+            )
+        else:
+            compliance = None
+
+        return TosaTestGen.BuildInfo(result_tensor, compliance)
 
     def build_conv2d(
         self,
@@ -1070,8 +1078,10 @@
         return result_tens
 
     def build_matmul(
-        self, op, a, b, args_dict, validator_fcns=None, error_name=None, qinfo=None
+        self, op, inputs, args_dict, validator_fcns=None, error_name=None, qinfo=None
     ):
+        assert len(inputs) == 2
+        a, b = inputs
         accum_dtype = args_dict["acc_type"]
         result_tensor = OutputShaper.matmulOp(
             self.ser, self.rng, a, b, accum_dtype, error_name
@@ -1372,15 +1382,19 @@
     def build_pad(
         self,
         op,
-        a,
-        padding,
-        pad_const_int,
-        pad_const_float,
+        inputs,
+        args_dict,
         validator_fcns=None,
         error_name=None,
         qinfo=None,
     ):
-        result_tens = OutputShaper.padOp(self.ser, self.rng, a, padding, error_name)
+        assert len(inputs) == 1
+        a = inputs[0]
+        padding = args_dict["pad"]
+        pad_const_int = args_dict["pad_const_int"]
+        pad_const_float = args_dict["pad_const_fp"]
+
+        result_tensor = OutputShaper.padOp(self.ser, self.rng, a, padding, error_name)
 
         attr = ts.TosaSerializerAttribute()
         attr.PadAttribute(
@@ -1389,7 +1403,7 @@
 
         # Invalidate Input/Output list for error if checks.
         input_list = [a.name]
-        output_list = [result_tens.name]
+        output_list = [result_tensor.name]
         pCount, cCount = op["operands"]
         num_operands = pCount + cCount
         input_list, output_list = TosaErrorIfArgGen.eiInvalidateInputOutputList(
@@ -1402,12 +1416,12 @@
             error_name,
             op=op,
             input_shape=a.shape,
-            output_shape=result_tens.shape,
+            output_shape=result_tensor.shape,
             input_dtype=a.dtype,
-            output_dtype=result_tens.dtype,
+            output_dtype=result_tensor.dtype,
             pad=padding,
             qinfo=qinfo,
-            result_tensors=[result_tens],
+            result_tensors=[result_tensor],
             input_list=input_list,
             output_list=output_list,
             num_operands=num_operands,
@@ -1416,7 +1430,15 @@
             return None
 
         self.ser.addOperator(op["op"], input_list, output_list, attr)
-        return result_tens
+
+        if gtu.dtypeIsSupportedByCompliance(a.dtype):
+            compliance = self.tensorComplianceMetaData(
+                op, args_dict, result_tensor, error_name
+            )
+        else:
+            compliance = None
+
+        return TosaTestGen.BuildInfo(result_tensor, compliance)
 
     def build_dim(
         self,
@@ -2609,8 +2631,9 @@
         tensMeta = {}
 
         # Check we are using the new testArgs interface with an argsDict dictionary
-        if len(testArgs) == 1 and isinstance(testArgs[0], dict):
-            argsDict = testArgs[0]
+        if isinstance(testArgs, dict):
+            # New interface with args info in dictionary
+            argsDict = testArgs
             assert "dg_type" in argsDict
             tvgInfo = tvgen_fcn(
                 self, opName, dtypeList, shapeList, argsDict, error_name
@@ -2618,38 +2641,49 @@
             if tvgInfo.dataGenDict:
                 tensMeta["data_gen"] = tvgInfo.dataGenDict
             tens = tvgInfo.tensorList
+
+            result = build_fcn(
+                self,
+                op,
+                tens,
+                argsDict,
+                validator_fcns=error_if_validators,
+                error_name=error_name,
+                qinfo=qinfo,
+            )
         else:
+            # Old interface with args info in a list
             tens = tvgen_fcn(self, op, dtypeList, shapeList, testArgs, error_name)
 
-        try:
-            if error_if_validators is None:
-                if qinfo is not None:
-                    result = build_fcn(self, op, *tens, *testArgs, qinfo)
+            try:
+                if error_if_validators is None:
+                    if qinfo is not None:
+                        result = build_fcn(self, op, *tens, *testArgs, qinfo)
+                    else:
+                        result = build_fcn(self, op, *tens, *testArgs)
                 else:
-                    result = build_fcn(self, op, *tens, *testArgs)
-            else:
-                if qinfo is not None:
-                    result = build_fcn(
-                        self,
-                        op,
-                        *tens,
-                        *testArgs,
-                        validator_fcns=error_if_validators,
-                        error_name=error_name,
-                        qinfo=qinfo,
-                    )
-                else:
-                    result = build_fcn(
-                        self,
-                        op,
-                        *tens,
-                        *testArgs,
-                        validator_fcns=error_if_validators,
-                        error_name=error_name,
-                    )
-        except TypeError as e:
-            print(f"build_fcn: {build_fcn}\nTensors: {tens}\nArgs: {testArgs}\n")
-            raise e
+                    if qinfo is not None:
+                        result = build_fcn(
+                            self,
+                            op,
+                            *tens,
+                            *testArgs,
+                            validator_fcns=error_if_validators,
+                            error_name=error_name,
+                            qinfo=qinfo,
+                        )
+                    else:
+                        result = build_fcn(
+                            self,
+                            op,
+                            *tens,
+                            *testArgs,
+                            validator_fcns=error_if_validators,
+                            error_name=error_name,
+                        )
+            except TypeError as e:
+                print(f"build_fcn: {build_fcn}\nTensors: {tens}\nArgs: {testArgs}\n")
+                raise e
 
         if result:
             # The test is valid, serialize it
@@ -2847,7 +2881,7 @@
             "build_fcn": (
                 build_pool2d,
                 TosaTensorGen.tgNHWC,
-                TosaTensorValuesGen.tvgDefault,
+                TosaTensorValuesGen.tvgLazyGenDefault,
                 TosaArgGen.agPooling,
             ),
             "qgen": TosaQuantGen.qgUnary,
@@ -3004,7 +3038,6 @@
             ),
             "data_gen": {
                 "fp": (gtu.DataGenType.DOT_PRODUCT,),
-                "int": (gtu.DataGenType.PSEUDO_RANDOM,),
             },
         },
         "max_pool2d": {
@@ -3014,7 +3047,7 @@
             "build_fcn": (
                 build_maxpool2d,
                 TosaTensorGen.tgNHWC,
-                TosaTensorValuesGen.tvgDefault,
+                TosaTensorValuesGen.tvgLazyGenDefault,
                 TosaArgGen.agPooling,
             ),
             "types": TYPE_NARROW_INT_FP,
@@ -3032,6 +3065,9 @@
                 TosaErrorValidator.evPoolingOutputShapeMismatch,
                 TosaErrorValidator.evPoolingOutputShapeNonInteger,
             ),
+            "data_gen": {
+                "fp": (gtu.DataGenType.PSEUDO_RANDOM,),
+            },
         },
         # Templated operator.  Filled in by createDynamicOpLists
         "transpose_conv2d_TEMPLATE": {
@@ -3909,7 +3945,7 @@
             "build_fcn": (
                 build_pad,
                 TosaTensorGen.tgBasic,
-                TosaTensorValuesGen.tvgDefault,
+                TosaTensorValuesGen.tvgLazyGenDefault,
                 TosaArgGen.agPad,
             ),
             "types": TYPE_FIB,
@@ -3923,6 +3959,9 @@
                 TosaErrorValidator.evRankMismatch,
                 TosaErrorValidator.evWrongRank,
             ),
+            "data_gen": {
+                "fp": (gtu.DataGenType.PSEUDO_RANDOM,),
+            },
         },
         "dim": {
             "op": Op.DIM,