Add CONST testing to Numpy refmodel tests

Signed-off-by: Jeremy Johnson <jeremy.johnson@arm.com>
Change-Id: Id32f3b7c287c9e8b0fd42dd96922be5a255598e2
diff --git a/verif/generator/tosa_test_gen.py b/verif/generator/tosa_test_gen.py
index fe05b57..b76b656 100644
--- a/verif/generator/tosa_test_gen.py
+++ b/verif/generator/tosa_test_gen.py
@@ -40,7 +40,7 @@
 
         fullPath = os.path.join(self.basePath, self.testPath)
         os.makedirs(fullPath, exist_ok=True)
-        self.ser = ts.TosaSerializer(fullPath)
+        self.ser = ts.TosaSerializer(fullPath, saveConstsToFile=self.args.dump_consts)
 
     def getSerializer(self):
         return self.ser
diff --git a/verif/generator/tosa_verif_build_tests.py b/verif/generator/tosa_verif_build_tests.py
index fee551b..2fafacb 100644
--- a/verif/generator/tosa_verif_build_tests.py
+++ b/verif/generator/tosa_verif_build_tests.py
@@ -192,6 +192,13 @@
         help="set a particular zero point for all valid positive tests",
     )
 
+    parser.add_argument(
+        "--dump-const-tensors",
+        dest="dump_consts",
+        action="store_true",
+        help="output const tensors as numpy files for inspection",
+    )
+
     args = parser.parse_args(argv)
 
     return args
diff --git a/verif/tests/test_tosa_refmodel.py b/verif/tests/test_tosa_refmodel.py
index 46c197a..b2f33dd 100644
--- a/verif/tests/test_tosa_refmodel.py
+++ b/verif/tests/test_tosa_refmodel.py
@@ -2,6 +2,7 @@
 # Copyright (c) 2022, ARM Limited.
 # SPDX-License-Identifier: Apache-2.0
 import json
+import re
 from pathlib import Path
 from shutil import rmtree
 
@@ -25,6 +26,7 @@
 
 # Default tensor shape information
 SHAPE_LIST = ["10", "5"]
+SHAPE_DIMS = len(SHAPE_LIST)
 SHAPE_ARG = ",".join(SHAPE_LIST)
 SHAPE_OUT = "x".join(SHAPE_LIST)
 
@@ -32,11 +34,13 @@
 OUTPUT_DIR_PREFIX = "_pytest_vtest"
 OUTPUT_OFM_FILE = "result_refmodel_pytest.npy"
 OUTPUT_RESULT_FILE = "result_numpy_pytest.npy"
+OUTPUT_CONST_GLOB = "const-*.npy"
 
 TEST_DESC_FILENAME = "desc.json"
 
 # Conversion from refmodel type into the type abbreviation used in the test output
 REF_MODEL_TYPE_TO_OUT = {
+    "bool": "b",
     "int8": "i8",
     "uint8": "u8",
     "int16": "i16",
@@ -54,11 +58,12 @@
 class BuildTosaTest:
     """Wrapper for managing lifecycle of TOSA unit tests."""
 
-    def __init__(self, op_name, ref_model_type):
+    def __init__(self, op_name, ref_model_type, num_expected_tests):
         self.op_name = op_name
         self.ref_model_type = ref_model_type
+        self.num_expected_tests = num_expected_tests
         self.output_dir = None
-        self.test_dir = None
+        self.test_dirs = None
 
     def create_test(self):
         """Helper to generate a TOSA unit test."""
@@ -71,7 +76,7 @@
             / f"{OUTPUT_DIR_PREFIX}_{self.op_name}_{self.ref_model_type}"
         )
 
-        # Generate test without any zero-point
+        # Generate tests without any zero-point
         build_args = [
             "--filter",
             self.op_name,
@@ -81,22 +86,26 @@
             self.ref_model_type,
             "--zero-point",
             "0",
+            "--num-const-inputs-concat",
+            "1",
+            "--dump-const-tensors",
             "-o",
             str(self.output_dir),
         ]
-        print(build_args)
+        print(f"### Building tests: tosa_verif_build_tests {' '.join(build_args)}")
         tosa_builder(build_args)
 
         # Find the created test
         test_dir = self.output_dir / self.op_name
         # Can't assume exact name due to broadcasting and other changes to shape
-        test_glob = f"{self.op_name}_*_{REF_MODEL_TYPE_TO_OUT[self.ref_model_type]}"
-        tests = sorted(test_dir.glob(test_glob))
-        assert len(tests) == 1
-        assert tests[0].is_dir()
-        self.test_dir = tests[0]
+        test_glob = f"{self.op_name}_*_{REF_MODEL_TYPE_TO_OUT[self.ref_model_type]}*"
+        test_dirs = sorted(test_dir.glob(test_glob))
+        assert len(test_dirs) == self.num_expected_tests
+        for test_dir in test_dirs:
+            assert test_dir.is_dir()
+        self.test_dirs = test_dirs
 
-        return self.test_dir
+        return self.test_dirs
 
     def remove_test(self):
         if self.output_dir is not None and self.output_dir.is_dir():
@@ -110,30 +119,36 @@
                 print(f"Skipped clean up of {test_tree}")
 
 
-# Tests - op_name, ref_model_type
+# Tests - op_name, ref_model_type, num_expected_tests
 TEST_PARAMS = [
-    ("add", "int32"),
-    ("add", "float"),
-    ("abs", "int32"),
-    ("abs", "float"),
-    ("negate", "int8"),
-    ("negate", "int16"),
-    ("negate", "int32"),
-    ("negate", "float"),
+    ("add", "int32", 1),
+    ("add", "float", 1),
+    ("abs", "int32", 1),
+    ("abs", "float", 1),
+    ("negate", "int8", 1),
+    ("negate", "int16", 1),
+    ("negate", "int32", 1),
+    ("negate", "float", 1),
+    # One test is generated per axis, i.e. one per shape dimension
+    ("concat", "bool", SHAPE_DIMS),
+    ("concat", "int8", SHAPE_DIMS),
+    ("concat", "int16", SHAPE_DIMS),
+    ("concat", "int32", SHAPE_DIMS),
+    ("concat", "float", SHAPE_DIMS),
 ]
 
 
 def id_2_name(id):
     """Convert test id to name - otherwise it will be tosaTestN."""
-    op_name, ref_model_type = id
+    op_name, ref_model_type, _ = id
     return f"{op_name}-{ref_model_type}"
 
 
 @pytest.fixture(params=TEST_PARAMS, ids=id_2_name)
 def tosaTest(request):
     """Fixture to generate the required test params and clean up."""
-    op_name, ref_model_type = request.param
-    tst = BuildTosaTest(op_name, ref_model_type)
+    op_name, ref_model_type, num_expected_tests = request.param
+    tst = BuildTosaTest(op_name, ref_model_type, num_expected_tests)
     yield tst
     tst.remove_test()
 
@@ -143,58 +158,73 @@
     """Operator testing versus Numpy."""
     op_name = tosaTest.op_name
 
-    # Generate a TOSA test
-    test_dir = tosaTest.create_test()
+    # Generate TOSA test(s) (usually a single test, one per axis for concat)
+    test_dirs = tosaTest.create_test()
 
-    # Run ref model
-    desc_file = test_dir / TEST_DESC_FILENAME
-    assert desc_file.is_file()
-    refmodel_cmd = [
-        str(REF_MODEL),
-        "--test_desc",
-        str(desc_file),
-        "--ofm_file",
-        OUTPUT_OFM_FILE,
-    ]
-    try:
-        run_sh_command(refmodel_cmd, verbose=True, capture_output=True)
-    except RunShCommandError as err:
-        assert False, f"Unexpected exception {err}"
+    for test_dir in test_dirs:
+        # Run ref model
+        desc_file = test_dir / TEST_DESC_FILENAME
+        assert desc_file.is_file()
+        refmodel_cmd = [
+            str(REF_MODEL),
+            "--test_desc",
+            str(desc_file),
+            "--ofm_file",
+            OUTPUT_OFM_FILE,
+        ]
+        try:
+            run_sh_command(refmodel_cmd, verbose=True, capture_output=True)
+        except RunShCommandError as err:
+            assert False, f"Unexpected exception {err}"
 
-    # Find output
-    ofm_file = test_dir / OUTPUT_OFM_FILE
-    assert ofm_file.is_file()
+        # Find output
+        ofm_file = test_dir / OUTPUT_OFM_FILE
+        assert ofm_file.is_file()
 
-    # Load inputs for Numpy
-    with desc_file.open("r") as fp:
-        test_desc = json.load(fp)
-    tensors = []
-    assert "ifm_file" in test_desc
-    for input_name in test_desc["ifm_file"]:
-        input_file = test_dir / input_name
-        assert input_file.is_file()
-        tensors.append(np.load(str(input_file)))
+        # Load inputs for Numpy
+        with desc_file.open("r") as fp:
+            test_desc = json.load(fp)
+        tensors = []
+        assert "ifm_file" in test_desc
+        for input_name in test_desc["ifm_file"]:
+            input_file = test_dir / input_name
+            assert input_file.is_file()
+            tensors.append(np.load(str(input_file)))
 
-    # Perform Numpy operation
-    if op_name == "abs":
-        assert len(tensors) == 1
-        result = np.abs(tensors[0])
-    elif op_name == "add":
-        assert len(tensors) == 2
-        result = np.add(tensors[0], tensors[1])
-    elif op_name == "negate":
-        assert len(tensors) == 1
-        result = np.negative(tensors[0])
-    else:
-        assert False, f"Unknown operation {op_name}"
+        # Load constants for Numpy
+        const_files = sorted(test_dir.glob(OUTPUT_CONST_GLOB))
+        consts = []
+        for const_file in const_files:
+            assert const_file.is_file()
+            consts.append(np.load(str(const_file)))
 
-    # Save Numpy result
-    result_file = test_dir / OUTPUT_RESULT_FILE
-    np.save(str(result_file), result)
-    assert result_file.is_file()
+        # Perform Numpy operation
+        if op_name == "abs":
+            assert len(tensors) == 1
+            result = np.abs(tensors[0])
+        elif op_name == "add":
+            assert len(tensors) == 2
+            result = np.add(tensors[0], tensors[1])
+        elif op_name == "concat":
+            assert len(consts) == 1
+            # Get axis from test directory name
+            match = re.search(r"axis([0-9]+)", test_dir.name)
+            assert match is not None
+            axis = int(match.group(1))
+            result = np.concatenate((*tensors, consts[0]), axis=axis)
+        elif op_name == "negate":
+            assert len(tensors) == 1
+            result = np.negative(tensors[0])
+        else:
+            assert False, f"Unknown operation {op_name}"
 
-    # Check Numpy result versus refmodel
-    check_result, tolerance, msg = tosa_check(
-        str(result_file), str(ofm_file), test_name=test_dir.name
-    )
-    assert check_result == TosaResult.PASS
+        # Save Numpy result
+        result_file = test_dir / OUTPUT_RESULT_FILE
+        np.save(str(result_file), result)
+        assert result_file.is_file()
+
+        # Check Numpy result versus refmodel
+        check_result, tolerance, msg = tosa_check(
+            str(result_file), str(ofm_file), test_name=test_dir.name
+        )
+        assert check_result == TosaResult.PASS