Add support for compliance checking to testing

Updated the conformance generator so that it no longer generates
expected results for compliance tests.
Updated the test runner to run the compliance mode version (precise &
abs mode) of the reference model, creating reference results to check
SUT results against.
Updated the reference model to enable abs_mode when the appropriate
desc.json flags are set.
Updated the test checker to support compliance checking using the
verifier library.
Separated color printing from the test checker.
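
For reference, the compliance metadata that the runner now detects in
desc.json has the following rough shape (a hypothetical fragment - the
code only inspects the meta/compliance/tensors/<name>/mode keys):

    # Hypothetical descriptor, as loaded from desc.json
    testDesc = {
        "ofm_file": ["result_0.npy"],
        "ofm_name": ["result_0"],
        "meta": {
            "compliance": {
                "tensors": {"result_0": {"mode": "DOT_PRODUCT"}},
            },
        },
    }
    # isComplianceModeDotProduct(testDesc) returns True for this descriptor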

Change-Id: I7e2fbfc6883916caa5d94d4ece122c48bf45f530
Signed-off-by: Jeremy Johnson <jeremy.johnson@arm.com>
diff --git a/verif/runner/tosa_test_runner.py b/verif/runner/tosa_test_runner.py
index 579dd60..30a7168 100644
--- a/verif/runner/tosa_test_runner.py
+++ b/verif/runner/tosa_test_runner.py
@@ -4,11 +4,43 @@
 import json
 from enum import IntEnum
 
-from checker.tosa_result_checker import LogColors
-from checker.tosa_result_checker import print_color
-from checker.tosa_result_checker import set_print_in_color
+import conformance.model_files as cmf
+import schemavalidation.schemavalidation as sch
+from checker.color_print import LogColors
+from checker.color_print import print_color
+from checker.color_print import set_print_in_color
+from checker.tosa_result_checker import set_print_result
 from checker.tosa_result_checker import test_check
 from json2fbbin import json2fbbin
+from runner.tosa_test_presets import TOSA_REFCOMPLIANCE_RUNNER
+
+
+def isComplianceModeDotProduct(testDesc):
+    """Checks the test descriptor for DOT_PRODUCT compliance mode."""
+    if (
+        "meta" in testDesc
+        and "compliance" in testDesc["meta"]
+        and "tensors" in testDesc["meta"]["compliance"]
+    ):
+        for t in testDesc["meta"]["compliance"]["tensors"].values():
+            if t.get("mode") == "DOT_PRODUCT":
+                return True
+    return False
+
+
+def getRunnerResultFilePath(resultFilePath, sutModule):
+    """Return the result file path with the runner specific naming."""
+    return resultFilePath.with_suffix(f".{sutModule}{resultFilePath.suffix}")
+
+
+def getBoundsResultFilePath(resultFilePath, sutModule=None):
+    """Return the bounds result file with/without runner specific naming."""
+    boundsFilePath = resultFilePath.parent / f"bounds_{resultFilePath.name}"
+    if sutModule is not None:
+        boundsFilePath = boundsFilePath.with_suffix(
+            f".{sutModule}{boundsFilePath.suffix}"
+        )
+    return boundsFilePath
 
 
 class TosaTestInvalid(Exception):
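
A sketch of the naming these helpers produce, assuming a hypothetical
SUT module path:

    from pathlib import Path

    result = Path("tests/add/result_0.npy")
    sut = "runner.example_sut_run"  # hypothetical module name

    getRunnerResultFilePath(result, sut)
    # -> tests/add/result_0.runner.example_sut_run.npy
    getBoundsResultFilePath(result)
    # -> tests/add/bounds_result_0.npy
    getBoundsResultFilePath(result, sut)
    # -> tests/add/bounds_result_0.runner.example_sut_run.npy
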
@@ -39,8 +71,13 @@
         self.testDir = str(testDirPath)
         self.testDirPath = testDirPath
         self.testName = self.testDirPath.name
+        self.verify_lib_path = cmf.find_tosa_file(
+            cmf.TosaFileType.VERIFY_LIBRARY, args.ref_model_path
+        )
 
         set_print_in_color(not args.no_color)
+        # Stop the result checker printing anything - we will print results ourselves
+        set_print_result(False)
 
         # Check if we want to run binary and if its already converted
         descFilePath = testDirPath / "desc.json"
@@ -53,6 +90,8 @@
             # Load the json test file
             with descFilePath.open("r") as fd:
                 self.testDesc = json.load(fd)
+            # Validate the json with the schema
+            sch.TestDescSchemaValidator().validate_config(self.testDesc)
         except Exception as e:
             raise TosaTestInvalid(str(descFilePath), e)
 
@@ -76,6 +115,16 @@
         self.descFile = str(descFilePath)
         self.descFilePath = descFilePath
 
+        # Check for compliance mode - need to run refmodel to get results
+        if "meta" in self.testDesc and "compliance" in self.testDesc["meta"]:
+            self.complianceMode = True
+            if "expected_result" in self.testDesc:
+                if self.args.verbose:
+                    print("Warning: fixing conflicting compliance mode in test.desc")
+                self.testDesc.pop("expected_result")
+        else:
+            self.complianceMode = False
+
     def skipTest(self):
         """Check if the test is skipped due to test type or profile selection."""
         expectedFailure = self.testDesc["expected_failure"]
@@ -96,7 +145,9 @@
     def testResult(self, tosaGraphResult, graphMessage=None):
         """Work out test result based on graph result and output files."""
         expectedFailure = self.testDesc["expected_failure"]
-        print_result_line = True
+        print_check_result = False
+
+        sutModule = self.__module__
 
         if tosaGraphResult == TosaTestRunner.TosaGraphResult.TOSA_VALID:
             if expectedFailure:
@@ -107,8 +158,25 @@
                 # but overriding this with any failures found
                 result = TosaTestRunner.Result.EXPECTED_PASS
                 messages = []
+
+                # Go through each output result checking it
                 for resultNum, resultFileName in enumerate(self.testDesc["ofm_file"]):
-                    if "expected_result_file" in self.testDesc:
+                    resultFilePath = self.testDirPath / resultFileName
+
+                    # Work out the file to check against (if any)
+                    if self.complianceMode and sutModule != TOSA_REFCOMPLIANCE_RUNNER:
+                        conformanceFilePath = getRunnerResultFilePath(
+                            resultFilePath, TOSA_REFCOMPLIANCE_RUNNER
+                        )
+                        if isComplianceModeDotProduct(self.testDesc):
+                            conformanceBoundsPath = getBoundsResultFilePath(
+                                resultFilePath, TOSA_REFCOMPLIANCE_RUNNER
+                            )
+                        else:
+                            # Not expecting a bounds file for this test
+                            conformanceBoundsPath = None
+                    elif "expected_result_file" in self.testDesc:
+                        conformanceBoundsPath = None
                         try:
                             conformanceFilePath = (
                                 self.testDirPath
@@ -123,15 +191,20 @@
                             print(msg)
                             break
                     else:
+                        # Nothing to check against
                         conformanceFilePath = None
-                    resultFilePath = self.testDirPath / resultFileName
+                        conformanceBoundsPath = None
 
                     if conformanceFilePath:
-                        print_result_line = False  # Checker will print one for us
+                        print_check_result = True  # Result from checker
                         chkResult, tolerance, msg = test_check(
                             conformanceFilePath,
                             resultFilePath,
                             test_name=self.testName,
+                            test_desc=self.testDesc,
+                            bnd_result_path=conformanceBoundsPath,
+                            ofm_name=self.testDesc["ofm_name"][resultNum],
+                            verify_lib_path=self.verify_lib_path,
                         )
                         # Change EXPECTED_PASS assumption if we have any failures
                         if chkResult != 0:
@@ -143,18 +216,31 @@
                         # No conformance file to verify, just check results file exists
                         if not resultFilePath.is_file():
                             result = TosaTestRunner.Result.UNEXPECTED_FAILURE
-                            msg = "Results file is missing: {}".format(resultFilePath)
+                            msg = f"Results file is missing: {resultFilePath}"
                             messages.append(msg)
                             print(msg)
 
                     if resultFilePath.is_file():
                         # Move the resultFilePath to allow subsequent system under
                         # tests to create them and to test they have been created
-                        resultFilePath = resultFilePath.rename(
-                            resultFilePath.with_suffix(
-                                ".{}{}".format(self.__module__, resultFilePath.suffix)
-                            )
+                        # and to enable compliance testing against refmodel results
+                        resultFilePath.rename(
+                            getRunnerResultFilePath(resultFilePath, sutModule)
                         )
+                        if (
+                            isComplianceModeDotProduct(self.testDesc)
+                            and sutModule == TOSA_REFCOMPLIANCE_RUNNER
+                        ):
+                            boundsFilePath = getBoundsResultFilePath(resultFilePath)
+                            if boundsFilePath.is_file():
+                                boundsFilePath = boundsFilePath.rename(
+                                    getBoundsResultFilePath(resultFilePath, sutModule)
+                                )
+                            else:
+                                result = TosaTestRunner.Result.INTERNAL_ERROR
+                                msg = f"Internal error: Missing expected dot product compliance bounds file {boundsFilePath}"
+                                messages.append(msg)
+                                print(msg)
 
                 resultMessage = "\n".join(messages) if len(messages) > 0 else None
         else:
@@ -168,16 +254,14 @@
                 result = TosaTestRunner.Result.UNEXPECTED_FAILURE
                 resultMessage = graphMessage
 
-        if print_result_line:
-            if (
-                result == TosaTestRunner.Result.EXPECTED_FAILURE
-                or result == TosaTestRunner.Result.EXPECTED_PASS
-            ):
-                print_color(
-                    LogColors.GREEN, "Result code PASS {}".format(self.testName)
-                )
-            else:
-                print_color(LogColors.RED, "Result code FAIL {}".format(self.testName))
+        status = "Result" if print_check_result else "Result code"
+        if (
+            result == TosaTestRunner.Result.EXPECTED_FAILURE
+            or result == TosaTestRunner.Result.EXPECTED_PASS
+        ):
+            print_color(LogColors.GREEN, f"{sutModule}: {status} PASS {self.testName}")
+        else:
+            print_color(LogColors.RED, f"{sutModule}: {status} FAIL {self.testName}")
 
         return result, resultMessage
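
For completeness, a sketch of the extended checker call shape used above
(paths and test data are hypothetical; the keyword names come from the
call site in testResult(), and testDesc/verify_lib_path are assumed to
be in scope):

    from pathlib import Path
    from checker.tosa_result_checker import test_check

    chkResult, tolerance, msg = test_check(
        Path("tests/add/result_0.runner.example_ref_run.npy"),  # reference
        Path("tests/add/result_0.npy"),                         # SUT output
        test_name="add_test",
        test_desc=testDesc,       # parsed, schema-validated desc.json contents
        bnd_result_path=None,     # bounds file path only for DOT_PRODUCT tests
        ofm_name=testDesc["ofm_name"][0],
        verify_lib_path=verify_lib_path,  # found via cmf.find_tosa_file()
    )
    # chkResult of 0 means the results matched within tolerance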