Update tosa_verif_run_ref

Rename to tosa_verif_run_tests to match build_tests
Improve output and system under test support
Improve xunit support
Add results checker
Add utilities json2numpy and json2fbbin
Add set of python tests
Update README.md

Signed-off-by: Jeremy Johnson <jeremy.johnson@arm.com>
Change-Id: Ia09f8e6fd126579b3ba1c1cda95c1326802417ca
diff --git a/verif/runner/tosa_test_runner.py b/verif/runner/tosa_test_runner.py
index e8f921d..0fd7f13 100644
--- a/verif/runner/tosa_test_runner.py
+++ b/verif/runner/tosa_test_runner.py
@@ -1,68 +1,190 @@
-import os
-
-# Copyright (c) 2020, ARM Limited.
-#
-#    Licensed under the Apache License, Version 2.0 (the "License");
-#    you may not use this file except in compliance with the License.
-#    You may obtain a copy of the License at
-#
-#         http://www.apache.org/licenses/LICENSE-2.0
-#
-#    Unless required by applicable law or agreed to in writing, software
-#    distributed under the License is distributed on an "AS IS" BASIS,
-#    WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-#    See the License for the specific language governing permissions and
-#    limitations under the License.
-
+"""Template test runner class for running TOSA tests."""
+# Copyright (c) 2020-2022, ARM Limited.
+# SPDX-License-Identifier: Apache-2.0
 import json
-import shlex
-import subprocess
-from enum import IntEnum, unique
+from enum import IntEnum
+from pathlib import Path
+
+from checker.tosa_result_checker import LogColors
+from checker.tosa_result_checker import print_color
+from checker.tosa_result_checker import test_check
+from json2fbbin import json2fbbin
 
 
-def run_sh_command(args, full_cmd, capture_output=False):
-    """Utility function to run an external command. Optionally return captured stdout/stderr"""
+class TosaTestInvalid(Exception):
+    """Exception raised for errors loading test description.
 
-    # Quote the command line for printing
-    full_cmd_esc = [shlex.quote(x) for x in full_cmd]
+    Attributes:
+        path - full path to missing test description file
+        exception - underlying exception
+    """
 
-    if args.verbose:
-        print("### Running {}".format(" ".join(full_cmd_esc)))
-
-    if capture_output:
-        rc = subprocess.run(full_cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-        if rc.returncode != 0:
-            print(rc.stdout.decode("utf-8"))
-            print(rc.stderr.decode("utf-8"))
-            raise Exception(
-                "Error running command: {}.\n{}".format(
-                    " ".join(full_cmd_esc), rc.stderr.decode("utf-8")
-                )
-            )
-        return (rc.stdout, rc.stderr)
-    else:
-        rc = subprocess.run(full_cmd)
-
-    return rc.returncode
+    def __init__(self, path, exception):
+        """Initialize test not found error."""
+        self.path = path
+        self.exception = exception
+        self.message = "Invalid test, could not read test description {}: {}".format(
+            self.path, str(self.exception)
+        )
+        super().__init__(self.message)
 
 
 class TosaTestRunner:
-    def __init__(self, args, runnerArgs, testDir):
+    """TOSA Test Runner template class for systems under test."""
 
+    def __init__(self, args, runnerArgs, testDir):
+        """Initialize and load JSON meta data file."""
         self.args = args
         self.runnerArgs = runnerArgs
         self.testDir = testDir
+        self.testName = Path(self.testDir).name
 
-        # Load the json test file
-        with open(os.path.join(testDir, "desc.json"), "r") as fd:
-            self.testDesc = json.load(fd)
+        # Check if we want to run binary and if it's already converted
+        descFilePath = Path(testDir, "desc.json")
+        descBinFilePath = Path(testDir, "desc_binary.json")
+        if args.binary:
+            if descBinFilePath.is_file():
+                descFilePath = descBinFilePath
 
-    def runModel(self):
+        try:
+            # Load the json test file
+            with open(descFilePath, "r") as fd:
+                self.testDesc = json.load(fd)
+        except Exception as e:
+            raise TosaTestInvalid(str(descFilePath), e)
+
+        # Convert to binary if needed
+        tosaFilePath = Path(testDir, self.testDesc["tosa_file"])
+        if args.binary and tosaFilePath.suffix == ".json":
+            # Convert tosa JSON to binary
+            json2fbbin.json_to_fbbin(
+                Path(args.flatc_path),
+                Path(args.operator_fbs),
+                tosaFilePath,
+                Path(testDir),
+            )
+            # Write new desc_binary file
+            self.testDesc["tosa_file"] = tosaFilePath.stem + ".tosa"
+            with open(descBinFilePath, "w") as fd:
+                json.dump(self.testDesc, fd, indent=2)
+            descFilePath = descBinFilePath
+
+        # Set location of desc.json (or desc_binary.json) file in use
+        self.descFile = str(descFilePath)
+
+    def skipTest(self):
+        """Check if the test is skipped due to test type selection."""
+        expectedFailure = self.testDesc["expected_failure"]
+        if self.args.test_type == "negative" and not expectedFailure:
+            return True
+        elif self.args.test_type == "positive" and expectedFailure:
+            return True
+        return False
+
+    def runTestGraph(self):
+        """Override with function that calls system under test."""
         pass
 
+    def testResult(self, tosaGraphResult, graphMessage=None):
+        """Work out test result based on graph result and output files."""
+        expectedFailure = self.testDesc["expected_failure"]
+        print_result_line = True
+
+        if tosaGraphResult == TosaTestRunner.TosaGraphResult.TOSA_VALID:
+            if expectedFailure:
+                result = TosaTestRunner.Result.UNEXPECTED_PASS
+                resultMessage = "Expected failure test incorrectly passed"
+            else:
+                # Work through all the results produced by the testing, assuming success
+                # but overriding this with any failures found
+                result = TosaTestRunner.Result.EXPECTED_PASS
+                messages = []
+                for resultNum, resultFileName in enumerate(self.testDesc["ofm_file"]):
+                    if "expected_result_file" in self.testDesc:
+                        try:
+                            conformanceFile = Path(
+                                self.testDir,
+                                self.testDesc["expected_result_file"][resultNum],
+                            )
+                        except IndexError:
+                            result = TosaTestRunner.Result.INTERNAL_ERROR
+                            msg = "Internal error: Missing expected_result_file {} in {}".format(
+                                resultNum, self.descFile
+                            )
+                            messages.append(msg)
+                            print(msg)
+                            break
+                    else:
+                        conformanceFile = None
+                    resultFile = Path(self.testDir, resultFileName)
+
+                    if conformanceFile:
+                        print_result_line = False  # Checker will print one for us
+                        chkResult, tolerance, msg = test_check(
+                            str(conformanceFile),
+                            str(resultFile),
+                            test_name=self.testName,
+                        )
+                        # Change EXPECTED_PASS assumption if we have any failures
+                        if chkResult != 0:
+                            result = TosaTestRunner.Result.UNEXPECTED_FAILURE
+                            messages.append(msg)
+                            if self.args.verbose:
+                                print(msg)
+                    else:
+                        # No conformance file to verify, just check results file exists
+                        if not resultFile.is_file():
+                            result = TosaTestRunner.Result.UNEXPECTED_FAILURE
+                            msg = "Results file is missing: {}".format(resultFile)
+                            messages.append(msg)
+                            print(msg)
+
+                    if resultFile.is_file():
+                        # Move the resultFile to allow subsequent systems under
+                        # test to create them and to check they have been created
+                        resultFile = resultFile.rename(
+                            resultFile.with_suffix(
+                                ".{}{}".format(self.__module__, resultFile.suffix)
+                            )
+                        )
+
+                resultMessage = "\n".join(messages) if len(messages) > 0 else None
+        else:
+            if (
+                expectedFailure
+                and tosaGraphResult == TosaTestRunner.TosaGraphResult.TOSA_ERROR
+            ):
+                result = TosaTestRunner.Result.EXPECTED_FAILURE
+                resultMessage = None
+            else:
+                result = TosaTestRunner.Result.UNEXPECTED_FAILURE
+                resultMessage = graphMessage
+
+        if print_result_line:
+            if (
+                result == TosaTestRunner.Result.EXPECTED_FAILURE
+                or result == TosaTestRunner.Result.EXPECTED_PASS
+            ):
+                print_color(LogColors.GREEN, "Results PASS {}".format(self.testName))
+            else:
+                print_color(LogColors.RED, "Results FAIL {}".format(self.testName))
+
+        return result, resultMessage
+
     class Result(IntEnum):
+        """Test result codes."""
+
         EXPECTED_PASS = 0
         EXPECTED_FAILURE = 1
         UNEXPECTED_PASS = 2
         UNEXPECTED_FAILURE = 3
         INTERNAL_ERROR = 4
+        SKIPPED = 5
+
+    class TosaGraphResult(IntEnum):
+        """The tosa_graph_result codes."""
+
+        TOSA_VALID = 0
+        TOSA_UNPREDICTABLE = 1
+        TOSA_ERROR = 2
+        OTHER_ERROR = 3