diff --git a/CHANGELOG.md b/CHANGELOG.md
index 809892d..5d06350 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -9,6 +9,9 @@
 This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
 
 ## [Unreleased]
 
+### Added
+- Add `--report` flag to write out a summary JSON file
+
 ### Fixed
 - Properly catch failing test cases
diff --git a/nftest/NFTestAssert.py b/nftest/NFTestAssert.py
index 994868f..8ff47b3 100644
--- a/nftest/NFTestAssert.py
+++ b/nftest/NFTestAssert.py
@@ -1,20 +1,59 @@
 """NF Test assert"""
 import datetime
+import glob
 import subprocess
-from typing import Callable, Optional
+
 from logging import getLogger, DEBUG
+from pathlib import Path
+from typing import Callable, Optional, List
 
-from nftest.common import calculate_checksum, resolve_single_path, popen_with_logger
+from nftest.common import calculate_checksum, popen_with_logger
 from nftest.NFTestENV import NFTestENV
 
 
-class NotUpdatedError(Exception):
-    "An exception indicating that file was not updated."
+class NFTestAssertionError(Exception):
+    """Base class for assertions."""
+
+
+class NotUpdatedError(NFTestAssertionError):
+    """An exception indicating that file was not updated."""
+    def __init__(self, path: Path):
+        self.path = path
+
+    def __str__(self) -> str:
+        return f"{self.path} was not modified by this pipeline"
+
+class MismatchedContentsError(NFTestAssertionError):
+    """An exception that the contents are mismatched."""
+    def __init__(self, actual: Path, expect: Path):
+        self.actual = actual
+        self.expect = expect
+
+    def __str__(self) -> str:
+        return f"File comparison failed between {self.actual} and {self.expect}"
+
+class NonSpecificGlobError(NFTestAssertionError):
+    """An exception that the glob did not resolve to a single file."""
+    def __init__(self, globstr: str, paths: List[str]):
+        self.globstr = globstr
+        self.paths = paths
+
+    def __str__(self) -> str:
+        if self.paths:
+            return f"Expression `{self.globstr}` resolved to multiple files: {self.paths}"
+
+        return f"Expression `{self.globstr}` did not resolve to any files"
+
+
+def resolve_single_path(path: str) -> Path:
+    """Resolve wildcards in path and ensure only a single path is identified"""
+    expanded_paths = glob.glob(path)
+    if len(expanded_paths) != 1:
+        raise NonSpecificGlobError(path, expanded_paths)
 
-class MismatchedContentsError(Exception):
-    "An exception that the contents are mismatched."
+    return Path(expanded_paths[0])
 
 
 class NFTestAssert:
@@ -58,16 +97,14 @@ def perform_assertions(self):
         self._logger.debug("Actual mod time: %s", file_mod_time)
 
         if self.startup_time >= file_mod_time:
-            raise NotUpdatedError(
-                f"{str(self.actual)} was not modified by this pipeline"
-            )
+            raise NotUpdatedError(actual_path)
 
         # Assert that the files match
         if not self.get_assert_method()(actual_path, expect_path):
             self._logger.error("Assertion failed")
             self._logger.error("Actual: %s", self.actual)
             self._logger.error("Expect: %s", self.expect)
-            raise MismatchedContentsError("File comparison failed")
+            raise MismatchedContentsError(actual_path, expect_path)
 
         self._logger.debug("Assertion passed")
 
@@ -97,4 +134,4 @@ def md5_function(actual, expect):
             return md5_function
 
         self._logger.error("assert method %s unknown.", self.method)
-        raise ValueError(f"assert method {self.method} unknown.")
+        raise NFTestAssertionError(f"assert method {self.method} unknown.")
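Reviewer note: `resolve_single_path` moves here from `nftest/common.py` (see that file's diff below) and now raises `NonSpecificGlobError` instead of a bare `ValueError` when a glob matches zero or several files. A minimal usage sketch against the new API (the glob pattern is hypothetical):

    from nftest.NFTestAssert import NonSpecificGlobError, resolve_single_path

    try:
        # Requires the wildcard to match exactly one file
        actual = resolve_single_path("output/*.vcf")  # hypothetical pattern
    except NonSpecificGlobError as err:
        # __str__ distinguishes zero matches ("did not resolve to any files")
        # from ambiguous ones ("resolved to multiple files: [...]")
        print(err)
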
diff --git a/nftest/NFTestCase.py b/nftest/NFTestCase.py
index a1491af..7cfcc54 100644
--- a/nftest/NFTestCase.py
+++ b/nftest/NFTestCase.py
@@ -14,7 +14,7 @@
 from pathlib import Path
 from typing import Callable, List, TYPE_CHECKING, Tuple
 
-from nftest.common import remove_nextflow_logs, popen_with_logger
+from nftest.common import remove_nextflow_logs, popen_with_logger, TestResult
 from nftest.NFTestENV import NFTestENV
 from nftest.syslog import SyslogServer
 
@@ -66,6 +66,7 @@ def __init__(
         self.clean_logs = clean_logs
         self.skip = skip
         self.verbose = verbose
+        self.status = TestResult.PENDING
 
     def resolve_actual(self, asserts: List[NFTestAssert] = None):
         """Resolve the file path for actual file"""
@@ -101,10 +102,12 @@ def test(self) -> bool:
         """Run test cases."""
         if self.skip:
             self._logger.info(" [ skipped ]")
+            self.status = TestResult.SKIPPED
             return True
 
         nextflow_process = self.submit()
         if nextflow_process.returncode != 0:
+            self.status = TestResult.ERRORED
             self._logger.error(" [ failed ]")
             return False
 
@@ -114,8 +117,10 @@
         except Exception as error:
             self._logger.error(error.args)
             self._logger.error(" [ failed ]")
+            self.status = TestResult.FAILED
             raise error
         self._logger.info(" [ succeed ]")
+        self.status = TestResult.PASSED
         return True
 
     def submit(self) -> sp.CompletedProcess:
diff --git a/nftest/NFTestReport.py b/nftest/NFTestReport.py
new file mode 100644
index 0000000..c742cf5
--- /dev/null
+++ b/nftest/NFTestReport.py
@@ -0,0 +1,70 @@
+"""Test summary report."""
+
+import datetime
+import json
+import os
+
+from contextlib import contextmanager
+from dataclasses import dataclass, field, asdict
+from pathlib import Path
+from typing import Dict
+
+from nftest.common import TestResult
+from nftest.NFTestCase import NFTestCase
+
+
+class DateEncoder(json.JSONEncoder):
+    """Simple encoder that handles datetimes."""
+    def default(self, o):
+        if isinstance(o, datetime.datetime):
+            return o.isoformat()
+
+        return super().default(o)
+
+
+@dataclass
+class NFTestReport:
+    """Test summary report."""
+
+    start: datetime.datetime = field(
+        default_factory=lambda: datetime.datetime.now(tz=datetime.timezone.utc)
+    )
+
+    passed_tests: Dict[str, float] = field(default_factory=dict)
+    skipped_tests: Dict[str, float] = field(default_factory=dict)
+    errored_tests: Dict[str, float] = field(default_factory=dict)
+    failed_tests: Dict[str, float] = field(default_factory=dict)
+
+    def __bool__(self):
+        return not self.failed_tests
+
+    @contextmanager
+    def track_test(self, test: NFTestCase):
+        """Context manager to track test statuses and runtimes."""
+        start_time = datetime.datetime.now()
+
+        result_map = {
+            TestResult.PASSED: self.passed_tests,
+            TestResult.SKIPPED: self.skipped_tests,
+            TestResult.ERRORED: self.errored_tests,
+            TestResult.FAILED: self.failed_tests,
+            TestResult.PENDING: {}
+        }
+
+        try:
+            yield
+        finally:
+            duration = (datetime.datetime.now() - start_time).total_seconds()
+            result_map[test.status][test.name] = duration
+
+    def write_report(self, reportfile: Path):
+        """Write the report out to the given file."""
+        data = asdict(self)
+
+        # Add extra parameters
+        data["cpus"] = os.cpu_count()
+        data["end"] = datetime.datetime.now(tz=datetime.timezone.utc)
+        data["success"] = not self.failed_tests and not self.errored_tests
+
+        with reportfile.open(mode="wt", encoding="utf-8") as outfile:
+            json.dump(data, outfile, indent=2, cls=DateEncoder)
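Reviewer note: `NFTestReport.__bool__` only checks `failed_tests`, whereas the `"success"` field written by `write_report` also requires `errored_tests` to be empty. Worth keeping in mind when truth-testing a report object; a small sketch of the discrepancy:

    from nftest.NFTestReport import NFTestReport

    report = NFTestReport()
    report.errored_tests["case_a"] = 1.23  # hypothetical errored case, runtime in seconds

    assert bool(report)  # True: __bool__ ignores errored tests
    # ...yet write_report() would emit "success": false for this same report
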
runtimes.""" + start_time = datetime.datetime.now() + + result_map = { + TestResult.PASSED: self.passed_tests, + TestResult.SKIPPED: self.skipped_tests, + TestResult.ERRORED: self.errored_tests, + TestResult.FAILED: self.failed_tests, + TestResult.PENDING: {} + } + + try: + yield + finally: + duration = (datetime.datetime.now() - start_time).total_seconds() + result_map[test.status][test.name] = duration + + def write_report(self, reportfile: Path): + """Write the report out to the given file.""" + data = asdict(self) + + # Add extra parameters + data["cpus"] = os.cpu_count() + data["end"] = datetime.datetime.now(tz=datetime.timezone.utc) + data["success"] = not self.failed_tests and not self.errored_tests + + with reportfile.open(mode="wt", encoding="utf-8") as outfile: + json.dump(data, outfile, indent=2, cls=DateEncoder) diff --git a/nftest/NFTestRunner.py b/nftest/NFTestRunner.py index 80238c4..e0377fe 100644 --- a/nftest/NFTestRunner.py +++ b/nftest/NFTestRunner.py @@ -2,24 +2,27 @@ import shutil from logging import getLogger +from pathlib import Path from typing import List import yaml from nftest.NFTestGlobal import NFTestGlobal -from nftest.NFTestAssert import NFTestAssert +from nftest.NFTestAssert import NFTestAssert, NFTestAssertionError from nftest.NFTestCase import NFTestCase from nftest.NFTestENV import NFTestENV +from nftest.NFTestReport import NFTestReport from nftest.common import validate_yaml, validate_reference class NFTestRunner: """This holds all test cases and global settings from a single yaml file.""" - def __init__(self, cases: List[NFTestCase] = None): + def __init__(self, cases: List[NFTestCase] = None, report: bool = False): """Constructor""" self._global = None self._env = NFTestENV() self._logger = getLogger("NFTest") self.cases = cases or [] + self.save_report = report def load_from_config(self, config_yaml: str, target_cases: List[str]): """Load test info from config file.""" @@ -50,19 +53,31 @@ def load_from_config(self, config_yaml: str, target_cases: List[str]): continue self.cases.append(test_case) - def main(self): + def main(self) -> int: """Main entrance""" self.print_prolog() failure_count = 0 + report = NFTestReport() for case in self.cases: - try: - if not case.test(): + with report.track_test(case): + try: + if not case.test(): + failure_count += 1 + except NFTestAssertionError as err: + # In case of failed test case, continue with other cases + self._logger.debug(err) failure_count += 1 - except AssertionError: - # In case of failed test case, continue with other cases - failure_count += 1 + except Exception as err: + # Unhandled error + self._logger.exception(err) + raise + + assert failure_count == len(report.failed_tests) + len(report.errored_tests) + + if self.save_report: + report.write_report(Path(self._env.NFT_LOG).with_suffix(".json")) return failure_count diff --git a/nftest/__main__.py b/nftest/__main__.py index c82349f..f100175 100644 --- a/nftest/__main__.py +++ b/nftest/__main__.py @@ -63,6 +63,11 @@ def add_subparser_run(subparsers: argparse._SubParsersAction): default=None, nargs="?", ) + parser.add_argument( + "--report", + action="store_true", + help="Save out a detailed JSON test report alongside the log file" + ) parser.add_argument( "TEST_CASES", type=str, help="Exact test case to run.", nargs="*" ) @@ -73,7 +78,7 @@ def run(args): """Run""" find_config_yaml(args) setup_loggers() - runner = NFTestRunner() + runner = NFTestRunner(report=args.report) runner.load_from_config(args.config_file, args.TEST_CASES) 
diff --git a/nftest/common.py b/nftest/common.py
index ff2ee62..b3e660e 100644
--- a/nftest/common.py
+++ b/nftest/common.py
@@ -1,12 +1,12 @@
 """Common functions"""
 import argparse
-import re
-from typing import Tuple
+import enum
 import glob
 import hashlib
 import logging
 import os
+import re
 import selectors
 import shutil
 import subprocess
@@ -14,12 +14,22 @@
 import time
 
 from pathlib import Path
+from typing import Tuple
 
 from nftest import __version__
 from nftest.NFTestENV import NFTestENV
 from nftest.syslog import syslog_filter
 
 
+class TestResult(enum.Enum):
+    """Enumeration for test results."""
+    PENDING = enum.auto()
+    PASSED = enum.auto()
+    SKIPPED = enum.auto()
+    FAILED = enum.auto()
+    ERRORED = enum.auto()
+
+
 def validate_yaml(path: Path):  # pylint: disable=unused-argument
     """Validate the yaml. Potentially use yaml schema
     https://rx.codesimply.com/
@@ -37,21 +47,6 @@ def remove_nextflow_logs() -> None:
         os.remove(file)
 
 
-def resolve_single_path(path: str) -> Path:
-    """Resolve wildcards in path and ensure only a single path is identified"""
-    expanded_paths = glob.glob(path)
-
-    if not expanded_paths:
-        raise ValueError(f"Expression `{path}` did not resolve to any files")
-
-    if len(expanded_paths) > 1:
-        raise ValueError(
-            f"Expression `{path}` resolved to multiple files: {expanded_paths}"
-        )
-
-    return Path(expanded_paths[0])
-
-
 def calculate_checksum(path: Path) -> str:
     """Calculate checksum recursively.
     Args:
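Reviewer note: `TestResult.PENDING` maps to a throwaway dict inside `track_test`, so a case whose status never reaches a terminal state is silently omitted from every report bucket. A sketch of that edge case (the `NFTestCase` constructor arguments are hypothetical):

    from nftest.NFTestCase import NFTestCase
    from nftest.NFTestReport import NFTestReport

    report = NFTestReport()
    case = NFTestCase(name="demo")  # hypothetical arguments; status starts PENDING
    with report.track_test(case):
        pass                        # nothing sets case.status
    # The duration landed in the anonymous {} for PENDING, so "demo"
    # shows up in none of the four report buckets.
    assert not report.passed_tests and not report.failed_tests
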
diff --git a/test/unit/test_NFTestAssert.py b/test/unit/test_NFTestAssert.py
index e5a0b1d..3b04695 100644
--- a/test/unit/test_NFTestAssert.py
+++ b/test/unit/test_NFTestAssert.py
@@ -9,7 +9,12 @@
 
 import pytest
 
-from nftest.NFTestAssert import NFTestAssert, NotUpdatedError, MismatchedContentsError
+from nftest.NFTestAssert import (
+    NFTestAssert,
+    NotUpdatedError,
+    MismatchedContentsError,
+    NonSpecificGlobError,
+)
 
 
 @pytest.fixture(name="custom_script")
@@ -49,7 +54,7 @@ def fixture_custom_script(request, tmp_path):
 
 
 @pytest.fixture(name="method")
 def fixture_method(request):
-    "A fixture for the NFTestAssert `method` argument."
+    """A fixture for the NFTestAssert `method` argument."""
     return request.param
 
@@ -127,9 +132,9 @@ def fixture_configured_test(
 # Parameterization for the number of expected and actual files matching the
 # globs. Failure is expected for anything except 1.
 FILECOUNT_PARAMS = [
-    pytest.param(0, marks=pytest.mark.xfailgroup.with_args(ValueError)),
+    pytest.param(0, marks=pytest.mark.xfailgroup.with_args(NonSpecificGlobError)),
     1,
-    pytest.param(2, marks=pytest.mark.xfailgroup.with_args(ValueError)),
+    pytest.param(2, marks=pytest.mark.xfailgroup.with_args(NonSpecificGlobError)),
 ]
 
 
@@ -171,7 +176,7 @@ def fixture_configured_test(
 def test_nftest_assert(
     configured_test, caplog, custom_script, actual_files, file_updated
 ):
-    "Test that assertions appropriately pass or fail based on the parameters."
+    """Test that assertions appropriately pass or fail based on the parameters."""
     # Time is monotonic, right?
     assert configured_test.startup_time <= datetime.datetime.now(
diff --git a/test/unit/test_common.py b/test/unit/test_common.py
index 293d6e3..29ce561 100644
--- a/test/unit/test_common.py
+++ b/test/unit/test_common.py
@@ -5,23 +5,7 @@
 import mock
 import pytest
 
-from nftest.common import resolve_single_path, validate_reference
-
-
-@pytest.mark.parametrize(
-    "glob_return_value,case_pass", [([], False), (["a", "b"], False), (["a"], True)]
-)
-@mock.patch("glob.glob")
-def test_resolve_single_path(mock_glob, glob_return_value, case_pass):
-    """Tests for proper file identification"""
-    test_path = "/some/path"
-    mock_glob.return_value = glob_return_value
-
-    if case_pass:
-        resolve_single_path(test_path)
-    else:
-        with pytest.raises(ValueError):
-            resolve_single_path(test_path)
+from nftest.common import validate_reference
 
 
 @pytest.mark.parametrize(
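Reviewer note: for reference, a report produced by this change would have roughly the following shape (all values hypothetical; DateEncoder serializes the datetimes as ISO-8601 strings):

    EXAMPLE_REPORT = {
        "start": "2024-01-01T00:00:00+00:00",
        "passed_tests": {"case_a": 12.3},  # test name -> runtime in seconds
        "skipped_tests": {},
        "errored_tests": {},
        "failed_tests": {},
        "cpus": 8,
        "end": "2024-01-01T00:05:00+00:00",
        "success": True,                   # serialized as true in the JSON file
    }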