diff --git a/changelog-entries/440.md b/changelog-entries/440.md new file mode 100644 index 000000000..dca4d5a53 --- /dev/null +++ b/changelog-entries/440.md @@ -0,0 +1 @@ +- Archive `precice-*-iterations.log` files into `iterations-logs/` and compare them by SHA-256 hash against a reference sidecar (`.iterations-hashes.json`) for implicit-coupling regression checks (fixes #440). diff --git a/tools/tests/README.md b/tools/tests/README.md index 5675c47b6..b54ae28f2 100644 --- a/tools/tests/README.md +++ b/tools/tests/README.md @@ -105,7 +105,7 @@ In this case, building and running seems to work out, but the tests fail because The easiest way to debug a systemtest run is first to have a look at the output written into the action on GitHub. If this does not provide enough hints, the next step is to download the generated `system_tests_run__` artifact. Note that by default this will only be generated if the systemtests fail. -Inside the archive, a test-specific subfolder like `flow-over-heated-plate_fluid-openfoam-solid-fenics_2023-11-19-211723` contains two log files: a `stderr.log` and `stdout.log`. This can be a starting point for a further investigation. +Inside the archive, a test-specific subfolder like `flow-over-heated-plate_fluid-openfoam-solid-fenics_2023-11-19-211723` contains two log files: a `stderr.log` and `stdout.log`. This can be a starting point for further investigation. For implicit-coupling runs, `precice-*-iterations.log` files are collected into `iterations-logs/` and compared by SHA-256 hash against reference data (when a corresponding `.iterations-hashes.json` sidecar exists); a mismatch fails the test with a clear error message. 
## Adding new tests diff --git a/tools/tests/systemtests/Systemtest.py b/tools/tests/systemtests/Systemtest.py index 6abc5a029..d4a099900 100644 --- a/tools/tests/systemtests/Systemtest.py +++ b/tools/tests/systemtests/Systemtest.py @@ -1,5 +1,7 @@ +import hashlib +import json import subprocess -from typing import List, Dict, Optional +from typing import List, Dict, Optional, Tuple from jinja2 import Environment, FileSystemLoader from dataclasses import dataclass, field import shutil @@ -21,6 +23,7 @@ GLOBAL_TIMEOUT = 900 SHORT_TIMEOUT = 10 +ITERATIONS_LOGS_DIR = "iterations-logs" def slugify(value, allow_unicode=False): @@ -513,6 +516,89 @@ def __write_logs(self, stdout_data: List[str], stderr_data: List[str]): with open(self.system_test_dir / "stderr.log", 'w') as stderr_file: stderr_file.write("\n".join(stderr_data)) + + @staticmethod + def _sha256_file(path: Path) -> str: + """Compute SHA-256 hex digest of a file.""" + h = hashlib.sha256() + mv = memoryview(bytearray(128 * 1024)) + with open(path, 'rb', buffering=0) as f: + while n := f.readinto(mv): + h.update(mv[:n]) + return h.hexdigest() + + def _collect_iterations_logs(self, system_test_dir: Path) -> List[Tuple[str, Path]]: + """ + Collect precice-*-iterations.log files from each case directory. + Returns list of (relative_path, absolute_path) e.g. + ("solid-fenics/precice-Solid-iterations.log", path). + """ + collected = [] + for case in self.case_combination.cases: + case_dir = system_test_dir / Path(case.path).name + if not case_dir.exists(): + continue + for log_file in case_dir.glob("precice-*-iterations.log"): + if log_file.is_file(): + rel = f"{Path(case.path).name}/{log_file.name}" + collected.append((rel, log_file)) + return collected + + def __archive_iterations_logs(self): + """ + Copy precice-*-iterations.log files from case directories into + iterations-logs/ so they are available in CI artifacts (fixes #440). 
+ Prefixes filenames with the case name when multiple cases are present + to avoid collisions. + """ + collected = self._collect_iterations_logs(self.system_test_dir) + if not collected: + logging.debug(f"No iterations logs found for {self}, skipping archiving") + return + dest_dir = self.system_test_dir / ITERATIONS_LOGS_DIR + dest_dir.mkdir(exist_ok=True) + for rel, src in collected: + dest_name = Path(rel).name + if len(collected) > 1: + prefix = Path(rel).parent.name + "_" + dest_name = prefix + dest_name + shutil.copy2(src, dest_dir / dest_name) + logging.debug(f"Archived {len(collected)} iterations log(s) to {dest_dir} for {self}") + + def __compare_iterations_hashes(self) -> tuple[bool, str | None]: + """ + Compare current iterations.log SHA-256 hashes against a reference sidecar + (.iterations-hashes.json). Returns (True, None) if comparison passes or sidecar + is absent. Returns (False, reason) if any hash mismatches or unexpected logs found. + """ + sidecar = self.reference_result.path.with_suffix(".iterations-hashes.json") + if not sidecar.exists(): + return True, None + try: + ref_hashes = json.loads(sidecar.read_text()) + except (json.JSONDecodeError, OSError) as e: + logging.warning(f"Could not read iterations hashes from {sidecar}: {e}") + return True, None + if not ref_hashes: + return True, None + collected = self._collect_iterations_logs(self.system_test_dir) + current = {rel: self._sha256_file(p) for rel, p in collected} + for rel, expected in ref_hashes.items(): + if rel not in current: + msg = f"Missing iterations log {rel!r} (expected from reference)" + logging.critical(f"{msg}; {self} fails") + return False, msg + if current[rel] != expected: + msg = f"Hash mismatch for {rel!r} (iterations.log regression)" + logging.critical(f"{msg}; {self} fails") + return False, msg + if len(current) != len(ref_hashes): + extra = set(current) - set(ref_hashes) + msg = f"Unexpected iterations log(s) {extra!r}" + logging.critical(f"{msg}; {self} fails") + 
return False, msg + return True, None + def __prepare_for_run(self, run_directory: Path): """ Prepares the run_directory with folders and datastructures needed for every systemtest execution @@ -562,6 +648,20 @@ def run(self, run_directory: Path): solver_time=docker_run_result.runtime, fieldcompare_time=0) + self.__archive_iterations_logs() + hash_ok, hash_error = self.__compare_iterations_hashes() + if not hash_ok: + self.__write_logs(std_out, std_err) + logging.critical(f"Iterations.log hash comparison failed: {hash_error}; {self} failed") + return SystemtestResult( + False, + std_out, + std_err, + self, + build_time=docker_build_result.runtime, + solver_time=docker_run_result.runtime, + fieldcompare_time=0) + fieldcompare_result = self._run_field_compare() std_out.extend(fieldcompare_result.stdout_data) std_err.extend(fieldcompare_result.stderr_data) @@ -625,6 +725,13 @@ def run_for_reference_results(self, run_directory: Path): solver_time=docker_run_result.runtime, fieldcompare_time=0) + self.__archive_iterations_logs() + collected = self._collect_iterations_logs(self.system_test_dir) + if collected: + hashes = {rel: self._sha256_file(p) for rel, p in collected} + sidecar = self.reference_result.path.with_suffix(".iterations-hashes.json") + sidecar.write_text(json.dumps(hashes, sort_keys=True, indent=2)) + logging.info(f"Wrote iterations hashes for {self.reference_result.path.name}") self.__write_logs(std_out, std_err) return SystemtestResult( True,