Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions changelog-entries/440.md
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
- Archive `precice-*-iterations.log` files into `iterations-logs/` and compare them by SHA-256 hash against reference sidecar (`.iterations-hashes.json`) for implicit-coupling regression checks (fixes #440).
2 changes: 1 addition & 1 deletion tools/tests/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -105,7 +105,7 @@ In this case, building and running seems to work out, but the tests fail because

The easiest way to debug a systemtest run is first to have a look at the output written into the action on GitHub.
If this does not provide enough hints, the next step is to download the generated `system_tests_run_<run_id>_<run_attempt>` artifact. Note that by default this will only be generated if the systemtests fail.
Inside the archive, a test-specific subfolder like `flow-over-heated-plate_fluid-openfoam-solid-fenics_2023-11-19-211723` contains two log files: a `stderr.log` and `stdout.log`. This can be a starting point for a further investigation.
Inside the archive, a test-specific subfolder like `flow-over-heated-plate_fluid-openfoam-solid-fenics_2023-11-19-211723` contains two log files: a `stderr.log` and `stdout.log`. This can be a starting point for further investigation. For implicit-coupling runs, `precice-*-iterations.log` files are collected into `iterations-logs/` and compared by SHA-256 hash against reference data (when a corresponding `.iterations-hashes.json` sidecar exists); a mismatch fails the test with a clear error message.

## Adding new tests

Expand Down
108 changes: 107 additions & 1 deletion tools/tests/systemtests/Systemtest.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,7 @@
import hashlib
import json
import subprocess
from typing import List, Dict, Optional
from typing import List, Dict, Optional, Tuple
from jinja2 import Environment, FileSystemLoader
from dataclasses import dataclass, field
import shutil
Expand All @@ -21,6 +23,7 @@

GLOBAL_TIMEOUT = 900
SHORT_TIMEOUT = 10
ITERATIONS_LOGS_DIR = "iterations-logs"


def slugify(value, allow_unicode=False):
Expand Down Expand Up @@ -513,6 +516,89 @@ def __write_logs(self, stdout_data: List[str], stderr_data: List[str]):
with open(self.system_test_dir / "stderr.log", 'w') as stderr_file:
stderr_file.write("\n".join(stderr_data))


@staticmethod
def _sha256_file(path: Path) -> str:
"""Compute SHA-256 hex digest of a file."""
h = hashlib.sha256()
mv = memoryview(bytearray(128 * 1024))
with open(path, 'rb', buffering=0) as f:
while n := f.readinto(mv):
h.update(mv[:n])
return h.hexdigest()

def _collect_iterations_logs(self, system_test_dir: Path) -> List[Tuple[str, Path]]:
"""
Collect precice-*-iterations.log files from each case directory.
Returns list of (relative_path, absolute_path) e.g.
("solid-fenics/precice-Solid-iterations.log", path).
"""
collected = []
for case in self.case_combination.cases:
case_dir = system_test_dir / Path(case.path).name
if not case_dir.exists():
continue
for log_file in case_dir.glob("precice-*-iterations.log"):
if log_file.is_file():
rel = f"{Path(case.path).name}/{log_file.name}"
collected.append((rel, log_file))
return collected

def __archive_iterations_logs(self):
    """
    Archive the precice-*-iterations.log files of all case directories
    into the iterations-logs/ folder so CI artifacts pick them up
    (fixes #440).

    When more than one log was found, each destination filename is
    prefixed with its case directory name to keep the names unique.
    """
    logs = self._collect_iterations_logs(self.system_test_dir)
    if not logs:
        logging.debug(f"No iterations logs found for {self}, skipping archiving")
        return
    target = self.system_test_dir / ITERATIONS_LOGS_DIR
    target.mkdir(exist_ok=True)
    needs_prefix = len(logs) > 1
    for rel, source in logs:
        rel_path = Path(rel)
        if needs_prefix:
            destination_name = f"{rel_path.parent.name}_{rel_path.name}"
        else:
            destination_name = rel_path.name
        shutil.copy2(source, target / destination_name)
    logging.debug(f"Archived {len(logs)} iterations log(s) to {target} for {self}")

def __compare_iterations_hashes(self) -> bool:
    """
    Compare current iterations.log SHA-256 hashes against a reference sidecar
    (.iterations-hashes.json). Returns True if comparison passes or sidecar is absent.
    Returns False if any hash mismatches, reference logs are missing, or
    unexpected logs are found.

    Unlike a fail-fast check, every deviation is logged before returning so
    a single CI run reports the complete set of regressions.
    """
    sidecar = self.reference_result.path.with_suffix(".iterations-hashes.json")
    if not sidecar.exists():
        # No reference data recorded for this test yet -> nothing to compare.
        return True
    try:
        ref_hashes = json.loads(sidecar.read_text())
    except (json.JSONDecodeError, OSError) as e:
        # Best-effort: an unreadable sidecar must not fail the whole run.
        logging.warning(f"Could not read iterations hashes from {sidecar}: {e}")
        return True
    if not ref_hashes:
        return True
    current = {rel: self._sha256_file(p)
               for rel, p in self._collect_iterations_logs(self.system_test_dir)}
    passed = True
    for rel, expected in ref_hashes.items():
        actual = current.get(rel)
        if actual is None:
            logging.critical(
                f"Missing iterations log {rel!r} (expected from reference); {self} fails")
            passed = False
        elif actual != expected:
            # Include both digests so the regression can be triaged from the log alone.
            logging.critical(
                f"Hash mismatch for {rel!r}: expected {expected}, got {actual} "
                f"(iterations.log regression); {self} fails")
            passed = False
    extra = set(current) - set(ref_hashes)
    if extra:
        logging.critical(
            f"Unexpected iterations log(s) {extra}; {self} fails")
        passed = False
    return passed

def __prepare_for_run(self, run_directory: Path):
"""
Prepares the run_directory with folders and datastructures needed for every systemtest execution
Expand Down Expand Up @@ -562,6 +648,19 @@ def run(self, run_directory: Path):
solver_time=docker_run_result.runtime,
fieldcompare_time=0)

self.__archive_iterations_logs()
if not self.__compare_iterations_hashes():
self.__write_logs(std_out, std_err)
logging.critical(f"Iterations.log hash comparison failed (regression), {self} failed")
return SystemtestResult(
False,
std_out,
std_err,
self,
build_time=docker_build_result.runtime,
solver_time=docker_run_result.runtime,
fieldcompare_time=0)

fieldcompare_result = self._run_field_compare()
std_out.extend(fieldcompare_result.stdout_data)
std_err.extend(fieldcompare_result.stderr_data)
Expand Down Expand Up @@ -625,6 +724,13 @@ def run_for_reference_results(self, run_directory: Path):
solver_time=docker_run_result.runtime,
fieldcompare_time=0)

self.__archive_iterations_logs()
collected = self._collect_iterations_logs(self.system_test_dir)
if collected:
hashes = {rel: self._sha256_file(p) for rel, p in collected}
sidecar = self.reference_result.path.with_suffix(".iterations-hashes.json")
sidecar.write_text(json.dumps(hashes, sort_keys=True, indent=2))
logging.info(f"Wrote iterations hashes for {self.reference_result.path.name}")
self.__write_logs(std_out, std_err)
return SystemtestResult(
True,
Expand Down