@@ -65,6 +65,17 @@ def _get_gpu_ids():
6565 return ["0" ]
6666
6767
68+ def _setup_worker_logging (worker_id , tmp ):
69+ """Configure per-worker file logging for easier debugging of parallel runs."""
70+ log_file = tmp / f"tests_{ worker_id } .log"
71+ handler = logging .FileHandler (log_file , mode = "w" )
72+ handler .setFormatter (logging .Formatter ("%(asctime)s %(levelname)s %(name)s: %(message)s" ))
73+ root = logging .getLogger ()
74+ root .addHandler (handler )
75+ root .setLevel (logging .INFO )
76+ logging .info ("Worker %s logging to %s" , worker_id , log_file )
77+
78+
6879def pytest_configure (config ):
6980 global _compile_only , _fake_mode
7081
@@ -90,6 +101,7 @@ def pytest_configure(config):
90101 with cached_gpu_ids .open () as f :
91102 gpu_ids = json .load (f )
92103 os .environ ["CUDA_VISIBLE_DEVICES" ] = gpu_ids [worker_num % len (gpu_ids )]
104+ _setup_worker_logging (worker_id , tmp )
93105
94106 if _compile_only :
95107 import torch
@@ -117,6 +129,24 @@ def pytest_unconfigure(config):
117129 _fake_mode = None
118130
119131
def pytest_collection_finish(session):
    """Print a summary of collected tests grouped by file and function.

    Builds a ``{file: {function: count}}`` mapping from the collected items
    (parametrized variants of one function are counted under the original
    function name) and writes it through the terminal reporter plugin.
    """
    if not session.items:
        return
    from collections import defaultdict

    counts = defaultdict(lambda: defaultdict(int))
    for item in session.items:
        file_name = item.location[0]
        # ``originalname`` strips the parametrization suffix; the attribute can
        # be missing (non-Function items) or present but None (older pytest),
        # so fall back to the raw item name in both cases.
        func_name = getattr(item, "originalname", None) or item.name
        counts[file_name][func_name] += 1
    summary = {f: dict(funcs) for f, funcs in sorted(counts.items())}
    reporter = session.config.pluginmanager.get_plugin("terminalreporter")
    # The terminal plugin is absent under ``-p no:terminal``; skip quietly
    # rather than crash collection with an AttributeError.
    if reporter is None:
        return
    total = len(session.items)
    reporter.write_line(
        f"Collected {total} tests: {json.dumps(summary, indent=2)}"
    )
148+
149+
120150@pytest .hookimpl (hookwrapper = True )
121151def pytest_runtest_setup (item ):
122152 """In --compile-only mode, swallow setup errors (e.g. fixtures allocating CUDA tensors)."""
0 commit comments