From 36f01eea41d558e2b60359bc91a9485bced692a9 Mon Sep 17 00:00:00 2001 From: Perry Date: Sat, 21 Mar 2026 21:49:29 -0700 Subject: [PATCH 1/4] Add hdf5_dtype parameter for depletion results file Thread hdf5_dtype ('float32' or 'float64', default 'float64') from Integrator/SIIntegrator through StepResult.save() to the HDF5 writer. Affects only the saved number and reaction rate datasets. --- openmc/deplete/abc.py | 30 +++++++++++++++++++++++++----- openmc/deplete/stepresult.py | 14 +++++++++++--- 2 files changed, 36 insertions(+), 8 deletions(-) diff --git a/openmc/deplete/abc.py b/openmc/deplete/abc.py index 056f7c2737a..6a202ae59e0 100644 --- a/openmc/deplete/abc.py +++ b/openmc/deplete/abc.py @@ -583,6 +583,10 @@ class Integrator(ABC): `source_rates` should be the same as the initial run. .. versionadded:: 0.15.1 + hdf5_dtype : str, optional + dtype for number and reaction rate datasets, float32 or float64 (default) + + .. versionadded:: 0.15.4 Attributes ---------- @@ -632,7 +636,12 @@ def __init__( timestep_units: str = 's', solver: str = "cram48", continue_timesteps: bool = False, + hdf5_dtype: str = 'float64', ): + if hdf5_dtype not in ('float32', 'float64'): + raise ValueError( + f"hdf5_dtype must be 'float32' or 'float64', got '{hdf5_dtype}'") + self.hdf5_dtype = hdf5_dtype if continue_timesteps and operator.prev_res is None: raise ValueError("Continuation run requires passing prev_results.") self.operator = operator @@ -895,7 +904,8 @@ def integrate( self._i_res + i, proc_time, write_rates=write_rates, - path=path + path=path, + hdf5_dtype=self.hdf5_dtype, ) # Update for next step @@ -918,7 +928,8 @@ def integrate( self._i_res + len(self), proc_time, write_rates=write_rates, - path=path + path=path, + hdf5_dtype=self.hdf5_dtype, ) self.operator.write_bos_data(len(self) + self._i_res) @@ -1116,6 +1127,10 @@ class SIIntegrator(Integrator): `source_rates` should be the same as the initial run. .. 
versionadded:: 0.15.1 + hdf5_dtype : str, optional + dtype for number and reaction rate datasets, float32 or float64 (default) + + .. versionadded:: 0.15.4 Attributes ---------- @@ -1159,12 +1174,15 @@ def __init__( n_steps: int = 10, solver: str = "cram48", continue_timesteps: bool = False, + hdf5_dtype: str = 'float64', ): check_type("n_steps", n_steps, Integral) check_greater_than("n_steps", n_steps, 0) super().__init__( operator, timesteps, power, power_density, source_rates, - timestep_units=timestep_units, solver=solver, continue_timesteps=continue_timesteps) + timestep_units=timestep_units, solver=solver, + continue_timesteps=continue_timesteps, + hdf5_dtype=hdf5_dtype) self.n_steps = n_steps def _get_bos_data_from_operator(self, step_index, step_power, n_bos): @@ -1255,7 +1273,8 @@ def integrate( self._i_res + i, proc_time, write_rates=write_rates, - path=path + path=path, + hdf5_dtype=self.hdf5_dtype, ) # Update for next step @@ -1273,7 +1292,8 @@ def integrate( self._i_res + len(self), proc_time, write_rates=write_rates, - path=path + path=path, + hdf5_dtype=self.hdf5_dtype, ) self.operator.write_bos_data(self._i_res + len(self)) diff --git a/openmc/deplete/stepresult.py b/openmc/deplete/stepresult.py index cd21df07be3..cc8bf40d636 100644 --- a/openmc/deplete/stepresult.py +++ b/openmc/deplete/stepresult.py @@ -74,6 +74,7 @@ def __init__(self): self.name_list = None self.data = None + self.hdf5_dtype = 'float64' def __repr__(self): t = self.time[0] @@ -344,17 +345,18 @@ def _write_hdf5_metadata(self, handle, write_rates): self.rates.index_rx[rxn]) # Construct array storage + _dtype = getattr(self, 'hdf5_dtype', 'float64') handle.create_dataset("number", (1, n_mats, n_nuc_number), maxshape=(None, n_mats, n_nuc_number), chunks=True, - dtype='float64') + dtype=_dtype) if include_rates and n_nuc_rxn > 0 and n_rxn > 0: handle.create_dataset( "reaction rates", (1, n_mats, n_nuc_rxn, n_rxn), maxshape=(None, n_mats, n_nuc_rxn, n_rxn), - chunks=True, 
dtype='float64') + chunks=True, dtype=_dtype) handle.create_dataset("eigenvalues", (1, 2), maxshape=(None, 2), dtype='float64') @@ -554,7 +556,8 @@ def save( step_ind, proc_time=None, write_rates: bool = False, - path: PathLike = "depletion_results.h5" + path: PathLike = "depletion_results.h5", + hdf5_dtype: str = 'float64', ): """Creates and writes depletion results to disk @@ -582,12 +585,17 @@ def save( Path to file to write. Defaults to 'depletion_results.h5'. .. versionadded:: 0.14.0 + hdf5_dtype : str, optional + dtype for number and reaction rate datasets, float32 or float64 (default) + + .. versionadded:: 0.15.4 """ # Get indexing terms vol_dict, nuc_list, burn_list, full_burn_list, name_list = op.get_results_info() # Create results results = StepResult() + results.hdf5_dtype = hdf5_dtype results.allocate(vol_dict, nuc_list, burn_list, full_burn_list, name_list) n_mat = len(burn_list) From 02f1d8b2dbb27a74049e6bb70b8c923b3e440947 Mon Sep 17 00:00:00 2001 From: Perry Date: Sat, 21 Mar 2026 21:51:01 -0700 Subject: [PATCH 2/4] Ensure CRAM solver operates in float64 np.asarray(n0, dtype=np.float64) replaces n0.copy() so the working vector is float64 regardless of input dtype. 
Guards against silent precision loss on restart when initial compositions are read from a float32 HDF5 dataset. --- openmc/deplete/cram.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openmc/deplete/cram.py b/openmc/deplete/cram.py index cecc388f4c3..a0c9b98e24c 100644 --- a/openmc/deplete/cram.py +++ b/openmc/deplete/cram.py @@ -76,7 +76,7 @@ def __call__(self, A, n0, dt): """ A = dt * csc_array(A, dtype=np.float64) - y = n0.copy() + y = np.asarray(n0, dtype=np.float64) ident = eye_array(A.shape[0], format='csc') for alpha, theta in zip(self.alpha, self.theta): y += 2*np.real(alpha*sla.spsolve(A - theta*ident, y)) From b3edad9059bca662ebac80f0e56450249df563fe Mon Sep 17 00:00:00 2001 From: Perry Date: Sun, 22 Mar 2026 00:05:55 -0700 Subject: [PATCH 3/4] CRAM float64 guard fixup Replace np.asarray with np.array: np.asarray returns the input object itself when it is already float64, so the subsequent in-place `y +=` updates in the CRAM loop would mutate the caller's n0. np.array always copies, preserving the non-mutating behavior of the original n0.copy() while still forcing float64. --- openmc/deplete/cram.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/openmc/deplete/cram.py b/openmc/deplete/cram.py index a0c9b98e24c..148243ab77b 100644 --- a/openmc/deplete/cram.py +++ b/openmc/deplete/cram.py @@ -76,7 +76,7 @@ def __call__(self, A, n0, dt): """ A = dt * csc_array(A, dtype=np.float64) - y = np.asarray(n0, dtype=np.float64) + y = np.array(n0, dtype=np.float64) ident = eye_array(A.shape[0], format='csc') for alpha, theta in zip(self.alpha, self.theta): y += 2*np.real(alpha*sla.spsolve(A - theta*ident, y)) From 130b7cc502eb9244a8f172d634252f9d8947cac6 Mon Sep 17 00:00:00 2001 From: Perry Date: Mon, 23 Mar 2026 22:57:32 -0700 Subject: [PATCH 4/4] Enable compression in depletion_results.h5 MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Allow optional gzip or lzf compression on number and reaction rate datasets in depletion_results.h5. Default is None (no compression) for backward compatibility. Compression is applied at write time via h5py's chunk-level filters — readers auto-decompress with no code changes.
- Thread hdf5_compression through Integrator, SIIntegrator, and StepResult.save() to create_dataset() calls - Validate at construction time to fail fast before transport solve - Propagate hdf5_dtype and hdf5_compression in distribute() --- openmc/deplete/abc.py | 27 +++++++++++++++++++++++++-- openmc/deplete/stepresult.py | 31 +++++++++++++++++++++++++------ 2 files changed, 50 insertions(+), 8 deletions(-) diff --git a/openmc/deplete/abc.py b/openmc/deplete/abc.py index 6a202ae59e0..1eb23ec91a7 100644 --- a/openmc/deplete/abc.py +++ b/openmc/deplete/abc.py @@ -584,7 +584,12 @@ class Integrator(ABC): .. versionadded:: 0.15.1 hdf5_dtype : str, optional - dtype for number and reaction rate datasets, float32 or float64 (default) + dtype for number and reaction rate datasets, float32 or float64. + + .. versionadded:: 0.15.4 + hdf5_compression : str, optional + Compression for number and reaction rate datasets. + Accepted values are 'gzip' and 'lzf'. Ignored with parallel HDF5. .. versionadded:: 0.15.4 @@ -637,11 +642,17 @@ def __init__( solver: str = "cram48", continue_timesteps: bool = False, hdf5_dtype: str = 'float64', + hdf5_compression: str = None, ): if hdf5_dtype not in ('float32', 'float64'): raise ValueError( f"hdf5_dtype must be 'float32' or 'float64', got '{hdf5_dtype}'") + if hdf5_compression is not None and hdf5_compression not in ('gzip', 'lzf'): + raise ValueError( + f"hdf5_compression must be None, 'gzip', or 'lzf', " + f"got '{hdf5_compression}'") self.hdf5_dtype = hdf5_dtype + self.hdf5_compression = hdf5_compression if continue_timesteps and operator.prev_res is None: raise ValueError("Continuation run requires passing prev_results.") self.operator = operator @@ -906,6 +917,7 @@ def integrate( write_rates=write_rates, path=path, hdf5_dtype=self.hdf5_dtype, + hdf5_compression=self.hdf5_compression, ) # Update for next step @@ -930,6 +942,7 @@ def integrate( write_rates=write_rates, path=path, hdf5_dtype=self.hdf5_dtype, + 
hdf5_compression=self.hdf5_compression, ) self.operator.write_bos_data(len(self) + self._i_res) @@ -1130,6 +1143,12 @@ class SIIntegrator(Integrator): hdf5_dtype : str, optional dtype for number and reaction rate datasets, float32 or float64 (default) + .. versionadded:: 0.15.4 + hdf5_compression : str, optional + Compression filter for number and reaction rate datasets in + depletion_results.h5. Accepted values are 'gzip' and 'lzf'. + Default is None (no compression). Ignored with parallel HDF5. + .. versionadded:: 0.15.4 Attributes @@ -1175,6 +1194,7 @@ def __init__( solver: str = "cram48", continue_timesteps: bool = False, hdf5_dtype: str = 'float64', + hdf5_compression: str = None, ): check_type("n_steps", n_steps, Integral) check_greater_than("n_steps", n_steps, 0) @@ -1182,7 +1202,8 @@ def __init__( operator, timesteps, power, power_density, source_rates, timestep_units=timestep_units, solver=solver, continue_timesteps=continue_timesteps, - hdf5_dtype=hdf5_dtype) + hdf5_dtype=hdf5_dtype, + hdf5_compression=hdf5_compression) self.n_steps = n_steps def _get_bos_data_from_operator(self, step_index, step_power, n_bos): @@ -1275,6 +1296,7 @@ def integrate( write_rates=write_rates, path=path, hdf5_dtype=self.hdf5_dtype, + hdf5_compression=self.hdf5_compression, ) # Update for next step @@ -1294,6 +1316,7 @@ def integrate( write_rates=write_rates, path=path, hdf5_dtype=self.hdf5_dtype, + hdf5_compression=self.hdf5_compression, ) self.operator.write_bos_data(self._i_res + len(self)) diff --git a/openmc/deplete/stepresult.py b/openmc/deplete/stepresult.py index cc8bf40d636..25fb88c8e1f 100644 --- a/openmc/deplete/stepresult.py +++ b/openmc/deplete/stepresult.py @@ -75,6 +75,7 @@ def __init__(self): self.data = None self.hdf5_dtype = 'float64' + self.hdf5_compression = None def __repr__(self): t = self.time[0] @@ -190,7 +191,8 @@ def distribute(self, local_materials, ranges): # Direct transfer direct_attrs = ("time", "k", "source_rate", "index_nuc", - 
"mat_to_hdf5_ind", "mat_to_name", "proc_time") + "mat_to_hdf5_ind", "mat_to_name", "proc_time", + "hdf5_dtype", "hdf5_compression") for attr in direct_attrs: setattr(new, attr, getattr(self, attr)) # Get applicable slice of data @@ -260,12 +262,19 @@ def export_to_hdf5(self, filename, step, write_rates: bool = False): kwargs = {'mode': "w" if step == 0 else "a"} if h5py.get_config().mpi and comm.size > 1: - # Write results in parallel + # Write results in parallel — compression not supported + saved_compression = self.hdf5_compression + if self.hdf5_compression is not None: + if comm.rank == 0 and step == 0: + warnings.warn("HDF5 compression is not supported with " + "parallel I/O; writing without compression") + self.hdf5_compression = None kwargs['driver'] = 'mpio' kwargs['comm'] = comm with h5py.File(filename, **kwargs) as handle: self._to_hdf5(handle, step, parallel=True, write_rates=write_rates) + self.hdf5_compression = saved_compression else: # Gather results at root process all_results = comm.gather(self) @@ -345,18 +354,21 @@ def _write_hdf5_metadata(self, handle, write_rates): self.rates.index_rx[rxn]) # Construct array storage - _dtype = getattr(self, 'hdf5_dtype', 'float64') + _dtype = self.hdf5_dtype + _compression = self.hdf5_compression handle.create_dataset("number", (1, n_mats, n_nuc_number), maxshape=(None, n_mats, n_nuc_number), chunks=True, - dtype=_dtype) + dtype=_dtype, + compression=_compression) if include_rates and n_nuc_rxn > 0 and n_rxn > 0: handle.create_dataset( "reaction rates", (1, n_mats, n_nuc_rxn, n_rxn), maxshape=(None, n_mats, n_nuc_rxn, n_rxn), - chunks=True, dtype=_dtype) + chunks=True, dtype=_dtype, + compression=_compression) handle.create_dataset("eigenvalues", (1, 2), maxshape=(None, 2), dtype='float64') @@ -558,6 +570,7 @@ def save( write_rates: bool = False, path: PathLike = "depletion_results.h5", hdf5_dtype: str = 'float64', + hdf5_compression: str = None, ): """Creates and writes depletion results to disk @@ -586,7 
+599,12 @@ def save( .. versionadded:: 0.14.0 hdf5_dtype : str, optional - dtype for number and reaction rate datasets, float32 or float64 (default) + dtype for number and reaction rate datasets, float32 or float64 + + .. versionadded:: 0.15.4 + hdf5_compression : str, optional + Compression for number and reaction rate datasets. + Accepted values are 'gzip' and 'lzf'. Ignored with parallel HDF5. .. versionadded:: 0.15.4 """ @@ -596,6 +614,7 @@ def save( # Create results results = StepResult() results.hdf5_dtype = hdf5_dtype + results.hdf5_compression = hdf5_compression results.allocate(vol_dict, nuc_list, burn_list, full_burn_list, name_list) n_mat = len(burn_list)