diff --git a/compass/landice/tests/ensemble_generator/__init__.py b/compass/landice/tests/ensemble_generator/__init__.py index 6dc9af8843..444a6eef64 100644 --- a/compass/landice/tests/ensemble_generator/__init__.py +++ b/compass/landice/tests/ensemble_generator/__init__.py @@ -1,6 +1,18 @@ +""" +Ensemble generator test group for MALI simulations. + +Provides test cases for generating ensembles for UQ and sensitivity studies. +""" + from compass.landice.tests.ensemble_generator.branch_ensemble import ( BranchEnsemble, ) +from compass.landice.tests.ensemble_generator.sgh_ensemble_analysis import ( + AnalysisEnsemble, +) +from compass.landice.tests.ensemble_generator.sgh_restart_ensemble import ( + RestartEnsemble, +) from compass.landice.tests.ensemble_generator.spinup_ensemble import ( SpinupEnsemble, ) @@ -14,6 +26,8 @@ class EnsembleGenerator(TestGroup): """ def __init__(self, mpas_core): """ + Parameters + ---------- mpas_core : compass.landice.Landice the MPAS core that this test group belongs to """ @@ -22,3 +36,5 @@ def __init__(self, mpas_core): self.add_test_case(SpinupEnsemble(test_group=self)) self.add_test_case(BranchEnsemble(test_group=self)) + self.add_test_case(AnalysisEnsemble(test_group=self)) + self.add_test_case(RestartEnsemble(test_group=self)) diff --git a/compass/landice/tests/ensemble_generator/branch_ensemble/__init__.py b/compass/landice/tests/ensemble_generator/branch_ensemble/__init__.py index 07c6a4ebec..07d8c9db62 100644 --- a/compass/landice/tests/ensemble_generator/branch_ensemble/__init__.py +++ b/compass/landice/tests/ensemble_generator/branch_ensemble/__init__.py @@ -10,6 +10,10 @@ from compass.landice.tests.ensemble_generator.ensemble_manager import ( EnsembleManager, ) +from compass.landice.tests.ensemble_generator.ensemble_template import ( + add_template_file, + get_branch_template_package, +) from compass.testcase import TestCase @@ -59,6 +63,9 @@ def configure(self): """ config = self.config + resource_module = 
get_branch_template_package(config) + add_template_file(config, resource_module, 'branch_ensemble.cfg') + section = config['branch_ensemble'] spinup_test_dir = section.get('spinup_test_dir') @@ -89,7 +96,8 @@ def configure(self): else: print(f"Adding {run_name}") # use this run - self.add_step(BranchRun(test_case=self, run_num=run_num)) + self.add_step(BranchRun(test_case=self, run_num=run_num, + resource_module=resource_module)) # Note: do not add to steps_to_run; ensemble_manager # will handle submitting and running the runs diff --git a/compass/landice/tests/ensemble_generator/branch_ensemble/branch_ensemble.cfg b/compass/landice/tests/ensemble_generator/branch_ensemble/branch_ensemble.cfg index 78953eda17..761685344f 100644 --- a/compass/landice/tests/ensemble_generator/branch_ensemble/branch_ensemble.cfg +++ b/compass/landice/tests/ensemble_generator/branch_ensemble/branch_ensemble.cfg @@ -1,27 +1,3 @@ -# config options for branching an ensemble -[branch_ensemble] - -# start and end numbers for runs to set up and run -# branch runs. -# It is assumed that spinup runs have already been -# conducted for these runs. 
-start_run = 0 -end_run = 3 - -# Path to thermal forcing file for the mesh to be used in the branch run -TF_file_path = /global/cfs/cdirs/fanssie/MALI_projects/Amery_UQ/Amery_4to20km_from_whole_AIS/forcing/ocean_thermal_forcing/UKESM1-0-LL_SSP585/1995-2300/Amery_4to20km_TF_UKESM1-0-LL_SSP585_2300.nc - -# Path to SMB forcing file for the mesh to be used in the branch run -SMB_file_path = /global/cfs/cdirs/fanssie/MALI_projects/Amery_UQ/Amery_4to20km_from_whole_AIS/forcing/atmosphere_forcing/UKESM1-0-LL_SSP585/1995-2300/Amery_4to20km_SMB_UKESM1-0-LL_SSP585_2300_noBareLandAdvance.nc - -# location of spinup ensemble to branch from -spinup_test_dir = /pscratch/sd/h/hoffman2/AMERY_corrected_forcing_6param_ensemble_2023-03-18/landice/ensemble_generator/ensemble - -# year of spinup simulation from which to branch runs -branch_year = 2050 - -# whether to only set up branch runs for filtered runs or all runs -set_up_filtered_only = True - -# path to pickle file containing filtering information generated by plot_ensemble.py -ensemble_pickle_file = None +# branch_ensemble options are loaded from the selected model configuration +# package under: +# compass.landice.tests.ensemble_generator.ensemble_templates.<template>.branch diff --git a/compass/landice/tests/ensemble_generator/branch_ensemble/branch_run.py b/compass/landice/tests/ensemble_generator/branch_ensemble/branch_run.py index 864a751ff0..360fc1bded 100644 --- a/compass/landice/tests/ensemble_generator/branch_ensemble/branch_run.py +++ b/compass/landice/tests/ensemble_generator/branch_ensemble/branch_run.py @@ -28,36 +28,9 @@ class BranchRun(Step): input_file_name : str name of the input file that was read from the config - basal_fric_exp : float - value of basal friction exponent to use - - mu_scale : float - value to scale muFriction by - - stiff_scale : float - value to scale stiffnessFactor by - - von_mises_threshold : float - value of von Mises stress threshold to use - - calv_spd_lim : float - value of calving speed limit 
to use - - gamma0 : float - value of gamma0 to use in ISMIP6 ice-shelf basal melt param. - - deltaT : float - value of deltaT to use in ISMIP6 ice-shelf basal melt param. """ - def __init__(self, test_case, run_num, - basal_fric_exp=None, - mu_scale=None, - stiff_scale=None, - von_mises_threshold=None, - calv_spd_lim=None, - gamma0=None, - deltaT=None): + def __init__(self, test_case, run_num, resource_module): """ Creates a new run within an ensemble @@ -68,8 +41,13 @@ def __init__(self, test_case, run_num, run_num : integer the run number for this ensemble member + + resource_module : str + Package containing configuration-specific branch namelist and + streams templates """ self.run_num = run_num + self.resource_module = resource_module # define step (run) name self.name = f'run{run_num:03}' @@ -108,9 +86,10 @@ def setup(self): with open(os.path.join(self.work_dir, 'restart_timestamp'), 'w') as f: f.write('2015-01-01_00:00:00') - # yaml file - shutil.copy(os.path.join(spinup_dir, 'albany_input.yaml'), - self.work_dir) + # albany_input.yaml may be absent in templates that do not use Albany. 
+ albany_input = os.path.join(spinup_dir, 'albany_input.yaml') + if os.path.isfile(albany_input): + shutil.copy(albany_input, self.work_dir) # set up namelist # start with the namelist from the spinup @@ -120,8 +99,7 @@ def setup(self): 'namelist.landice')) # use the namelist in this module to update the spinup namelist options = compass.namelist.parse_replacements( - 'compass.landice.tests.ensemble_generator.branch_ensemble', - 'namelist.landice') + self.resource_module, 'namelist.landice') namelist = compass.namelist.replace(namelist, options) compass.namelist.write(namelist, os.path.join(self.work_dir, 'namelist.landice')) @@ -132,7 +110,7 @@ def setup(self): stream_replacements['TF_file_path'] = TF_file_path SMB_file_path = section.get('SMB_file_path') stream_replacements['SMB_file_path'] = SMB_file_path - strm_src = 'compass.landice.tests.ensemble_generator.branch_ensemble' + strm_src = self.resource_module self.add_streams_file(strm_src, 'streams.landice', out_name='streams.landice', diff --git a/compass/landice/tests/ensemble_generator/ensemble_member.py b/compass/landice/tests/ensemble_generator/ensemble_member.py index ca08833cff..92362a4492 100644 --- a/compass/landice/tests/ensemble_generator/ensemble_member.py +++ b/compass/landice/tests/ensemble_generator/ensemble_member.py @@ -40,12 +40,6 @@ class EnsembleMember(Step): stiff_scale : float value to scale stiffnessFactor by - von_mises_threshold : float - value of von Mises stress threshold to use - - calv_spd_lim : float - value of calving speed limit to use - gamma0 : float value of gamma0 to use in ISMIP6 ice-shelf basal melt param. 
@@ -54,11 +48,12 @@ class EnsembleMember(Step): """ def __init__(self, test_case, run_num, + resource_module, + namelist_option_values=None, + namelist_parameter_values=None, basal_fric_exp=None, mu_scale=None, stiff_scale=None, - von_mises_threshold=None, - calv_spd_lim=None, gamma0=None, meltflux=None, deltaT=None): @@ -73,6 +68,18 @@ def __init__(self, test_case, run_num, run_num : integer the run number for this ensemble member + resource_module : str + Package containing configuration-specific namelist, streams, + and albany input files + + namelist_option_values : dict, optional + A dictionary of namelist option names and values to be + overridden for this ensemble member + + namelist_parameter_values : dict, optional + A dictionary of run-info parameter names and values that + correspond to entries in ``namelist_option_values`` + basal_fric_exp : float value of basal friction exponent to use @@ -82,13 +89,6 @@ def __init__(self, test_case, run_num, stiff_scale : float value to scale stiffnessFactor by - von_mises_threshold : float - value of von Mises stress threshold to use - assumes same value for grounded and floating ice - - calv_spd_lim : float - value of calving speed limit to use - gamma0 : float value of gamma0 to use in ISMIP6 ice-shelf basal melt param. @@ -96,13 +96,18 @@ def __init__(self, test_case, run_num, value of deltaT to use in ISMIP6 ice-shelf basal melt param. 
""" self.run_num = run_num + self.resource_module = resource_module + if namelist_option_values is None: + namelist_option_values = {} + if namelist_parameter_values is None: + namelist_parameter_values = {} + self.namelist_option_values = dict(namelist_option_values) + self.namelist_parameter_values = dict(namelist_parameter_values) # store assigned param values for this run self.basal_fric_exp = basal_fric_exp self.mu_scale = mu_scale self.stiff_scale = stiff_scale - self.von_mises_threshold = von_mises_threshold - self.calv_spd_lim = calv_spd_lim self.gamma0 = gamma0 self.meltflux = meltflux self.deltaT = deltaT @@ -127,11 +132,12 @@ def setup(self): "'compass setup' again to set this experiment up.") return - resource_module = 'compass.landice.tests.ensemble_generator' + resource_module = self.resource_module # Get config for info needed for setting up simulation config = self.config - section = config['ensemble'] + section = config['ensemble_generator'] + spinup_section = config['spinup_ensemble'] # Create a python config (not compass config) file # for run-specific info useful for analysis/viz @@ -151,14 +157,18 @@ def setup(self): # Set up base run configuration self.add_namelist_file(resource_module, 'namelist.landice') - # copy over albany yaml file - # cannot use add_input functionality because need to modify the file - # in this function, and inputs don't get processed until after this - # function - with resources.path(resource_module, - 'albany_input.yaml') as package_path: - target = str(package_path) - shutil.copy(target, self.work_dir) + # albany_input.yaml is optional unless fric_exp perturbations are used. + albany_input_name = 'albany_input.yaml' + albany_input_path = os.path.join(self.work_dir, albany_input_name) + albany_source = resources.files(resource_module).joinpath( + albany_input_name) + + # Materialize a real filesystem path in case the package is not + # directly on the filesystem (e.g., zip/loader-backed). 
+ with resources.as_file(albany_source) as albany_source_path: + has_albany_input = albany_source_path.is_file() + if has_albany_input: + shutil.copy(str(albany_source_path), self.work_dir) self.add_model_as_input() @@ -171,25 +181,15 @@ def setup(self): options['config_adaptive_timestep_CFL_fraction'] = \ f'{self.cfl_fraction}' - # von Mises stress threshold - if self.von_mises_threshold is not None: - options['config_grounded_von_Mises_threshold_stress'] = \ - f'{self.von_mises_threshold}' - options['config_floating_von_Mises_threshold_stress'] = \ - f'{self.von_mises_threshold}' - run_info_cfg.set('run_info', 'von_mises_threshold', - f'{self.von_mises_threshold}') - - # calving speed limit - if self.calv_spd_lim is not None: - options['config_calving_speed_limit'] = \ - f'{self.calv_spd_lim}' - run_info_cfg.set('run_info', 'calv_spd_limit', - f'{self.calv_spd_lim}') + # apply generic namelist float parameter perturbations + for option_name, value in self.namelist_option_values.items(): + options[option_name] = f'{value}' + for parameter_name, value in self.namelist_parameter_values.items(): + run_info_cfg.set('run_info', parameter_name, f'{value}') # adjust basal friction exponent # rename and copy base file - input_file_path = section.get('input_file_path') + input_file_path = spinup_section.get('input_file_path') input_file_name = input_file_path.split('/')[-1] base_fname = input_file_name.split('.')[:-1][0] new_input_fname = f'{base_fname}_MODIFIED.nc' @@ -199,13 +199,16 @@ def setup(self): # set input filename in streams and create streams file stream_replacements = {'input_file_init_cond': new_input_fname} if self.basal_fric_exp is not None: + if not has_albany_input: + raise ValueError( + "Parameter 'fric_exp' requires 'albany_input.yaml' " + f"in template package '{resource_module}'.") # adjust mu and exponent - orig_fric_exp = section.getfloat('orig_fric_exp') + orig_fric_exp = spinup_section.getfloat('orig_fric_exp') 
_adjust_friction_exponent(orig_fric_exp, self.basal_fric_exp, os.path.join(self.work_dir, new_input_fname), - os.path.join(self.work_dir, - 'albany_input.yaml')) + albany_input_path) run_info_cfg.set('run_info', 'basal_fric_exp', f'{self.basal_fric_exp}') @@ -227,26 +230,30 @@ def setup(self): # adjust gamma0 and deltaT # (only need to check one of these params) - basal_melt_param_file_path = section.get('basal_melt_param_file_path') - basal_melt_param_file_name = basal_melt_param_file_path.split('/')[-1] - base_fname = basal_melt_param_file_name.split('.')[:-1][0] - new_fname = f'{base_fname}_MODIFIED.nc' - shutil.copy(basal_melt_param_file_path, - os.path.join(self.work_dir, new_fname)) - _adjust_basal_melt_params(os.path.join(self.work_dir, new_fname), - self.gamma0, self.deltaT) - stream_replacements['basal_melt_param_file_name'] = new_fname - if self.gamma0 is not None: - run_info_cfg.set('run_info', 'gamma0', f'{self.gamma0}') - if self.deltaT is not None: - run_info_cfg.set('run_info', 'meltflux', f'{self.meltflux}') - run_info_cfg.set('run_info', 'deltaT', f'{self.deltaT}') + basal_melt_param_file_path = spinup_section.get( + 'basal_melt_param_file_path') + if os.path.exists(str(basal_melt_param_file_path)): + basal_melt_param_file_name = \ + basal_melt_param_file_path.split('/')[-1] + base_fname = basal_melt_param_file_name.split('.')[:-1][0] + new_fname = f'{base_fname}_MODIFIED.nc' + shutil.copy(basal_melt_param_file_path, + os.path.join(self.work_dir, new_fname)) + _adjust_basal_melt_params(os.path.join(self.work_dir, new_fname), + self.gamma0, self.deltaT) + stream_replacements['basal_melt_param_file_name'] = new_fname + if self.gamma0 is not None: + run_info_cfg.set('run_info', 'gamma0', f'{self.gamma0}') + if self.deltaT is not None: + run_info_cfg.set('run_info', 'meltflux', f'{self.meltflux}') + run_info_cfg.set('run_info', 'deltaT', f'{self.deltaT}') # set up forcing files (unmodified) - TF_file_path = section.get('TF_file_path') - 
stream_replacements['TF_file_path'] = TF_file_path - SMB_file_path = section.get('SMB_file_path') - stream_replacements['SMB_file_path'] = SMB_file_path + TF_file_path = spinup_section.get('TF_file_path') + if os.path.exists(str(TF_file_path)): + stream_replacements['TF_file_path'] = TF_file_path + SMB_file_path = spinup_section.get('SMB_file_path') + stream_replacements['SMB_file_path'] = SMB_file_path # store accumulated namelist and streams options self.add_namelist_options(options=options, diff --git a/compass/landice/tests/ensemble_generator/ensemble_template.py b/compass/landice/tests/ensemble_generator/ensemble_template.py new file mode 100644 index 0000000000..a3ae60083d --- /dev/null +++ b/compass/landice/tests/ensemble_generator/ensemble_template.py @@ -0,0 +1,95 @@ +from importlib.util import find_spec + + +def get_ensemble_template_name(config): + """ + Get the configured ensemble template name. + + Parameters + ---------- + config : compass.config.CompassConfigParser + Configuration options for a test case + + Returns + ------- + str + The selected ensemble template name + """ + section = 'ensemble_generator' + option = 'ensemble_template' + + if not config.has_section(section): + raise ValueError( + f"Missing required config section '{section}' for ensemble " + "generator configuration selection.") + + if not config.has_option(section, option): + raise ValueError( + f"Missing required config option '{option}' in section " + f"'{section}'.") + + template = config.get(section, option).strip() + if template == '': + raise ValueError('ensemble_template cannot be empty.') + + return template + + +def get_spinup_template_package(config): + """ + Get the package containing spinup ensemble template resources. 
+ + Parameters + ---------- + config : compass.config.CompassConfigParser + Configuration options for a test case + + Returns + ------- + str + Package path for spinup resources + """ + template = get_ensemble_template_name(config) + return ('compass.landice.tests.ensemble_generator.ensemble_templates.' + f'{template}.spinup') + + +def get_branch_template_package(config): + """ + Get the package containing branch ensemble template resources. + + Parameters + ---------- + config : compass.config.CompassConfigParser + Configuration options for a test case + + Returns + ------- + str + Package path for branch resources + """ + template = get_ensemble_template_name(config) + return ('compass.landice.tests.ensemble_generator.ensemble_templates.' + f'{template}.branch') + + +def add_template_file(config, package, filename): + """ + Add a config file from the selected ensemble template package. + + Parameters + ---------- + config : compass.config.CompassConfigParser + Configuration options for a test case + + package : str + The package containing the requested configuration file + + filename : str + The configuration filename to add from the package + """ + if find_spec(package) is None: + raise ValueError( + f"Ensemble template package '{package}' was not found.") + + config.add_from_package(package, filename, exception=True) diff --git a/compass/landice/tests/ensemble_generator/ensemble_templates/__init__.py b/compass/landice/tests/ensemble_generator/ensemble_templates/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/compass/landice/tests/ensemble_generator/ensemble_templates/default/__init__.py b/compass/landice/tests/ensemble_generator/ensemble_templates/default/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/compass/landice/tests/ensemble_generator/ensemble_templates/default/branch/__init__.py b/compass/landice/tests/ensemble_generator/ensemble_templates/default/branch/__init__.py new file mode 100644 index 
0000000000..e69de29bb2 diff --git a/compass/landice/tests/ensemble_generator/ensemble_templates/default/branch/branch_ensemble.cfg b/compass/landice/tests/ensemble_generator/ensemble_templates/default/branch/branch_ensemble.cfg new file mode 100644 index 0000000000..709c9fbd68 --- /dev/null +++ b/compass/landice/tests/ensemble_generator/ensemble_templates/default/branch/branch_ensemble.cfg @@ -0,0 +1,33 @@ +# selector for ensemble template resources +[ensemble_generator] + +# subdirectory within ensemble_templates/ where branch_ensemble options are located +ensemble_template = default + +# config options for branching an ensemble +[branch_ensemble] + +# start and end numbers for runs to set up and run +# branch runs. +# It is assumed that spinup runs have already been +# conducted for these runs. +start_run = 0 +end_run = 3 + +# Path to thermal forcing file for the mesh to be used in the branch run +TF_file_path = /global/cfs/cdirs/fanssie/MALI_projects/Amery_UQ/Amery_4to20km_from_whole_AIS/forcing/ocean_thermal_forcing/UKESM1-0-LL_SSP585/1995-2300/Amery_4to20km_TF_UKESM1-0-LL_SSP585_2300.nc + +# Path to SMB forcing file for the mesh to be used in the branch run +SMB_file_path = /global/cfs/cdirs/fanssie/MALI_projects/Amery_UQ/Amery_4to20km_from_whole_AIS/forcing/atmosphere_forcing/UKESM1-0-LL_SSP585/1995-2300/Amery_4to20km_SMB_UKESM1-0-LL_SSP585_2300_noBareLandAdvance.nc + +# location of spinup ensemble to branch from +spinup_test_dir = /pscratch/sd/h/hoffman2/AMERY_corrected_forcing_6param_ensemble_2023-03-18/landice/ensemble_generator/ensemble + +# year of spinup simulation from which to branch runs +branch_year = 2050 + +# whether to only set up branch runs for filtered runs or all runs +set_up_filtered_only = True + +# path to pickle file containing filtering information generated by plot_ensemble.py +ensemble_pickle_file = None diff --git a/compass/landice/tests/ensemble_generator/branch_ensemble/namelist.landice 
b/compass/landice/tests/ensemble_generator/ensemble_templates/default/branch/namelist.landice similarity index 100% rename from compass/landice/tests/ensemble_generator/branch_ensemble/namelist.landice rename to compass/landice/tests/ensemble_generator/ensemble_templates/default/branch/namelist.landice diff --git a/compass/landice/tests/ensemble_generator/branch_ensemble/streams.landice b/compass/landice/tests/ensemble_generator/ensemble_templates/default/branch/streams.landice similarity index 100% rename from compass/landice/tests/ensemble_generator/branch_ensemble/streams.landice rename to compass/landice/tests/ensemble_generator/ensemble_templates/default/branch/streams.landice diff --git a/compass/landice/tests/ensemble_generator/ensemble_templates/default/spinup/__init__.py b/compass/landice/tests/ensemble_generator/ensemble_templates/default/spinup/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/compass/landice/tests/ensemble_generator/albany_input.yaml b/compass/landice/tests/ensemble_generator/ensemble_templates/default/spinup/albany_input.yaml similarity index 100% rename from compass/landice/tests/ensemble_generator/albany_input.yaml rename to compass/landice/tests/ensemble_generator/ensemble_templates/default/spinup/albany_input.yaml diff --git a/compass/landice/tests/ensemble_generator/ensemble_generator.cfg b/compass/landice/tests/ensemble_generator/ensemble_templates/default/spinup/ensemble_generator.cfg similarity index 64% rename from compass/landice/tests/ensemble_generator/ensemble_generator.cfg rename to compass/landice/tests/ensemble_generator/ensemble_templates/default/spinup/ensemble_generator.cfg index 4cbab8b830..72786e6a0c 100644 --- a/compass/landice/tests/ensemble_generator/ensemble_generator.cfg +++ b/compass/landice/tests/ensemble_generator/ensemble_templates/default/spinup/ensemble_generator.cfg @@ -1,28 +1,36 @@ +# selector for ensemble template resources +[ensemble_generator] + +# subdirectory within 
ensemble_templates/ where branch_ensemble options are located +ensemble_template = default + # config options for setting up an ensemble -[ensemble] # start and end numbers for runs to set up and run # Run numbers should be zero-based. # Additional runs can be added and run to an existing ensemble # without affecting existing runs, but trying to set up a run # that already exists will generate a warning and skip that run. -# If using uniform sampling, start_run should be 0 and end_run should be -# equal to (max_samples - 1), otherwise unexpected behavior may result. +# If using uniform or log-uniform sampling, start_run should be 0 and +# end_run should be equal to (max_samples - 1), otherwise unexpected +# behavior may result. # These values do not affect viz/analysis, which will include any # runs it finds. start_run = 0 end_run = 3 -# sampling_method can be either 'sobol' for a space-filling Sobol sequence -# or 'uniform' for uniform sampling. Uniform sampling is most appropriate -# for a single parameter sensitivity study. It will sample uniformly across -# all dimensions simultaneously, thus sampling only a small fraction of -# parameter space +# sampling_method can be 'sobol' for a space-filling Sobol sequence, +# 'uniform' for linear sampling, or 'log-uniform' for logarithmic sampling. +# Uniform and log-uniform are most appropriate for a single-parameter +# sensitivity study because they sample each active parameter using the +# same rank ordering, thus sampling only a small fraction of parameter space +# in higher dimensions. sampling_method = sobol # maximum number of samples to be considered. # max_samples needs to be greater or equal to (end_run + 1) -# When using uniform sampling, max_samples should equal (end_run + 1). +# When using uniform or log-uniform sampling, max_samples should equal +# (end_run + 1). # When using Sobol sequence, max_samples ought to be a power of 2. # max_samples should not be changed after the first set of ensemble. 
# So, when using Sobol sequence, max_samples might be set larger than @@ -53,6 +61,12 @@ basin = ISMIP6BasinBC # to inform the choice for a large production ensemble. cfl_fraction = 0.7 +# number of tasks that each ensemble member should be run with +# Eventually, compass could determine this, but we want explicit control for now +ntasks = 128 + +[spinup_ensemble] + # Path to the initial condition input file. # Eventually this could be hard-coded to use files on the input data # server, but initially we want flexibility to experiment with different @@ -72,65 +86,30 @@ TF_file_path = /global/cfs/cdirs/fanssie/MALI_projects/Amery_UQ/Amery_4to20km_fr # Path to SMB forcing file for the mesh to be used SMB_file_path = /global/cfs/cdirs/fanssie/MALI_projects/Amery_UQ/Amery_4to20km_from_whole_AIS/forcing/atmosphere_forcing/RACMO_climatology_1995-2017/Amery_4to20km_RACMO2.3p2_ANT27_smb_climatology_1995-2017_no_xtime_noBareLandAdvance.nc -# number of tasks that each ensemble member should be run with -# Eventually, compass could determine this, but we want explicit control for now -ntasks = 128 - -# whether basal friction exponent is being varied -# [unitless] -use_fric_exp = True -# min value to vary over -fric_exp_min = 0.1 -# max value to vary over -fric_exp_max = 0.33333 - -# whether a scaling factor on muFriction is being varied -# [unitless: 1.0=no scaling] -use_mu_scale = True -# min value to vary over -mu_scale_min = 0.8 -# max value to vary over -mu_scale_max = 1.2 - -# whether a scaling factor on stiffnessFactor is being varied -# [unitless: 1.0=no scaling] -use_stiff_scale = True -# min value to vary over -stiff_scale_min = 0.8 -# max value to vary over -stiff_scale_max = 1.2 - -# whether the von Mises threshold stress (sigma_max) is being varied -# [units: Pa] -use_von_mises_threshold = True -# min value to vary over -von_mises_threshold_min = 80.0e3 -# max value to vary over -von_mises_threshold_max = 180.0e3 - -# whether the calving speed limit is being varied 
-# [units: km/yr] -use_calv_limit = False -# min value to vary over -calv_limit_min = 5.0 -# max value to vary over -calv_limit_max = 50.0 - -# whether ocean melt parameterization coefficient is being varied -# [units: m/yr] -use_gamma0 = True -# min value to vary over -gamma0_min = 9620.0 -# max value to vary over -gamma0_max = 471000.0 - -# whether target ice-shelf basal melt flux is being varied -# [units: Gt/yr] -use_meltflux = True -# min value to vary over -meltflux_min = 12. -# max value to vary over -meltflux_max = 58. -# ice-shelf area associated with target melt rates -# [units: m^2] +# For meltflux perturbations, this observed ice-shelf area is used when +# converting target melt flux to deltaT. iceshelf_area_obs = 60654.e6 + +# Parameter definitions are listed in this section in sampling order. +# Use the prefix "nl." for float parameters that map to namelist options. +# Each parameter must define " = min, max". +# Namelist parameters must also define +# ".option_name = namelist_option". 
+[ensemble.parameters] + +# special parameters (handled by custom code) +fric_exp = 0.1, 0.33333 +mu_scale = 0.8, 1.2 +stiff_scale = 0.8, 1.2 +gamma0 = 9620.0, 471000.0 +meltflux = 12.0, 58.0 + +# namelist float parameters (generic handling) +nl.von_mises_threshold = 80.0e3, 180.0e3 +nl.von_mises_threshold.option_name = +    config_grounded_von_Mises_threshold_stress, +    config_floating_von_Mises_threshold_stress + +# example for calving speed limit (units must match namelist units) +# nl.calv_spd_limit = 0.0001585, 0.001585 +# nl.calv_spd_limit.option_name = config_calving_speed_limit diff --git a/compass/landice/tests/ensemble_generator/namelist.landice b/compass/landice/tests/ensemble_generator/ensemble_templates/default/spinup/namelist.landice similarity index 100% rename from compass/landice/tests/ensemble_generator/namelist.landice rename to compass/landice/tests/ensemble_generator/ensemble_templates/default/spinup/namelist.landice diff --git a/compass/landice/tests/ensemble_generator/streams.landice b/compass/landice/tests/ensemble_generator/ensemble_templates/default/spinup/streams.landice similarity index 100% rename from compass/landice/tests/ensemble_generator/streams.landice rename to compass/landice/tests/ensemble_generator/ensemble_templates/default/spinup/streams.landice diff --git a/compass/landice/tests/ensemble_generator/ensemble_templates/sgh_ensemble/__init__.py b/compass/landice/tests/ensemble_generator/ensemble_templates/sgh_ensemble/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/compass/landice/tests/ensemble_generator/ensemble_templates/sgh_ensemble/branch/__init__.py b/compass/landice/tests/ensemble_generator/ensemble_templates/sgh_ensemble/branch/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/compass/landice/tests/ensemble_generator/ensemble_templates/sgh_ensemble/branch/branch_ensemble.cfg 
b/compass/landice/tests/ensemble_generator/ensemble_templates/sgh_ensemble/branch/branch_ensemble.cfg new file mode 100644 index 0000000000..709c9fbd68 --- /dev/null +++ b/compass/landice/tests/ensemble_generator/ensemble_templates/sgh_ensemble/branch/branch_ensemble.cfg @@ -0,0 +1,33 @@ +# selector for ensemble template resources +[ensemble_generator] + +# subdirectory within ensemble_templates/ where branch_ensemble options are located +ensemble_template = sgh_ensemble + +# config options for branching an ensemble +[branch_ensemble] + +# start and end numbers for runs to set up and run +# branch runs. +# It is assumed that spinup runs have already been +# conducted for these runs. +start_run = 0 +end_run = 3 + +# Path to thermal forcing file for the mesh to be used in the branch run +TF_file_path = /global/cfs/cdirs/fanssie/MALI_projects/Amery_UQ/Amery_4to20km_from_whole_AIS/forcing/ocean_thermal_forcing/UKESM1-0-LL_SSP585/1995-2300/Amery_4to20km_TF_UKESM1-0-LL_SSP585_2300.nc + +# Path to SMB forcing file for the mesh to be used in the branch run +SMB_file_path = /global/cfs/cdirs/fanssie/MALI_projects/Amery_UQ/Amery_4to20km_from_whole_AIS/forcing/atmosphere_forcing/UKESM1-0-LL_SSP585/1995-2300/Amery_4to20km_SMB_UKESM1-0-LL_SSP585_2300_noBareLandAdvance.nc + +# location of spinup ensemble to branch from +spinup_test_dir = /pscratch/sd/h/hoffman2/AMERY_corrected_forcing_6param_ensemble_2023-03-18/landice/ensemble_generator/ensemble + +# year of spinup simulation from which to branch runs +branch_year = 2050 + +# whether to only set up branch runs for filtered runs or all runs +set_up_filtered_only = True + +# path to pickle file containing filtering information generated by plot_ensemble.py +ensemble_pickle_file = None diff --git a/compass/landice/tests/ensemble_generator/ensemble_templates/sgh_ensemble/branch/namelist.landice b/compass/landice/tests/ensemble_generator/ensemble_templates/sgh_ensemble/branch/namelist.landice new file mode 100644 index 
0000000000..9564610e57 --- /dev/null +++ b/compass/landice/tests/ensemble_generator/ensemble_templates/sgh_ensemble/branch/namelist.landice @@ -0,0 +1,8 @@ +config_do_restart = .true. +config_start_time = 'file' +config_stop_time = '2300-01-01_00:00:00' +config_grounded_von_Mises_threshold_stress = 1.0e9 +config_min_adaptive_timestep = 21600 +config_calving_error_threshold = 1.0e9 +config_front_mass_bal_grounded = 'ismip6' +config_use_3d_thermal_forcing_for_face_melt = .true. diff --git a/compass/landice/tests/ensemble_generator/ensemble_templates/sgh_ensemble/branch/streams.landice b/compass/landice/tests/ensemble_generator/ensemble_templates/sgh_ensemble/branch/streams.landice new file mode 100644 index 0000000000..f1cb2af75f --- /dev/null +++ b/compass/landice/tests/ensemble_generator/ensemble_templates/sgh_ensemble/branch/streams.landice @@ -0,0 +1,101 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/compass/landice/tests/ensemble_generator/ensemble_templates/sgh_ensemble/spinup/__init__.py b/compass/landice/tests/ensemble_generator/ensemble_templates/sgh_ensemble/spinup/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/compass/landice/tests/ensemble_generator/ensemble_templates/sgh_ensemble/spinup/ensemble_generator.cfg b/compass/landice/tests/ensemble_generator/ensemble_templates/sgh_ensemble/spinup/ensemble_generator.cfg new file mode 100644 index 0000000000..f58fdb1bb6 --- /dev/null +++ b/compass/landice/tests/ensemble_generator/ensemble_templates/sgh_ensemble/spinup/ensemble_generator.cfg @@ -0,0 +1,77 @@ +# selector for ensemble template resources +[ensemble_generator] + +# subdirectory within ensemble_templates/ where branch_ensemble options are located +ensemble_template = sgh_ensemble + +# config options for setting up an ensemble + +# start and end numbers for runs to set up and run +# Run numbers should be zero-based. 
+# Additional runs can be added and run to an existing ensemble +# without affecting existing runs, but trying to set up a run +# that already exists will generate a warning and skip that run. +# If using uniform sampling, start_run should be 0 and end_run should be +# equal to (max_samples - 1), otherwise unexpected behavior may result. +# These values do not affect viz/analysis, which will include any +# runs it finds. +start_run = 0 +end_run = 3 + +# sampling_method can be either 'sobol' for a space-filling Sobol sequence +# or 'uniform' for uniform sampling. Uniform sampling is most appropriate +# for a single parameter sensitivity study. It will sample uniformly across +# all dimensions simultaneously, thus sampling only a small fraction of +# parameter space +sampling_method = uniform + +# maximum number of samples to be considered. +# max_samples needs to be greater or equal to (end_run + 1) +# When using uniform sampling, max_samples should equal (end_run + 1). +# When using Sobol sequence, max_samples ought to be a power of 2. +# max_samples should not be changed after the first set of ensemble. +# So, when using Sobol sequence, max_samples might be set larger than +# (end_run + 1) if you plan to add more samples to the ensemble later. +max_samples = 4 + +# fraction of CFL-limited time step to be used by the adaptive timestepper +# This value is explicitly included here to force the user to consciously +# select the value to use. Model run time tends to be inversely proportional +# to scaling this value (e.g., 0.2 will be ~4x more expensive than 0.8). +# Value should be less than or equal to 1.0, and values greater than 0.9 are +# not recommended. +# Values of 0.7-0.9 typically work for most simulations, but some runs may +# fail. 
Values of 0.2-0.5 are more conservative and will allow more runs +# to succeed, but will result in substantially more expensive runs +# However, because the range of parameter combinations being simulated +# are likely to stress the model, a smaller number than usual may be +# necessary to effectively cover parameter space. +# A user may want to do a few small ensembles with different values +# to inform the choice for a large production ensemble. +cfl_fraction = 0.7 + +# number of tasks that each ensemble member should be run with +# Eventually, compass could determine this, but we want explicit control for now +ntasks = 128 + +[spinup_ensemble] + +# Path to the initial condition input file. +# Eventually this could be hard-coded to use files on the input data +# server, but initially we want flexibility to experiment with different +# inputs and forcings +input_file_path = /global/cfs/cdirs/fanssie/MALI_input_files/AIS_4to20km_r01/AIS_4kmto20km_hist04_hydroinputs_ais4km2015_bmifile_W0.05_Pw0.8_filledDepressions_Stol2021mean.nc + +regional_mask_file_path = /global/cfs/cdirs/fanssie/MALI_input_files/AIS_4to20km_r01/AIS_4to20km_r01_20220907.regionMask_ismip6.nc + +# Parameter definitions are listed in this section in sampling order. +# Use the prefix "nl." for float parameters that map to namelist options. +# Each parameter must define " = min, max". +# Namelist parameters must also define +# ".option_name = namelist_option". 
+[ensemble.parameters] + +# namelist float parameters (generic handling) +# EXAMPLE: +#nl.chnl_conduc_coeff = 0.005, 0.1 +#nl.chnl_conduc_coeff.option_name = config_SGH_chnl_conduc_coeff diff --git a/compass/landice/tests/ensemble_generator/ensemble_templates/sgh_ensemble/spinup/namelist.landice b/compass/landice/tests/ensemble_generator/ensemble_templates/sgh_ensemble/spinup/namelist.landice new file mode 100644 index 0000000000..656479abd7 --- /dev/null +++ b/compass/landice/tests/ensemble_generator/ensemble_templates/sgh_ensemble/spinup/namelist.landice @@ -0,0 +1,203 @@ +&velocity_solver + config_velocity_solver = 'none' + config_sia_tangent_slope_calculation = 'from_vertex_barycentric' + config_flowParamA_calculation = 'constant' + config_do_velocity_reconstruction_for_external_dycore = .false. + config_simple_velocity_type = 'uniform' + config_use_glp = .true. + config_beta_thawed_only = .false. + config_unrealistic_velocity = 00.00159 ! 50 km/yr + config_nonconvergence_error = .false. + config_effective_pressure_max = 1.0e36 +/ +&advection + config_thickness_advection = 'none' + config_tracer_advection = 'none' + config_restore_thickness_after_advection = .false. + config_zero_sfcMassBalApplied_over_bare_land = .true. +/ +&solidearth + config_uplift_method = 'none' + config_slm_coupling_interval = 2 + config_MALI_to_SLM_weights_file = 'mpas_to_grid.nc' + config_SLM_to_MALI_weights_file = 'grid_to_mpas.nc' +/ +&calving + config_calving = 'none' + config_apply_calving_mask = .false. + config_use_Albany_flowA_eqn_for_vM = .false. + config_calving_topography = -500.0 + config_calving_thickness = 0.0 + config_calving_eigencalving_parameter_source = 'scalar' + config_calving_eigencalving_parameter_scalar_value = 3.14e16 + config_calving_specified_source = 'const' + config_calving_velocity_const = 0.0 + config_data_calving = .false. + config_calving_timescale = 0.0 + config_restore_calving_front = .true. + config_restore_calving_front_prevent_retreat = .false. 
+ config_remove_icebergs = .true. + config_remove_small_islands = .true. + config_calving_speed_limit = 0.00063492063 + config_grounded_von_Mises_threshold_stress = 1.0e6 + config_floating_von_Mises_threshold_stress = 1.0e6 + config_grounded_von_Mises_threshold_stress_source = 'scalar' + config_floating_von_Mises_threshold_stress_source = 'scalar' + config_finalize_damage_after_advection = .true. + config_preserve_damage = .false. + config_calculate_damage = .true. + config_damage_preserve_threshold = 0.0 + config_damage_calving_threshold = 0.95 + config_damage_stiffness_min = 0.1 + config_damage_rheology_coupling = .false. + config_damage_gl_setting = 'nye' + config_damage_calving_method = 'none' + config_damagecalvingParameter = 1.0e-4 + config_ismip6_retreat_k = -170.0 + config_calving_error_threshold = 100000.0 + config_distribute_unablatedVolumeDynCell = .true. +/ +&thermal_solver + config_thermal_solver = 'none' + config_thermal_calculate_bmb = .true. + config_temperature_init = 'file' + config_thermal_thickness = 0.0 + config_surface_air_temperature_source = 'file' + config_surface_air_temperature_value = 273.15 + config_surface_air_temperature_lapse_rate = 0.01 + config_basal_heat_flux_source = 'file' + config_basal_heat_flux_value = 0.0 + config_temp_diffusive_factor = 1.0e-5 + config_max_water_fraction = 1.0e-2 +/ +&iceshelf_melt + config_basal_mass_bal_float = 'none' + config_bmlt_float_flux = 0.0 + config_bmlt_float_xlimit = 0.0 + config_basal_mass_bal_seroussi_amplitude = 0.0 + config_basal_mass_bal_seroussi_period = 1.0 + config_basal_mass_bal_seroussi_phase = 0.0 + config_temperature_profile_melt_scale_factor = 6.0 + config_temperature_profile_sill_elevation = -700.0 + config_temperature_profile_plume_thickness = 30.0 + config_temperature_profile_draft_slope = 1.0e-2 + config_temperature_profile_thermocline_upper_depth = -200.0 + config_temperature_profile_thermocline_upper_temp = -1.0 + config_temperature_profile_thermocline_lower_depth = -600.0 + 
config_temperature_profile_thermocline_lower_temp = 1.2 + config_temperature_profile_variability_amplitude = 0.0 + config_temperature_profile_variability_period = 1.0 + config_temperature_profile_variability_phase = 0.0 + config_temperature_profile_GL_depth_fraction = 0.25 + config_front_mass_bal_grounded = 'none' + config_use_3d_thermal_forcing_for_face_melt = .true. + config_beta_ocean_thermal_forcing = 1.18 + config_add_ocean_thermal_forcing = 0.0 + config_alpha_subglacial_discharge = 0.39 + config_subglacial_discharge_coefficient = 3.0e-4 + config_subglacial_discharge_intercept = 0.15 + config_uniform_face_melt_rate = 0.0 +/ +&physical_parameters + config_ice_density = 910.0 + config_ocean_density = 1028.0 + config_sea_level = 0.0 + config_default_flowParamA = 3.1709792e-24 + config_flowLawExponent = 3.0 + config_dynamic_thickness = 10.0 +/ +&time_integration + config_dt = '0000-01-00_00:00:00' + config_time_integration = 'forward_euler' + config_adaptive_timestep = .false. + config_min_adaptive_timestep = 60 + config_max_adaptive_timestep = 3.154e7 + config_adaptive_timestep_CFL_fraction = 0.8 + config_adaptive_timestep_calvingCFL_fraction = 0.8 + config_adaptive_timestep_faceMeltingCFL_fraction = 1.0 + config_adaptive_timestep_include_DCFL = .false. + config_adaptive_timestep_include_calving = .false. + config_adaptive_timestep_include_face_melting = .false. + config_adaptive_timestep_force_interval = '0001-00-00_00:00:00' +/ +&time_management + config_do_restart = .false. + config_restart_timestamp_name = 'restart_timestamp' + config_start_time = '2015-01-01_00:00:00' + config_stop_time = '2300-01-01_00:00:00' + config_run_duration = 'none' + config_calendar_type = 'noleap' +/ +&io + config_stats_interval = 0 + config_write_stats_on_startup = .false. + config_stats_cell_ID = 1 + config_write_output_on_startup = .true. + config_pio_num_iotasks = 2 + config_pio_stride = 64 + config_year_digits = 4 + config_output_external_velocity_solver_data = .false. 
+ config_write_albany_ascii_mesh = .false. +/ +&decomposition + config_num_halos = 3 + config_block_decomp_file_prefix = 'graph.info.part.' + config_number_of_blocks = 0 + config_explicit_proc_decomp = .false. + config_proc_decomp_file_prefix = 'graph.info.part.' +/ +&debug + config_print_thickness_advection_info = .false. + config_print_calving_info = .false. + config_print_thermal_info = .false. + config_always_compute_fem_grid = .true. + config_print_velocity_cleanup_details = .false. +/ +&subglacial_hydro + config_SGH = .true. + config_ocean_connection_N = .false. + config_SGH_adaptive_timestep_fraction = 1.0 + config_SGH_max_adaptive_timestep = 3.15e9 + config_SGH_tangent_slope_calculation = 'from_normal_slope' + config_SGH_pressure_calc = 'cavity' + config_SGH_alpha = 1.25 + config_SGH_beta = 1.5 + config_SGH_conduc_coeff = 0.0005 + config_SGH_conduc_coeff_drowned = 0.0 + config_SGH_till_drainage = 0.0 + config_SGH_till_max = 0.0 + config_SGH_advection = 'fo' + config_SGH_bed_roughness = 0.5 + config_SGH_bed_roughness_max = 0.1 + config_SGH_creep_coefficient = 0.04 + config_SGH_englacial_porosity = 0.05 + config_SGH_chnl_active = .true. + config_SGH_chnl_include_DCFL = .false. + config_SGH_chnl_alpha = 1.25 + config_SGH_chnl_beta = 1.5 + config_SGH_chnl_conduc_coeff = 0.05 + config_SGH_chnl_creep_coefficient = 0.04 + config_SGH_incipient_channel_width = 2.0 + config_SGH_chnl_area_shutoff = 500.0 + config_SGH_include_pressure_melt = .false. + config_SGH_shmip_forcing = 'none' + config_SGH_basal_melt = 'basal_heat' + config_SGH_allow_terrestrial_outflow = .true. + config_SGH_use_iceThicknessHydro = .false. + config_SGH_flowA_source='constant' + config_SGH_flowA_value=3.1709792e-24 +/ +&AM_globalStats + config_AM_globalStats_enable = .true. + config_AM_globalStats_compute_interval = 'output_interval' + config_AM_globalStats_stream_name = 'globalStatsOutput' + config_AM_globalStats_compute_on_startup = .true. + config_AM_globalStats_write_on_startup = .true. 
+/ +&AM_regionalStats + config_AM_regionalStats_enable = .false. + config_AM_regionalStats_compute_interval = 'output_interval' + config_AM_regionalStats_stream_name = 'regionalStatsOutput' + config_AM_regionalStats_compute_on_startup = .true. + config_AM_regionalStats_write_on_startup = .true. +/ diff --git a/compass/landice/tests/ensemble_generator/ensemble_templates/sgh_ensemble/spinup/streams.landice b/compass/landice/tests/ensemble_generator/ensemble_templates/sgh_ensemble/spinup/streams.landice new file mode 100644 index 0000000000..6495f0f450 --- /dev/null +++ b/compass/landice/tests/ensemble_generator/ensemble_templates/sgh_ensemble/spinup/streams.landice @@ -0,0 +1,82 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/compass/landice/tests/ensemble_generator/plot_ensemble.py b/compass/landice/tests/ensemble_generator/plot_ensemble.py index 14cdbaade4..ba65e5dc5e 100644 --- a/compass/landice/tests/ensemble_generator/plot_ensemble.py +++ b/compass/landice/tests/ensemble_generator/plot_ensemble.py @@ -133,12 +133,13 @@ sys.exit("A usable cfg file for the ensemble was not found. " "Please correct the configuration or disable this check.") ens_cfg.read(ens_cfg_file) -ens_info = ens_cfg['ensemble'] +ens_info = ens_cfg['ensemble_generator'] if 'basin' in ens_info: basin = ens_info['basin'] if basin == 'None': basin = None -input_file_path = ens_info['input_file_path'] +spinup_info = ens_cfg['spinup_ensemble'] +input_file_path = spinup_info['input_file_path'] if basin is None: print("No basin found. 
Not using observational data.") else: diff --git a/compass/landice/tests/ensemble_generator/sgh_ensemble_analysis/README.md b/compass/landice/tests/ensemble_generator/sgh_ensemble_analysis/README.md new file mode 100644 index 0000000000..cdb76553f8 --- /dev/null +++ b/compass/landice/tests/ensemble_generator/sgh_ensemble_analysis/README.md @@ -0,0 +1,351 @@ +# SGH Ensemble Analysis + +Analyzes completed ensemble runs to evaluate steady-state behavior and data compatibility. + +## Overview + +This test case processes a completed ensemble (spinup or restart) and produces: + +- **Steady-state analysis**: Determines if each run reached equilibrium using water mass balance +- **Data compatibility**: Validates against observational constraints (if specularity data available) +- **Results summary**: JSON file categorizing all runs by completion status + +## Quick Start + +After your spinup ensemble completes: + +```bash +# 1. Create analysis config +cat > analysis.cfg << 'EOF' +[ensemble_generator] +ensemble_template = sgh_ensemble + +[analysis_ensemble] +ensemble_work_dir = /path/to/spinup_ensemble +config_file = /path/to/ensemble_generator.cfg +EOF + +# 2. Run analysis +compass setup -t landice/ensemble_generator/sgh_analysis \ + -w /work/analysis \ + -f analysis.cfg +compass run -w /work/analysis + +# 3. View results +cat /work/analysis/analysis_summary.json | python -m json.tool +``` + +## Configuration + +All parameters go in a single `analysis_ensemble.cfg` file: + +### Required Settings + +```ini +[analysis_ensemble] +ensemble_work_dir = /path/to/completed/ensemble +config_file = /path/to/ensemble_generator.cfg +``` + +The `ensemble_work_dir` should be the actual ensemble work directory (e.g., `/work/spinup_ensemble` or `/work/restart_ensemble`). 
+ +### Optional: Steady-State Parameters + +```ini +[steady_state] +# Rolling window size (years) for mass balance check +window_years = 10.0 + +# Relative imbalance threshold +# Steady state when: |input - output| / (|input| + |output|) < threshold +# Default 0.05 = 5% relative error +imbalance_threshold = 0.05 + +# Generate plots (subglacial_water_mass_balance.png, etc.) +plot_results = False +``` + +**Tuning `window_years`**: +- Larger values (20-30 yrs): Smoother, less sensitive to noise +- Smaller values (5-10 yrs): More responsive to recent changes +- Default (10 yrs): Good for most simulations + +**Tuning `imbalance_threshold`**: +- Stricter (0.01): 1% relative imbalance → harder to achieve steady state +- Default (0.05): 5% relative imbalance → reasonable for geophysical models +- Looser (0.10): 10% relative imbalance → easier to achieve + +### Optional: Validation Parameters + +```ini +[validation] +# Balanced accuracy threshold for data compatibility +# Both east and west AIS must exceed this +# Range [0.0, 1.0], typical 0.65 +balanced_accuracy_threshold = 0.65 + +# Path to specularity content TIFF file +# If None or file doesn't exist, validation is skipped +spec_tiff_file = /path/to/specularity_content.tif + +# Generate validation plots +plot_validation = False +``` + +### Optional: Output Settings + +```ini +[output] +# Directory name for results (relative to work_dir) +results_directory = analysis_results +``` + +## Understanding Results + +### Output Files + +**`analysis_summary.json`** (main output): +```json +{ + "timestamp": "2026-03-19T14:30:00", + "ensemble_dir": "/work/spinup_ensemble", + "total_runs": 50, + "completed_runs": 25, + "incomplete_runs": 25, + "steady_state_runs": [0, 2, 5, 7, ...], + "data_compatible_runs": [0, 5, 8, ...], + "both_criteria_runs": [0, 5, ...], + "restart_needed_runs": [1, 3, 4, ...], + "analysis_parameters": { + "steady_state": {...}, + "validation": {...} + }, + "individual_results": { + "0": { + "steady_state": 
{...}, + "validation": {...} + }, + ... + } +} +``` + +### Console Output Example + +``` +ENSEMBLE ANALYSIS SUMMARY +Total runs: 50 + Completed: 25 + Incomplete: 25 + +Steady-state runs: 15/25 (60.0%) + [0, 2, 5, 7, 10, 12, 15, 17, 20, 22, 25, 27, 30, 32, 35] + +Data-compatible runs: 12/25 (48.0%) + [0, 5, 8, 12, 15, 20, 22, 25, 30, 35, 40, 42] + +Both criteria met: 8/25 (32.0%) + [0, 5, 12, 15, 20, 25, 30, 35] + +Runs needing restart: 10 + [1, 3, 4, 6, 9, 11, 13, 14, 18, 19] +``` + +## Analysis Criteria Explained + +### Steady-State Detection + +A run is at **steady state** when the water mass balance equation is approximately satisfied over a rolling time window: + +``` +Input (melt + channel_melt) ≈ Output (all discharge fluxes) +``` + +More precisely: +``` +relative_imbalance = |Input - Output| / (|Input| + |Output|) < threshold +``` + +The analysis checks this condition over the final portion of the simulation. If satisfied, the run is at steady state. + +**Why this matters**: +- Steady-state runs have reached equilibrium and won't improve much with more time +- Non-steady runs may benefit from restarting +- See `restart/` to schedule restarts for non-steady runs + +### Data Compatibility Validation + +If a specularity content TIFF file is provided, each run is validated by comparing: + +- **Model prediction**: Simulated subglacial water thickness +- **Observations**: Radar specularity content (proxy for wetness) + +The metric is **balanced accuracy (BA)**: +``` +BA = 0.5 * (true_positive_rate + true_negative_rate) +``` + +Both east and west Antarctic regions must have BA ≥ threshold.
+ +**Why this matters**: +- Not all steady-state runs match observations +- Data-compatible runs have both equilibrium AND observational support +- Runs meeting both criteria are highest confidence + +## Workflow: Analysis → Restart → Re-analyze + +Typical workflow for iterative ensemble refinement: + +```bash +# ============================================================ +# Iteration 1: Initial Spinup +# ============================================================ + +compass setup -t spinup_ensemble -w /work/ens1 -f spinup.cfg +compass run -w /work/ens1/spinup_ensemble +# ... wait for jobs (~hours to days depending on job queue) + +# ============================================================ +# Iteration 1: Analyze Results +# ============================================================ + +cat > /work/analysis1.cfg << 'EOF' +[ensemble_generator] +ensemble_template = sgh_ensemble + +[analysis_ensemble] +ensemble_work_dir = /work/ens1/spinup_ensemble +config_file = /work/spinup.cfg +EOF + +compass setup -t landice/ensemble_generator/sgh_analysis \ + -w /work/analysis1 -f /work/analysis1.cfg +compass run -w /work/analysis1 + +# Results: 50 runs, 25 completed +# 15 steady-state, 10 need restart + +# ============================================================ +# Iteration 2: Schedule & Run Restarts +# ============================================================ + +python3 << 'PYTHON' +from compass.landice.tests.ensemble_generator.sgh_restart import schedule_restarts + +config, runs = schedule_restarts( + '/work/analysis1/analysis_summary.json', + '/work/restart_ens' +) +PYTHON + +compass setup -t landice/ensemble_generator/sgh_restart \ + -w /work/restart -f /work/restart_ens/restart_ensemble.cfg +compass run -w /work/restart +# ... 
wait for restart jobs + +# ============================================================ +# Iteration 2: Re-analyze Restarts +# ============================================================ + +cat > /work/analysis2.cfg << 'EOF' +[ensemble_generator] +ensemble_template = sgh_ensemble + +[analysis_ensemble] +ensemble_work_dir = /work/restart/sgh_restart_ensemble +config_file = /work/restart_ens/restart_ensemble.cfg +EOF + +compass setup -t landice/ensemble_generator/sgh_analysis \ + -w /work/analysis2 -f /work/analysis2.cfg +compass run -w /work/analysis2 + +# Results: 10 restart jobs, 8 now steady-state, 2 need another restart + +# ============================================================ +# Final: Aggregate All Results +# ============================================================ + +python3 << 'PYTHON' +from compass.landice.tests.ensemble_generator.sgh_analysis import ResultsAggregator + +agg = ResultsAggregator('/work') +results = agg.aggregate() +agg.print_summary(results) +agg.save_aggregated(results) + +print(f"Final: {len(results['final_steady_state_runs'])}/50 at steady state") +print(f" {len(results['final_data_compatible_runs'])}/50 data-compatible") +PYTHON +``` + +## Advanced Usage + +### Custom Analysis Parameters + +Tighten steady-state criteria: + +```ini +[steady_state] +window_years = 20.0 # Longer window, more stable +imbalance_threshold = 0.01 # Stricter (1% vs 5%) +``` + +### Generating Plots + +Enable plot generation: + +```ini +[steady_state] +plot_results = True + +[validation] +plot_validation = True +``` + +This creates: +- `subglacial_water_mass_balance.png` +- `water_mass_balance_residual.png` +- `subglacial_hydrology_timeseries.png` +- `spec_subglacialHydro_validation.png` + +These help visualize model behavior and validation. 
+ +### Analyzing Specific Ensembles + +Point to any completed ensemble work directory: + +```ini +[analysis_ensemble] +# Can be spinup, restart, or branch +ensemble_work_dir = /work/any_completed_ensemble +config_file = /work/any_ensemble_generator.cfg +``` + +## Troubleshooting + +### "No analysis results" + +- The ensemble hasn't completed yet (check job queue) +- The ensemble_work_dir is wrong (should point to actual work dir with run000/, run001/, etc.) + +### All runs marked "incomplete" + +- Runs may have failed (check log files in run directories) +- Check that `output/globalStats.nc` exists for each run + +### No validation results + +- Specularity TIFF file not found or not specified +- Set `spec_tiff_file = None` to skip validation + +### Plots not generated + +- Enable with `plot_results = True` or `plot_validation = True` +- Check that matplotlib and cmocean are installed + +## See Also + +- `restart/`: Schedule and run restarts for non-steady runs +- `spinup/`: Initial ensemble setup and execution +- `branch/`: Branch from spinup for projection scenarios diff --git a/compass/landice/tests/ensemble_generator/sgh_ensemble_analysis/__init__.py b/compass/landice/tests/ensemble_generator/sgh_ensemble_analysis/__init__.py new file mode 100644 index 0000000000..03a13af95e --- /dev/null +++ b/compass/landice/tests/ensemble_generator/sgh_ensemble_analysis/__init__.py @@ -0,0 +1,21 @@ +""" +SGH Ensemble Analysis Package. 
+ +Provides analysis of completed ensemble runs as a proper compass test case: +- Steady-state detection from water mass balance +- Data compatibility validation against observations +- Results aggregation across ensemble iterations + +Usage: + compass setup -t landice/ensemble_generator/sgh_ensemble_analysis \\ + -w /work/analysis -f analysis_ensemble.cfg + compass run -w /work/analysis +""" + +from .results_aggregator import ResultsAggregator +from .test_case import AnalysisEnsemble + +__all__ = [ + 'AnalysisEnsemble', + 'ResultsAggregator', +] diff --git a/compass/landice/tests/ensemble_generator/sgh_ensemble_analysis/analysis_ensemble.cfg b/compass/landice/tests/ensemble_generator/sgh_ensemble_analysis/analysis_ensemble.cfg new file mode 100644 index 0000000000..57be4f104b --- /dev/null +++ b/compass/landice/tests/ensemble_generator/sgh_ensemble_analysis/analysis_ensemble.cfg @@ -0,0 +1,55 @@ +# Analysis ensemble configuration +# +# Analyzes a completed ensemble run (spinup or restart). 
+# +# Usage: +# compass setup -t analysis_ensemble -w /work/analysis -f analysis_ensemble.cfg +# compass run -w /work/analysis + +[ensemble_generator] +ensemble_template = sgh_ensemble + +[analysis_ensemble] + +# REQUIRED: Path to the ensemble work directory to analyze +# This should be a completed spinup_ensemble or restart_ensemble directory +ensemble_work_dir = /pscratch/sd/a/ahager/AIS_GHF_basalMelt_testing/sgh_stage1_32member_ensemble/landice/ensemble_generator/spinup_ensemble/ + +[steady_state] + +# Rolling window size in years for steady-state analysis +# Larger windows: smoother, less sensitive to short-term fluctuations +# Smaller windows: more responsive to recent changes +# Default: 10.0 years +window_years = 10.0 + +# Relative imbalance threshold for steady state +# Steady state when: |input - output| / (|input| + |output|) < threshold +# Range: [0.0, 1.0] +# Default: 0.05 = 5% relative imbalance +# Stricter: 0.01 = 1% +# Looser: 0.10 = 10% +imbalance_threshold = 0.05 + +# Whether to generate plots of analysis results +# Plots saved as: +# - subglacial_water_mass_balance.png +# - water_mass_balance_residual.png +# - subglacial_hydrology_timeseries.png +plot_results = True + +[validation] + +# Balanced accuracy threshold for data compatibility +# Both east and west Antarctic regions must exceed this threshold +# Range: [0.0, 1.0], typical: 0.65 +balanced_accuracy_threshold = 0.65 + +# Path to specularity content TIFF file +# Set to None or comment out if not available +# If not provided here, will check ensemble_generator.cfg as fallback +spec_tiff_file = /global/cfs/cdirs/fanssie/standard_datasets/AIS_datasets/antarctica_radar_specularity_content_young2016.tiff + +# Whether to generate validation plots +# Plot saved as: spec_subglacialHydro_validation.png +plot_validation = True diff --git a/compass/landice/tests/ensemble_generator/sgh_ensemble_analysis/analysis_step.py b/compass/landice/tests/ensemble_generator/sgh_ensemble_analysis/analysis_step.py 
new file mode 100644 index 0000000000..af109755cc --- /dev/null +++ b/compass/landice/tests/ensemble_generator/sgh_ensemble_analysis/analysis_step.py @@ -0,0 +1,424 @@ +""" +Analysis step that performs the actual ensemble analysis. +""" + +import configparser +import glob +import json +import os +import subprocess +import tempfile +from datetime import datetime + +from compass.step import Step + + +class AnalysisStep(Step): + """ + A step that analyzes a completed ensemble. + """ + + def __init__(self, test_case, ensemble_dir): + """ + Create an analysis step. + + Parameters + ---------- + test_case : compass.TestCase + The test case this step belongs to + + ensemble_dir : str + Directory containing completed ensemble runs + + config_file : str + Path to configuration file for analysis + """ + self.ensemble_dir = ensemble_dir + + super().__init__(test_case=test_case, name='analyze_ensemble') + + def setup(self): + """Setup phase - prepare for analysis.""" + # Get path to analysis scripts in this package + self.script_dir = os.path.dirname(os.path.abspath(__file__)) + + def run(self): + """Run the analysis.""" + logger = self.logger + + logger.info(f"Analyzing ensemble: {self.ensemble_dir}") + + if self.config_file is None: + raise FileNotFoundError( + f"Could not find ensemble config file for " + f"{self.ensemble_dir}" + ) + + logger.info(f"Using config file: {self.config_file}") + + # Load configurations + config_dict = self._load_config(self.config_file) + + # Get analysis configs with defaults + analysis_config = { + 'steady_state': self._merge_config( + config_dict.get('steady_state', {}), + self._get_default_steady_state_config() + ), + 'validation': self._merge_config( + config_dict.get('validation', {}), + self._get_default_validation_config() + ), + } + + logger.info(f"Loaded steady_state config: \ + {analysis_config['steady_state']}") + logger.info(f"Loaded validation config: \ + {analysis_config['validation']}") + + # Initialize results + summary = { + 
'timestamp': datetime.now().isoformat(), + 'ensemble_dir': self.ensemble_dir, + 'total_runs': 0, + 'completed_runs': 0, + 'incomplete_runs': 0, + 'steady_state_runs': [], + 'not_steady_state_runs': [], + 'data_compatible_runs': [], + 'not_data_compatible_runs': [], + 'both_criteria_runs': [], + 'restart_needed_runs': [], + 'individual_results': {}, + 'analysis_parameters': { + 'steady_state': analysis_config.get( + 'steady_state', {}), + 'validation': analysis_config.get( + 'validation', {}), + } + } + + # Get all runs with output + all_runs = self._get_all_runs() + summary['total_runs'] = len(all_runs) + + logger.info(f"Found {len(all_runs)} total runs") + logger.info("Checking for output files...") + + runs_with_output = [] + runs_without_output = [] + + for run_dir in all_runs: + run_name = os.path.basename(run_dir) + run_num = int(run_name.replace('run', '')) + + if self._is_run_complete(run_dir): + runs_with_output.append((run_num, run_dir, run_name)) + else: + runs_without_output.append(run_num) + + summary['completed_runs'] = len(runs_with_output) + summary['incomplete_runs'] = len(runs_without_output) + + logger.info(f" {len(runs_with_output)} with output, " + f"{len(runs_without_output)} without output") + logger.info("Analyzing runs with output...") + + # Analyze each run with output + for run_num, run_dir, run_name in runs_with_output: + results = self._run_analysis_on_run( + run_dir, run_name, analysis_config) + summary['individual_results'][run_num] = results + + # Categorize based on steady state + ss_info = results.get('steady_state') + val_info = results.get('validation') + + if ss_info and ss_info.get('is_steady_state'): + summary['steady_state_runs'].append(run_num) + # If steady state, also check validation + if val_info and val_info.get('is_data_compatible'): + summary['data_compatible_runs'].append(run_num) + summary['both_criteria_runs'].append(run_num) + elif (val_info and 'error' not in val_info and + val_info.get('status') != 
'no_spec_file'): + summary['not_data_compatible_runs'].append(run_num) + else: + # Not steady state - needs restart + summary['not_steady_state_runs'].append(run_num) + summary['restart_needed_runs'].append(run_num) + + # Print summary + self._print_summary(summary, logger) + + # Save summary to work directory + summary_file = os.path.join(self.work_dir, + 'analysis_summary.json') + with open(summary_file, 'w') as f: + json.dump(summary, f, indent=2) + + logger.info(f"Summary saved to {summary_file}") + + def _get_all_runs(self): + """Get sorted list of run directories.""" + run_dirs = sorted(glob.glob( + os.path.join(self.ensemble_dir, 'run*'))) + return [d for d in run_dirs if os.path.isdir(d)] + + def _is_run_complete(self, run_dir): + """Check if a run has completed successfully.""" + output_file = os.path.join(run_dir, 'output', + 'globalStats.nc') + + return os.path.exists(output_file) + + def _run_analysis_on_run(self, run_dir, run_name, + analysis_config): + """Run analysis on a completed run.""" + self.logger.info(f" Analyzing {run_name}...") + + output_file = os.path.join(run_dir, 'output', + 'globalStats.nc') + results = { + 'run_name': run_name, + 'output_exists': os.path.exists(output_file), + 'analysis_timestamp': datetime.now().isoformat(), + 'steady_state': None, + 'validation': None, + 'analysis_errors': [] + } + + # Steady state analysis + try: + ss_config = analysis_config.get('steady_state', {}) + window_years = ss_config.get('window_years', 10.0) + imbalance_threshold = ss_config.get( + 'imbalance_threshold', 0.05) + plot_results = ss_config.get('plot_results', False) + + ss_results = self._run_steadystate_analysis( + output_file, window_years, imbalance_threshold, + plot_results) + results['steady_state'] = ss_results + + except Exception as e: + results['analysis_errors'].append( + f"Steady-state analysis failed: {e}") + self.logger.warning(f" {e}") + + # Validation analysis + try: + val_config = analysis_config.get('validation', {}) + 
spec_tiff = val_config.get('spec_tiff_file', None) + ba_threshold = val_config.get( + 'balanced_accuracy_threshold', 0.65) + plot_validation = val_config.get( + 'plot_validation', False) + + if spec_tiff and os.path.exists(spec_tiff): + output_hist = os.path.join(run_dir, 'output', + 'history.nc') + if os.path.exists(output_hist): + val_results = self._run_validation_analysis( + output_hist, spec_tiff, ba_threshold, + plot_validation) + results['validation'] = val_results + else: + results['validation'] = { + 'status': 'no_history_file'} + else: + results['validation'] = {'status': 'no_spec_file', + 'spec_tiff': spec_tiff} + + except Exception as e: + results['analysis_errors'].append( + f"Validation analysis failed: {e}") + self.logger.warning(f" {e}") + + return results + + def _run_steadystate_analysis(self, output_file, window_years, + imbalance_threshold, plot=False): + """Run steady-state analysis via subprocess.""" + script = os.path.join( + self.script_dir, + 'analyze_subglacial_water_mass_balance.py') + + with tempfile.NamedTemporaryFile(mode='w', suffix='.json', + delete=False) as f: + temp_json = f.name + + try: + cmd = [ + 'python', script, + '-f', output_file, + '--window_years', str(window_years), + '--imbalance_threshold', str(imbalance_threshold), + '--output_json', temp_json, + ] + + if plot: + cmd.append('--plot') + + result = subprocess.run(cmd, capture_output=True, + text=True, timeout=300) + + if result.returncode == 0 and os.path.exists(temp_json): + with open(temp_json, 'r') as f: + return json.load(f) + else: + raise RuntimeError( + f"Subprocess analysis failed: " + f"{result.stderr}") + + finally: + if os.path.exists(temp_json): + os.unlink(temp_json) + + def _run_validation_analysis(self, output_file, spec_tiff, + ba_threshold, plot=False): + """Run validation analysis via subprocess.""" + script = os.path.join( + self.script_dir, 'validate_mali_with_spec.py') + + with tempfile.NamedTemporaryFile(mode='w', suffix='.json', + delete=False) 
as f: + temp_json = f.name + + try: + cmd = [ + 'python', script, + '--maliFile', output_file, + '--specTiff', spec_tiff, + '--ba_threshold', str(ba_threshold), + '--output_json', temp_json + ] + + if plot: + cmd.append('--plot') + + result = subprocess.run(cmd, capture_output=True, + text=True, timeout=300) + + if result.returncode == 0 and os.path.exists(temp_json): + with open(temp_json, 'r') as f: + return json.load(f) + else: + return {'status': 'failed', 'error': result.stderr} + + finally: + if os.path.exists(temp_json): + os.unlink(temp_json) + + @staticmethod + def _load_config(config_file): + """Load configuration file.""" + config = configparser.ConfigParser() + config.read(config_file) + + config_dict = {} + for section in config.sections(): + config_dict[section] = {} + for key, value in config.items(section): + try: + config_dict[section][key] = float(value) + except ValueError: + try: + config_dict[section][key] = ( + config.getboolean(section, key)) + except ValueError: + if value.lower() == 'none': + config_dict[section][key] = None + else: + config_dict[section][key] = value + + return config_dict + + @staticmethod + def _merge_config(user_config, defaults): + """ + Merge user config with defaults. + User config values take precedence. 
+ + Parameters + ---------- + user_config : dict + User-provided configuration + defaults : dict + Default configuration values + + Returns + ------- + dict + Merged configuration + """ + merged = defaults.copy() + merged.update(user_config) + return merged + + @staticmethod + def _get_default_steady_state_config(): + """Get default steady-state configuration.""" + return { + 'window_years': 10.0, + 'imbalance_threshold': 0.05, + 'plot_results': False, + } + + @staticmethod + def _get_default_validation_config(): + """Get default validation configuration.""" + return { + 'balanced_accuracy_threshold': 0.65, + 'spec_tiff_file': None, + 'plot_validation': False, + } + + @staticmethod + def _print_summary(summary, logger): + """Print analysis summary.""" + logger.info("") + logger.info("=" * 70) + logger.info("ENSEMBLE ANALYSIS SUMMARY") + logger.info("=" * 70) + logger.info(f"Total runs: {summary['total_runs']}") + logger.info(f" Completed: {summary['completed_runs']}") + logger.info(f" Incomplete: {summary['incomplete_runs']}") + logger.info("") + + if summary['completed_runs'] > 0: + tot_runs = summary['completed_runs'] + ss_runs = len(summary['steady_state_runs']) + dc_runs = len(summary['data_compatible_runs']) + both_runs = len(summary['both_criteria_runs']) + + pct_ss = 100.0 * ss_runs / tot_runs + pct_dc = 100.0 * dc_runs / tot_runs + pct_both = 100.0 * both_runs / tot_runs + + logger.info( + f"Steady-state runs: {ss_runs}/{tot_runs} " + f"({pct_ss:.1f}%)") + if summary['steady_state_runs']: + logger.info(f" {summary['steady_state_runs']}") + logger.info("") + logger.info( + f"Data-compatible runs: {dc_runs}/{tot_runs} " + f"({pct_dc:.1f}%)") + if summary['data_compatible_runs']: + logger.info(f" {summary['data_compatible_runs']}") + logger.info("") + logger.info( + f"Both criteria met: {both_runs}/{tot_runs} " + f"({pct_both:.1f}%)") + if summary['both_criteria_runs']: + logger.info(f" {summary['both_criteria_runs']}") + logger.info("") + logger.info( + 
f"Runs needing restart: " + f"{len(summary['restart_needed_runs'])}") + if summary['restart_needed_runs']: + logger.info(f" {summary['restart_needed_runs']}") + + logger.info("=" * 70) + logger.info("") diff --git a/compass/landice/tests/ensemble_generator/sgh_ensemble_analysis/analyze_subglacial_water_mass_balance.py b/compass/landice/tests/ensemble_generator/sgh_ensemble_analysis/analyze_subglacial_water_mass_balance.py new file mode 100644 index 0000000000..89ff0324a1 --- /dev/null +++ b/compass/landice/tests/ensemble_generator/sgh_ensemble_analysis/analyze_subglacial_water_mass_balance.py @@ -0,0 +1,514 @@ +#!/usr/bin/env python +''' +Analyze subglacial hydrology time-series from a landice globalStats file +and determine if the simulation has reached steady state. + +Steady state is defined as when the water mass balance equation is +approximately satisfied over a 10-year rolling average: + +melt + chnlMelt ≈ distFluxMarine + chnlFluxMarine + distFluxLand + chnlFluxLand +''' + +from __future__ import ( + absolute_import, + division, + print_function, + unicode_literals, +) + +import argparse +import json +import sys + +import netCDF4 as nc +import numpy as np +from matplotlib import pyplot as plt + +rhow = 1000.0 +secyr = 3600.0 * 24.0 * 365.0 + +parser = argparse.ArgumentParser(description=__doc__) +parser.add_argument( + "-f", + dest="filename", + help="input filename", + default="globalStats.nc", + metavar="FILENAME") +parser.add_argument( + "-u", + dest="units", + help="units for mass: kg, Gt", + default="Gt", + metavar="UNITS") +parser.add_argument( + "--window_years", + dest="window_years", + type=float, + default=10.0, + help="Rolling window size in years for steady-state check (default: 10.0)") +parser.add_argument( + "--imbalance_threshold", + dest="imbalance_threshold", + type=float, + default=0.05, + help=["Relative imbalance threshold for \ + steady state (default: 0.05 = 5% relative error)"]) +parser.add_argument( + "--output_json", + 
dest="output_json", + help="JSON file to save steady-state analysis results", + default="steadystate_results.json") +parser.add_argument("--plot", dest="plot", action='store_true', default=False, + help="Generate plots (default: False)") +args = parser.parse_args() + +# Scaling assuming variables are in kg +if args.units == "kg": + massUnit = "kg" + fluxUnit = "kg yr$^{-1}$" + unitScaling = 1.0 +elif args.units == "Gt": + massUnit = "Gt" + fluxUnit = "Gt yr$^{-1}$" + unitScaling = 1.0e-12 +else: + sys.exit("Unknown mass unit") + +print("Using mass units of: ", massUnit) + +dataset = nc.Dataset(args.filename) + +# Read variables +# convert everything to kg and years before unit conversion +totalSubglacialWaterMass = \ + dataset.variables['totalSubglacialWaterVolume'][:] * rhow * unitScaling +melt = dataset.variables['totalBasalMeltInput'][:] * unitScaling * secyr +distFluxMarine = dataset.variables['totalDistWaterFluxMarineMargin'][:] * \ + unitScaling * secyr +chnlFluxMarine = dataset.variables['totalChnlWaterFluxMarineMargin'][:] * \ + unitScaling * secyr +distFluxLand = dataset.variables['totalDistWaterFluxTerrestrialMargin'][:] * \ + unitScaling * secyr +chnlFluxLand = dataset.variables['totalChnlWaterFluxTerrestrialMargin'][:] * \ + unitScaling * secyr +chnlMelt = dataset.variables['totalChannelMelt'][:] * unitScaling * secyr +flotFrac = dataset.variables['avgFlotationFraction'][:] +lakeArea = dataset.variables['totalSubglacialLakeArea'][:] / 1000.0**2 # km^2 +lakeMass = dataset.variables['totalSubglacialLakeVolume'][:] * \ + rhow * unitScaling +grdArea = dataset.variables['groundedIceArea'][:] / 1000.0**2 # km^2 + +deltat = dataset.variables['deltat'][:] / secyr # in years +yr = dataset.variables['daysSinceStart'][:] / 365.0 + +subglacialWaterMassRate = np.zeros((len(melt),)) + +for i in range(len(totalSubglacialWaterMass) - 1): + subglacialWaterMassRate[i] = ((totalSubglacialWaterMass[i + 1] - + totalSubglacialWaterMass[i]) / deltat[i]) + +# 
============================================================================ +# STEADY-STATE ANALYSIS +# ============================================================================ + + +def calculate_rolling_average(data, yr_array, window_years): + """ + Calculate rolling average over a specified time window. + + Parameters + ---------- + data : array + Data to average + yr_array : array + Time array in years + window_years : float + Window size in years + + Returns + ------- + rolling_avg : array + Rolling average values + yr_windows : array + Time values at center of each window + """ + rolling_avg = np.full_like(data, np.nan) + yr_windows = np.full_like(data, np.nan) + + for i in range(len(data)): + # Find points within window_years of current point + mask = np.abs(yr_array - yr_array[i]) <= window_years / 2.0 + if np.sum(mask) > 0: + rolling_avg[i] = np.mean(data[mask]) + yr_windows[i] = yr_array[i] + + return rolling_avg, yr_windows + + +def check_steady_state( + yr, + melt_in, + chnl_melt_in, + dist_flux_marine_out, + chnl_flux_marine_out, + dist_flux_land_out, + chnl_flux_land_out, + window_years=10.0, + imbalance_threshold=0.05): + """ + Check if simulation has reached steady state based on water mass balance. 
+ + Steady state is defined as when the mass balance equation is approximately + satisfied over a rolling time window: + + Input (melt + chnlMelt) ≈ Output (sum of outfluxes) + + Parameters + ---------- + yr : array + Time array in years + melt_in : array + Basal melt flux + chnl_melt_in : array + Channel melt flux + dist_flux_marine_out : array + Distributed water flux at marine margin + chnl_flux_marine_out : array + Channel water flux at marine margin + dist_flux_land_out : array + Distributed water flux at terrestrial margin + chnl_flux_land_out : array + Channel water flux at terrestrial margin + window_years : float + Rolling window size in years + imbalance_threshold : float + Relative imbalance threshold (e.g., 0.05 = 5%) + + Returns + ------- + is_steady : bool + Whether simulation appears to be at steady state + steady_state_metrics : dict + Dictionary containing steady-state metrics and analysis + analysis_data : dict + Dictionary with time series data for plotting + """ + + # Calculate totals + total_input = melt_in + chnl_melt_in + total_output = (dist_flux_marine_out + chnl_flux_marine_out + + dist_flux_land_out + chnl_flux_land_out) + + # Calculate rolling averages + input_rolling, _ = calculate_rolling_average(total_input, yr, window_years) + output_rolling, yr_rolling = calculate_rolling_average( + total_output, yr, window_years) + + # Calculate mass balance residual + residual = total_input - total_output + residual_rolling, _ = calculate_rolling_average(residual, yr, window_years) + + # Calculate relative imbalance: |input - output| / |input + output| + denominator = np.abs(input_rolling) + np.abs(output_rolling) + relative_imbalance = np.full_like(denominator, np.nan) + valid = denominator > 0 + relative_imbalance[valid] = (np.abs(residual_rolling[valid]) / + denominator[valid]) + + # Determine steady state: when relative imbalance is below threshold + # for the final portion of the simulation + # No steady-state if run doesn't last 1.5x window 
length + if yr[-1] < 1.5 * window_years: + is_steady = False + final_imbalance = np.nan + elif np.sum(np.isfinite(relative_imbalance)) > 0: + final_portion = \ + relative_imbalance[-max(10, len(relative_imbalance) // 10):] + is_steady = np.nanmean(final_portion) < imbalance_threshold + final_imbalance = np.nanmean(final_portion) + else: + is_steady = False + final_imbalance = np.nan + + # Find when steady state first achieved (if at all) + steady_state_idx = None + if np.sum(relative_imbalance < imbalance_threshold) > 0: + steady_state_idx = np.where( + relative_imbalance < imbalance_threshold)[0][0] + time_to_steady = yr[steady_state_idx] + else: + time_to_steady = np.nan + + metrics = { + 'is_steady_state': is_steady, + 'window_years': float(window_years), + 'imbalance_threshold': float(imbalance_threshold), + 'final_year': float(yr[-1]), + 'time_to_steady_state_years': float(time_to_steady) + if not np.isnan(time_to_steady) else None, + 'final_relative_imbalance': float(final_imbalance), + 'final_input_flux': float(input_rolling[-1]) + if np.isfinite(input_rolling[-1]) else None, + 'final_output_flux': float(output_rolling[-1]) + if np.isfinite(output_rolling[-1]) else None, + 'final_residual': float(residual_rolling[-1]) + if np.isfinite(residual_rolling[-1]) else None, + } + + analysis_data = { + 'yr': yr, + 'input': total_input, + 'output': total_output, + 'residual': residual, + 'input_rolling': input_rolling, + 'output_rolling': output_rolling, + 'residual_rolling': residual_rolling, + 'relative_imbalance': relative_imbalance, + } + + return is_steady, metrics, analysis_data + + +# Perform steady-state check +is_steady, steady_metrics, analysis_data = check_steady_state( + yr, melt, chnlMelt, distFluxMarine, chnlFluxMarine, + distFluxLand, chnlFluxLand, + window_years=args.window_years, + imbalance_threshold=args.imbalance_threshold +) + +print("\n" + "=" * 60) +print("STEADY-STATE ANALYSIS") +print("=" * 60) +print(f"Window size: {args.window_years} 
years") +print(f"Imbalance threshold: {args.imbalance_threshold * 100:.1f}%") +print(f"Final simulation year: {steady_metrics['final_year']:.1f}") +print( + f"Final relative imbalance: { + steady_metrics['final_relative_imbalance'] * 100:.2f}%") +if steady_metrics['time_to_steady_state_years'] is not None: + print( + f"Time to reach steady state: { + steady_metrics['time_to_steady_state_years']:.1f} years") +else: + print("Time to reach steady state: NOT REACHED") +print(f"Is at steady state: {'YES' if is_steady else 'NO'}") +print("=" * 60 + "\n") + +# ============================================================================ +# PLOTTING +# ============================================================================ + +if args.plot: + # Plot 1: Mass balance time-series + fig, ax = plt.subplots(1, 1, layout='tight', figsize=(10, 6)) + + # Input + plt.plot(yr, melt, 'r:', label='basal melt', linewidth=1.5) + plt.plot(yr, chnlMelt, 'r--', label='channel melt', linewidth=1.5) + total_melt = melt + chnlMelt + plt.plot(yr, total_melt, 'r-', label='total melt (input)', linewidth=2) + + # Output + plt.plot( + yr, + distFluxMarine, + 'b--', + label='marine sheet outflux', + linewidth=1.5, + alpha=0.7) + plt.plot( + yr, + distFluxLand, + 'b:', + label='land sheet outflux', + linewidth=1.5, + alpha=0.7) + plt.plot( + yr, + chnlFluxMarine, + 'c--', + label='marine chnl outflux', + linewidth=1.5, + alpha=0.7) + plt.plot( + yr, + chnlFluxLand, + 'c:', + label='land chnl outflux', + linewidth=1.5, + alpha=0.7) + total_outflux = (distFluxMarine + distFluxLand + + chnlFluxMarine + chnlFluxLand) + + plt.plot(yr, total_outflux, 'b-', lw=2.5, label='total outflux (output)') + + plt.plot(yr[1:-1], subglacialWaterMassRate[1:-1], + 'g-', label='dV/dt', linewidth=2) + + # Plot rolling averages + plt.plot( + analysis_data['yr'], + analysis_data['input_rolling'], + 'r-', + alpha=0.4, + linewidth=1, + label=f'input rolling avg ({ + args.window_years} yr)') + plt.plot( + 
analysis_data['yr'], + analysis_data['output_rolling'], + 'b-', + alpha=0.4, + linewidth=1, + label=f'output rolling avg ({ + args.window_years} yr)') + + plt.legend(loc='best', fontsize=9, ncol=2) + plt.xlabel('Year') + plt.ylabel(f'Mass flux ({fluxUnit})') + plt.title('Subglacial Water Mass Balance') + plt.grid(True, alpha=0.3) + plt.tight_layout() + plt.savefig( + "subglacial_water_mass_balance.png", + dpi=300, + bbox_inches="tight") + + # Plot 2: Mass balance residual + fig, axes = plt.subplots( + 2, 1, layout='tight', figsize=( + 10, 8), sharex=True) + + # Absolute residual + axes[0].plot( + yr, + analysis_data['residual'], + 'k-', + linewidth=1.5, + label='Residual (input - output)') + axes[0].plot( + analysis_data['yr'], + analysis_data['residual_rolling'], + 'r-', + linewidth=2, + label=f'Rolling average ({ + args.window_years} yr)') + axes[0].axhline(0, color='gray', linestyle='--', alpha=0.5) + axes[0].fill_between( + yr, + 0, + analysis_data['residual'], + alpha=0.2, + color='gray') + axes[0].set_ylabel(f'Residual ({fluxUnit})') + axes[0].legend(loc='best') + axes[0].grid(True, alpha=0.3) + axes[0].set_title('Water Mass Balance Residual') + + # Relative imbalance + axes[1].plot( + analysis_data['yr'], + analysis_data['relative_imbalance'] * + 100, + 'k-', + linewidth=1.5) + axes[1].axhline(args.imbalance_threshold * 100, + color='red', + linestyle='--', + linewidth=2, + label='Steady-state threshold \ + ({args.imbalance_threshold * 100:.1f}%)') + axes[1].fill_between( + analysis_data['yr'], + 0, + analysis_data['relative_imbalance'] * + 100, + alpha=0.2, + color='gray') + axes[1].set_ylabel('Relative Imbalance (%)') + axes[1].set_xlabel('Year') + axes[1].legend(loc='best') + axes[1].grid(True, alpha=0.3) + axes[1].set_title('Relative Mass Balance Imbalance') + + plt.savefig( + "water_mass_balance_residual.png", + dpi=300, + bbox_inches="tight") + + # Plot 3: Other time-series + fig, axes = plt.subplots( + 2, 2, sharex=True, layout='tight', figsize=( + 
10, 7)) + axes = axes.flatten() + + ax = 0 + axes[ax].plot(yr, flotFrac) + axes[ax].set_ylabel('Flotation fraction') + axes[ax].grid(True, alpha=0.3) + + ax += 1 + axes[ax].plot(yr, totalSubglacialWaterMass) + axes[ax].set_ylabel(f'Water mass ({massUnit})') + axes[ax].grid(True, alpha=0.3) + + ax += 1 + axes[ax].plot(yr, lakeArea) + axes[ax].set_ylabel('Lake area (km$^2$)') + axes[ax].grid(True, alpha=0.3) + # second axis for % area + ax2 = axes[ax].twinx() + ax2.plot(yr, lakeArea / grdArea, ':', color="blue") + ax2.set_ylabel("Lake area percentage", color="blue") + ax2.tick_params(axis="y", colors="blue") + + ax += 1 + axes[ax].plot(yr, lakeMass) + axes[ax].set_ylabel(f'Lake mass ({massUnit})') + axes[ax].grid(True, alpha=0.3) + + for ax in axes: + ax.set_xlabel("Year") + + plt.savefig( + "subglacial_hydrology_timeseries.png", + dpi=300, + bbox_inches="tight") + +# ============================================================================ +# SAVE RESULTS +# ============================================================================ + +results = { + 'analysis_type': 'steady_state_water_mass_balance', + 'is_steady_state': is_steady, + 'metrics': steady_metrics, + 'file': args.filename, +} + + +def convert_to_serializable(obj): + """Convert numpy/non-serializable types to JSON-serializable types.""" + if isinstance(obj, dict): + return {k: convert_to_serializable(v) for k, v in obj.items()} + elif isinstance(obj, (list, tuple)): + return [convert_to_serializable(item) for item in obj] + elif isinstance(obj, (np.bool_, bool)): + return bool(obj) + elif isinstance(obj, (np.integer, int)): + return int(obj) + elif isinstance(obj, (np.floating, float)): + return float(obj) + else: + return obj + + +results = convert_to_serializable(results) + +with open(args.output_json, 'w') as f: + json.dump(results, f, indent=2) + +print(f"Results saved to {args.output_json}") + +dataset.close() diff --git a/compass/landice/tests/ensemble_generator/sgh_ensemble_analysis/provenance 
b/compass/landice/tests/ensemble_generator/sgh_ensemble_analysis/provenance new file mode 100644 index 0000000000..b9eacac48a --- /dev/null +++ b/compass/landice/tests/ensemble_generator/sgh_ensemble_analysis/provenance @@ -0,0 +1,1011 @@ +*********************************************************************** +MPAS git version: archive/MALI-Dev/tc_2025_mali_ismip6_ais_2300-4410-g41b26f6179 + +command: /global/cfs/cdirs/fanssie/users/ahager/mambaforge/envs/dev_compass_1.9.0-alpha.2/bin/compass setup -t landice/ensemble_generator/sgh_ensemble_analysis -f analysis_ensemble.cfg + +test cases: + path: landice/ensemble_generator/sgh_ensemble_analysis + name: sgh_ensemble_analysis + MPAS core: landice + test group: ensemble_generator + subdir: sgh_ensemble_analysis + steps: + +conda list: +# packages in environment at /global/cfs/cdirs/fanssie/users/ahager/mambaforge/envs/dev_compass_1.9.0-alpha.2: +# +# Name Version Build Channel +_libgcc_mutex 0.1 conda_forge conda-forge +_openmp_mutex 4.5 2_gnu conda-forge +_python_abi3_support 1.0 hd8ed1ab_2 conda-forge +alabaster 1.0.0 pyhd8ed1ab_1 conda-forge +alsa-lib 1.2.14 hb9d3cd8_0 conda-forge +anyio 4.11.0 pyhcf101f3_0 conda-forge +aom 3.9.1 hac33072_0 conda-forge +argon2-cffi 25.1.0 pyhd8ed1ab_0 conda-forge +argon2-cffi-bindings 25.1.0 py313h07c4f96_2 conda-forge +arpack 3.9.1 nompi_hf03ea27_102 conda-forge +arrow 1.4.0 pyhcf101f3_0 conda-forge +asttokens 3.0.0 pyhd8ed1ab_1 conda-forge +async-lru 2.0.5 pyh29332c3_0 conda-forge +attr 2.5.2 h39aace5_0 conda-forge +attrs 25.4.0 pyh71513ae_0 conda-forge +autopep8 2.3.2 pypi_0 pypi +aws-c-auth 0.9.1 h194c533_5 conda-forge +aws-c-cal 0.9.8 h346e085_0 conda-forge +aws-c-common 0.12.5 hb03c661_1 conda-forge +aws-c-compression 0.3.1 h7e655bb_8 conda-forge +aws-c-event-stream 0.5.6 h1deb5b9_4 conda-forge +aws-c-http 0.10.7 had4b759_1 conda-forge +aws-c-io 0.23.2 hbff472d_2 conda-forge +aws-c-mqtt 0.13.3 h8ba2272_8 conda-forge +aws-c-s3 0.8.6 h493c25d_7 conda-forge +aws-c-sdkutils 
0.2.4 h7e655bb_3 conda-forge +aws-checksums 0.2.7 h7e655bb_4 conda-forge +aws-crt-cpp 0.35.0 h719b17a_2 conda-forge +aws-sdk-cpp 1.11.606 h522d481_6 conda-forge +azure-core-cpp 1.16.1 h3a458e0_0 conda-forge +azure-identity-cpp 1.13.2 h3a5f585_1 conda-forge +azure-storage-blobs-cpp 12.15.0 h2a74896_1 conda-forge +azure-storage-common-cpp 12.11.0 h3d7a050_1 conda-forge +azure-storage-files-datalake-cpp 12.13.0 hf38f1be_1 conda-forge +babel 2.17.0 pyhd8ed1ab_0 conda-forge +beautifulsoup4 4.14.2 pyha770c72_0 conda-forge +bleach 6.2.0 pyh29332c3_4 conda-forge +blosc 1.21.6 he440d0b_1 conda-forge +bokeh 3.8.1 pyhd8ed1ab_0 conda-forge +brotli 1.2.0 h41a2e66_0 conda-forge +brotli-bin 1.2.0 hf2c8021_0 conda-forge +brotli-python 1.2.0 py313h09d1b84_0 conda-forge +brunsli 0.1 hd1e3526_2 conda-forge +bzip2 1.0.8 hda65f42_8 conda-forge +c-ares 1.34.5 hb9d3cd8_0 conda-forge +c-blosc2 2.22.0 h4cfbee9_0 conda-forge +ca-certificates 2026.1.4 hbd8a1cb_0 conda-forge +cached-property 1.5.2 hd8ed1ab_1 conda-forge +cached_property 1.5.2 pyha770c72_1 conda-forge +cairo 1.18.4 h3394656_0 conda-forge +cartopy 0.25.0 py313h08cd8bf_1 conda-forge +cartopy_offlinedata 0.25.0 pyhd8ed1ab_0 conda-forge +certifi 2026.1.4 pyhd8ed1ab_0 conda-forge +cfchecker 4.1.0 pyhd8ed1ab_1 conda-forge +cffi 2.0.0 py313hf46b229_1 conda-forge +cfgv 3.3.1 pyhd8ed1ab_1 conda-forge +cftime 1.6.4 py313h29aa505_2 conda-forge +cfunits 3.3.7 pyhd8ed1ab_1 conda-forge +charls 2.4.2 h59595ed_0 conda-forge +charset-normalizer 3.4.4 pyhd8ed1ab_0 conda-forge +click 8.3.0 pyh707e725_0 conda-forge +cloudpickle 3.1.2 pyhd8ed1ab_0 conda-forge +cmocean 4.0.3 pyhd8ed1ab_1 conda-forge +colorama 0.4.6 pyhd8ed1ab_1 conda-forge +colorspacious 1.1.2 pyhecae5ae_1 conda-forge +comm 0.2.3 pyhe01879c_0 conda-forge +compass 1.9.0a2 pypi_0 pypi +contourpy 1.3.3 py313h7037e92_3 conda-forge +cpython 3.13.9 py313hd8ed1ab_101 conda-forge +cycler 0.12.1 pyhd8ed1ab_1 conda-forge +cytoolz 1.1.0 py313h07c4f96_1 conda-forge +dask 2025.11.0 pyhcf101f3_0 
conda-forge +dask-core 2025.11.0 pyhcf101f3_0 conda-forge +dav1d 1.2.1 hd590300_0 conda-forge +dbus 1.16.2 h3c4dab8_0 conda-forge +debugpy 1.8.17 py313h5d5ffb9_0 conda-forge +decorator 5.2.1 pyhd8ed1ab_0 conda-forge +defusedxml 0.7.1 pyhd8ed1ab_0 conda-forge +distlib 0.4.0 pyhd8ed1ab_0 conda-forge +distributed 2025.11.0 pyhcf101f3_0 conda-forge +docutils 0.21.2 pyhd8ed1ab_1 conda-forge +entrypoints 0.4 pyhd8ed1ab_1 conda-forge +esmf 8.9.0 nompi_h8d4c64c_3 conda-forge +exceptiongroup 1.3.0 pyhd8ed1ab_0 conda-forge +executing 2.2.1 pyhd8ed1ab_0 conda-forge +ffmpeg 8.0.0 gpl_h5c0ada0_706 conda-forge +filelock 3.20.0 pyhd8ed1ab_0 conda-forge +flake8 7.3.0 pyhd8ed1ab_0 conda-forge +font-ttf-dejavu-sans-mono 2.37 hab24e00_0 conda-forge +font-ttf-inconsolata 3.000 h77eed37_0 conda-forge +font-ttf-source-code-pro 2.038 h77eed37_0 conda-forge +font-ttf-ubuntu 0.83 h77eed37_3 conda-forge +fontconfig 2.15.0 h7e30c49_1 conda-forge +fonts-conda-ecosystem 1 0 conda-forge +fonts-conda-forge 1 hc364b38_1 conda-forge +fonttools 4.60.1 py313h3dea7bd_0 conda-forge +fqdn 1.5.1 pyhd8ed1ab_1 conda-forge +freetype 2.14.1 ha770c72_0 conda-forge +fribidi 1.0.16 hb03c661_0 conda-forge +fsspec 2025.10.0 pyhd8ed1ab_0 conda-forge +future 1.0.0 pyhd8ed1ab_2 conda-forge +gdk-pixbuf 2.44.4 h2b0a6b4_0 conda-forge +geometric_features 1.6.1 pyhd8ed1ab_0 conda-forge +geos 3.14.1 h480dda7_0 conda-forge +gettext 0.25.1 h3f43e3d_1 conda-forge +gettext-tools 0.25.1 h3f43e3d_1 conda-forge +gflags 2.2.2 h5888daf_1005 conda-forge +giflib 5.2.2 hd590300_0 conda-forge +git 2.52.0 pl5321h28be001_0 conda-forge +glog 0.7.1 hbabe93e_0 conda-forge +glpk 5.0 h445213a_0 conda-forge +glslang 16.0.0 hfd11570_0 conda-forge +gmp 6.3.0 hac33072_2 conda-forge +graphite2 1.3.14 hecca717_2 conda-forge +gsl 2.7 he838d99_0 conda-forge +gsw 3.6.20 py313h29aa505_1 conda-forge +h11 0.16.0 pyhd8ed1ab_0 conda-forge +h2 4.3.0 pyhcf101f3_0 conda-forge +h5py 3.15.1 nompi_py313h253c126_101 conda-forge +harfbuzz 12.2.0 h15599e2_0 
conda-forge +hdf4 4.2.15 h2a13503_7 conda-forge +hdf5 1.14.6 nompi_h6e4c0c1_103 conda-forge +hpack 4.1.0 pyhd8ed1ab_0 conda-forge +httpcore 1.0.9 pyh29332c3_0 conda-forge +httpx 0.28.1 pyhd8ed1ab_0 conda-forge +hyperframe 6.1.0 pyhd8ed1ab_0 conda-forge +icu 75.1 he02047a_0 conda-forge +identify 2.6.15 pyhd8ed1ab_0 conda-forge +idna 3.11 pyhd8ed1ab_0 conda-forge +igraph 1.0.0 hfe3e89f_0 conda-forge +imagecodecs 2025.11.11 py313h2d3cd63_0 conda-forge +imageio 2.37.0 pyhfb79c49_0 conda-forge +imagesize 1.4.1 pyhd8ed1ab_0 conda-forge +importlib-metadata 8.7.0 pyhe01879c_1 conda-forge +importlib_resources 6.5.2 pyhd8ed1ab_0 conda-forge +iniconfig 2.3.0 pyhd8ed1ab_0 conda-forge +inpoly 0.2.0 py313h29aa505_9 conda-forge +intel-gmmlib 22.8.2 hb700be7_0 conda-forge +intel-media-driver 25.3.4 hecca717_0 conda-forge +ipykernel 7.1.0 pyha191276_0 conda-forge +ipython 9.9.0 pyh53cf698_0 conda-forge +ipython_pygments_lexers 1.1.1 pyhd8ed1ab_0 conda-forge +ipywidgets 8.1.8 pyhd8ed1ab_0 conda-forge +isoduration 20.11.0 pyhd8ed1ab_1 conda-forge +isort 7.0.0 pyhd8ed1ab_0 conda-forge +jedi 0.19.2 pyhd8ed1ab_1 conda-forge +jigsawpy 1.1.0 pypi_0 pypi +jinja2 3.1.6 pyhd8ed1ab_0 conda-forge +json5 0.12.1 pyhd8ed1ab_0 conda-forge +jsonpointer 3.0.0 py313h78bf25f_2 conda-forge +jsonschema 4.25.1 pyhe01879c_0 conda-forge +jsonschema-specifications 2025.9.1 pyhcf101f3_0 conda-forge +jsonschema-with-format-nongpl 4.25.1 he01879c_0 conda-forge +jupyter 1.1.1 pyhd8ed1ab_1 conda-forge +jupyter-lsp 2.3.0 pyhcf101f3_0 conda-forge +jupyter_client 8.6.3 pyhd8ed1ab_1 conda-forge +jupyter_console 6.6.3 pyhd8ed1ab_1 conda-forge +jupyter_core 5.9.1 pyhc90fa1f_0 conda-forge +jupyter_events 0.12.0 pyh29332c3_0 conda-forge +jupyter_server 2.17.0 pyhcf101f3_0 conda-forge +jupyter_server_terminals 0.5.3 pyhd8ed1ab_1 conda-forge +jupyterlab 4.4.10 pyhd8ed1ab_0 conda-forge +jupyterlab_pygments 0.3.0 pyhd8ed1ab_2 conda-forge +jupyterlab_server 2.28.0 pyhcf101f3_0 conda-forge +jupyterlab_widgets 3.0.16 
pyhcf101f3_1 conda-forge +jxrlib 1.1 hd590300_3 conda-forge +keyutils 1.6.3 hb9d3cd8_0 conda-forge +kiwisolver 1.4.9 py313hc8edb43_2 conda-forge +krb5 1.21.3 h659f571_0 conda-forge +lame 3.100 h166bdaf_1003 conda-forge +lark 1.3.1 pyhd8ed1ab_0 conda-forge +lazy-loader 0.4 pyhd8ed1ab_2 conda-forge +lcms2 2.17 h717163a_0 conda-forge +ld_impl_linux-64 2.44 h1aa0949_5 conda-forge +lerc 4.0.0 h0aef613_1 conda-forge +level-zero 1.26.0 hb700be7_0 conda-forge +libabseil 20250512.1 cxx17_hba17884_0 conda-forge +libaec 1.1.4 h3f801dc_0 conda-forge +libarrow 22.0.0 h99e40f8_3_cpu conda-forge +libarrow-acero 22.0.0 h635bf11_3_cpu conda-forge +libarrow-compute 22.0.0 h8c2c5c3_3_cpu conda-forge +libarrow-dataset 22.0.0 h635bf11_3_cpu conda-forge +libarrow-substrait 22.0.0 h3f74fd7_3_cpu conda-forge +libasprintf 0.25.1 h3f43e3d_1 conda-forge +libasprintf-devel 0.25.1 h3f43e3d_1 conda-forge +libass 0.17.4 h96ad9f0_0 conda-forge +libavif16 1.3.0 h6395336_2 conda-forge +libblas 3.9.0 38_h4a7cf45_openblas conda-forge +libbrotlicommon 1.2.0 h09219d5_0 conda-forge +libbrotlidec 1.2.0 hd53d788_0 conda-forge +libbrotlienc 1.2.0 h02bd7ab_0 conda-forge +libcap 2.77 h3ff7636_0 conda-forge +libcblas 3.9.0 38_h0358290_openblas conda-forge +libcrc32c 1.1.2 h9c3ff4c_0 conda-forge +libcurl 8.17.0 h4e3cde8_0 conda-forge +libdeflate 1.25 h17f619e_0 conda-forge +libdrm 2.4.125 hb03c661_1 conda-forge +libedit 3.1.20250104 pl5321h7949ede_0 conda-forge +libegl 1.7.0 ha4b6fd6_2 conda-forge +libev 4.33 hd590300_2 conda-forge +libevent 2.1.12 hf998b51_1 conda-forge +libexpat 2.7.1 hecca717_0 conda-forge +libffi 3.5.2 h9ec8514_0 conda-forge +libflac 1.4.3 h59595ed_0 conda-forge +libfreetype 2.14.1 ha770c72_0 conda-forge +libfreetype6 2.14.1 h73754d4_0 conda-forge +libgcc 15.2.0 h767d61c_7 conda-forge +libgcc-ng 15.2.0 h69a702a_7 conda-forge +libgettextpo 0.25.1 h3f43e3d_1 conda-forge +libgettextpo-devel 0.25.1 h3f43e3d_1 conda-forge +libgfortran 15.2.0 h69a702a_7 conda-forge +libgfortran-ng 15.2.0 
h69a702a_7 conda-forge +libgfortran5 15.2.0 hcd61629_7 conda-forge +libgl 1.7.0 ha4b6fd6_2 conda-forge +libglib 2.86.1 h32235b2_2 conda-forge +libglvnd 1.7.0 ha4b6fd6_2 conda-forge +libglx 1.7.0 ha4b6fd6_2 conda-forge +libgomp 15.2.0 h767d61c_7 conda-forge +libgoogle-cloud 2.39.0 hdb79228_0 conda-forge +libgoogle-cloud-storage 2.39.0 hdbdcf42_0 conda-forge +libgrpc 1.73.1 h3288cfb_1 conda-forge +libhwloc 2.12.1 default_h7f8ec31_1002 conda-forge +libhwy 1.3.0 h4c17acf_1 conda-forge +libiconv 1.18 h3b78370_2 conda-forge +libjpeg-turbo 3.1.2 hb03c661_0 conda-forge +libjxl 0.11.1 hf08fa70_5 conda-forge +liblapack 3.9.0 38_h47877c9_openblas conda-forge +liblzma 5.8.1 hb9d3cd8_2 conda-forge +libmpdec 4.0.0 hb9d3cd8_0 conda-forge +libnetcdf 4.9.3 nompi_h11f7409_103 conda-forge +libnghttp2 1.67.0 had1ee68_0 conda-forge +libogg 1.3.5 hd0c01bc_1 conda-forge +libopenblas 0.3.30 pthreads_h94d23a6_3 conda-forge +libopentelemetry-cpp 1.21.0 hb9b0907_1 conda-forge +libopentelemetry-cpp-headers 1.21.0 ha770c72_1 conda-forge +libopenvino 2025.2.0 hb617929_1 conda-forge +libopenvino-auto-batch-plugin 2025.2.0 hed573e4_1 conda-forge +libopenvino-auto-plugin 2025.2.0 hed573e4_1 conda-forge +libopenvino-hetero-plugin 2025.2.0 hd41364c_1 conda-forge +libopenvino-intel-cpu-plugin 2025.2.0 hb617929_1 conda-forge +libopenvino-intel-gpu-plugin 2025.2.0 hb617929_1 conda-forge +libopenvino-intel-npu-plugin 2025.2.0 hb617929_1 conda-forge +libopenvino-ir-frontend 2025.2.0 hd41364c_1 conda-forge +libopenvino-onnx-frontend 2025.2.0 h1862bb8_1 conda-forge +libopenvino-paddle-frontend 2025.2.0 h1862bb8_1 conda-forge +libopenvino-pytorch-frontend 2025.2.0 hecca717_1 conda-forge +libopenvino-tensorflow-frontend 2025.2.0 h0767aad_1 conda-forge +libopenvino-tensorflow-lite-frontend 2025.2.0 hecca717_1 conda-forge +libopus 1.5.2 hd0c01bc_0 conda-forge +libparquet 22.0.0 h7376487_3_cpu conda-forge +libpciaccess 0.18 hb9d3cd8_0 conda-forge +libpng 1.6.50 h421ea60_1 conda-forge +libprotobuf 6.31.1 
h49aed37_2 conda-forge +libre2-11 2025.11.05 h7b12aa8_0 conda-forge +librsvg 2.60.0 h61e6d4b_0 conda-forge +libsndfile 1.2.2 hc60ed4a_1 conda-forge +libsodium 1.0.20 h4ab18f5_0 conda-forge +libsqlite 3.51.0 hee844dc_0 conda-forge +libssh2 1.11.1 hcf80075_0 conda-forge +libstdcxx 15.2.0 h8f9b012_7 conda-forge +libstdcxx-ng 15.2.0 h4852527_7 conda-forge +libsystemd0 257.10 hd0affe5_2 conda-forge +libthrift 0.22.0 h454ac66_1 conda-forge +libtiff 4.7.1 h9d88235_1 conda-forge +libudev1 257.10 hd0affe5_2 conda-forge +libudunits2 2.2.28 h40f5838_3 conda-forge +libunwind 1.8.3 h65a8314_0 conda-forge +liburing 2.12 hb700be7_0 conda-forge +libusb 1.0.29 h73b1eb8_0 conda-forge +libutf8proc 2.11.0 hb04c3b8_0 conda-forge +libuuid 2.41.2 he9a06e4_0 conda-forge +libva 2.22.0 h4f16b4b_2 conda-forge +libvorbis 1.3.7 h54a6638_2 conda-forge +libvpl 2.15.0 h54a6638_1 conda-forge +libvpx 1.14.1 hac33072_0 conda-forge +libvulkan-loader 1.4.328.1 h5279c79_0 conda-forge +libwebp-base 1.6.0 hd42ef1d_0 conda-forge +libxcb 1.17.0 h8a09558_0 conda-forge +libxcrypt 4.4.36 hd590300_1 conda-forge +libxkbcommon 1.13.0 hca5e8e5_0 conda-forge +libxml2 2.15.1 h26afc86_0 conda-forge +libxml2-16 2.15.1 ha9997c6_0 conda-forge +libxml2-devel 2.15.1 h26afc86_0 conda-forge +libxslt 1.1.43 h711ed8c_1 conda-forge +libzip 1.11.2 h6991a6a_0 conda-forge +libzlib 1.3.1 hb9d3cd8_2 conda-forge +libzopfli 1.0.3 h9c3ff4c_0 conda-forge +locket 1.0.0 pyhd8ed1ab_0 conda-forge +lxml 6.0.2 py313h4a16004_2 conda-forge +lz4 4.4.5 py313h28739b2_0 conda-forge +lz4-c 1.10.0 h5888daf_1 conda-forge +m2r 0.3.1 pyhd8ed1ab_1 conda-forge +mache 1.32.0 pyhd8ed1ab_0 conda-forge +markupsafe 3.0.3 py313h3dea7bd_0 conda-forge +matplotlib-base 3.10.8 py313h683a580_0 conda-forge +matplotlib-inline 0.2.1 pyhd8ed1ab_0 conda-forge +mccabe 0.7.0 pyhd8ed1ab_1 conda-forge +metis 5.1.0 hd0bcaf9_1007 conda-forge +mistune 0.8.4 pyh1a96a4e_1006 conda-forge +moab 5.6.0 nompi_tempest_h3811aa1_0 conda-forge +mock 5.2.0 pyhd8ed1ab_0 conda-forge 
+mpas_tools 1.3.2 py313h76c60f6_1 conda-forge +mpg123 1.32.9 hc50e24c_0 conda-forge +msgpack-python 1.1.2 py313h7037e92_1 conda-forge +munkres 1.1.4 pyhd8ed1ab_1 conda-forge +narwhals 2.11.0 pyhcf101f3_0 conda-forge +nbclient 0.10.2 pyhd8ed1ab_0 conda-forge +nbconvert-core 6.5.3 pyhd8ed1ab_0 conda-forge +nbformat 5.10.4 pyhd8ed1ab_1 conda-forge +nco 5.3.6 hfb7d223_0 conda-forge +ncurses 6.5 h2d0b736_3 conda-forge +ncview 2.1.8 h4debd67_12 conda-forge +nest-asyncio 1.6.0 pyhd8ed1ab_1 conda-forge +netcdf-fortran 4.6.2 nompi_h90de81b_102 conda-forge +netcdf4 1.7.4 nompi_py313h16051e2_102 conda-forge +networkx 3.5 pyhe01879c_0 conda-forge +nlohmann_json 3.12.0 h54a6638_1 conda-forge +nodeenv 1.9.1 pyhd8ed1ab_1 conda-forge +notebook 7.4.7 pyhd8ed1ab_0 conda-forge +notebook-shim 0.2.4 pyhd8ed1ab_1 conda-forge +numpy 2.4.1 py313hf6604e3_0 conda-forge +ocl-icd 2.3.3 hb9d3cd8_0 conda-forge +opencl-headers 2025.06.13 h5888daf_0 conda-forge +openh264 2.6.0 hc22cd8d_0 conda-forge +openjpeg 2.5.4 h55fea9a_0 conda-forge +openssl 3.6.0 h26f9b46_0 conda-forge +orc 2.2.1 hd747db4_0 conda-forge +otps 2021.10 h4839124_0 e3sm/label/compass +overrides 7.7.0 pyhd8ed1ab_1 conda-forge +packaging 25.0 pyh29332c3_1 conda-forge +pandas 2.3.3 py313h08cd8bf_1 conda-forge +pandocfilters 1.5.0 pyhd8ed1ab_0 conda-forge +pango 1.56.4 hadf4263_0 conda-forge +parso 0.8.5 pyhcf101f3_0 conda-forge +partd 1.4.2 pyhd8ed1ab_0 conda-forge +pcre2 10.46 h1321c63_0 conda-forge +perl 5.32.1 7_hd590300_perl5 conda-forge +pexpect 4.9.0 pyhd8ed1ab_1 conda-forge +pillow 12.0.0 py313h50355cd_0 conda-forge +pip 25.3 pyh145f28c_0 conda-forge +pixman 0.46.4 h54a6638_1 conda-forge +platformdirs 4.5.0 pyhcf101f3_0 conda-forge +pluggy 1.6.0 pyhd8ed1ab_0 conda-forge +popt 1.16 h0b475e3_2002 conda-forge +pre-commit 4.5.1 pyha770c72_0 conda-forge +progressbar2 4.5.0 pyhd8ed1ab_1 conda-forge +proj 9.7.0 hb72c0af_0 conda-forge +prometheus-cpp 1.3.0 ha5d0236_0 conda-forge +prometheus_client 0.23.1 pyhd8ed1ab_0 conda-forge 
+prompt-toolkit 3.0.52 pyha770c72_0 conda-forge +prompt_toolkit 3.0.52 hd8ed1ab_0 conda-forge +psutil 7.1.3 py313h54dd161_0 conda-forge +pthread-stubs 0.4 hb9d3cd8_1002 conda-forge +ptyprocess 0.7.0 pyhd8ed1ab_1 conda-forge +pugixml 1.15 h3f63f65_0 conda-forge +pulseaudio-client 17.0 h9a8bead_2 conda-forge +pure_eval 0.2.3 pyhd8ed1ab_1 conda-forge +pyamg 5.3.0 py313hfaae9d9_1 conda-forge +pyarrow 22.0.0 py313h78bf25f_0 conda-forge +pyarrow-core 22.0.0 py313he109ebe_0_cpu conda-forge +pycodestyle 2.14.0 pyhd8ed1ab_0 conda-forge +pycparser 2.22 pyh29332c3_1 conda-forge +pyevtk 1.6.0 pyhb0bfe47_1 conda-forge +pyflakes 3.4.0 pyhd8ed1ab_0 conda-forge +pygments 2.19.2 pyhd8ed1ab_0 conda-forge +pyparsing 3.2.5 pyhcf101f3_0 conda-forge +pyproj 3.7.2 py313h77f6078_2 conda-forge +pyremap 2.1.0 pyhd8ed1ab_0 conda-forge +pyshp 3.0.2 pyhd8ed1ab_0 conda-forge +pysocks 1.7.1 pyha55dd90_7 conda-forge +pytest 9.0.2 pyhcf101f3_0 conda-forge +python 3.13.9 hc97d973_101_cp313 conda-forge +python-dateutil 2.9.0.post0 pyhe01879c_2 conda-forge +python-fastjsonschema 2.21.2 pyhe01879c_0 conda-forge +python-gil 3.13.9 h4df99d1_101 conda-forge +python-igraph 1.0.0 py313h7033f15_0 conda-forge +python-json-logger 2.0.7 pyhd8ed1ab_0 conda-forge +python-tzdata 2025.2 pyhd8ed1ab_0 conda-forge +python-utils 3.9.1 pyhff2d567_1 conda-forge +python_abi 3.13 8_cp313 conda-forge +pytz 2025.2 pyhd8ed1ab_0 conda-forge +pywavelets 1.9.0 py313h29aa505_2 conda-forge +pyyaml 6.0.3 py313h3dea7bd_0 conda-forge +pyzmq 27.1.0 py312hfb55c3c_0 conda-forge +qhull 2020.2 h434a139_5 conda-forge +rav1e 0.7.1 h8fae777_3 conda-forge +re2 2025.11.05 h5301d42_0 conda-forge +readline 8.2 h8c095d6_2 conda-forge +referencing 0.37.0 pyhcf101f3_0 conda-forge +requests 2.32.5 pyhcf101f3_1 conda-forge +rfc3339-validator 0.1.4 pyhd8ed1ab_1 conda-forge +rfc3986-validator 0.1.1 pyh9f0ad1d_0 conda-forge +rfc3987-syntax 1.1.0 pyhe01879c_1 conda-forge +roman-numerals-py 3.1.0 pyhd8ed1ab_0 conda-forge +rpds-py 0.28.0 py313h843e2db_2 
conda-forge +rsync 3.4.1 h81c0278_2 conda-forge +ruamel.yaml 0.18.17 py313h07c4f96_0 conda-forge +ruamel.yaml.clib 0.2.14 py313h07c4f96_0 conda-forge +s2n 1.6.0 h8399546_1 conda-forge +scikit-image 0.26.0 np2py313hb172dc5_0 conda-forge +scipy 1.17.0 py313h4b8bb8b_1 conda-forge +sdl2 2.32.56 h54a6638_0 conda-forge +sdl3 3.2.26 h68140b3_0 conda-forge +send2trash 1.8.3 pyh0d859eb_1 conda-forge +setuptools 80.9.0 pyhff2d567_0 conda-forge +shaderc 2025.4 h3e344bc_0 conda-forge +shapely 2.1.2 py313had47c43_2 conda-forge +six 1.17.0 pyhe01879c_1 conda-forge +snappy 1.2.2 h03e3b7b_1 conda-forge +sniffio 1.3.1 pyhd8ed1ab_2 conda-forge +snowballstemmer 3.0.1 pyhd8ed1ab_0 conda-forge +sortedcontainers 2.4.0 pyhd8ed1ab_1 conda-forge +soupsieve 2.8 pyhd8ed1ab_0 conda-forge +sphinx 8.2.3 pyhd8ed1ab_0 conda-forge +sphinx_rtd_theme 3.0.2 pyha770c72_0 conda-forge +sphinxcontrib-applehelp 2.0.0 pyhd8ed1ab_1 conda-forge +sphinxcontrib-devhelp 2.0.0 pyhd8ed1ab_1 conda-forge +sphinxcontrib-htmlhelp 2.1.0 pyhd8ed1ab_1 conda-forge +sphinxcontrib-jquery 4.1 pyhd8ed1ab_1 conda-forge +sphinxcontrib-jsmath 1.0.1 pyhd8ed1ab_1 conda-forge +sphinxcontrib-qthelp 2.0.0 pyhd8ed1ab_1 conda-forge +sphinxcontrib-serializinghtml 1.1.10 pyhd8ed1ab_1 conda-forge +spirv-tools 2025.4 hb700be7_0 conda-forge +sqlite 3.51.0 heff268d_0 conda-forge +stack_data 0.6.3 pyhd8ed1ab_1 conda-forge +svt-av1 3.1.2 hecca717_0 conda-forge +tbb 2022.3.0 h8d10470_1 conda-forge +tblib 3.2.2 pyhcf101f3_0 conda-forge +tempest-remap 2.2.0 h397758c_8 conda-forge +termcolor 3.2.0 pyhd8ed1ab_0 conda-forge +terminado 0.18.1 pyh0d859eb_0 conda-forge +texttable 1.7.0 pyhd8ed1ab_1 conda-forge +tifffile 2025.10.16 pyhd8ed1ab_0 conda-forge +tinycss2 1.4.0 pyhd8ed1ab_0 conda-forge +tk 8.6.13 noxft_ha0e22de_103 conda-forge +tomli 2.3.0 pyhcf101f3_0 conda-forge +toolz 1.1.0 pyhd8ed1ab_1 conda-forge +tornado 6.5.2 py313h07c4f96_2 conda-forge +tqdm 4.67.1 pyhd8ed1ab_1 conda-forge +traitlets 5.14.3 pyhd8ed1ab_1 conda-forge +typing-extensions 
4.15.0 h396c80c_0 conda-forge +typing_extensions 4.15.0 pyhcf101f3_0 conda-forge +typing_utils 0.1.0 pyhd8ed1ab_1 conda-forge +tzdata 2025b h78e105d_0 conda-forge +udunits2 2.2.28 h40f5838_3 conda-forge +ukkonen 1.0.1 py313h7037e92_6 conda-forge +uri-template 1.3.0 pyhd8ed1ab_1 conda-forge +urllib3 2.5.0 pyhd8ed1ab_0 conda-forge +virtualenv 20.35.4 pyhd8ed1ab_0 conda-forge +wayland 1.24.0 hd6090a7_1 conda-forge +wayland-protocols 1.45 hd8ed1ab_0 conda-forge +wcwidth 0.2.14 pyhd8ed1ab_0 conda-forge +webcolors 25.10.0 pyhd8ed1ab_0 conda-forge +webencodings 0.5.1 pyhd8ed1ab_3 conda-forge +websocket-client 1.9.0 pyhd8ed1ab_0 conda-forge +widgetsnbextension 4.0.15 pyhd8ed1ab_0 conda-forge +x264 1!164.3095 h166bdaf_2 conda-forge +x265 3.5 h924138e_3 conda-forge +xarray 2025.12.0 pyhcf101f3_0 conda-forge +xkeyboard-config 2.46 hb03c661_0 conda-forge +xorg-libice 1.1.2 hb9d3cd8_0 conda-forge +xorg-libsm 1.2.6 he73a12e_0 conda-forge +xorg-libx11 1.8.12 h4f16b4b_0 conda-forge +xorg-libxau 1.0.12 hb03c661_1 conda-forge +xorg-libxaw 1.0.16 hb9d3cd8_0 conda-forge +xorg-libxcursor 1.2.3 hb9d3cd8_0 conda-forge +xorg-libxdmcp 1.1.5 hb03c661_1 conda-forge +xorg-libxext 1.3.6 hb9d3cd8_0 conda-forge +xorg-libxfixes 6.0.2 hb03c661_0 conda-forge +xorg-libxmu 1.2.1 hb9d3cd8_1 conda-forge +xorg-libxpm 3.5.17 hb9d3cd8_1 conda-forge +xorg-libxrandr 1.5.4 hb9d3cd8_0 conda-forge +xorg-libxrender 0.9.12 hb9d3cd8_0 conda-forge +xorg-libxscrnsaver 1.2.4 hb9d3cd8_0 conda-forge +xorg-libxt 1.3.1 hb9d3cd8_0 conda-forge +xxhash 0.8.3 hb47aa4a_0 conda-forge +xyzservices 2025.10.0 pyhd8ed1ab_0 conda-forge +yaml 0.2.5 h280c20c_3 conda-forge +zeromq 4.3.5 h387f397_9 conda-forge +zfp 1.0.1 h909a3a2_3 conda-forge +zict 3.0.0 pyhd8ed1ab_1 conda-forge +zipp 3.23.0 pyhd8ed1ab_0 conda-forge +zlib 1.3.1 hb9d3cd8_2 conda-forge +zlib-ng 2.2.5 hde8ca8f_0 conda-forge +zstandard 0.25.0 py313h54dd161_1 conda-forge +zstd 1.5.7 hb8e6e7a_2 conda-forge + 
+*********************************************************************** + +*********************************************************************** +MPAS git version: archive/MALI-Dev/tc_2025_mali_ismip6_ais_2300-4410-g41b26f6179 + +command: /global/cfs/cdirs/fanssie/users/ahager/mambaforge/envs/dev_compass_1.9.0-alpha.2/bin/compass setup -t landice/ensemble_generator/sgh_ensemble_analysis -f analysis_ensemble.cfg + +test cases: + path: landice/ensemble_generator/sgh_ensemble_analysis + name: sgh_ensemble_analysis + MPAS core: landice + test group: ensemble_generator + subdir: sgh_ensemble_analysis + steps: + +conda list: +# packages in environment at /global/cfs/cdirs/fanssie/users/ahager/mambaforge/envs/dev_compass_1.9.0-alpha.2: +# +# Name Version Build Channel +_libgcc_mutex 0.1 conda_forge conda-forge +_openmp_mutex 4.5 2_gnu conda-forge +_python_abi3_support 1.0 hd8ed1ab_2 conda-forge +alabaster 1.0.0 pyhd8ed1ab_1 conda-forge +alsa-lib 1.2.14 hb9d3cd8_0 conda-forge +anyio 4.11.0 pyhcf101f3_0 conda-forge +aom 3.9.1 hac33072_0 conda-forge +argon2-cffi 25.1.0 pyhd8ed1ab_0 conda-forge +argon2-cffi-bindings 25.1.0 py313h07c4f96_2 conda-forge +arpack 3.9.1 nompi_hf03ea27_102 conda-forge +arrow 1.4.0 pyhcf101f3_0 conda-forge +asttokens 3.0.0 pyhd8ed1ab_1 conda-forge +async-lru 2.0.5 pyh29332c3_0 conda-forge +attr 2.5.2 h39aace5_0 conda-forge +attrs 25.4.0 pyh71513ae_0 conda-forge +autopep8 2.3.2 pypi_0 pypi +aws-c-auth 0.9.1 h194c533_5 conda-forge +aws-c-cal 0.9.8 h346e085_0 conda-forge +aws-c-common 0.12.5 hb03c661_1 conda-forge +aws-c-compression 0.3.1 h7e655bb_8 conda-forge +aws-c-event-stream 0.5.6 h1deb5b9_4 conda-forge +aws-c-http 0.10.7 had4b759_1 conda-forge +aws-c-io 0.23.2 hbff472d_2 conda-forge +aws-c-mqtt 0.13.3 h8ba2272_8 conda-forge +aws-c-s3 0.8.6 h493c25d_7 conda-forge +aws-c-sdkutils 0.2.4 h7e655bb_3 conda-forge +aws-checksums 0.2.7 h7e655bb_4 conda-forge +aws-crt-cpp 0.35.0 h719b17a_2 conda-forge +aws-sdk-cpp 1.11.606 h522d481_6 conda-forge 
+azure-core-cpp 1.16.1 h3a458e0_0 conda-forge +azure-identity-cpp 1.13.2 h3a5f585_1 conda-forge +azure-storage-blobs-cpp 12.15.0 h2a74896_1 conda-forge +azure-storage-common-cpp 12.11.0 h3d7a050_1 conda-forge +azure-storage-files-datalake-cpp 12.13.0 hf38f1be_1 conda-forge +babel 2.17.0 pyhd8ed1ab_0 conda-forge +beautifulsoup4 4.14.2 pyha770c72_0 conda-forge +bleach 6.2.0 pyh29332c3_4 conda-forge +blosc 1.21.6 he440d0b_1 conda-forge +bokeh 3.8.1 pyhd8ed1ab_0 conda-forge +brotli 1.2.0 h41a2e66_0 conda-forge +brotli-bin 1.2.0 hf2c8021_0 conda-forge +brotli-python 1.2.0 py313h09d1b84_0 conda-forge +brunsli 0.1 hd1e3526_2 conda-forge +bzip2 1.0.8 hda65f42_8 conda-forge +c-ares 1.34.5 hb9d3cd8_0 conda-forge +c-blosc2 2.22.0 h4cfbee9_0 conda-forge +ca-certificates 2026.1.4 hbd8a1cb_0 conda-forge +cached-property 1.5.2 hd8ed1ab_1 conda-forge +cached_property 1.5.2 pyha770c72_1 conda-forge +cairo 1.18.4 h3394656_0 conda-forge +cartopy 0.25.0 py313h08cd8bf_1 conda-forge +cartopy_offlinedata 0.25.0 pyhd8ed1ab_0 conda-forge +certifi 2026.1.4 pyhd8ed1ab_0 conda-forge +cfchecker 4.1.0 pyhd8ed1ab_1 conda-forge +cffi 2.0.0 py313hf46b229_1 conda-forge +cfgv 3.3.1 pyhd8ed1ab_1 conda-forge +cftime 1.6.4 py313h29aa505_2 conda-forge +cfunits 3.3.7 pyhd8ed1ab_1 conda-forge +charls 2.4.2 h59595ed_0 conda-forge +charset-normalizer 3.4.4 pyhd8ed1ab_0 conda-forge +click 8.3.0 pyh707e725_0 conda-forge +cloudpickle 3.1.2 pyhd8ed1ab_0 conda-forge +cmocean 4.0.3 pyhd8ed1ab_1 conda-forge +colorama 0.4.6 pyhd8ed1ab_1 conda-forge +colorspacious 1.1.2 pyhecae5ae_1 conda-forge +comm 0.2.3 pyhe01879c_0 conda-forge +compass 1.9.0a2 pypi_0 pypi +contourpy 1.3.3 py313h7037e92_3 conda-forge +cpython 3.13.9 py313hd8ed1ab_101 conda-forge +cycler 0.12.1 pyhd8ed1ab_1 conda-forge +cytoolz 1.1.0 py313h07c4f96_1 conda-forge +dask 2025.11.0 pyhcf101f3_0 conda-forge +dask-core 2025.11.0 pyhcf101f3_0 conda-forge +dav1d 1.2.1 hd590300_0 conda-forge +dbus 1.16.2 h3c4dab8_0 conda-forge +debugpy 1.8.17 
py313h5d5ffb9_0 conda-forge +decorator 5.2.1 pyhd8ed1ab_0 conda-forge +defusedxml 0.7.1 pyhd8ed1ab_0 conda-forge +distlib 0.4.0 pyhd8ed1ab_0 conda-forge +distributed 2025.11.0 pyhcf101f3_0 conda-forge +docutils 0.21.2 pyhd8ed1ab_1 conda-forge +entrypoints 0.4 pyhd8ed1ab_1 conda-forge +esmf 8.9.0 nompi_h8d4c64c_3 conda-forge +exceptiongroup 1.3.0 pyhd8ed1ab_0 conda-forge +executing 2.2.1 pyhd8ed1ab_0 conda-forge +ffmpeg 8.0.0 gpl_h5c0ada0_706 conda-forge +filelock 3.20.0 pyhd8ed1ab_0 conda-forge +flake8 7.3.0 pyhd8ed1ab_0 conda-forge +font-ttf-dejavu-sans-mono 2.37 hab24e00_0 conda-forge +font-ttf-inconsolata 3.000 h77eed37_0 conda-forge +font-ttf-source-code-pro 2.038 h77eed37_0 conda-forge +font-ttf-ubuntu 0.83 h77eed37_3 conda-forge +fontconfig 2.15.0 h7e30c49_1 conda-forge +fonts-conda-ecosystem 1 0 conda-forge +fonts-conda-forge 1 hc364b38_1 conda-forge +fonttools 4.60.1 py313h3dea7bd_0 conda-forge +fqdn 1.5.1 pyhd8ed1ab_1 conda-forge +freetype 2.14.1 ha770c72_0 conda-forge +fribidi 1.0.16 hb03c661_0 conda-forge +fsspec 2025.10.0 pyhd8ed1ab_0 conda-forge +future 1.0.0 pyhd8ed1ab_2 conda-forge +gdk-pixbuf 2.44.4 h2b0a6b4_0 conda-forge +geometric_features 1.6.1 pyhd8ed1ab_0 conda-forge +geos 3.14.1 h480dda7_0 conda-forge +gettext 0.25.1 h3f43e3d_1 conda-forge +gettext-tools 0.25.1 h3f43e3d_1 conda-forge +gflags 2.2.2 h5888daf_1005 conda-forge +giflib 5.2.2 hd590300_0 conda-forge +git 2.52.0 pl5321h28be001_0 conda-forge +glog 0.7.1 hbabe93e_0 conda-forge +glpk 5.0 h445213a_0 conda-forge +glslang 16.0.0 hfd11570_0 conda-forge +gmp 6.3.0 hac33072_2 conda-forge +graphite2 1.3.14 hecca717_2 conda-forge +gsl 2.7 he838d99_0 conda-forge +gsw 3.6.20 py313h29aa505_1 conda-forge +h11 0.16.0 pyhd8ed1ab_0 conda-forge +h2 4.3.0 pyhcf101f3_0 conda-forge +h5py 3.15.1 nompi_py313h253c126_101 conda-forge +harfbuzz 12.2.0 h15599e2_0 conda-forge +hdf4 4.2.15 h2a13503_7 conda-forge +hdf5 1.14.6 nompi_h6e4c0c1_103 conda-forge +hpack 4.1.0 pyhd8ed1ab_0 conda-forge +httpcore 1.0.9 
pyh29332c3_0 conda-forge +httpx 0.28.1 pyhd8ed1ab_0 conda-forge +hyperframe 6.1.0 pyhd8ed1ab_0 conda-forge +icu 75.1 he02047a_0 conda-forge +identify 2.6.15 pyhd8ed1ab_0 conda-forge +idna 3.11 pyhd8ed1ab_0 conda-forge +igraph 1.0.0 hfe3e89f_0 conda-forge +imagecodecs 2025.11.11 py313h2d3cd63_0 conda-forge +imageio 2.37.0 pyhfb79c49_0 conda-forge +imagesize 1.4.1 pyhd8ed1ab_0 conda-forge +importlib-metadata 8.7.0 pyhe01879c_1 conda-forge +importlib_resources 6.5.2 pyhd8ed1ab_0 conda-forge +iniconfig 2.3.0 pyhd8ed1ab_0 conda-forge +inpoly 0.2.0 py313h29aa505_9 conda-forge +intel-gmmlib 22.8.2 hb700be7_0 conda-forge +intel-media-driver 25.3.4 hecca717_0 conda-forge +ipykernel 7.1.0 pyha191276_0 conda-forge +ipython 9.9.0 pyh53cf698_0 conda-forge +ipython_pygments_lexers 1.1.1 pyhd8ed1ab_0 conda-forge +ipywidgets 8.1.8 pyhd8ed1ab_0 conda-forge +isoduration 20.11.0 pyhd8ed1ab_1 conda-forge +isort 7.0.0 pyhd8ed1ab_0 conda-forge +jedi 0.19.2 pyhd8ed1ab_1 conda-forge +jigsawpy 1.1.0 pypi_0 pypi +jinja2 3.1.6 pyhd8ed1ab_0 conda-forge +json5 0.12.1 pyhd8ed1ab_0 conda-forge +jsonpointer 3.0.0 py313h78bf25f_2 conda-forge +jsonschema 4.25.1 pyhe01879c_0 conda-forge +jsonschema-specifications 2025.9.1 pyhcf101f3_0 conda-forge +jsonschema-with-format-nongpl 4.25.1 he01879c_0 conda-forge +jupyter 1.1.1 pyhd8ed1ab_1 conda-forge +jupyter-lsp 2.3.0 pyhcf101f3_0 conda-forge +jupyter_client 8.6.3 pyhd8ed1ab_1 conda-forge +jupyter_console 6.6.3 pyhd8ed1ab_1 conda-forge +jupyter_core 5.9.1 pyhc90fa1f_0 conda-forge +jupyter_events 0.12.0 pyh29332c3_0 conda-forge +jupyter_server 2.17.0 pyhcf101f3_0 conda-forge +jupyter_server_terminals 0.5.3 pyhd8ed1ab_1 conda-forge +jupyterlab 4.4.10 pyhd8ed1ab_0 conda-forge +jupyterlab_pygments 0.3.0 pyhd8ed1ab_2 conda-forge +jupyterlab_server 2.28.0 pyhcf101f3_0 conda-forge +jupyterlab_widgets 3.0.16 pyhcf101f3_1 conda-forge +jxrlib 1.1 hd590300_3 conda-forge +keyutils 1.6.3 hb9d3cd8_0 conda-forge +kiwisolver 1.4.9 py313hc8edb43_2 conda-forge +krb5 
1.21.3 h659f571_0 conda-forge +lame 3.100 h166bdaf_1003 conda-forge +lark 1.3.1 pyhd8ed1ab_0 conda-forge +lazy-loader 0.4 pyhd8ed1ab_2 conda-forge +lcms2 2.17 h717163a_0 conda-forge +ld_impl_linux-64 2.44 h1aa0949_5 conda-forge +lerc 4.0.0 h0aef613_1 conda-forge +level-zero 1.26.0 hb700be7_0 conda-forge +libabseil 20250512.1 cxx17_hba17884_0 conda-forge +libaec 1.1.4 h3f801dc_0 conda-forge +libarrow 22.0.0 h99e40f8_3_cpu conda-forge +libarrow-acero 22.0.0 h635bf11_3_cpu conda-forge +libarrow-compute 22.0.0 h8c2c5c3_3_cpu conda-forge +libarrow-dataset 22.0.0 h635bf11_3_cpu conda-forge +libarrow-substrait 22.0.0 h3f74fd7_3_cpu conda-forge +libasprintf 0.25.1 h3f43e3d_1 conda-forge +libasprintf-devel 0.25.1 h3f43e3d_1 conda-forge +libass 0.17.4 h96ad9f0_0 conda-forge +libavif16 1.3.0 h6395336_2 conda-forge +libblas 3.9.0 38_h4a7cf45_openblas conda-forge +libbrotlicommon 1.2.0 h09219d5_0 conda-forge +libbrotlidec 1.2.0 hd53d788_0 conda-forge +libbrotlienc 1.2.0 h02bd7ab_0 conda-forge +libcap 2.77 h3ff7636_0 conda-forge +libcblas 3.9.0 38_h0358290_openblas conda-forge +libcrc32c 1.1.2 h9c3ff4c_0 conda-forge +libcurl 8.17.0 h4e3cde8_0 conda-forge +libdeflate 1.25 h17f619e_0 conda-forge +libdrm 2.4.125 hb03c661_1 conda-forge +libedit 3.1.20250104 pl5321h7949ede_0 conda-forge +libegl 1.7.0 ha4b6fd6_2 conda-forge +libev 4.33 hd590300_2 conda-forge +libevent 2.1.12 hf998b51_1 conda-forge +libexpat 2.7.1 hecca717_0 conda-forge +libffi 3.5.2 h9ec8514_0 conda-forge +libflac 1.4.3 h59595ed_0 conda-forge +libfreetype 2.14.1 ha770c72_0 conda-forge +libfreetype6 2.14.1 h73754d4_0 conda-forge +libgcc 15.2.0 h767d61c_7 conda-forge +libgcc-ng 15.2.0 h69a702a_7 conda-forge +libgettextpo 0.25.1 h3f43e3d_1 conda-forge +libgettextpo-devel 0.25.1 h3f43e3d_1 conda-forge +libgfortran 15.2.0 h69a702a_7 conda-forge +libgfortran-ng 15.2.0 h69a702a_7 conda-forge +libgfortran5 15.2.0 hcd61629_7 conda-forge +libgl 1.7.0 ha4b6fd6_2 conda-forge +libglib 2.86.1 h32235b2_2 conda-forge +libglvnd 1.7.0 
ha4b6fd6_2 conda-forge +libglx 1.7.0 ha4b6fd6_2 conda-forge +libgomp 15.2.0 h767d61c_7 conda-forge +libgoogle-cloud 2.39.0 hdb79228_0 conda-forge +libgoogle-cloud-storage 2.39.0 hdbdcf42_0 conda-forge +libgrpc 1.73.1 h3288cfb_1 conda-forge +libhwloc 2.12.1 default_h7f8ec31_1002 conda-forge +libhwy 1.3.0 h4c17acf_1 conda-forge +libiconv 1.18 h3b78370_2 conda-forge +libjpeg-turbo 3.1.2 hb03c661_0 conda-forge +libjxl 0.11.1 hf08fa70_5 conda-forge +liblapack 3.9.0 38_h47877c9_openblas conda-forge +liblzma 5.8.1 hb9d3cd8_2 conda-forge +libmpdec 4.0.0 hb9d3cd8_0 conda-forge +libnetcdf 4.9.3 nompi_h11f7409_103 conda-forge +libnghttp2 1.67.0 had1ee68_0 conda-forge +libogg 1.3.5 hd0c01bc_1 conda-forge +libopenblas 0.3.30 pthreads_h94d23a6_3 conda-forge +libopentelemetry-cpp 1.21.0 hb9b0907_1 conda-forge +libopentelemetry-cpp-headers 1.21.0 ha770c72_1 conda-forge +libopenvino 2025.2.0 hb617929_1 conda-forge +libopenvino-auto-batch-plugin 2025.2.0 hed573e4_1 conda-forge +libopenvino-auto-plugin 2025.2.0 hed573e4_1 conda-forge +libopenvino-hetero-plugin 2025.2.0 hd41364c_1 conda-forge +libopenvino-intel-cpu-plugin 2025.2.0 hb617929_1 conda-forge +libopenvino-intel-gpu-plugin 2025.2.0 hb617929_1 conda-forge +libopenvino-intel-npu-plugin 2025.2.0 hb617929_1 conda-forge +libopenvino-ir-frontend 2025.2.0 hd41364c_1 conda-forge +libopenvino-onnx-frontend 2025.2.0 h1862bb8_1 conda-forge +libopenvino-paddle-frontend 2025.2.0 h1862bb8_1 conda-forge +libopenvino-pytorch-frontend 2025.2.0 hecca717_1 conda-forge +libopenvino-tensorflow-frontend 2025.2.0 h0767aad_1 conda-forge +libopenvino-tensorflow-lite-frontend 2025.2.0 hecca717_1 conda-forge +libopus 1.5.2 hd0c01bc_0 conda-forge +libparquet 22.0.0 h7376487_3_cpu conda-forge +libpciaccess 0.18 hb9d3cd8_0 conda-forge +libpng 1.6.50 h421ea60_1 conda-forge +libprotobuf 6.31.1 h49aed37_2 conda-forge +libre2-11 2025.11.05 h7b12aa8_0 conda-forge +librsvg 2.60.0 h61e6d4b_0 conda-forge +libsndfile 1.2.2 hc60ed4a_1 conda-forge +libsodium 1.0.20 
h4ab18f5_0 conda-forge +libsqlite 3.51.0 hee844dc_0 conda-forge +libssh2 1.11.1 hcf80075_0 conda-forge +libstdcxx 15.2.0 h8f9b012_7 conda-forge +libstdcxx-ng 15.2.0 h4852527_7 conda-forge +libsystemd0 257.10 hd0affe5_2 conda-forge +libthrift 0.22.0 h454ac66_1 conda-forge +libtiff 4.7.1 h9d88235_1 conda-forge +libudev1 257.10 hd0affe5_2 conda-forge +libudunits2 2.2.28 h40f5838_3 conda-forge +libunwind 1.8.3 h65a8314_0 conda-forge +liburing 2.12 hb700be7_0 conda-forge +libusb 1.0.29 h73b1eb8_0 conda-forge +libutf8proc 2.11.0 hb04c3b8_0 conda-forge +libuuid 2.41.2 he9a06e4_0 conda-forge +libva 2.22.0 h4f16b4b_2 conda-forge +libvorbis 1.3.7 h54a6638_2 conda-forge +libvpl 2.15.0 h54a6638_1 conda-forge +libvpx 1.14.1 hac33072_0 conda-forge +libvulkan-loader 1.4.328.1 h5279c79_0 conda-forge +libwebp-base 1.6.0 hd42ef1d_0 conda-forge +libxcb 1.17.0 h8a09558_0 conda-forge +libxcrypt 4.4.36 hd590300_1 conda-forge +libxkbcommon 1.13.0 hca5e8e5_0 conda-forge +libxml2 2.15.1 h26afc86_0 conda-forge +libxml2-16 2.15.1 ha9997c6_0 conda-forge +libxml2-devel 2.15.1 h26afc86_0 conda-forge +libxslt 1.1.43 h711ed8c_1 conda-forge +libzip 1.11.2 h6991a6a_0 conda-forge +libzlib 1.3.1 hb9d3cd8_2 conda-forge +libzopfli 1.0.3 h9c3ff4c_0 conda-forge +locket 1.0.0 pyhd8ed1ab_0 conda-forge +lxml 6.0.2 py313h4a16004_2 conda-forge +lz4 4.4.5 py313h28739b2_0 conda-forge +lz4-c 1.10.0 h5888daf_1 conda-forge +m2r 0.3.1 pyhd8ed1ab_1 conda-forge +mache 1.32.0 pyhd8ed1ab_0 conda-forge +markupsafe 3.0.3 py313h3dea7bd_0 conda-forge +matplotlib-base 3.10.8 py313h683a580_0 conda-forge +matplotlib-inline 0.2.1 pyhd8ed1ab_0 conda-forge +mccabe 0.7.0 pyhd8ed1ab_1 conda-forge +metis 5.1.0 hd0bcaf9_1007 conda-forge +mistune 0.8.4 pyh1a96a4e_1006 conda-forge +moab 5.6.0 nompi_tempest_h3811aa1_0 conda-forge +mock 5.2.0 pyhd8ed1ab_0 conda-forge +mpas_tools 1.3.2 py313h76c60f6_1 conda-forge +mpg123 1.32.9 hc50e24c_0 conda-forge +msgpack-python 1.1.2 py313h7037e92_1 conda-forge +munkres 1.1.4 pyhd8ed1ab_1 
conda-forge +narwhals 2.11.0 pyhcf101f3_0 conda-forge +nbclient 0.10.2 pyhd8ed1ab_0 conda-forge +nbconvert-core 6.5.3 pyhd8ed1ab_0 conda-forge +nbformat 5.10.4 pyhd8ed1ab_1 conda-forge +nco 5.3.6 hfb7d223_0 conda-forge +ncurses 6.5 h2d0b736_3 conda-forge +ncview 2.1.8 h4debd67_12 conda-forge +nest-asyncio 1.6.0 pyhd8ed1ab_1 conda-forge +netcdf-fortran 4.6.2 nompi_h90de81b_102 conda-forge +netcdf4 1.7.4 nompi_py313h16051e2_102 conda-forge +networkx 3.5 pyhe01879c_0 conda-forge +nlohmann_json 3.12.0 h54a6638_1 conda-forge +nodeenv 1.9.1 pyhd8ed1ab_1 conda-forge +notebook 7.4.7 pyhd8ed1ab_0 conda-forge +notebook-shim 0.2.4 pyhd8ed1ab_1 conda-forge +numpy 2.4.1 py313hf6604e3_0 conda-forge +ocl-icd 2.3.3 hb9d3cd8_0 conda-forge +opencl-headers 2025.06.13 h5888daf_0 conda-forge +openh264 2.6.0 hc22cd8d_0 conda-forge +openjpeg 2.5.4 h55fea9a_0 conda-forge +openssl 3.6.0 h26f9b46_0 conda-forge +orc 2.2.1 hd747db4_0 conda-forge +otps 2021.10 h4839124_0 e3sm/label/compass +overrides 7.7.0 pyhd8ed1ab_1 conda-forge +packaging 25.0 pyh29332c3_1 conda-forge +pandas 2.3.3 py313h08cd8bf_1 conda-forge +pandocfilters 1.5.0 pyhd8ed1ab_0 conda-forge +pango 1.56.4 hadf4263_0 conda-forge +parso 0.8.5 pyhcf101f3_0 conda-forge +partd 1.4.2 pyhd8ed1ab_0 conda-forge +pcre2 10.46 h1321c63_0 conda-forge +perl 5.32.1 7_hd590300_perl5 conda-forge +pexpect 4.9.0 pyhd8ed1ab_1 conda-forge +pillow 12.0.0 py313h50355cd_0 conda-forge +pip 25.3 pyh145f28c_0 conda-forge +pixman 0.46.4 h54a6638_1 conda-forge +platformdirs 4.5.0 pyhcf101f3_0 conda-forge +pluggy 1.6.0 pyhd8ed1ab_0 conda-forge +popt 1.16 h0b475e3_2002 conda-forge +pre-commit 4.5.1 pyha770c72_0 conda-forge +progressbar2 4.5.0 pyhd8ed1ab_1 conda-forge +proj 9.7.0 hb72c0af_0 conda-forge +prometheus-cpp 1.3.0 ha5d0236_0 conda-forge +prometheus_client 0.23.1 pyhd8ed1ab_0 conda-forge +prompt-toolkit 3.0.52 pyha770c72_0 conda-forge +prompt_toolkit 3.0.52 hd8ed1ab_0 conda-forge +psutil 7.1.3 py313h54dd161_0 conda-forge +pthread-stubs 0.4 
hb9d3cd8_1002 conda-forge +ptyprocess 0.7.0 pyhd8ed1ab_1 conda-forge +pugixml 1.15 h3f63f65_0 conda-forge +pulseaudio-client 17.0 h9a8bead_2 conda-forge +pure_eval 0.2.3 pyhd8ed1ab_1 conda-forge +pyamg 5.3.0 py313hfaae9d9_1 conda-forge +pyarrow 22.0.0 py313h78bf25f_0 conda-forge +pyarrow-core 22.0.0 py313he109ebe_0_cpu conda-forge +pycodestyle 2.14.0 pyhd8ed1ab_0 conda-forge +pycparser 2.22 pyh29332c3_1 conda-forge +pyevtk 1.6.0 pyhb0bfe47_1 conda-forge +pyflakes 3.4.0 pyhd8ed1ab_0 conda-forge +pygments 2.19.2 pyhd8ed1ab_0 conda-forge +pyparsing 3.2.5 pyhcf101f3_0 conda-forge +pyproj 3.7.2 py313h77f6078_2 conda-forge +pyremap 2.1.0 pyhd8ed1ab_0 conda-forge +pyshp 3.0.2 pyhd8ed1ab_0 conda-forge +pysocks 1.7.1 pyha55dd90_7 conda-forge +pytest 9.0.2 pyhcf101f3_0 conda-forge +python 3.13.9 hc97d973_101_cp313 conda-forge +python-dateutil 2.9.0.post0 pyhe01879c_2 conda-forge +python-fastjsonschema 2.21.2 pyhe01879c_0 conda-forge +python-gil 3.13.9 h4df99d1_101 conda-forge +python-igraph 1.0.0 py313h7033f15_0 conda-forge +python-json-logger 2.0.7 pyhd8ed1ab_0 conda-forge +python-tzdata 2025.2 pyhd8ed1ab_0 conda-forge +python-utils 3.9.1 pyhff2d567_1 conda-forge +python_abi 3.13 8_cp313 conda-forge +pytz 2025.2 pyhd8ed1ab_0 conda-forge +pywavelets 1.9.0 py313h29aa505_2 conda-forge +pyyaml 6.0.3 py313h3dea7bd_0 conda-forge +pyzmq 27.1.0 py312hfb55c3c_0 conda-forge +qhull 2020.2 h434a139_5 conda-forge +rav1e 0.7.1 h8fae777_3 conda-forge +re2 2025.11.05 h5301d42_0 conda-forge +readline 8.2 h8c095d6_2 conda-forge +referencing 0.37.0 pyhcf101f3_0 conda-forge +requests 2.32.5 pyhcf101f3_1 conda-forge +rfc3339-validator 0.1.4 pyhd8ed1ab_1 conda-forge +rfc3986-validator 0.1.1 pyh9f0ad1d_0 conda-forge +rfc3987-syntax 1.1.0 pyhe01879c_1 conda-forge +roman-numerals-py 3.1.0 pyhd8ed1ab_0 conda-forge +rpds-py 0.28.0 py313h843e2db_2 conda-forge +rsync 3.4.1 h81c0278_2 conda-forge +ruamel.yaml 0.18.17 py313h07c4f96_0 conda-forge +ruamel.yaml.clib 0.2.14 py313h07c4f96_0 conda-forge +s2n 
1.6.0 h8399546_1 conda-forge +scikit-image 0.26.0 np2py313hb172dc5_0 conda-forge +scipy 1.17.0 py313h4b8bb8b_1 conda-forge +sdl2 2.32.56 h54a6638_0 conda-forge +sdl3 3.2.26 h68140b3_0 conda-forge +send2trash 1.8.3 pyh0d859eb_1 conda-forge +setuptools 80.9.0 pyhff2d567_0 conda-forge +shaderc 2025.4 h3e344bc_0 conda-forge +shapely 2.1.2 py313had47c43_2 conda-forge +six 1.17.0 pyhe01879c_1 conda-forge +snappy 1.2.2 h03e3b7b_1 conda-forge +sniffio 1.3.1 pyhd8ed1ab_2 conda-forge +snowballstemmer 3.0.1 pyhd8ed1ab_0 conda-forge +sortedcontainers 2.4.0 pyhd8ed1ab_1 conda-forge +soupsieve 2.8 pyhd8ed1ab_0 conda-forge +sphinx 8.2.3 pyhd8ed1ab_0 conda-forge +sphinx_rtd_theme 3.0.2 pyha770c72_0 conda-forge +sphinxcontrib-applehelp 2.0.0 pyhd8ed1ab_1 conda-forge +sphinxcontrib-devhelp 2.0.0 pyhd8ed1ab_1 conda-forge +sphinxcontrib-htmlhelp 2.1.0 pyhd8ed1ab_1 conda-forge +sphinxcontrib-jquery 4.1 pyhd8ed1ab_1 conda-forge +sphinxcontrib-jsmath 1.0.1 pyhd8ed1ab_1 conda-forge +sphinxcontrib-qthelp 2.0.0 pyhd8ed1ab_1 conda-forge +sphinxcontrib-serializinghtml 1.1.10 pyhd8ed1ab_1 conda-forge +spirv-tools 2025.4 hb700be7_0 conda-forge +sqlite 3.51.0 heff268d_0 conda-forge +stack_data 0.6.3 pyhd8ed1ab_1 conda-forge +svt-av1 3.1.2 hecca717_0 conda-forge +tbb 2022.3.0 h8d10470_1 conda-forge +tblib 3.2.2 pyhcf101f3_0 conda-forge +tempest-remap 2.2.0 h397758c_8 conda-forge +termcolor 3.2.0 pyhd8ed1ab_0 conda-forge +terminado 0.18.1 pyh0d859eb_0 conda-forge +texttable 1.7.0 pyhd8ed1ab_1 conda-forge +tifffile 2025.10.16 pyhd8ed1ab_0 conda-forge +tinycss2 1.4.0 pyhd8ed1ab_0 conda-forge +tk 8.6.13 noxft_ha0e22de_103 conda-forge +tomli 2.3.0 pyhcf101f3_0 conda-forge +toolz 1.1.0 pyhd8ed1ab_1 conda-forge +tornado 6.5.2 py313h07c4f96_2 conda-forge +tqdm 4.67.1 pyhd8ed1ab_1 conda-forge +traitlets 5.14.3 pyhd8ed1ab_1 conda-forge +typing-extensions 4.15.0 h396c80c_0 conda-forge +typing_extensions 4.15.0 pyhcf101f3_0 conda-forge +typing_utils 0.1.0 pyhd8ed1ab_1 conda-forge +tzdata 2025b h78e105d_0 
conda-forge +udunits2 2.2.28 h40f5838_3 conda-forge +ukkonen 1.0.1 py313h7037e92_6 conda-forge +uri-template 1.3.0 pyhd8ed1ab_1 conda-forge +urllib3 2.5.0 pyhd8ed1ab_0 conda-forge +virtualenv 20.35.4 pyhd8ed1ab_0 conda-forge +wayland 1.24.0 hd6090a7_1 conda-forge +wayland-protocols 1.45 hd8ed1ab_0 conda-forge +wcwidth 0.2.14 pyhd8ed1ab_0 conda-forge +webcolors 25.10.0 pyhd8ed1ab_0 conda-forge +webencodings 0.5.1 pyhd8ed1ab_3 conda-forge +websocket-client 1.9.0 pyhd8ed1ab_0 conda-forge +widgetsnbextension 4.0.15 pyhd8ed1ab_0 conda-forge +x264 1!164.3095 h166bdaf_2 conda-forge +x265 3.5 h924138e_3 conda-forge +xarray 2025.12.0 pyhcf101f3_0 conda-forge +xkeyboard-config 2.46 hb03c661_0 conda-forge +xorg-libice 1.1.2 hb9d3cd8_0 conda-forge +xorg-libsm 1.2.6 he73a12e_0 conda-forge +xorg-libx11 1.8.12 h4f16b4b_0 conda-forge +xorg-libxau 1.0.12 hb03c661_1 conda-forge +xorg-libxaw 1.0.16 hb9d3cd8_0 conda-forge +xorg-libxcursor 1.2.3 hb9d3cd8_0 conda-forge +xorg-libxdmcp 1.1.5 hb03c661_1 conda-forge +xorg-libxext 1.3.6 hb9d3cd8_0 conda-forge +xorg-libxfixes 6.0.2 hb03c661_0 conda-forge +xorg-libxmu 1.2.1 hb9d3cd8_1 conda-forge +xorg-libxpm 3.5.17 hb9d3cd8_1 conda-forge +xorg-libxrandr 1.5.4 hb9d3cd8_0 conda-forge +xorg-libxrender 0.9.12 hb9d3cd8_0 conda-forge +xorg-libxscrnsaver 1.2.4 hb9d3cd8_0 conda-forge +xorg-libxt 1.3.1 hb9d3cd8_0 conda-forge +xxhash 0.8.3 hb47aa4a_0 conda-forge +xyzservices 2025.10.0 pyhd8ed1ab_0 conda-forge +yaml 0.2.5 h280c20c_3 conda-forge +zeromq 4.3.5 h387f397_9 conda-forge +zfp 1.0.1 h909a3a2_3 conda-forge +zict 3.0.0 pyhd8ed1ab_1 conda-forge +zipp 3.23.0 pyhd8ed1ab_0 conda-forge +zlib 1.3.1 hb9d3cd8_2 conda-forge +zlib-ng 2.2.5 hde8ca8f_0 conda-forge +zstandard 0.25.0 py313h54dd161_1 conda-forge +zstd 1.5.7 hb8e6e7a_2 conda-forge + +*********************************************************************** diff --git a/compass/landice/tests/ensemble_generator/sgh_ensemble_analysis/results_aggregator.py 
"""
Aggregate results across multiple ensemble iterations.
"""

import glob
import json
import os


class ResultsAggregator:
    """
    Combine results from multiple ensemble iterations (initial + restarts).

    Each analysis iteration is expected to have written an
    ``analysis_summary.json`` file into its own work directory directly
    under ``base_dir``; this class collects those files, merges their
    statistics, and can print or save the combined results.
    """

    def __init__(self, base_dir):
        """
        Initialize aggregator.

        Parameters
        ----------
        base_dir : str
            Parent directory containing analysis work directories
            (where you ran compass setup/run for analysis_ensemble)
        """
        self.base_dir = base_dir

    def find_summary_files(self):
        """
        Find all analysis_summary.json files.

        Searches immediate subdirectories of ``base_dir``, e.g.:
            <base_dir>/analysis_ensemble1/analysis_summary.json
            <base_dir>/analysis_ensemble2/analysis_summary.json

        Returns
        -------
        list of str
            Paths to summary files, sorted
        """
        # only one directory level down: analysis work dirs live
        # directly under base_dir
        pattern = os.path.join(self.base_dir, '*', 'analysis_summary.json')
        return sorted(glob.glob(pattern))

    def aggregate(self):
        """
        Aggregate results from all analysis iterations.

        Returns
        -------
        dict or None
            Aggregated results, or ``None`` if no summary files were found
        """
        summaries = self.find_summary_files()

        if not summaries:
            print("No summary files found")
            return None

        aggregated = {
            'iterations': [],
            'total_completed': 0,
            'total_steady_state': 0,
            'total_data_compatible': 0,
            'total_both_criteria': 0,
            'final_steady_state_runs': [],
            'final_data_compatible_runs': [],
            'final_both_criteria_runs': [],
        }

        # union of run names across iterations; later iterations may
        # re-report runs already seen, so sets avoid double counting
        # in the "final" lists (the totals intentionally count repeats)
        all_steady = set()
        all_compatible = set()
        all_both = set()

        for summary_file in summaries:
            with open(summary_file, 'r') as f:
                summary = json.load(f)

            steady = summary['steady_state_runs']
            compatible = summary['data_compatible_runs']
            both = summary['both_criteria_runs']

            aggregated['iterations'].append({
                'timestamp': summary['timestamp'],
                'ensemble_dir': summary['ensemble_dir'],
                'completed': summary['completed_runs'],
                'steady_state': len(steady),
                'data_compatible': len(compatible),
                'both_criteria': len(both),
            })
            aggregated['total_completed'] += summary['completed_runs']
            aggregated['total_steady_state'] += len(steady)
            aggregated['total_data_compatible'] += len(compatible)
            aggregated['total_both_criteria'] += len(both)

            all_steady.update(steady)
            all_compatible.update(compatible)
            all_both.update(both)

        # sorted() accepts any iterable; no need for an intermediate list
        aggregated['final_steady_state_runs'] = sorted(all_steady)
        aggregated['final_data_compatible_runs'] = sorted(all_compatible)
        aggregated['final_both_criteria_runs'] = sorted(all_both)

        return aggregated

    def print_summary(self, aggregated):
        """
        Print aggregated summary.

        Parameters
        ----------
        aggregated : dict
            Aggregated results from :py:meth:`aggregate`
        """
        print("\n" + "=" * 70)
        print("ENSEMBLE AGGREGATED RESULTS")
        print("=" * 70)

        for i, it in enumerate(aggregated['iterations'], 1):
            print(f"\nIteration {i}:")
            print(f"  Ensemble: {it['ensemble_dir']}")
            print(f"  Completed: {it['completed']}")
            print(f"  Steady-state: {it['steady_state']}")
            print(f"  Data-compatible: {it['data_compatible']}")
            print(f"  Both criteria: {it['both_criteria']}")

        print("\nFinal Results (across all iterations):")
        print(f"  Total completed: {aggregated['total_completed']}")
        # Bug fix: the original wrapped one of these labels with a
        # backslash-continued f-string, which embedded the continuation
        # line's leading whitespace in the printed text; adjacent-literal
        # concatenation avoids that. Labels are also consistently indented.
        print("  Steady-state runs: "
              f"{len(aggregated['final_steady_state_runs'])}")
        print(f"    {aggregated['final_steady_state_runs']}")
        print("  Data-compatible runs: "
              f"{len(aggregated['final_data_compatible_runs'])}")
        print(f"    {aggregated['final_data_compatible_runs']}")
        print("  Both criteria: "
              f"{len(aggregated['final_both_criteria_runs'])}")
        print(f"    {aggregated['final_both_criteria_runs']}")
        print("=" * 70 + "\n")

    def save_aggregated(self, aggregated, filename='aggregated_results.json'):
        """
        Save aggregated results.

        Parameters
        ----------
        aggregated : dict
            Aggregated results dictionary

        filename : str
            Output filename, written into ``base_dir``
        """
        filepath = os.path.join(self.base_dir, filename)
        with open(filepath, 'w') as f:
            json.dump(aggregated, f, indent=2)
        print(f"Aggregated results saved to {filepath}")
Analyzes each run for steady-state and data compatibility + 3. Generates analysis_summary.json with results + """ + + def __init__(self, test_group): + """ + Create the analysis ensemble test case. + + Parameters + ---------- + test_group : compass test group + The test group that this test case belongs to + """ + name = 'sgh_ensemble_analysis' + super().__init__(test_group=test_group, name=name) + + def configure(self): + """ + Configure analysis by reading ensemble directory to analyze. + """ + config = self.config + + try: + ensemble_dir = config.get('analysis_ensemble', + 'ensemble_work_dir') + except Exception: + raise ValueError( + "analysis_ensemble config must specify:\n" + " ensemble_work_dir\n" + "Add to config file:\n" + "[analysis_ensemble]\n" + "ensemble_work_dir = /path/to/ensemble/work/dir\n" + "ensemble_config_file = /path/to/ensemble.cfg" + ) + + if not os.path.exists(ensemble_dir): + raise ValueError( + f"ensemble_work_dir not found: {ensemble_dir}" + ) + + # Add single analysis step + self.add_step(AnalysisStep( + test_case=self, + ensemble_dir=ensemble_dir + )) diff --git a/compass/landice/tests/ensemble_generator/sgh_ensemble_analysis/validate_mali_with_spec.py b/compass/landice/tests/ensemble_generator/sgh_ensemble_analysis/validate_mali_with_spec.py new file mode 100644 index 0000000000..1044ff50bb --- /dev/null +++ b/compass/landice/tests/ensemble_generator/sgh_ensemble_analysis/validate_mali_with_spec.py @@ -0,0 +1,348 @@ +#!/usr/bin/env python +""" +Validate MALI subglacial hydrology simulations using radar specularity content. + +Calculates a validation score for MALI subglacial hydrology simulations +using radar specularity content data from Young 2016. + +Values of 0 represent specularity content below 20%. +Values of 3.3 represent specularity content above 20% and energy 1 microsecond +below the bed 15 dB lower than the bed echo. 
+Values of 6.7 represent specularity content above 20% and energy 1 microsecond +below the bed 15 dB within the bed echo. +""" + +import json +from argparse import ArgumentParser + +import cmocean +import matplotlib.pyplot as plt +import numpy as np +import tifffile as tiff +import xarray as xr +from scipy.interpolate import griddata +from scipy.stats import binned_statistic_2d + + +class validateWithSpec: + """ + Validator for MALI simulations using specularity content observations. + """ + + def __init__(self): + """Initialize validator and parse command-line arguments.""" + print("Gathering Information ...") + parser = ArgumentParser( + prog='validate_mali_with_spec.py', + description='Calculate validation score for MALI subglacial \ + hydrology simulations using specularity content') + parser.add_argument("--maliFile", dest="maliFile", required=True, + help="MALI output file to validate") + parser.add_argument( + "--specTiff", + dest="specTiff", + required=True, + help='Tiff file containing specularity content (Young 2016)') + parser.add_argument("--compRes", dest="compRes", type=float, + help="Grid resolution for interpolation (meters)", + default=5000.0) + parser.add_argument( + "--ba_threshold", + dest="ba_threshold", + type=float, + help="Balanced accuracy threshold for data compatibility", + default=0.65) + parser.add_argument("--output_json", dest="output_json", + help="JSON file to save validation results", + default="validation_results.json") + parser.add_argument("--plot", dest="plot", action='store_true', + help="Generate comparison plots") + + args = parser.parse_args() + self.options = args + + def interpolate_to_common_grid(self): + """Interpolate MALI and specularity data to common grid.""" + # Open MALI file and read bed roughness parameter + ds_mali = xr.open_dataset( + self.options.maliFile, + decode_times=False, + decode_cf=False) + + # Read Wr from global attribute config_SGH_bed_roughness_max + try: + Wr = 
float(ds_mali.attrs['config_SGH_bed_roughness_max']) + print(f"Using bed roughness parameter Wr from MALI file: {Wr}") + except (KeyError, ValueError, TypeError): + print( + "Warning: config_SGH_bed_roughness_max \ + not found in MALI file attributes") + print("Using default value Wr = 0.1") + Wr = 0.1 + + self.options.Wr = Wr + + # Establish common grid + res = self.options.compRes + + xCell = ds_mali['xCell'][:].values + yCell = ds_mali['yCell'][:].values + + xmin = np.min(xCell) + xmax = np.max(xCell) + ymin = np.min(yCell) + ymax = np.max(yCell) + + x_edges = np.arange(xmin, xmax + res, res) + y_edges = np.arange(ymin, ymax + res, res) + + x_centers = (x_edges[:-1] + x_edges[1:]) / 2 + y_centers = (y_edges[:-1] + y_edges[1:]) / 2 + + # Remap MALI data + Xgrid, Ygrid = np.meshgrid(x_centers, y_centers) + + W = ds_mali['waterThickness'][-1, :].values + W_remapped = griddata(points=(xCell, yCell), + values=W, + xi=(Xgrid, Ygrid), + method='linear') + + Z = ds_mali['bedTopography'][-1, :].values + Z_remapped = griddata(points=(xCell, yCell), + values=Z, + xi=(Xgrid, Ygrid), + method='linear') + + H = ds_mali['thickness'][-1, :].values + H_remapped = griddata(points=(xCell, yCell), + values=H, + xi=(Xgrid, Ygrid), + method='linear') + + # Open and process specularity TIFF + with tiff.TiffFile(self.options.specTiff) as tif: + page = tif.pages[0] + specData = page.asarray() + scale = page.tags["ModelPixelScaleTag"].value + tiepoint = page.tags["ModelTiepointTag"].value + + pixelWidth = scale[0] + pixelHeight = scale[1] + + i, j, k, x0, y0, z0 = tiepoint + + rows, cols = specData.shape + + x = x0 + np.arange(cols) * pixelWidth + y = y0 - np.arange(rows) * pixelHeight + + specData = specData.astype(float) + specData[specData == 0] = np.nan + + [Xspec, Yspec] = np.meshgrid(x, y) + specData = specData.ravel() + Xspec = Xspec.ravel() + Yspec = Yspec.ravel() + + mask = np.isfinite(specData) + specData = specData[mask] + Xspec = Xspec[mask] + Yspec = Yspec[mask] + + 
spec_remapped, x_edges_out, y_edges_out, binnum = binned_statistic_2d( + Xspec, Yspec, specData, + statistic='mean', + bins=[x_edges, y_edges] + ) + spec_remapped = spec_remapped.T + + # Filter specularity data + floating = (910 / 1028) * H_remapped + Z_remapped <= 0 + spec_remapped[floating] = np.nan + spec_remapped[H_remapped == 0] = np.nan + + east_AIS = Xgrid >= 0 + + east_valid = east_AIS & np.isfinite( + spec_remapped) & np.isfinite(W_remapped) + west_valid = ~east_AIS & np.isfinite( + spec_remapped) & np.isfinite(W_remapped) + + # Calculate Rwt (relative water thickness) + Rwt_e = W_remapped[east_valid] / Wr + Rwt_w = W_remapped[west_valid] / Wr + + # Define comparison thresholds + Sthresh = 3.33 # Physically-based specularity threshold + Rthresh = np.arange(0.95, 1.0, 0.01) + + Strue_e = spec_remapped[east_valid] >= Sthresh + Sfalse_e = spec_remapped[east_valid] < Sthresh + Strue_w = spec_remapped[west_valid] >= Sthresh + Sfalse_w = spec_remapped[west_valid] < Sthresh + + Strue_e = Strue_e[:, None] + Sfalse_e = Sfalse_e[:, None] + Strue_w = Strue_w[:, None] + Sfalse_w = Sfalse_w[:, None] + + Rtrue_e = Rwt_e[:, None] >= Rthresh + Rfalse_e = ~Rtrue_e + Rtrue_w = Rwt_w[:, None] >= Rthresh + Rfalse_w = ~Rtrue_w + + tp_e = np.sum(Strue_e & Rtrue_e, axis=0) + tn_e = np.sum(Sfalse_e & Rfalse_e, axis=0) + fp_e = np.sum(Sfalse_e & Rtrue_e, axis=0) + fn_e = np.sum(Strue_e & Rfalse_e, axis=0) + + tp_w = np.sum(Strue_w & Rtrue_w, axis=0) + tn_w = np.sum(Sfalse_w & Rfalse_w, axis=0) + fp_w = np.sum(Sfalse_w & Rtrue_w, axis=0) + fn_w = np.sum(Strue_w & Rfalse_w, axis=0) + + true_agree_e = tp_e / (tp_e + fn_e) + false_agree_e = tn_e / (tn_e + fp_e) + + true_agree_w = tp_w / (tp_w + fn_w) + false_agree_w = tn_w / (tn_w + fp_w) + + balanced_score_e = 0.5 * (true_agree_e + false_agree_e) + balanced_score_w = 0.5 * (true_agree_w + false_agree_w) + + self.BA_e = np.max(balanced_score_e) + self.BA_w = np.max(balanced_score_w) + print(f"balanced accuracy east: {self.BA_e:.4f}") 
+ print(f"balanced accuracy west: {self.BA_w:.4f}") + + self.Xgrid = Xgrid + self.Ygrid = Ygrid + self.spec = spec_remapped + self.W = W_remapped + self.H = H_remapped + self.Z = Z_remapped + self.Rthresh = Rthresh + self.ind_Rmax = np.argmax(balanced_score_e + balanced_score_w) + self.Sthresh = Sthresh + self.floating = floating + + def check_data_compatibility(self): + """ + Determine if model results are data-compatible. + + Returns + ------- + bool + True if both east and west BA >= threshold + + dict + Compatibility metrics + """ + threshold = self.options.ba_threshold + + is_compatible = (self.BA_e >= threshold and self.BA_w >= threshold) + + compatibility_metrics = { + 'BA_east': float(self.BA_e), + 'BA_west': float(self.BA_w), + 'threshold': float(threshold), + 'bed_roughness_Wr': float(self.options.Wr), + 'is_compatible': is_compatible, + 'BA_east_passes': bool(self.BA_e >= threshold), + 'BA_west_passes': bool(self.BA_w >= threshold), + } + + return is_compatible, compatibility_metrics + + def plot_comparison_maps(self): + """Generate comparison maps.""" + fig, ax = plt.subplots(figsize=(8, 5)) + H = self.H.copy() + H[self.floating] = np.nan + ax.contourf(self.Xgrid, self.Ygrid, H, levels=[ + 0.1, np.nanmax(self.H)], colors=[[0.9, 0.9, 0.9]]) + + cmap = cmocean.cm.matter + cmap = cmocean.tools.crop_by_percent(cmap, 45, which='max', N=None) + s = np.full(self.Xgrid.shape, np.nan) + s[self.spec >= self.Sthresh] = 1 + s[self.spec < self.Sthresh] = 0 + ax.pcolor(self.Xgrid, self.Ygrid, s, cmap=cmap) + + lev = self.Rthresh[self.ind_Rmax] + ax.contour( + self.Xgrid, + self.Ygrid, + self.W / + self.options.Wr, + levels=[lev], + colors='k', + linewidths=0.75) + + ax.set_xlim(-2e6, 2.6e6) + ax.set_ylim(-2e6, 0) + ax.text(-1.5e6, -1.55e6, + f"balanced accuracy west: \ + {np.round(self.BA_w, 2)}", fontsize=10) + ax.text(-1.5e6, -1.8e6, + f"balanced accuracy east: \ + {np.round(self.BA_e, 2)}", fontsize=10) + ax.set_aspect('equal', adjustable='box') + plt.savefig( + 
"spec_subglacialHydro_validation.png", + dpi=1000, + bbox_inches="tight") + print("Saved validation plot: spec_subglacialHydro_validation.png") + + def save_results(self, is_compatible, compatibility_metrics): + """Save validation results to JSON.""" + results = { + 'validation_type': 'specularity_content', + 'is_data_compatible': is_compatible, + 'metrics': compatibility_metrics, + 'mali_file': self.options.maliFile, + 'spec_file': self.options.specTiff, + } + + with open(self.options.output_json, 'w') as f: + json.dump(results, f, indent=2) + + print(f"Validation results saved to {self.options.output_json}") + + +def main(): + """Main entry point.""" + validator = validateWithSpec() + + validator.interpolate_to_common_grid() + + is_compatible, metrics = validator.check_data_compatibility() + + print("\n" + "=" * 60) + print("DATA COMPATIBILITY ASSESSMENT") + print("=" * 60) + print(f"Bed roughness (Wr): {metrics['bed_roughness_Wr']:.4f}") + print( + f"East BA: { + metrics['BA_east']:.4f} (threshold: { + metrics['threshold']:.4f}) - { + 'PASS' if metrics['BA_east_passes'] else 'FAIL'}") + print( + f"West BA: { + metrics['BA_west']:.4f} (threshold: { + metrics['threshold']:.4f}) - { + 'PASS' if metrics['BA_west_passes'] else 'FAIL'}") + print( + f"Overall: { + 'DATA COMPATIBLE' if is_compatible else 'NOT DATA COMPATIBLE'}") + print("=" * 60 + "\n") + + if hasattr(validator.options, 'plot') and validator.options.plot: + validator.plot_comparison_maps() + + validator.save_results(is_compatible, metrics) + + +if __name__ == "__main__": + main() diff --git a/compass/landice/tests/ensemble_generator/sgh_restart_ensemble/README.md b/compass/landice/tests/ensemble_generator/sgh_restart_ensemble/README.md new file mode 100644 index 0000000000..890e7e8e2c --- /dev/null +++ b/compass/landice/tests/ensemble_generator/sgh_restart_ensemble/README.md @@ -0,0 +1,357 @@ +# SGH Ensemble Restart + +Restarts incomplete ensemble members from checkpoints to reach steady state and/or
data compatibility. + +## Overview + +This test case continues incomplete ensemble members from their last checkpoint. It: + +1. **Identifies** runs that didn't reach steady state +2. **Verifies** they have sufficient progress (minimum simulation years) +3. **Schedules** continuations with automated job submission +4. **Tracks** restart attempts to prevent infinite loops + +## Quick Start + +After analyzing an ensemble with `sgh_ensemble_analysis`: + +```bash +# 1. Schedule restarts (creates config file) +python3 << 'PYTHON' +from compass.landice.tests.ensemble_generator.ensemble_templates.sgh_ensemble.restart import schedule_restarts + +config, runs = schedule_restarts( + '/work/analysis/analysis_summary.json', + '/work/restart_ens' +) +print(f"Identified {len(runs)} runs to restart") +PYTHON + +# 2. Run restart ensemble +compass setup -t landice/ensemble_generator/sgh_restart_ensemble \ + -w /work/restart \ + -f /work/restart_ens/restart_ensemble.cfg +compass run -w /work/restart + +# 3. Re-analyze to check progress +compass setup -t landice/ensemble_generator/sgh_ensemble_analysis \ + -w /work/analysis2 \ + -f /work/analysis2_config.cfg +compass run -w /work/analysis2 +``` + +## Configuration + +The restart config is **generated automatically** by `schedule_restarts()`, but you can also create one manually. + +### Required Settings + +```ini +[restart_ensemble] +spinup_work_dir = /path/to/original/spinup_ensemble +``` + +This should point to the directory containing `run000/`, `run001/`, etc. from the original ensemble. 
+ +### Tuning Parameters + +```ini +[restart_ensemble] + +# Maximum consecutive restart attempts per run +# Prevents infinite loops if a run keeps failing +max_consecutive_restarts = 3 + +# Minimum simulation years before restart +# Prevents restarting runs that haven't made progress +min_simulation_years_before_restart = 50.0 + +# Whether to auto-restart incomplete runs +# Set to False for manual control (requires config edits) +auto_restart_incomplete = True +``` + +**Tuning `min_simulation_years_before_restart`**: +- Lower (20-30 yrs): More frequent restarts, higher computational cost +- Default (50 yrs): Good balance for typical simulations +- Higher (100+ yrs): Fewer restarts, larger jumps in time + +**Tuning `max_consecutive_restarts`**: +- Lower (2): Stops after 2 attempts, saves resources +- Default (3): 3 attempts = ~150+ years possible +- Higher (4-5): Allows more attempts, more expensive + +## How Restarts Work + +### Restart Files + +Each restart is organized as: + +``` +spinup_ensemble/run003/ +├── output/ +│ ├── globalStats.nc (original output) +│ ├── rst.2050-01-01.nc (original checkpoint) +│ └── history.nc +├── restart_attempt_1/ (first restart) +│ ├── job_script.sh +│ ├── namelist.landice +│ ├── output/ (new output from restart) +│ └── rst.restart.nc +├── restart_attempt_2/ (second restart) +│ ├── job_script.sh +│ ├── output/ +│ └── rst.restart.nc +└── restart_attempt_3/ (third restart) + └── ... +``` + +Each restart: +- Reads the previous checkpoint +- Updates timestamps +- Continues to the original `stop_time` +- Saves output to separate directory + +### Completion Detection + +The restart process checks: +1. Does the run have output? → No restart if missing +2. Has it completed? → No restart if already finished +3. Is it at steady state? → No restart if already satisfied +4. Has it made progress? → No restart if too short +5. Too many attempts? 
→ No restart if max exceeded + +## Workflow: Identify → Schedule → Run → Re-analyze + +### Step 1: Analyze Spinup Ensemble + +```bash +compass setup -t landice/ensemble_generator/sgh_ensemble_analysis \ + -w /work/analysis1 -f spinup_analysis.cfg +compass run -w /work/analysis1 +``` + +Output: `/work/analysis1/analysis_summary.json` with categorized runs + +### Step 2: Identify Restarts + +```bash +python3 << 'PYTHON' +from compass.landice.tests.ensemble_generator.ensemble_templates.sgh_ensemble.restart import schedule_restarts + +config, runs = schedule_restarts( + '/work/analysis1/analysis_summary.json', + '/work/restart_ens', + min_years=50.0, # Don't restart runs shorter than 50 years + max_attempts=3 # Max 3 restart attempts +) + +if runs: + print(f"Will restart {len(runs)} runs: {runs}") +else: + print("No runs to restart!") +PYTHON +``` + +This generates `/work/restart_ens/restart_ensemble.cfg` with: +- `spinup_work_dir` pointing to original ensemble +- List of runs to restart +- All parameters configured + +### Step 3: Set Up Restart Ensemble + +```bash +compass setup -t landice/ensemble_generator/sgh_restart_ensemble \ + -w /work/restart \ + -f /work/restart_ens/restart_ensemble.cfg +``` + +This creates restart steps for each identified run. + +### Step 4: Run Restarts + +```bash +compass run -w /work/restart +``` + +Ensemble manager submits SLURM jobs for all restarts and monitors them. 
+ +### Step 5: Re-analyze Restarts + +```bash +compass setup -t landice/ensemble_generator/sgh_ensemble_analysis \ + -w /work/analysis2 -f restart_analysis.cfg +compass run -w /work/analysis2 +``` + +Where `restart_analysis.cfg` points to: +```ini +[analysis_ensemble] +ensemble_work_dir = /work/restart/sgh_restart_ensemble +config_file = /work/restart_ens/restart_ensemble.cfg +``` + +Check results: +```bash +cat /work/analysis2/analysis_summary.json | python -m json.tool +``` + +### Step 6: Iterate if Needed + +If some runs still need restart: + +```python +# Repeat steps 2-5 to schedule another round of restarts +config, runs = schedule_restarts( + '/work/analysis2/analysis_summary.json', + '/work/restart_ens2' +) +``` + +## Understanding Restart Decisions + +### Why a Run is Restarted + +✅ **Restarted if**: +- Has output but didn't complete +- Made sufficient progress (≥ min_simulation_years) +- Not at steady state yet +- Below max restart attempts + +### Why a Run is NOT Restarted + +❌ **Skipped if**: +- Already completed (`restart_timestamp == stop_time`) +- Already at steady state +- Too short (simulation < min_simulation_years) +- No output files found +- Max restart attempts reached +- No analysis results available + +### Example Output + +``` +Identifying restart candidates... + run000: Restart candidate (85.2 yrs, 0 attempts) + run001: Already completed + run002: Restart candidate (63.5 yrs, 1 attempt) + run003: Too short (42.3 < 50.0 yrs) + run004: Already at steady state + ... + +Runs to restart: 10 +Already at steady-state: 15 +Data compatible: 12 +Both criteria met: 10 +``` + +## Advanced Configuration + +### Manual Restart Selection + +To restart specific runs only, create config manually: + +```ini +[restart_ensemble] +spinup_work_dir = /work/spinup_ensemble +# Restarts will be auto-detected from analysis results +``` + +And edit `/work/restart_ens/restart_ensemble.cfg` before setup if needed. 
+ +### Conservative Restarts + +Require longer simulations before restart: + +```ini +[restart_ensemble] +min_simulation_years_before_restart = 100.0 # Very conservative +max_consecutive_restarts = 2 # Few attempts +``` + +### Aggressive Restarts + +More frequent restarts: + +```ini +[restart_ensemble] +min_simulation_years_before_restart = 30.0 # Frequent restarts +max_consecutive_restarts = 5 # Many attempts +``` + +## Troubleshooting + +### "No runs to restart" + +- All runs are already complete or at steady state +- Check analysis results to confirm +- Run analysis on ensemble to find incomplete runs + +### "Max restart attempts reached" + +- Run has been restarted 3 times (or configured max) +- Check if the run has persistent issues: + ```bash + ls /work/spinup_ensemble/run003/restart_attempt_*/log.landice* + ``` +- May need to adjust parameters or investigate model failures + +### Restart jobs not submitting + +- Check that `spinup_work_dir` exists and has run directories +- Verify `ensemble_manager` step is configured +- Check compass logs for errors + +### Output not being saved + +- Check `/path/to/run/restart_attempt_N/output/` +- Ensure disk space available +- Check SLURM logs for job failures + +## Monitoring Restarts + +Track restart progress: + +```bash +# Check restart attempt directories +for run_dir in /work/spinup_ensemble/run*; do + run_name=$(basename $run_dir) + attempts=$(ls -d $run_dir/restart_attempt_* 2>/dev/null | wc -l) + echo "$run_name: $attempts restart attempts" +done + +# Check job queue +squeue -u $USER | grep uq_run + +# Monitor output +tail -f /work/restart/sgh_restart_ensemble/run*/restart_attempt_*/log.landice.*.log +``` + +## Restart Attempt Statistics + +After completion, analyze restart success: + +```python +import os +import json +from pathlib import Path + +spinup_dir = Path('/work/spinup_ensemble') + +for run_dir in sorted(spinup_dir.glob('run*')): + attempts = len(list(run_dir.glob('restart_attempt_*'))) + + # Check if 
now at steady state + analysis_file = run_dir / 'analysis_results.json' + if analysis_file.exists(): + with open(analysis_file) as f: + results = json.load(f) + ss = results.get('steady_state', {}).get('is_steady_state', False) + print(f"{run_dir.name}: {attempts} attempts → {'STEADY' if ss else 'NOT STEADY'}") +``` + +## See Also + +- `analysis/`: Analyze runs to identify restarts +- `spinup/`: Initial ensemble setup +- `branch/`: Branch from spinup for projection scenarios diff --git a/compass/landice/tests/ensemble_generator/sgh_restart_ensemble/__init__.py b/compass/landice/tests/ensemble_generator/sgh_restart_ensemble/__init__.py new file mode 100644 index 0000000000..6b853b81f2 --- /dev/null +++ b/compass/landice/tests/ensemble_generator/sgh_restart_ensemble/__init__.py @@ -0,0 +1,21 @@ +""" +SGH Ensemble Restart Package + +Provides test case and scheduling for restarting incomplete ensemble members. + +This module identifies runs from a spinup_ensemble that did not complete +or reach steady state, and continues them from their last checkpoint. + +Usage: + compass setup -t landice/ensemble_generator/sgh_restart_ensemble + -w /work/restart -f restart_ensemble.cfg + compass run -w /work/restart +""" + +from .restart_scheduler import RestartScheduler +from .test_case import RestartEnsemble + +__all__ = [ + 'RestartEnsemble', + 'RestartScheduler', +] diff --git a/compass/landice/tests/ensemble_generator/sgh_restart_ensemble/ensemble_generator.cfg b/compass/landice/tests/ensemble_generator/sgh_restart_ensemble/ensemble_generator.cfg new file mode 100644 index 0000000000..b75c6735f3 --- /dev/null +++ b/compass/landice/tests/ensemble_generator/sgh_restart_ensemble/ensemble_generator.cfg @@ -0,0 +1,64 @@ +# Restart ensemble configuration template +# +# This is a TEMPLATE. A real restart_ensemble.cfg should be generated +# using restart_scheduler.schedule_restarts() which will fill in the +# specific runs and paths for your ensemble. 
+# +# To create a restart ensemble after running spinup: +# +# 1. Analyze the completed spinup ensemble: +# from compass.landice.tests.ensemble_generator.ensemble_templates.sgh_ensemble.analysis import analyze_ensemble +# analyze_ensemble('/work/ensemble/spinup_ensemble', '/work/config.cfg') +# +# 2. Schedule restarts: +# from compass.landice.tests.ensemble_generator.ensemble_templates.sgh_ensemble.restart import schedule_restarts +# config_file, runs = schedule_restarts( +# '/work/ensemble/spinup_ensemble/analysis_summary.json', +# '/work/restart_ensemble' +# ) +# +# 3. Set up and run the restart ensemble: +# compass setup -t restart_ensemble -w /work/restart_ensemble -f restart_ensemble.cfg +# compass run -w /work/restart_ensemble + +[ensemble_generator] +ensemble_template = sgh_ensemble + +[restart_ensemble] + +# REQUIRED: Path to the spinup ensemble work directory +# This should point to the spinup_ensemble directory that contains +# the run000, run001, etc. subdirectories to be restarted +spinup_work_dir = REPLACE_WITH_SPINUP_WORK_DIR + +# Maximum consecutive restart attempts per run +# After this many restarts, the run will not be restarted again +# (prevents infinite loops if a run keeps failing) +max_consecutive_restarts = 3 + +# Minimum simulation length (years) before attempting restart +# Runs that haven't completed this many years of simulation won't be restarted +# (ensures meaningful progress is made before each restart) +min_simulation_years_before_restart = 50.0 + +# Whether to automatically restart incomplete runs +# If False, only explicitly marked runs will be restarted +auto_restart_incomplete = True + +# Analysis parameters (used to determine completion status) +steady_state_window_years = 10.0 +steady_state_imbalance_threshold = 0.05 +balanced_accuracy_threshold = 0.65 + +# Specularity content TIFF file for validation (optional) +# Set to None or comment out if not available +spec_tiff_file = None + +[ensemble] + +# Number of parallel tasks per restart job +ntasks
= 128 + +# CFL fraction for adaptive timestepper +# Same consideration as spinup_ensemble +cfl_fraction = 0.7 diff --git a/compass/landice/tests/ensemble_generator/sgh_restart_ensemble/restart_member.py b/compass/landice/tests/ensemble_generator/sgh_restart_ensemble/restart_member.py new file mode 100644 index 0000000000..d80f062360 --- /dev/null +++ b/compass/landice/tests/ensemble_generator/sgh_restart_ensemble/restart_member.py @@ -0,0 +1,241 @@ +""" +Step for restarting a single incomplete ensemble member. +""" + +import os +import shutil + +from compass.io import symlink +from compass.job import write_job_script +from compass.model import run_model +from compass.step import Step + + +class RestartMember(Step): + """ + A step for restarting an incomplete ensemble member from checkpoint. + + This step: + 1. Links to the original run's restart files + 2. Updates configuration for restart (timestamps, namelist) + 3. Sets up job script + 4. Runs the restart + + Attributes + ---------- + run_num : int + The run number for this ensemble member + + spinup_work_dir : str + Path to the original spinup ensemble work directory + + restart_attempt : int + Which restart attempt this is (1 = first, 2 = second, etc.) + """ + + def __init__(self, test_case, run_num, spinup_work_dir): + """ + Create a restart step for an ensemble member + + Parameters + ---------- + test_case : compass.TestCase + The test case this step belongs to + + run_num : int + The run number for this ensemble member + + spinup_work_dir : str + Path to the directory containing the original spinup runs + """ + self.run_num = run_num + self.spinup_work_dir = spinup_work_dir + self.name = f'run{run_num:03}_restart' + + super().__init__(test_case=test_case, name=self.name) + + def setup(self): + """ + Set up this restart by: + 1. Identifying the restart attempt number + 2. Copying necessary files from original run + 3. Updating restart configuration + 4. 
Setting up job script + """ + + print(f'Setting up restart for run number {self.run_num}') + + config = self.config + run_name = f'run{self.run_num:03}' + original_run_dir = os.path.join(self.spinup_work_dir, run_name) + + if not os.path.exists(original_run_dir): + raise RuntimeError( + f"Original run directory not found: {original_run_dir}") + + # Determine restart attempt number + self.restart_attempt = self._get_restart_attempt_number( + original_run_dir) + restart_subdir = os.path.join( + self.work_dir, f'restart_attempt_{ + self.restart_attempt}') + os.makedirs(restart_subdir, exist_ok=True) + + # Read restart timestamp to determine simulation state + restart_timestamp_file = os.path.join( + original_run_dir, 'restart_timestamp') + if not os.path.exists(restart_timestamp_file): + raise RuntimeError(f"No restart_timestamp in {original_run_dir}") + + with open(restart_timestamp_file, 'r') as f: + restart_time = f.read().strip() + + print(f" {run_name}: Restarting from timestamp {restart_time}") + print(f" {run_name}: Restart attempt {self.restart_attempt}") + + # Copy essential configuration files + files_to_copy = [ + 'namelist.landice', + 'streams.landice', + 'albany_input.yaml', + 'run_info.cfg' + ] + + for fname in files_to_copy: + src = os.path.join(original_run_dir, fname) + dst = os.path.join(restart_subdir, fname) + if os.path.exists(src): + shutil.copy(src, dst) + + # Add model as input + self.add_model_as_input() + + # Copy or symlink restart file from original run + self._copy_restart_file(original_run_dir, restart_subdir, restart_time) + + # Copy graph file if it exists + graph_file = os.path.join(original_run_dir, 'graph.info') + if os.path.exists(graph_file): + shutil.copy(graph_file, restart_subdir) + + # Set up job script + self.ntasks = config.getint('ensemble', 'ntasks', fallback=128) + self.min_tasks = self.ntasks + + config.set('job', 'job_name', f'uq_{run_name}_r{self.restart_attempt}') + machine = config.get('deploy', 'machine') + + # 
Create pre/post run commands + pre_run_cmd = ( + 'LOGDIR=restart_logs_`date +"%Y-%m-%d_%H-%M-%S"`;' + 'mkdir -p $LOGDIR; cp log* $LOGDIR 2>/dev/null || true; ' + 'date' + ) + post_run_cmd = "date" + + write_job_script(config, machine, + target_cores=self.ntasks, min_cores=self.min_tasks, + work_dir=restart_subdir, + pre_run_commands=pre_run_cmd, + post_run_commands=post_run_cmd) + + # Create symlink to load script if available + if 'LOAD_COMPASS_ENV' in os.environ: + script_filename = os.environ['LOAD_COMPASS_ENV'] + symlink(script_filename, os.path.join(restart_subdir, + 'load_compass_env.sh')) + + # Store for run method + self.restart_work_dir = restart_subdir + self.original_run_dir = original_run_dir + + def run(self): + """ + Run this restart of the ensemble member. + """ + print( + f"Running restart for run{ + self.run_num:03} (attempt { + self.restart_attempt})") + run_model(self) + + def _get_restart_attempt_number(self, original_run_dir): + """ + Determine which restart attempt this is. + + Parameters + ---------- + original_run_dir : str + Directory of the original run + + Returns + ------- + int + Restart attempt number (1 for first restart, 2 for second, etc.) + """ + # Count existing restart_attempt_* subdirectories + restart_dirs = [] + if os.path.exists(original_run_dir): + restart_dirs = [d for d in os.listdir(original_run_dir) + if d.startswith('restart_attempt_')] + + return len(restart_dirs) + 1 + + def _copy_restart_file( + self, + original_run_dir, + restart_subdir, + restart_time): + """ + Copy the appropriate restart file to the restart directory. 
+ + Parameters + ---------- + original_run_dir : str + Directory of the original run + + restart_subdir : str + Directory for this restart attempt + + restart_time : str + Time string from restart_timestamp (format: YYYY-MM-DD_HH:MM:SS) + """ + import glob + + # MALI restart files typically named as: rst.YYYY-MM-DD.nc + # Extract just the date part from restart_time + date_part = restart_time.split('_')[0] # YYYY-MM-DD + + # Look for restart file with this date in output directory + output_dir = os.path.join(original_run_dir, 'output') + if os.path.exists(output_dir): + pattern = os.path.join(output_dir, f'rst.{date_part}*.nc') + restart_files = glob.glob(pattern) + + if restart_files: + # Use the most recent (last) restart file + src_file = sorted(restart_files)[-1] + dst_file = os.path.join(restart_subdir, 'rst.restart.nc') + shutil.copy(src_file, dst_file) + print(f" Copied restart file: {os.path.basename(src_file)}") + return + + # Look in run directory directly (older style) + pattern = os.path.join(original_run_dir, f'rst.{date_part}*.nc') + restart_files = glob.glob(pattern) + + if restart_files: + src_file = sorted(restart_files)[-1] + dst_file = os.path.join(restart_subdir, 'rst.restart.nc') + shutil.copy(src_file, dst_file) + print(f" Copied restart file: {os.path.basename(src_file)}") + else: + print( + f" WARNING: No restart file found matching date {date_part}") + print(" Searched patterns:") + print(f" {os.path.join(output_dir, f'rst.{date_part}*.nc')}") + print( + f" { + os.path.join( + original_run_dir, + f'rst.{date_part}*.nc')}") diff --git a/compass/landice/tests/ensemble_generator/sgh_restart_ensemble/restart_scheduler.py b/compass/landice/tests/ensemble_generator/sgh_restart_ensemble/restart_scheduler.py new file mode 100644 index 0000000000..9aaecb6628 --- /dev/null +++ b/compass/landice/tests/ensemble_generator/sgh_restart_ensemble/restart_scheduler.py @@ -0,0 +1,274 @@ +""" +Schedule restarts for incomplete ensemble runs. 
+ +This module provides utilities to create restart ensemble configurations +based on analysis results from a completed ensemble. +""" + +import json +import os +from datetime import datetime + + +class RestartScheduler: + """ + Create restart ensemble configuration based on analysis results. + + This class reads an analysis_summary.json from a completed ensemble, + identifies runs needing restart, and generates configuration for a + new restart_ensemble test case. + """ + + def __init__(self, summary_file, new_work_dir): + """ + Initialize scheduler. + + Parameters + ---------- + summary_file : str + Path to analysis_summary.json from completed ensemble + + new_work_dir : str + Directory where restart ensemble will be set up + """ + self.summary_file = summary_file + self.new_work_dir = new_work_dir + + if not os.path.exists(summary_file): + raise FileNotFoundError(f"Summary file not found: {summary_file}") + + with open(summary_file, 'r') as f: + self.summary = json.load(f) + + self.original_ensemble_dir = self.summary['ensemble_dir'] + os.makedirs(new_work_dir, exist_ok=True) + + def identify_restart_candidates( + self, + min_years=50.0, + max_attempts=3, + verbose=True): + """ + Identify runs that should be restarted. 
+ + Parameters + ---------- + min_years : float + Minimum simulation years before restart (default: 50.0) + Runs that haven't reached this threshold won't be restarted + + max_attempts : int + Maximum restart attempts per run (default: 3) + Prevents infinite restart loops + + verbose : bool + Whether to print classification details + + Returns + ------- + list + Sorted list of run numbers to restart + """ + restart_candidates = [] + + for run_num in self.summary['restart_needed_runs']: + results = self.summary['individual_results'].get(run_num, {}) + ss_info = results.get('steady_state', {}) + + final_year = ss_info.get('metrics', {}).get('final_year', 0.0) + + if final_year >= min_years: + # Check if run has too many restart attempts + run_dir = os.path.join( + self.original_ensemble_dir, f'run{ + run_num:03}') + + restart_attempts = 0 + if os.path.exists(run_dir): + restart_dirs = [d for d in os.listdir(run_dir) + if d.startswith('restart_attempt_')] + restart_attempts = len(restart_dirs) + + if restart_attempts < max_attempts: + restart_candidates.append(run_num) + if verbose: + print(f" run{run_num:03}: Restart candidate " + f"({final_year:.1f} yrs, \ + {restart_attempts} attempts)") + else: + if verbose: + print( + f"run{run_num:03}: Max attempts reached \ + ({restart_attempts}/{max_attempts})") + else: + if verbose: + print( + f" run{ + run_num:03}: Too short ({ + final_year:.1f} < { + min_years:.1f} yrs)") + + return sorted(restart_candidates) + + def create_config_file(self, restart_runs, base_config_file=None): + """ + Create restart ensemble configuration file. + + Parameters + ---------- + restart_runs : list + Run numbers to restart + + base_config_file : str, optional + Base configuration file to inherit settings from + + Returns + ------- + str + Path to created restart_ensemble.cfg + """ + # Format the run list nicely + run_list_str = ', '.join(map(str, restart_runs[:10])) + if len(restart_runs) > 10: + run_list_str += f', ... 
and {len(restart_runs) - 10} more' + + config_content = f"""# Restart ensemble configuration +# Created: {datetime.now().isoformat()} +# +# Original spinup ensemble: {self.original_ensemble_dir} +# Restarted from: {self.summary_file} +# +# Runs to restart ({len(restart_runs)} total): +# {run_list_str} + +[ensemble_generator] +ensemble_template = sgh_ensemble + +[restart_ensemble] + +# Path to the spinup ensemble to restart from +spinup_work_dir = {self.original_ensemble_dir} + +# Restart configuration +# Maximum consecutive restart attempts per run (prevents infinite loops) +max_consecutive_restarts = 3 + +# Minimum simulation length (years) before attempting restart +# Prevents restarting runs that are too short +min_simulation_years_before_restart = 50.0 + +# Whether to automatically restart incomplete runs +# Set to False for manual control +auto_restart_incomplete = True + +# Analysis parameters (same as spinup_ensemble) +steady_state_window_years = 10.0 +steady_state_imbalance_threshold = 0.05 +balanced_accuracy_threshold = 0.65 + +# Specularity content TIFF file for validation (optional) +spec_tiff_file = None + +[ensemble] + +# Job parameters for restart jobs +ntasks = 128 +cfl_fraction = 0.7 +""" + + config_file = os.path.join(self.new_work_dir, 'restart_ensemble.cfg') + with open(config_file, 'w') as f: + f.write(config_content) + + print(f"Config file created: {config_file}") + return config_file + + def print_summary(self, restart_runs): + """ + Print restart scheduling summary. 
+ + Parameters + ---------- + restart_runs : list + Run numbers identified for restart + """ + print("\n" + "=" * 70) + print("RESTART ENSEMBLE PLAN") + print("=" * 70) + print(f"Original ensemble: {self.original_ensemble_dir}") + print(f"Restart work dir: {self.new_work_dir}") + print() + print(f"Runs to restart: {len(restart_runs)}") + if restart_runs: + # Print in groups of 10 + for i in range(0, len(restart_runs), 10): + group = restart_runs[i:i + 10] + print(f" {group}") + print() + print( + f"Already at steady state: \ + {len(self.summary['steady_state_runs'])}") + print(f"Data compatible: {len(self.summary['data_compatible_runs'])}") + print(f"Both criteria met: {len(self.summary['both_criteria_runs'])}") + print("=" * 70 + "\n") + + +def schedule_restarts( + summary_file, + new_work_dir, + min_years=50.0, + max_attempts=3): + """ + Convenience function to schedule restarts from analysis summary. + + Parameters + ---------- + summary_file : str + Path to analysis_summary.json + + new_work_dir : str + Directory where restart ensemble will be created + + min_years : float + Minimum simulation years before restart + + max_attempts : int + Maximum restart attempts per run + + Returns + ------- + tuple + (config_file, restart_runs) or (None, []) if no restarts needed + + Examples + -------- + >>> from compass.landice.tests.ensemble_generator. + ensemble_templates.sgh_ensemble.restart + import schedule_restarts + >>> + >>> config_file, restart_runs = schedule_restarts( + ... '/work/ensemble1/spinup_ensemble/analysis_summary.json', + ... '/work/ensemble2', + ... min_years=50.0, + ... max_attempts=3 + ... ) + >>> + >>> if config_file: + ... print(f"Restart config: {config_file}") + ... 
print(f"Runs to restart: {restart_runs}") + """ + scheduler = RestartScheduler(summary_file, new_work_dir) + + print("Identifying restart candidates...") + restart_runs = scheduler.identify_restart_candidates( + min_years, max_attempts) + + if not restart_runs: + print("No runs to restart!") + return None, [] + + scheduler.print_summary(restart_runs) + + config_file = scheduler.create_config_file(restart_runs) + + return config_file, restart_runs diff --git a/compass/landice/tests/ensemble_generator/sgh_restart_ensemble/test_case.py b/compass/landice/tests/ensemble_generator/sgh_restart_ensemble/test_case.py new file mode 100644 index 0000000000..1bfad8e665 --- /dev/null +++ b/compass/landice/tests/ensemble_generator/sgh_restart_ensemble/test_case.py @@ -0,0 +1,237 @@ +""" +Restart ensemble test case for SGH template. + +This test case identifies incomplete runs from a spinup ensemble and sets up +restart steps for them. Each restart step continues the simulation from the +last checkpoint. + +Usage: + compass setup -t landice/ensemble_generator/sgh_restart_ensemble + -w /work/restart -f restart_ensemble.cfg + compass run -w /work/restart +""" + +import glob +import json +import os + +from compass.landice.tests.ensemble_generator.ensemble_manager import ( + EnsembleManager, +) +from compass.testcase import TestCase + +from .restart_member import RestartMember + + +class RestartEnsemble(TestCase): + """ + A test case for restarting incomplete ensemble members. + + This identifies runs from a spinup_ensemble that did not complete + or reach steady state, and continues them from their last checkpoint. 
+ """ + + def __init__(self, test_group): + """ + Create the restart ensemble test case + + Parameters + ---------- + test_group : compass test group + The test group that this test case belongs to + """ + name = 'sgh_restart_ensemble' + super().__init__(test_group=test_group, name=name) + + # Add the ensemble manager (handles job submission) + self.add_step(EnsembleManager(test_case=self)) + + def configure(self): + """ + Configure restart ensemble by identifying incomplete runs. + + This method: + 1. Reads the spinup ensemble directory + 2. Checks analysis results to identify incomplete runs + 3. Creates RestartMember steps for runs needing continuation + 4. Sets up ensemble_manager to handle job submission + """ + config = self.config + section = config.get('restart_ensemble', {}) + + spinup_work_dir = section.get('spinup_work_dir') + + if not spinup_work_dir: + raise ValueError( + "restart_ensemble config must specify spinup_work_dir\n" + "Add to config file:\n" + "[restart_ensemble]\n" + "spinup_work_dir = /path/to/spinup/ensemble" + ) + + if not os.path.exists(spinup_work_dir): + raise ValueError(f"spinup_work_dir not found: {spinup_work_dir}") + + # Get restart configuration + max_consecutive_restarts = section.getint( + 'max_consecutive_restarts', 3) + min_simulation_years = section.getfloat( + 'min_simulation_years_before_restart', 50.0) + auto_restart = section.getboolean('auto_restart_incomplete', True) + + # Scan for existing run directories + run_dirs = sorted(glob.glob(os.path.join(spinup_work_dir, 'run*'))) + + restart_runs = [] + skipped_runs = [] + + for run_dir in run_dirs: + run_name = os.path.basename(run_dir) + try: + run_num = int(run_name.replace('run', '')) + except ValueError: + continue + + # Check if run should be restarted + should_restart, reason = self._should_restart_run( + run_dir=run_dir, + run_num=run_num, + min_years=min_simulation_years, + max_restarts=max_consecutive_restarts, + auto_restart=auto_restart + ) + + if 
should_restart: + restart_runs.append(run_num) + print(f"Scheduling restart for {run_name}") + + # Add restart member step + self.add_step(RestartMember( + test_case=self, + run_num=run_num, + spinup_work_dir=spinup_work_dir + )) + else: + if reason: + skipped_runs.append((run_num, reason)) + + if skipped_runs: + print("\nSkipped runs:") + for run_num, reason in skipped_runs: + print(f" run{run_num:03}: {reason}") + + self.restart_run_numbers = restart_runs + + # Only run ensemble_manager; it submits individual restart jobs + self.steps_to_run = ['ensemble_manager'] + + def _should_restart_run( + self, + run_dir, + run_num, + min_years, + max_restarts, + auto_restart): + """ + Determine if a run should be restarted. + + Parameters + ---------- + run_dir : str + Directory of the original run + + run_num : int + Run number + + min_years : float + Minimum simulation years required before restart + + max_restarts : int + Maximum number of restart attempts allowed + + auto_restart : bool + Whether to automatically restart incomplete runs + + Returns + ------- + tuple + (should_restart, reason_if_skipped) + """ + + # Check if run has output + output_file = os.path.join(run_dir, 'output', 'globalStats.nc') + if not os.path.exists(output_file): + return False, "No output file" + + # Check if run completed (reached stop time) + restart_timestamp_file = os.path.join(run_dir, 'restart_timestamp') + namelist_file = os.path.join(run_dir, 'namelist.landice') + + if not os.path.exists(restart_timestamp_file): + return False, "No restart_timestamp (run may have failed)" + + try: + with open(restart_timestamp_file, 'r') as f: + current_time = f.read().strip() + + import compass.namelist + namelist = compass.namelist.ingest(namelist_file) + stop_time = \ + namelist['time_management']['config_stop_time'].strip( + ).strip("'") + + if current_time == stop_time: + return False, "Already completed" + + except Exception as e: + return False, f"Error reading completion status: {e}" + + # 
Check analysis results + analysis_file = os.path.join(run_dir, 'analysis_results.json') + + if os.path.exists(analysis_file): + try: + with open(analysis_file, 'r') as f: + results = json.load(f) + + # If at steady state, don't restart + ss_info = results.get('steady_state', {}) + if ss_info.get('is_steady_state', False): + return False, "Already at steady state" + + # Check simulation length + metrics = ss_info.get('metrics', {}) + sim_length = metrics.get('final_year', 0.0) + + if sim_length < min_years: + return False, f"Too short ({ + sim_length:.1f} < { + min_years:.1f} yrs)" + + except (json.JSONDecodeError, IOError): + # If analysis file is malformed, still allow restart + pass + else: + # No analysis file - if we can't verify it reached min years, don't + # restart + return False, "No analysis results to verify progress" + + # Check number of restart attempts + restart_attempts = 0 + if os.path.exists(run_dir): + restart_dirs = [d for d in os.listdir(run_dir) + if d.startswith('restart_attempt_')] + restart_attempts = len(restart_dirs) + + if restart_attempts >= max_restarts: + return False, f"Max restart attempts reached \ + ({restart_attempts}/{max_restarts})" + + # If all checks pass and auto_restart is enabled + if not auto_restart: + return False, "Auto-restart disabled" + + return True, None + + # no run() method is needed + # no validate() method is needed diff --git a/compass/landice/tests/ensemble_generator/spinup_ensemble/__init__.py b/compass/landice/tests/ensemble_generator/spinup_ensemble/__init__.py index 1b6aae8a80..7f4403e7f8 100644 --- a/compass/landice/tests/ensemble_generator/spinup_ensemble/__init__.py +++ b/compass/landice/tests/ensemble_generator/spinup_ensemble/__init__.py @@ -10,8 +10,11 @@ from compass.landice.tests.ensemble_generator.ensemble_member import ( EnsembleMember, ) +from compass.landice.tests.ensemble_generator.ensemble_template import ( + add_template_file, + get_spinup_template_package, +) from compass.testcase import 
TestCase -from compass.validate import compare_variables class SpinupEnsemble(TestCase): @@ -59,132 +62,54 @@ def configure(self): configure phase, we must explicitly add the steps to steps_to_run. """ - # Define some constants - rhoi = 910.0 - rhosw = 1028.0 - cp_seawater = 3.974e3 - latent_heat_ice = 335.0e3 - sec_in_yr = 3600.0 * 24.0 * 365.0 - c_melt = (rhosw * cp_seawater / (rhoi * latent_heat_ice))**2 + config = self.config + resource_module = get_spinup_template_package(config) + add_template_file(config, resource_module, 'ensemble_generator.cfg') - section = self.config['ensemble'] + section = config['ensemble_generator'] + spinup_section_name = 'spinup_ensemble' + if not config.has_section(spinup_section_name): + raise ValueError( + f"Missing required config section '{spinup_section_name}'.") + spinup_section = config[spinup_section_name] + parameter_section_name = 'ensemble.parameters' + if not config.has_section(parameter_section_name): + raise ValueError( + f"Missing required config section '{parameter_section_name}'.") + param_section = config[parameter_section_name] # Determine start and end run numbers being requested self.start_run = section.getint('start_run') self.end_run = section.getint('end_run') - # Define parameters being sampled and their ranges - param_list = ['fric_exp', 'mu_scale', 'stiff_scale', - 'von_mises_threshold', 'calv_limit', 'gamma0', - 'meltflux'] - - # Determine how many and which parameters are being used - n_params = 0 - param_dict = {} - for param in param_list: - param_dict[param] = {} - param_dict[param]['active'] = section.getboolean(f'use_{param}') - n_params += param_dict[param]['active'] + parameter_specs = _get_parameter_specs(param_section) + + # Determine how many parameters are being sampled. 
+ n_params = len(parameter_specs) if n_params == 0: sys.exit("ERROR: At least one parameter must be specified.") - # Generate unit parameter vectors - either uniform or Sobol - sampling_method = section.get('sampling_method') max_samples = section.getint('max_samples') if max_samples < self.end_run: sys.exit("ERROR: max_samples is exceeded by end_run") - if sampling_method == 'sobol': - # Generate unit Sobol sequence for number of parameters being used - print(f"Generating Sobol sequence for {n_params} parameter(s)") - sampler = qmc.Sobol(d=n_params, scramble=True, seed=4) - param_unit_values = sampler.random(n=max_samples) - elif sampling_method == 'uniform': - print(f"Generating uniform sampling for {n_params} parameter(s)") - samples = np.linspace(0.0, 1.0, max_samples).reshape(-1, 1) - param_unit_values = np.tile(samples, (1, n_params)) - else: - sys.exit("ERROR: Unsupported sampling method specified.") - - # Define parameter vectors for each param being used - idx = 0 - for param in param_list: - if param_dict[param]['active']: - print('Including parameter ' + param) - min_val = section.getfloat(f'{param}_min') - max_val = section.getfloat(f'{param}_max') - param_dict[param]['vec'] = param_unit_values[:, idx] * \ - (max_val - min_val) + min_val - idx += 1 - else: - param_dict[param]['vec'] = np.full((max_samples,), None) - - # Deal with a few special cases - - # change units on calving speed limit from m/yr to s/yr - if param_dict['calv_limit']['active']: - param_dict['calv_limit']['vec'] = \ - param_dict['calv_limit']['vec'][:] / sec_in_yr - - # melt flux needs to be converted to deltaT - if param_dict['meltflux']['active']: - # First calculate mean TF for this domain - iceshelf_area_obs = section.getfloat('iceshelf_area_obs') - input_file_path = section.get('input_file_path') - TF_file_path = section.get('TF_file_path') - mean_TF, iceshelf_area = calc_mean_TF(input_file_path, - TF_file_path) - - # Adjust observed melt flux for ice-shelf area in init. 
condition - print(f'IS area: model={iceshelf_area}, Obs={iceshelf_area_obs}') - area_correction = iceshelf_area / iceshelf_area_obs - print(f"Ice-shelf area correction is {area_correction}.") - if (np.absolute(area_correction - 1.0) > 0.2): - print("WARNING: ice-shelf area correction is larger than " - "20%. Check data consistency before proceeding.") - param_dict['meltflux']['vec'] *= iceshelf_area / iceshelf_area_obs - - # Set up an array of TF values to use for linear interpolation - # Make it span a large enough range to capture deltaT what would - # be needed for the range of gamma0 values considered. - # Not possible to know a priori, so pick a wide range. - TFs = np.linspace(-5.0, 10.0, num=int(15.0 / 0.01)) - deltaT_vec = np.zeros(max_samples) - # For each run, calculate the deltaT needed to obtain the target - # melt flux - for ii in range(self.start_run, self.end_run + 1): - # spatially averaged version of ISMIP6 melt param.: - meltfluxes = (param_dict['gamma0']['vec'][ii] * c_melt * TFs * - np.absolute(TFs) * - iceshelf_area) * rhoi / 1.0e12 # Gt/yr - # interpolate deltaT value. Use nan values outside of range - # so out of range results get detected - deltaT_vec[ii] = np.interp(param_dict['meltflux']['vec'][ii], - meltfluxes, TFs, - left=np.nan, - right=np.nan) - mean_TF - if np.isnan(deltaT_vec[ii]): - sys.exit("ERROR: interpolated deltaT out of range. 
" - "Adjust definition of 'TFs'") - else: - deltaT_vec = [None] * max_samples - - # add runs as steps based on the run range requested - if self.end_run > max_samples: - sys.exit("Error: end_run specified in config exceeds maximum " - "sample size available in param_vector_filename") - for run_num in range(self.start_run, self.end_run + 1): - self.add_step(EnsembleMember( - test_case=self, run_num=run_num, - basal_fric_exp=param_dict['fric_exp']['vec'][run_num], - mu_scale=param_dict['mu_scale']['vec'][run_num], - stiff_scale=param_dict['stiff_scale']['vec'][run_num], - von_mises_threshold=param_dict['von_mises_threshold']['vec'][run_num], # noqa - calv_spd_lim=param_dict['calv_limit']['vec'][run_num], - gamma0=param_dict['gamma0']['vec'][run_num], - meltflux=param_dict['meltflux']['vec'][run_num], - deltaT=deltaT_vec[run_num])) - # Note: do not add to steps_to_run, because ensemble_manager - # will handle submitting and running the runs + sampling_method = section.get('sampling_method') + parameter_specs = _populate_parameter_vectors( + parameter_specs=parameter_specs, + sampling_method=sampling_method, + max_samples=max_samples) + + spec_by_name = {spec['name']: spec for spec in parameter_specs} + + deltaT_vec = _compute_delta_t_vec( + config=config, spinup_section=spinup_section, + spec_by_name=spec_by_name, + max_samples=max_samples, start_run=self.start_run, + end_run=self.end_run) + + _add_member_steps( + test_case=self, parameter_specs=parameter_specs, + spec_by_name=spec_by_name, deltaT_vec=deltaT_vec, + resource_module=resource_module, max_samples=max_samples) # Have 'compass run' only run the run_manager but not any actual runs. # This is because the individual runs will be submitted as jobs @@ -194,3 +119,278 @@ def configure(self): # no run() method is needed # no validate() method is needed + + +def _get_parameter_specs(section): + """Build parameter specification dictionaries from config options. 
+ + Parameters with an ``nl.`` prefix are treated as namelist parameters and + include one or more target namelist option names. Other parameters are + interpreted as supported special parameters (for example ``gamma0``). + + Returns + ------- + list of dict + Ordered parameter metadata with sampled bounds and placeholders for + populated sample vectors. + """ + specs = [] + special_params = {'fric_exp', 'mu_scale', 'stiff_scale', + 'gamma0', 'meltflux'} + + for option_name, raw_value in section.items(): + if option_name.endswith('.option_name'): + continue + parameter_name = option_name + bounds = _parse_range(raw_value, parameter_name) + + if parameter_name.startswith('nl.'): + option_key = f'{parameter_name}.option_name' + if option_key not in section: + raise ValueError( + f"Namelist parameter '{parameter_name}' must define " + f"'{option_key}'.") + namelist_options = _split_entries(section[option_key]) + if len(namelist_options) == 0: + raise ValueError( + f"Namelist parameter '{parameter_name}' has no " + "option names configured.") + specs.append({ + 'name': parameter_name, + 'type': 'namelist', + 'run_info_name': parameter_name[len('nl.'):], + 'option_names': namelist_options, + 'min': bounds[0], + 'max': bounds[1], + 'vec': None + }) + else: + if parameter_name not in special_params: + raise ValueError( + f"Unsupported special parameter '{parameter_name}'.") + specs.append({ + 'name': parameter_name, + 'type': 'special', + 'min': bounds[0], + 'max': bounds[1], + 'vec': None + }) + + return specs + + +def _populate_parameter_vectors(parameter_specs, sampling_method, + max_samples): + """Generate and scale samples to each parameter range. + + This function updates each ``spec['vec']`` in ``parameter_specs`` and + returns the same list for explicit readability at call site. 
+ ``sobol`` creates a space-filling sequence in unit space, + ``uniform`` creates linearly spaced samples, and ``log-uniform`` samples + linearly in log10 space (requiring strictly positive bounds). + + Returns + ------- + list of dict + The same ``parameter_specs`` list with each ``spec['vec']`` populated. + """ + n_params = len(parameter_specs) + if sampling_method == 'sobol': + print(f"Generating Sobol sequence for {n_params} parameter(s)") + sampler = qmc.Sobol(d=n_params, scramble=True, seed=4) + param_unit_values = sampler.random(n=max_samples) + elif sampling_method in {'uniform', 'log-uniform'}: + print(f"Generating {sampling_method} sampling for " + f"{n_params} parameter(s)") + samples = np.linspace(0.0, 1.0, max_samples).reshape(-1, 1) + param_unit_values = np.tile(samples, (1, n_params)) + else: + sys.exit("ERROR: Unsupported sampling method specified.") + + if sampling_method == 'log-uniform': + for spec in parameter_specs: + if spec['min'] <= 0.0 or spec['max'] <= 0.0: + sys.exit( + "ERROR: log-uniform sampling requires positive min/max " + f"for parameter '{spec['name']}'.") + + for idx, spec in enumerate(parameter_specs): + print('Including parameter ' + spec['name']) + if sampling_method == 'log-uniform': + log_min = np.log10(spec['min']) + log_max = np.log10(spec['max']) + spec['vec'] = 10.0 ** (param_unit_values[:, idx] * + (log_max - log_min) + log_min) + else: + spec['vec'] = param_unit_values[:, idx] * \ + (spec['max'] - spec['min']) + spec['min'] + return parameter_specs + + +def _compute_delta_t_vec(config, spinup_section, spec_by_name, max_samples, + start_run, end_run): + """Compute per-run ``deltaT`` values when ``meltflux`` is active. + + If ``meltflux`` is not sampled, this returns a list of ``None`` values. + When active, the function applies ice-shelf area correction to sampled + melt flux and interpolates the ``deltaT`` needed to match each target + melt flux over the requested run range. 
+ + Returns + ------- + list or numpy.ndarray + ``[None] * max_samples`` when ``meltflux`` is inactive, otherwise a + ``numpy.ndarray`` containing per-run ``deltaT`` values. + """ + if 'meltflux' not in spec_by_name: + return [None] * max_samples + + if 'gamma0' not in spec_by_name: + sys.exit("ERROR: parameter 'meltflux' requires 'gamma0'.") + if not config.has_option('spinup_ensemble', 'iceshelf_area_obs'): + sys.exit( + "ERROR: parameter 'meltflux' requires " + "'iceshelf_area_obs' in [spinup_ensemble].") + + iceshelf_area_obs = spinup_section.getfloat('iceshelf_area_obs') + input_file_path = spinup_section.get('input_file_path') + TF_file_path = spinup_section.get('TF_file_path') + mean_TF, iceshelf_area = calc_mean_TF(input_file_path, TF_file_path) + + print(f'IS area: model={iceshelf_area}, Obs={iceshelf_area_obs}') + area_correction = iceshelf_area / iceshelf_area_obs + print(f"Ice-shelf area correction is {area_correction}.") + if np.absolute(area_correction - 1.0) > 0.2: + print("WARNING: ice-shelf area correction is larger than " + "20%. Check data consistency before proceeding.") + + spec_by_name['meltflux']['vec'] *= area_correction + + rhoi = 910.0 + rhosw = 1028.0 + cp_seawater = 3.974e3 + latent_heat_ice = 335.0e3 + c_melt = (rhosw * cp_seawater / (rhoi * latent_heat_ice))**2 + TFs = np.linspace(-5.0, 10.0, num=int(15.0 / 0.01)) + deltaT_vec = np.zeros(max_samples) + for ii in range(start_run, end_run + 1): + meltfluxes = (spec_by_name['gamma0']['vec'][ii] * c_melt * + TFs * np.absolute(TFs) * iceshelf_area) * \ + rhoi / 1.0e12 # Gt/yr + deltaT_vec[ii] = np.interp( + spec_by_name['meltflux']['vec'][ii], meltfluxes, TFs, + left=np.nan, right=np.nan) - mean_TF + if np.isnan(deltaT_vec[ii]): + sys.exit("ERROR: interpolated deltaT out of range. 
" + "Adjust definition of 'TFs'") + + return deltaT_vec + + +def _build_namelist_values(parameter_specs, run_num): + """For parameter specs of type 'namelist', + collect namelist option values for a given run number + and save them in a dictionary keyed by namelist option name. + These will be applied when the runs are set up. + + Returns + ------- + tuple of dict + ``(namelist_option_values, namelist_parameter_values)`` for the + requested ``run_num``. + """ + namelist_option_values = {} + namelist_parameter_values = {} + + for spec in parameter_specs: + if spec['type'] != 'namelist': + continue + value = spec['vec'][run_num] + for namelist_option in spec['option_names']: + namelist_option_values[namelist_option] = value + namelist_parameter_values[spec['run_info_name']] = value + + return namelist_option_values, namelist_parameter_values + + +def _add_member_steps(test_case, parameter_specs, spec_by_name, deltaT_vec, + resource_module, max_samples): + """Create and register ``EnsembleMember`` steps for requested runs. + + This helper assembles namelist and special-parameter values for each run + and adds one member step per run to ``test_case``. 
+ """ + if test_case.end_run > max_samples: + sys.exit("Error: end_run specified in config exceeds maximum " + "sample size available in param_vector_filename") + + for run_num in range(test_case.start_run, test_case.end_run + 1): + namelist_option_values, namelist_parameter_values = \ + _build_namelist_values(parameter_specs, run_num) + + fric_exp = _get_special_value(spec_by_name, 'fric_exp', run_num) + mu_scale = _get_special_value(spec_by_name, 'mu_scale', run_num) + stiff_scale = _get_special_value(spec_by_name, 'stiff_scale', + run_num) + gamma0 = _get_special_value(spec_by_name, 'gamma0', run_num) + meltflux = _get_special_value(spec_by_name, 'meltflux', run_num) + + test_case.add_step(EnsembleMember( + test_case=test_case, run_num=run_num, + basal_fric_exp=fric_exp, + mu_scale=mu_scale, + stiff_scale=stiff_scale, + gamma0=gamma0, + meltflux=meltflux, + deltaT=deltaT_vec[run_num], + namelist_option_values=namelist_option_values, + namelist_parameter_values=namelist_parameter_values, + resource_module=resource_module)) + # Note: do not add to steps_to_run, because ensemble_manager + # will handle submitting and running the runs + + +def _split_entries(raw): + """Split comma- or whitespace-delimited config lists. + + Backslash-newline sequences used for line continuation are stripped so + that multi-line values are treated as a single logical line. Remaining + backslashes are also removed to avoid spurious option tokens. + + Returns + ------- + list of str + Non-empty parsed entries. + """ + cleaned = raw.replace('\\\r\n', ' ').replace('\\\n', ' ') + cleaned = cleaned.replace('\\', ' ') + return [entry for entry in cleaned.replace(',', ' ').split() if entry] + + +def _parse_range(raw, parameter_name): + """Parse parameter min,max bounds from a comma-delimited value. + + Returns + ------- + tuple of float + ``(min_value, max_value)`` parsed from ``raw``. 
+ """ + values = [entry.strip() for entry in raw.split(',') if entry.strip()] + if len(values) != 2: + raise ValueError( + f"Parameter '{parameter_name}' must contain exactly " + "two comma-separated values.") + return float(values[0]), float(values[1]) + + +def _get_special_value(spec_by_name, name, run_num): + """Get sampled value for a special parameter or ``None`` if inactive. + + Returns + ------- + float or None + Sampled value for ``name`` at ``run_num`` when present. + """ + if name not in spec_by_name: + return None + return spec_by_name[name]['vec'][run_num] diff --git a/docs/developers_guide/landice/api.rst b/docs/developers_guide/landice/api.rst index ae9736f688..5a11405506 100644 --- a/docs/developers_guide/landice/api.rst +++ b/docs/developers_guide/landice/api.rst @@ -192,6 +192,11 @@ ensemble_generator ensemble_member.EnsembleMember.setup ensemble_member.EnsembleMember.run + ensemble_template.get_ensemble_template_name + ensemble_template.get_spinup_template_package + ensemble_template.get_branch_template_package + ensemble_template.add_template_file + spinup_ensemble.SpinupEnsemble spinup_ensemble.SpinupEnsemble.configure diff --git a/docs/developers_guide/landice/test_groups/ensemble_generator.rst b/docs/developers_guide/landice/test_groups/ensemble_generator.rst index 9f62d40fc5..309d7f80e5 100644 --- a/docs/developers_guide/landice/test_groups/ensemble_generator.rst +++ b/docs/developers_guide/landice/test_groups/ensemble_generator.rst @@ -18,6 +18,17 @@ framework The shared config options for the ``ensemble_generator`` test group are described in :ref:`landice_ensemble_generator` in the User's Guide. +Model-specific inputs for this test group now live under: + +.. code-block:: none + + compass.landice.tests.ensemble_generator.ensemble_templates. + +with ``spinup`` and ``branch`` subpackages that each contain their own cfg, +namelist, and streams resources (plus ``albany_input.yaml`` for spinup). 
+The selected template name comes from +``[ensemble_generator] ensemble_template``. + ensemble_member ~~~~~~~~~~~~~~~ The class :py:class:`compass.landice.tests.ensemble_generator.EnsembleMember` @@ -104,11 +115,28 @@ phase. Also, by waiting until configure to define the ensemble members, it is possible to have the start and end run numbers set in the config, because the config is not parsed by the constructor. -The ``configure`` method is where most of the work happens. Here, the start -and end run numbers are read from the config, a parameter array is generated, -and the parameters to be varied and over what range are defined. +The ``configure`` method is where most of the work happens. +There is no default configuration for this test case, so the user must +provide a cfg file with the necessary options. This will typically be the +cfg located in the desired template directory or a user-modified copy of it. +With the cfg provided, the individual ensemble members will be set up. +Spinup run-control options (for example, ``start_run``, ``end_run``, +``sampling_method``, ``max_samples``, ``cfl_fraction``, and ``ntasks``) +are read from ``[ensemble_generator]``, while spinup resource paths and +related values (for example ``input_file_path`` and ``iceshelf_area_obs``) +are read from ``[spinup_ensemble]``. +Supported sampling methods are ``sobol``, ``uniform``, and ``log-uniform``. The values for each parameter are passed to the ``EnsembleMember`` constructor to define each run. + +Parameter definitions now come from ``[ensemble.parameters]`` where each +parameter uses ``<parameter_name> = min, max`` and ordering follows the order in +that section. Parameters with names prefixed by ``nl.`` are interpreted as +generic float-valued namelist perturbations and must define +``<parameter_name>.option_name`` with one or more namelist options. 
Parameters without +the ``nl.`` prefix are reserved for special perturbations that use custom +logic (currently ``fric_exp``, ``mu_scale``, ``stiff_scale``, ``gamma0``, +and ``meltflux``). Finally, each run is now added to the test case as a step to run, because they were not automatically added by compass during the test case constructor phase. @@ -134,13 +162,17 @@ The constructor adds the ensemble_manager as a step, as with the spinup_ensemble The ``configure`` method searches over the range of runs requested and assesses if the corresponding spinup_ensemble member reached the requested branch time. -If so, and if the branch_ensemble memebr directory does not already exist, that +If so, and if the branch_ensemble member directory does not already exist, that run is added as a step. Within each run (step), the restart file from the branch year is copied to the branch run directory. The time stamp is reassigned to 2015 (this could be made a cfg option in the future). Also copied over are -the namelist and albany_input.yamlm files. The namelist is updated with -settings specific to the branch ensemble, and a streams file specific to the -branch run is added. Finally, details for managing runs are set up, including -a job script. +the namelist and, when present (for Albany-based configurations), the +``albany_input.yaml`` file. The namelist is updated with settings specific to +the branch ensemble, and a streams file specific to the branch run is added. +Finally, details for managing runs are set up, including a job script. + +As in spinup, the branch configure method first loads +``ensemble_templates/<template_name>/branch/branch_ensemble.cfg`` based on +``[ensemble_generator] ensemble_template``. As in the spinup_ensemble, the ``run`` step just runs the model. 
diff --git a/docs/users_guide/landice/test_groups/ensemble_generator.rst b/docs/users_guide/landice/test_groups/ensemble_generator.rst index d8f77e4a4c..0304e6d3e7 100644 --- a/docs/users_guide/landice/test_groups/ensemble_generator.rst +++ b/docs/users_guide/landice/test_groups/ensemble_generator.rst @@ -6,7 +6,8 @@ ensemble_generator The ``landice/ensemble_generator`` test group creates ensembles of MALI simulations with different parameter values. The ensemble framework sets up a user-defined number of simulations with parameter values selected -from either uniform sampling or a space-filling Sobol sequence. +from uniform sampling, log-uniform sampling, or a space-filling Sobol +sequence. A test case in this test group consists of a number of ensemble members, and one ensemble manager. @@ -23,28 +24,55 @@ look as expected before spending time on a larger ensemble. This also allows one to add more ensemble members from the Sobol sequence later if UQ analysis indicates the original sample size was insufficient. -A number of possible parameters are supported and whether they are active and -what parameter value ranges should be used are specified in a user-supplied -config file. Currently these parameters are supported: +Parameter types +--------------- + +Parameters are defined in ``[ensemble.parameters]`` and fall into two +categories: + +* ``special`` parameters: parameters without the ``nl.`` prefix that use + custom setup logic beyond namelist replacement + +* ``namelist`` parameters: parameters prefixed with ``nl.`` that map directly + to one or more float namelist options through ``<parameter_name>.option_name``. + Note that only float namelist options are currently supported, but the framework + does not validate that the options defined in the config file are actually float + namelist options. 
Typically, ``.option_name`` will indicate a single + namelist option, but it can indicate multiple options if the same parameter + should be applied to multiple namelist options (e.g., for grounded and + floating von Mises threshold stresses). -* basal friction power law exponent +The currently supported special parameters are: -* scaling factor on muFriction +* ``fric_exp``: basal friction power-law exponent (requires modifying + ``muFriction`` and ``albany_input.yaml``) -* scaling factor on stiffnessFactor +* ``mu_scale``: multiplicative scale factor for ``muFriction`` in the + modified input file -* von Mises threshold stress for calving +* ``stiff_scale``: multiplicative scale factor for ``stiffnessFactor`` in the + modified input file -* calving rate speed limit +* ``gamma0``: ISMIP6-AIS basal-melt sensitivity coefficient -* gamma0 melt sensitivity parameter in ISMIP6-AIS ice-shelf basal melting - parameterization +* ``meltflux``: target ice-shelf basal melt flux, converted to ``deltaT`` + using ``gamma0`` and domain-mean thermal forcing -* target ice-shelf basal melt rate for ISMIP6-AIS ice-shelf basal melting - parameterization. In the model setup, the deltaT thermal forcing bias - adjustment is adjusted to obtain the target melt rate for a given gamma0 +Test cases +---------- -Additional parameters can be easily added in the future. +The test group includes two test cases: + +* ``spinup_ensemble``: a set of simulations from the same initial condition + but with different parameter values. This could either be fixed climate + relaxation spinup or forced by time-evolving historical conditions. + +* ``branch_ensemble``: a set of simulations branched from each member of the + spinup_ensemble in a specified year with a different forcing. Multiple + branch ensembles can be branched from one spinup_ensemble + +Test case operations +-------------------- ``compass setup`` will set up the simulations and the ensemble manager. 
``compass run`` from the test case work directory will submit each run as a @@ -72,57 +100,85 @@ Future improvements may include: * safety checks or warnings before submitting ensembles that will use large amounts of computing resources -* a method for maintaining namelist, streams, and albany_input.yaml files for - different ensembles. Currently, these input files are specific to the Amery - Ice Shelf ensemble run in 2023. +Ensemble templates +------------------ + +This test group uses a template-based configuration workflow. +Instead of maintaining one set of test-group resource files, each model +configuration lives in its own subdirectory under +``ensemble_templates/`` with separate spinup and branch +cfg/namelist/streams resources. Users typically select a template via the +``[ensemble_generator] ensemble_template`` option or create a new template. +The user may also provide custom overrides in a user cfg file. +A new ensemble template should be added for each new study by creating +a new subdirectory under ``ensemble_templates/`` with the same structure as +the default template and following a naming convention like: +````, e.g., ``amery4km.probproj.2024`` or +``ais4km.hydro.2026``. + +The selected template controls which config files and model resource files are +used for the spinup and branch cases. The package layout is: + +.. code-block:: none + + compass/landice/tests/ensemble_generator/ensemble_templates// + spinup/ + ensemble_generator.cfg + namelist.landice + streams.landice + albany_input.yaml + branch/ + branch_ensemble.cfg + namelist.landice + streams.landice -The test group includes two test cases: +config options +-------------- -* ``spinup_ensemble``: a set of simulations from the same initial condition - but with different parameter values. This could either be fixed climate - relaxation spinup or forced by time-evolving historical conditions. 
+The shared config option for this test group is: -* ``branch_ensemble``: a set of simulations branched from each member of the - spinup_ensemble in a specified year with a different forcing. Multiple - branch ensembles can be branched from one spinup_ensemble +.. code-block:: cfg -config options --------------- -Test cases in this test group have the following common config options. + [ensemble_generator] -This test group is intended for expert users, and it is expected that it -will typically be run with a customized cfg file. Note the default run -numbers create a small ensemble, but uncertainty quantification applications -will typically need dozens or more simulations. + # name of the ensemble template to use + # resources are loaded from: + # compass.landice.tests.ensemble_generator.ensemble_templates. + ensemble_template = default -The test-case-specific config options are: +The template-specific spinup config options (from +``ensemble_templates//spinup/ensemble_generator.cfg``) are: .. code-block:: cfg - [ensemble] + [ensemble_generator] # start and end numbers for runs to set up and run # Run numbers should be zero-based. # Additional runs can be added and run to an existing ensemble # without affecting existing runs, but trying to set up a run # that already exists will generate a warning and skip that run. - # If using uniform sampling, start_run should be 0 and end_run should be - # equal to (max_samples - 1), otherwise unexpected behavior may result. + # If using uniform or log-uniform sampling, start_run should be 0 and + # end_run should be equal to (max_samples - 1), otherwise unexpected + # behavior may result. # These values do not affect viz/analysis, which will include any # runs it finds. start_run = 0 end_run = 3 - # sampling_method can be either 'sobol' for a space-filling Sobol sequence - # or 'uniform' for uniform sampling. Uniform sampling is most appropriate - # for a single parameter sensitivity study. 
It will sample uniformly across - # all dimensions simultaneously, thus sampling only a small fraction of - # parameter space + # sampling_method can be 'sobol' for a space-filling Sobol sequence, + # 'uniform' for linear sampling, or 'log-uniform' for logarithmic + # sampling between min and max parameter bounds. + # Uniform and log-uniform are most appropriate for a single-parameter + # sensitivity study because they sample each active parameter using the + # same rank ordering, thus sampling only a small fraction of parameter + # space in higher dimensions. sampling_method = sobol # maximum number of samples to be considered. # max_samples needs to be greater or equal to (end_run + 1) - # When using uniform sampling, max_samples should equal (end_run + 1). + # When using uniform or log-uniform sampling, max_samples should equal + # (end_run + 1). # When using Sobol sequence, max_samples ought to be a power of 2. # max_samples should not be changed after the first set of ensemble. # So, when using Sobol sequence, max_samples might be set larger than @@ -153,90 +209,63 @@ The test-case-specific config options are: # to inform the choice for a large production ensemble. cfl_fraction = 0.7 - # Path to the initial condition input file. - # Eventually this could be hard-coded to use files on the input data - # server, but initially we want flexibility to experiment with different - # inputs and forcings - input_file_path = /global/cfs/cdirs/fanssie/MALI_projects/Amery_UQ/Amery_4to20km_from_whole_AIS/Amery.nc + # number of tasks that each ensemble member should be run with + # Eventually, compass could determine this, but we want explicit control for now + ntasks = 128 - # the value of the friction exponent used for the calculation of muFriction - # in the input file - orig_fric_exp = 0.2 + [spinup_ensemble] - # Path to ISMIP6 ice-shelf basal melt parameter input file. 
- basal_melt_param_file_path = /global/cfs/cdirs/fanssie/MALI_projects/Amery_UQ/Amery_4to20km_from_whole_AIS/forcing/basal_melt/parameterizations/Amery_4to20km_basin_and_coeff_gamma0_DeltaT_quadratic_non_local_median_allBasin2.nc + # Path to the initial condition input file. + # Eventually this could be hard-coded to use files on the input data + # server, but initially we want flexibility to experiment with different + # inputs and forcings + input_file_path = /global/cfs/cdirs/fanssie/MALI_projects/Amery_UQ/Amery_4to20km_from_whole_AIS/Amery.nc - # Path to thermal forcing file for the mesh to be used - TF_file_path = /global/cfs/cdirs/fanssie/MALI_projects/Amery_UQ/Amery_4to20km_from_whole_AIS/forcing/ocean_thermal_forcing/obs/Amery_4to20km_obs_TF_1995-2017_8km_x_60m.nc + # the value of the friction exponent used for the calculation of muFriction + # in the input file + orig_fric_exp = 0.2 - # Path to SMB forcing file for the mesh to be used - SMB_file_path = /global/cfs/cdirs/fanssie/MALI_projects/Amery_UQ/Amery_4to20km_from_whole_AIS/forcing/atmosphere_forcing/RACMO_climatology_1995-2017/Amery_4to20km_RACMO2.3p2_ANT27_smb_climatology_1995-2017_no_xtime_noBareLandAdvance.nc + # Path to ISMIP6 ice-shelf basal melt parameter input file. 
+ basal_melt_param_file_path = /global/cfs/cdirs/fanssie/MALI_projects/Amery_UQ/Amery_4to20km_from_whole_AIS/forcing/basal_melt/parameterizations/Amery_4to20km_basin_and_coeff_gamma0_DeltaT_quadratic_non_local_median_allBasin2.nc - # number of tasks that each ensemble member should be run with - # Eventually, compass could determine this, but we want explicit control for now - ntasks = 128 + # Path to thermal forcing file for the mesh to be used + TF_file_path = /global/cfs/cdirs/fanssie/MALI_projects/Amery_UQ/Amery_4to20km_from_whole_AIS/forcing/ocean_thermal_forcing/obs/Amery_4to20km_obs_TF_1995-2017_8km_x_60m.nc + + # Path to SMB forcing file for the mesh to be used + SMB_file_path = /global/cfs/cdirs/fanssie/MALI_projects/Amery_UQ/Amery_4to20km_from_whole_AIS/forcing/atmosphere_forcing/RACMO_climatology_1995-2017/Amery_4to20km_RACMO2.3p2_ANT27_smb_climatology_1995-2017_no_xtime_noBareLandAdvance.nc + + # For meltflux perturbations, this observed ice-shelf area is used when + # converting target melt flux to deltaT. + iceshelf_area_obs = 60654.e6 + +The parameter sampling definitions live in a separate section, +``[ensemble.parameters]``. The order listed sets the sampling +dimension ordering, special parameters are unprefixed, and namelist +parameters use the ``nl.`` prefix with a companion ``.option_name``. + +For ``log-uniform`` sampling, each parameter bound must be strictly +positive because sampling is performed in log space. + +.. 
code-block:: cfg + + [ensemble.parameters] + + # special parameters (handled by custom code) + fric_exp = 0.1, 0.33333 + mu_scale = 0.8, 1.2 + stiff_scale = 0.8, 1.2 + gamma0 = 9620.0, 471000.0 + meltflux = 12.0, 58.0 + + # namelist float parameters (generic handling) + nl.von_mises_threshold = 80.0e3, 180.0e3 + nl.von_mises_threshold.option_name = + config_grounded_von_Mises_threshold_stress, + config_floating_von_Mises_threshold_stress + + nl.calv_spd_limit = 0.0001585, 0.001585 + nl.calv_spd_limit.option_name = config_calving_speed_limit - # whether basal friction exponent is being varied - # [unitless] - use_fric_exp = True - # min value to vary over - fric_exp_min = 0.1 - # max value to vary over - fric_exp_max = 0.33333 - - # whether a scaling factor on muFriction is being varied - # [unitless: 1.0=no scaling] - use_mu_scale = True - # min value to vary over - mu_scale_min = 0.8 - # max value to vary over - mu_scale_max = 1.2 - - # whether a scaling factor on stiffnessFactor is being varied - # [unitless: 1.0=no scaling] - use_stiff_scale = True - # min value to vary over - stiff_scale_min = 0.8 - # max value to vary over - stiff_scale_max = 1.2 - - # whether the von Mises threshold stress (sigma_max) is being varied - # [units: Pa] - use_von_mises_threshold = True - # min value to vary over - von_mises_threshold_min = 80.0e3 - # max value to vary over - von_mises_threshold_max = 180.0e3 - - # whether the calving speed limit is being varied - # [units: km/yr] - use_calv_limit = False - # min value to vary over - calv_limit_min = 5.0 - # max value to vary over - calv_limit_max = 50.0 - - # whether ocean melt parameterization coefficient is being varied - # [units: m/yr] - use_gamma0 = True - # min value to vary over - gamma0_min = 9620.0 - # max value to vary over - gamma0_max = 471000.0 - - # whether target ice-shelf basal melt flux is being varied - # [units: Gt/yr] - use_meltflux = True - # min value to vary over - meltflux_min = 12. 
- # max value to vary over - meltflux_max = 58. - # ice-shelf area associated with target melt rates - # [units: m^2] - iceshelf_area_obs = 60654.e6 - -A user should copy the default config file to a user-defined config file -before setting up the test case and any necessary adjustments made. Importantly, the user-defined config should be modified to also include the following options that will be used for submitting the jobs for each ensemble member. @@ -263,25 +292,12 @@ spinup_ensemble ``landice/ensemble_generator/spinup_ensemble`` uses the ensemble framework to create an ensemble of simulations integrated over a specified time range. The test case -can be applied to any domain and set of input files. If the default namelist -and streams settings are not appropriate, they can be adjusted or a new test -case can be set up mirroring the existing one. - -The default model configuration uses: +can be applied to any domain and set of input files using the ensemble templates +discussed above. -* first-order velocity solver - -* power law basal friction - -* evolving temperature - -* von Mises calving - -* ISMIP6 surface mass balance and sub-ice-shelf melting using climatological - mean forcing - -The initial condition and forcing files are specified in the -``ensemble_generator.cfg`` file or a user modification of it. +The initial condition and forcing files are specified in the selected +template file +``compass/landice/tests/ensemble_generator/ensemble_templates//spinup/ensemble_generator.cfg`` branch_ensemble --------------- @@ -291,11 +307,17 @@ an ensemble of simulations that are branched from corresponding runs of the ``spinup_ensemble`` at a specified year with a different forcing. In general, any namelist or streams modifications can be applied to the branch runs. 
-The branch_ensemble test-case-specific config options are: +The branch_ensemble config options are read from the selected template file +``compass/landice/tests/ensemble_generator/ensemble_templates//branch/branch_ensemble.cfg``. +The default template options are: .. code-block:: cfg - # config options for setting up an ensemble + # selector for ensemble template resources + [ensemble_generator] + + # subdirectory within ensemble_templates/ where branch_ensemble options are located + ensemble_template = default # config options for branching an ensemble [branch_ensemble] @@ -325,8 +347,8 @@ The branch_ensemble test-case-specific config options are: # path to pickle file containing filtering information generated by plot_ensemble.py ensemble_pickle_file = None -Steps for setting up and running an ensmble -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Steps for setting up and running an ensemble +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ 1. With a compass conda environment set up, run, e.g., ``compass setup -t landice/ensemble_generator/spinup_ensemble -w WORK_DIR_PATH -f USER.cfg`` @@ -334,9 +356,9 @@ Steps for setting up and running an ensmble ensemble (typically a scratch drive) and ``USER.cfg`` is the user-defined config described in the previous section that includes options for ``[parallel]`` and ``[job]``, as well as any required - modifications to the ``[ensemble]`` section. Likely, most or all - attributes in the ``[ensemble]`` section need to be customized for a - given application. + modifications to the ``[ensemble_generator]`` and ``[spinup_ensemble]`` + sections. Likely, most or all attributes in these sections need to be + customized for a given application. 2. After ``compass setup`` completes and all runs are set up, go to the ``WORK_DIR_PATH`` and change to the