@@ -837,38 +837,38 @@ def get_trials_data_frame(self) -> pd.DataFrame:
837837 return self .experiment .to_df ()
838838
839839 def get_max_parallelism (self ) -> list [tuple [int , int ]]:
840- """Retrieves maximum number of trials that can be scheduled in parallel
840+ """Retrieves maximum number of trials that can be scheduled concurrently
841841 at different stages of optimization.
842842
843843 Some optimization algorithms profit significantly from sequential
844844 optimization (i.e. suggest a few points, get updated with data for them,
845845 repeat, see https://ax.dev/docs/bayesopt.html).
846- Parallelism setting indicates how many trials should be running simulteneously
846+ Concurrency setting indicates how many trials should be running simultaneously
847847 (generated, but not yet completed with data).
848848
849849     The output of this method is a mapping of the form
850- {num_trials -> max_parallelism_setting }, where the max_parallelism_setting
851- is used for num_trials trials. If max_parallelism_setting is -1, as
852- many of the trials can be ran in parallel , as necessary. If num_trials
853- in a tuple is -1, then the corresponding max_parallelism_setting
850+ {num_trials -> max_concurrency_setting }, where the max_concurrency_setting
851+ is used for num_trials trials. If max_concurrency_setting is -1, as
852+     many of the trials can be run concurrently , as necessary. If num_trials
853+ in a tuple is -1, then the corresponding max_concurrency_setting
854854 should be used for all subsequent trials.
855855
856856 For example, if the returned list is [(5, -1), (12, 6), (-1, 3)],
857- the schedule could be: run 5 trials with any parallelism , run 6 trials in
858- parallel twice, run 3 trials in parallel for as long as needed. Here,
857+ the schedule could be: run 5 trials with any concurrency , run 6 trials
858+ concurrently twice, run 3 trials concurrently for as long as needed. Here,
859859 'running' a trial means obtaining a next trial from `AxClient` through
860860 get_next_trials and completing it with data when available.
861861
862862 Returns:
863- Mapping of form {num_trials -> max_parallelism_setting }.
863+ Mapping of form {num_trials -> max_concurrency_setting }.
864864 """
865- parallelism_settings = []
865+ concurrency_settings = []
866866 for node in self .generation_strategy ._nodes :
867- # Extract max_parallelism from MaxGenerationParallelism criterion
868- max_parallelism = None
867+ # Extract max_concurrency from MaxGenerationParallelism criterion
868+ max_concurrency = None
869869 for tc in node .transition_criteria :
870870 if isinstance (tc , MaxGenerationParallelism ):
871- max_parallelism = tc .threshold
871+ max_concurrency = tc .threshold
872872 break
873873 # Try to get num_trials from the node. If there's no MinTrials
874874 # criterion (unlimited trials), num_trials will raise UserInputError.
@@ -877,13 +877,13 @@ def get_max_parallelism(self) -> list[tuple[int, int]]:
877877 num_trials = node .num_trials
878878 except UserInputError :
879879 num_trials = - 1
880- parallelism_settings .append (
880+ concurrency_settings .append (
881881 (
882882 num_trials ,
883- max_parallelism if max_parallelism is not None else num_trials ,
883+ max_concurrency if max_concurrency is not None else num_trials ,
884884 )
885885 )
886- return parallelism_settings
886+ return concurrency_settings
887887
888888 def get_optimization_trace (
889889 self , objective_optimum : float | None = None
0 commit comments