diff --git a/.github/workflows/python-app.yaml b/.github/workflows/python-app.yaml index f4dbc28c5..66ceceab4 100644 --- a/.github/workflows/python-app.yaml +++ b/.github/workflows/python-app.yaml @@ -24,6 +24,8 @@ concurrency: env: PYTHON_VERSION: "3.11" + MPLBACKEND: Agg # Non-interactive matplotlib backend for CI/testing + PLOTLY_RENDERER: json # Headless plotly renderer for CI/testing jobs: lint: diff --git a/CHANGELOG.md b/CHANGELOG.md index 8d81c9a0b..befccf890 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -51,13 +51,23 @@ If upgrading from v2.x, see the [v3.0.0 release notes](https://github.com/flixOp ## [Unreleased] - ????-??-?? -**Summary**: +**Summary**: Enhanced solver configuration with new CONFIG.Solving section for centralized solver parameter management. If upgrading from v2.x, see the [v3.0.0 release notes](https://github.com/flixOpt/flixOpt/releases/tag/v3.0.0) and [Migration Guide](https://flixopt.github.io/flixopt/latest/user-guide/migration-guide-v3/). ### ✨ Added +**Solver configuration:** +- **New `CONFIG.Solving` configuration section** for centralized solver parameter management: + - `mip_gap`: Default MIP gap tolerance for solver convergence (default: 0.01) + - `time_limit_seconds`: Default time limit in seconds for solver runs (default: 300) + - `log_to_console`: Whether solver should output to console (default: True) + - `log_main_results`: Whether to log main results after solving (default: True) +- Solvers (`HighsSolver`, `GurobiSolver`) now use `CONFIG.Solving` defaults for parameters, allowing global configuration +- Solver parameters can still be explicitly overridden when creating solver instances + ### 💥 Breaking Changes +- Individual solver output is now hidden in **SegmentedCalculation**. To return to the prior behaviour, set `show_individual_solves=True` in `do_modeling_and_solve()`. 
### ♻️ Changed diff --git a/examples/00_Minmal/minimal_example.py b/examples/00_Minmal/minimal_example.py index 6a0ed3831..92e6801b2 100644 --- a/examples/00_Minmal/minimal_example.py +++ b/examples/00_Minmal/minimal_example.py @@ -9,8 +9,7 @@ import flixopt as fx if __name__ == '__main__': - fx.CONFIG.Logging.console = True - fx.CONFIG.apply() + fx.CONFIG.silent() flow_system = fx.FlowSystem(pd.date_range('2020-01-01', periods=3, freq='h')) flow_system.add_elements( diff --git a/examples/01_Simple/simple_example.py b/examples/01_Simple/simple_example.py index 6b62d6712..fd5a3d9b7 100644 --- a/examples/01_Simple/simple_example.py +++ b/examples/01_Simple/simple_example.py @@ -8,9 +8,8 @@ import flixopt as fx if __name__ == '__main__': - # Enable console logging - fx.CONFIG.Logging.console = True - fx.CONFIG.apply() + fx.CONFIG.exploring() + # --- Create Time Series Data --- # Heat demand profile (e.g., kW) over time and corresponding power prices heat_demand_per_h = np.array([30, 0, 90, 110, 110, 20, 20, 20, 20]) @@ -101,7 +100,7 @@ flow_system.add_elements(costs, CO2, boiler, storage, chp, heat_sink, gas_source, power_sink) # Visualize the flow system for validation purposes - flow_system.plot_network(show=True) + flow_system.plot_network() # --- Define and Run Calculation --- # Create a calculation object to model the Flow System diff --git a/examples/02_Complex/complex_example.py b/examples/02_Complex/complex_example.py index 805cb08f6..b8ef76a03 100644 --- a/examples/02_Complex/complex_example.py +++ b/examples/02_Complex/complex_example.py @@ -9,9 +9,8 @@ import flixopt as fx if __name__ == '__main__': - # Enable console logging - fx.CONFIG.Logging.console = True - fx.CONFIG.apply() + fx.CONFIG.exploring() + # --- Experiment Options --- # Configure options for testing various parameters and behaviors check_penalty = False diff --git a/examples/02_Complex/complex_example_results.py b/examples/02_Complex/complex_example_results.py index 96d06dd04..96191c4d8 
100644 --- a/examples/02_Complex/complex_example_results.py +++ b/examples/02_Complex/complex_example_results.py @@ -5,9 +5,8 @@ import flixopt as fx if __name__ == '__main__': - # Enable console logging - fx.CONFIG.Logging.console = True - fx.CONFIG.apply() + fx.CONFIG.exploring() + # --- Load Results --- try: results = fx.results.CalculationResults.from_file('results', 'complex example') @@ -19,7 +18,7 @@ ) from e # --- Basic overview --- - results.plot_network(show=True) + results.plot_network() results['Fernwärme'].plot_node_balance() # --- Detailed Plots --- diff --git a/examples/03_Calculation_types/example_calculation_types.py b/examples/03_Calculation_types/example_calculation_types.py index c5df50034..e339c1c24 100644 --- a/examples/03_Calculation_types/example_calculation_types.py +++ b/examples/03_Calculation_types/example_calculation_types.py @@ -11,9 +11,8 @@ import flixopt as fx if __name__ == '__main__': - # Enable console logging - fx.CONFIG.Logging.console = True - fx.CONFIG.apply() + fx.CONFIG.exploring() + # Calculation Types full, segmented, aggregated = True, True, True @@ -165,7 +164,7 @@ a_kwk, a_speicher, ) - flow_system.plot_network(controls=False, show=True) + flow_system.plot_network() # Calculations calculations: list[fx.FullCalculation | fx.AggregatedCalculation | fx.SegmentedCalculation] = [] diff --git a/examples/04_Scenarios/scenario_example.py b/examples/04_Scenarios/scenario_example.py index d258d4142..bf4f24617 100644 --- a/examples/04_Scenarios/scenario_example.py +++ b/examples/04_Scenarios/scenario_example.py @@ -8,6 +8,8 @@ import flixopt as fx if __name__ == '__main__': + fx.CONFIG.exploring() + # Create datetime array starting from '2020-01-01' for one week timesteps = pd.date_range('2020-01-01', periods=24 * 7, freq='h') scenarios = pd.Index(['Base Case', 'High Demand']) @@ -186,7 +188,7 @@ flow_system.add_elements(costs, CO2, boiler, storage, chp, heat_sink, gas_source, power_sink) # Visualize the flow system for 
validation purposes - flow_system.plot_network(show=True) + flow_system.plot_network() # --- Define and Run Calculation --- # Create a calculation object to model the Flow System @@ -215,7 +217,6 @@ # Convert the results for the storage component to a dataframe and display df = calculation.results['Storage'].node_balance_with_charge_state() - print(df) # Save results to file for later usage calculation.results.to_file() diff --git a/examples/05_Two-stage-optimization/two_stage_optimization.py b/examples/05_Two-stage-optimization/two_stage_optimization.py index dde3ae069..9647e803c 100644 --- a/examples/05_Two-stage-optimization/two_stage_optimization.py +++ b/examples/05_Two-stage-optimization/two_stage_optimization.py @@ -19,6 +19,8 @@ logger = logging.getLogger('flixopt') if __name__ == '__main__': + fx.CONFIG.exploring() + # Data Import data_import = pd.read_csv( pathlib.Path(__file__).parent.parent / 'resources' / 'Zeitreihen2020.csv', index_col=0 diff --git a/flixopt/calculation.py b/flixopt/calculation.py index 5e919dbf5..feb077dcf 100644 --- a/flixopt/calculation.py +++ b/flixopt/calculation.py @@ -13,6 +13,7 @@ import logging import math import pathlib +import sys import timeit import warnings from collections import Counter @@ -20,6 +21,7 @@ import numpy as np import yaml +from tqdm import tqdm from . 
import io as fx_io from .aggregation import Aggregation, AggregationModel, AggregationParameters @@ -225,7 +227,7 @@ def fix_sizes(self, ds: xr.Dataset, decimal_rounding: int | None = 5) -> FullCal return self def solve( - self, solver: _Solver, log_file: pathlib.Path | None = None, log_main_results: bool = True + self, solver: _Solver, log_file: pathlib.Path | None = None, log_main_results: bool | None = None ) -> FullCalculation: t_start = timeit.default_timer() @@ -235,6 +237,8 @@ def solve( **solver.options, ) self.durations['solving'] = round(timeit.default_timer() - t_start, 2) + logger.info(f'Model solved with {solver.name} in {self.durations["solving"]:.2f} seconds.') + logger.info(f'Model status after solve: {self.model.status}') if self.model.status == 'warning': # Save the model and the flow_system to file in case of infeasibility @@ -248,7 +252,8 @@ def solve( ) # Log the formatted output - if log_main_results: + should_log = log_main_results if log_main_results is not None else CONFIG.Solving.log_main_results + if should_log: logger.info( f'{" Main Results ":#^80}\n' + yaml.dump( @@ -366,7 +371,7 @@ def _perform_aggregation(self): ) self.aggregation.cluster() - self.aggregation.plot(show=True, save=self.folder / 'aggregation.html') + self.aggregation.plot(show=CONFIG.Plotting.default_show, save=self.folder / 'aggregation.html') if self.aggregation_parameters.aggregate_data_and_fix_non_binary_vars: ds = self.flow_system.to_dataset() for name, series in self.aggregation.aggregated_data.items(): @@ -567,48 +572,111 @@ def _create_sub_calculations(self): f'({timesteps_of_segment[0]} -> {timesteps_of_segment[-1]}):' ) + def _solve_single_segment( + self, + i: int, + calculation: FullCalculation, + solver: _Solver, + log_file: pathlib.Path | None, + log_main_results: bool, + suppress_output: bool, + ) -> None: + """Solve a single segment calculation.""" + if i > 0 and self.nr_of_previous_values > 0: + self._transfer_start_values(i) + + 
calculation.do_modeling() + + # Warn about Investments, but only in first run + if i == 0: + invest_elements = [ + model.label_full + for component in calculation.flow_system.components.values() + for model in component.submodel.all_submodels + if isinstance(model, InvestmentModel) + ] + if invest_elements: + logger.critical( + f'Investments are not supported in Segmented Calculation! ' + f'Following InvestmentModels were found: {invest_elements}' + ) + + log_path = pathlib.Path(log_file) if log_file is not None else self.folder / f'{self.name}.log' + + if suppress_output: + with fx_io.suppress_output(): + calculation.solve(solver, log_file=log_path, log_main_results=log_main_results) + else: + calculation.solve(solver, log_file=log_path, log_main_results=log_main_results) + def do_modeling_and_solve( - self, solver: _Solver, log_file: pathlib.Path | None = None, log_main_results: bool = False + self, + solver: _Solver, + log_file: pathlib.Path | None = None, + log_main_results: bool = False, + show_individual_solves: bool = False, ) -> SegmentedCalculation: + """Model and solve all segments of the segmented calculation. + + This method creates sub-calculations for each time segment, then iteratively + models and solves each segment. It supports two output modes: a progress bar + for compact output, or detailed individual solve information. + + Args: + solver: The solver instance to use for optimization (e.g., Gurobi, HiGHS). + log_file: Optional path to the solver log file. If None, defaults to + folder/name.log. + log_main_results: Whether to log main results (objective, effects, etc.) + after each segment solve. Defaults to False. + show_individual_solves: If True, shows detailed output for each segment + solve with logger messages. If False (default), shows a compact progress + bar with suppressed solver output for cleaner display. + + Returns: + Self, for method chaining. 
+ + Note: + The method automatically transfers all start values between segments to ensure + continuity of storage states and flow rates across segment boundaries. + """ logger.info(f'{"":#^80}') logger.info(f'{" Segmented Solving ":#^80}') self._create_sub_calculations() - for i, calculation in enumerate(self.sub_calculations): - logger.info( - f'{self.segment_names[i]} [{i + 1:>2}/{len(self.segment_names):<2}] ' - f'({calculation.flow_system.timesteps[0]} -> {calculation.flow_system.timesteps[-1]}):' + if show_individual_solves: + # Path 1: Show individual solves with detailed output + for i, calculation in enumerate(self.sub_calculations): + logger.info( + f'Solving segment {i + 1}/{len(self.sub_calculations)}: ' + f'{calculation.flow_system.timesteps[0]} -> {calculation.flow_system.timesteps[-1]}' + ) + self._solve_single_segment(i, calculation, solver, log_file, log_main_results, suppress_output=False) + else: + # Path 2: Show only progress bar with suppressed output + progress_bar = tqdm( + enumerate(self.sub_calculations), + total=len(self.sub_calculations), + desc='Solving segments', + unit='segment', + file=sys.stdout, + disable=not CONFIG.Solving.log_to_console, ) - if i > 0 and self.nr_of_previous_values > 0: - self._transfer_start_values(i) - - calculation.do_modeling() - - # Warn about Investments, but only in fist run - if i == 0: - invest_elements = [ - model.label_full - for component in calculation.flow_system.components.values() - for model in component.submodel.all_submodels - if isinstance(model, InvestmentModel) - ] - if invest_elements: - logger.critical( - f'Investments are not supported in Segmented Calculation! 
' - f'Following InvestmentModels were found: {invest_elements}' + try: + for i, calculation in progress_bar: + progress_bar.set_description( + f'Solving ({calculation.flow_system.timesteps[0]} -> {calculation.flow_system.timesteps[-1]})' ) - - calculation.solve( - solver, - log_file=pathlib.Path(log_file) if log_file is not None else self.folder / f'{self.name}.log', - log_main_results=log_main_results, - ) + self._solve_single_segment(i, calculation, solver, log_file, log_main_results, suppress_output=True) + finally: + progress_bar.close() for calc in self.sub_calculations: for key, value in calc.durations.items(): self.durations[key] += value + logger.info(f'Model solved with {solver.name} in {self.durations["solving"]:.2f} seconds.') + self.results = SegmentedCalculationResults.from_calculation(self) return self diff --git a/flixopt/config.py b/flixopt/config.py index 670f86da2..a74740efb 100644 --- a/flixopt/config.py +++ b/flixopt/config.py @@ -63,6 +63,14 @@ 'default_qualitative_colorscale': 'plotly', } ), + 'solving': MappingProxyType( + { + 'mip_gap': 0.01, + 'time_limit_seconds': 300, + 'log_to_console': True, + 'log_main_results': True, + } + ), } ) @@ -75,6 +83,8 @@ class CONFIG: Attributes: Logging: Logging configuration. Modeling: Optimization modeling parameters. + Solving: Solver configuration and default parameters. + Plotting: Plotting configuration. config_name: Configuration name. Examples: @@ -91,6 +101,9 @@ class CONFIG: level: DEBUG console: true file: app.log + solving: + mip_gap: 0.001 + time_limit_seconds: 600 ``` """ @@ -194,6 +207,30 @@ class Modeling: epsilon: float = _DEFAULTS['modeling']['epsilon'] big_binary_bound: int = _DEFAULTS['modeling']['big_binary_bound'] + class Solving: + """Solver configuration and default parameters. + + Attributes: + mip_gap: Default MIP gap tolerance for solver convergence. + time_limit_seconds: Default time limit in seconds for solver runs. + log_to_console: Whether solver should output to console. 
+ log_main_results: Whether to log main results after solving. + + Examples: + ```python + # Set tighter convergence and longer timeout + CONFIG.Solving.mip_gap = 0.001 + CONFIG.Solving.time_limit_seconds = 600 + CONFIG.Solving.log_to_console = False + CONFIG.apply() + ``` + """ + + mip_gap: float = _DEFAULTS['solving']['mip_gap'] + time_limit_seconds: int = _DEFAULTS['solving']['time_limit_seconds'] + log_to_console: bool = _DEFAULTS['solving']['log_to_console'] + log_main_results: bool = _DEFAULTS['solving']['log_main_results'] + class Plotting: """Plotting configuration. @@ -246,6 +283,12 @@ def reset(cls): for key, value in _DEFAULTS['modeling'].items(): setattr(cls.Modeling, key, value) + for key, value in _DEFAULTS['solving'].items(): + setattr(cls.Solving, key, value) + + for key, value in _DEFAULTS['plotting'].items(): + setattr(cls.Plotting, key, value) + cls.config_name = _DEFAULTS['config_name'] cls.apply() @@ -329,6 +372,12 @@ def _apply_config_dict(cls, config_dict: dict): elif key == 'modeling' and isinstance(value, dict): for nested_key, nested_value in value.items(): setattr(cls.Modeling, nested_key, nested_value) + elif key == 'solving' and isinstance(value, dict): + for nested_key, nested_value in value.items(): + setattr(cls.Solving, nested_key, nested_value) + elif key == 'plotting' and isinstance(value, dict): + for nested_key, nested_value in value.items(): + setattr(cls.Plotting, nested_key, nested_value) elif hasattr(cls, key): setattr(cls, key, value) @@ -366,6 +415,12 @@ def to_dict(cls) -> dict: 'epsilon': cls.Modeling.epsilon, 'big_binary_bound': cls.Modeling.big_binary_bound, }, + 'solving': { + 'mip_gap': cls.Solving.mip_gap, + 'time_limit_seconds': cls.Solving.time_limit_seconds, + 'log_to_console': cls.Solving.log_to_console, + 'log_main_results': cls.Solving.log_main_results, + }, 'plotting': { 'default_show': cls.Plotting.default_show, 'default_engine': cls.Plotting.default_engine, @@ -376,6 +431,66 @@ def to_dict(cls) -> dict: }, 
} + @classmethod + def silent(cls) -> type[CONFIG]: + """Configure for silent operation. + + Disables console logging, solver output, and result logging + for clean production runs. Does not show plots. Automatically calls apply(). + """ + cls.Logging.console = False + cls.Plotting.default_show = False + cls.Logging.file = None + cls.Solving.log_to_console = False + cls.Solving.log_main_results = False + cls.apply() + return cls + + @classmethod + def debug(cls) -> type[CONFIG]: + """Configure for debug mode with verbose output. + + Enables console logging at DEBUG level and all solver output for + troubleshooting. Automatically calls apply(). + """ + cls.Logging.console = True + cls.Logging.level = 'DEBUG' + cls.Solving.log_to_console = True + cls.Solving.log_main_results = True + cls.apply() + return cls + + @classmethod + def exploring(cls) -> type[CONFIG]: + """Configure for exploring flixopt. + + Enables console logging at INFO level and all solver output. + Also enables browser plotting for plotly, showing plots by default. + """ + cls.Logging.console = True + cls.Logging.level = 'INFO' + cls.Solving.log_to_console = True + cls.Solving.log_main_results = True + cls.browser_plotting() + cls.apply() + return cls + + @classmethod + def browser_plotting(cls) -> type[CONFIG]: + """Configure for interactive usage with plotly to open plots in browser. + + Sets plotly.io.renderers.default = 'browser'. Useful for running examples + and viewing interactive plots. Also sets CONFIG.Plotting.default_show to True. + """ + cls.Plotting.default_show = True + cls.apply() + + import plotly.io as pio + + pio.renderers.default = 'browser' + + return cls + class MultilineFormatter(logging.Formatter): """Formatter that handles multi-line messages with consistent prefixes. 
diff --git a/flixopt/io.py b/flixopt/io.py index 7f832ed0e..c5f839ed9 100644 --- a/flixopt/io.py +++ b/flixopt/io.py @@ -3,8 +3,11 @@ import inspect import json import logging +import os import pathlib import re +import sys +from contextlib import contextmanager from dataclasses import dataclass from typing import TYPE_CHECKING, Any @@ -931,3 +934,59 @@ def build_metadata_info(parts: list[str], prefix: str = ' | ') -> str: return '' info = ' | '.join(parts) return prefix + info if prefix else info + + +@contextmanager +def suppress_output(): + """ + Suppress all console output including C-level output from solvers. + + WARNING: Not thread-safe. Modifies global file descriptors. + Use only with sequential execution or multiprocessing. + """ + # Save original file descriptors + old_stdout_fd = os.dup(1) + old_stderr_fd = os.dup(2) + devnull_fd = None + + try: + # Open devnull + devnull_fd = os.open(os.devnull, os.O_WRONLY) + + # Flush Python buffers before redirecting + sys.stdout.flush() + sys.stderr.flush() + + # Redirect file descriptors to devnull + os.dup2(devnull_fd, 1) + os.dup2(devnull_fd, 2) + + yield + + finally: + # Restore original file descriptors with nested try blocks + # to ensure all cleanup happens even if one step fails + try: + # Flush any buffered output in the redirected streams + sys.stdout.flush() + sys.stderr.flush() + except (OSError, ValueError): + pass # Stream might be closed or invalid + + try: + os.dup2(old_stdout_fd, 1) + except OSError: + pass # Failed to restore stdout, continue cleanup + + try: + os.dup2(old_stderr_fd, 2) + except OSError: + pass # Failed to restore stderr, continue cleanup + + # Close all file descriptors + for fd in [devnull_fd, old_stdout_fd, old_stderr_fd]: + if fd is not None: + try: + os.close(fd) + except OSError: + pass # FD already closed or invalid diff --git a/flixopt/results.py b/flixopt/results.py index 26eaf9d5d..c02e5b769 100644 --- a/flixopt/results.py +++ b/flixopt/results.py @@ -1029,14 +1029,14 @@ 
def plot_network( ] ) = True, path: pathlib.Path | None = None, - show: bool = False, + show: bool | None = None, ) -> pyvis.network.Network | None: """Plot interactive network visualization of the system. Args: controls: Enable/disable interactive controls. path: Save path for network HTML. - show: Whether to display the plot. + show: Whether to display the plot. If None, uses CONFIG.Plotting.default_show. """ if path is None: path = self.folder / f'{self.name}--network.html' diff --git a/flixopt/solvers.py b/flixopt/solvers.py index 410d69434..e5db61192 100644 --- a/flixopt/solvers.py +++ b/flixopt/solvers.py @@ -8,6 +8,8 @@ from dataclasses import dataclass, field from typing import Any, ClassVar +from flixopt.config import CONFIG + logger = logging.getLogger('flixopt') @@ -17,14 +19,16 @@ class _Solver: Abstract base class for solvers. Args: - mip_gap: Acceptable relative optimality gap in [0.0, 1.0]. - time_limit_seconds: Time limit in seconds. + mip_gap: Acceptable relative optimality gap in [0.0, 1.0]. Defaults to CONFIG.Solving.mip_gap. + time_limit_seconds: Time limit in seconds. Defaults to CONFIG.Solving.time_limit_seconds. + log_to_console: If False, no output to console. Defaults to CONFIG.Solving.log_to_console. extra_options: Additional solver options merged into `options`. """ name: ClassVar[str] - mip_gap: float - time_limit_seconds: int + mip_gap: float = field(default_factory=lambda: CONFIG.Solving.mip_gap) + time_limit_seconds: int = field(default_factory=lambda: CONFIG.Solving.time_limit_seconds) + log_to_console: bool = field(default_factory=lambda: CONFIG.Solving.log_to_console) extra_options: dict[str, Any] = field(default_factory=dict) @property @@ -45,6 +49,7 @@ class GurobiSolver(_Solver): Args: mip_gap: Acceptable relative optimality gap in [0.0, 1.0]; mapped to Gurobi `MIPGap`. time_limit_seconds: Time limit in seconds; mapped to Gurobi `TimeLimit`. + log_to_console: If False, no output to console. 
extra_options: Additional solver options merged into `options`. """ @@ -55,6 +60,7 @@ def _options(self) -> dict[str, Any]: return { 'MIPGap': self.mip_gap, 'TimeLimit': self.time_limit_seconds, + 'LogToConsole': 1 if self.log_to_console else 0, } @@ -65,6 +71,7 @@ class HighsSolver(_Solver): Attributes: mip_gap: Acceptable relative optimality gap in [0.0, 1.0]; mapped to HiGHS `mip_rel_gap`. time_limit_seconds: Time limit in seconds; mapped to HiGHS `time_limit`. + log_to_console: If False, no output to console. extra_options: Additional solver options merged into `options`. threads (int | None): Number of threads to use. If None, HiGHS chooses. """ @@ -78,4 +85,5 @@ def _options(self) -> dict[str, Any]: 'mip_rel_gap': self.mip_gap, 'time_limit': self.time_limit_seconds, 'threads': self.threads, + 'log_to_console': self.log_to_console, } diff --git a/pyproject.toml b/pyproject.toml index cedcc3350..2b77963ae 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -41,6 +41,7 @@ dependencies = [ # Utilities "pyyaml >= 6.0.0, < 7", "rich >= 13.0.0, < 15", + "tqdm >= 4.66.0, < 5", "tomli >= 2.0.1, < 3; python_version < '3.11'", # Only needed with python 3.10 or earlier # Default solver "highspy >= 1.5.3, < 2", diff --git a/tests/conftest.py b/tests/conftest.py index bd940b843..50c58e1ab 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -828,26 +828,3 @@ def cleanup_figures(): import matplotlib.pyplot as plt plt.close('all') - - -@pytest.fixture(scope='session', autouse=True) -def set_test_environment(): - """ - Configure plotting for test environment. 
- - This fixture runs once per test session to: - - Set matplotlib to use non-interactive 'Agg' backend - - Set plotly to use non-interactive 'json' renderer - - Prevent GUI windows from opening during tests - """ - import matplotlib - - matplotlib.use('Agg') # Use non-interactive backend - - import plotly.io as pio - - pio.renderers.default = 'json' # Use non-interactive renderer - - fx.CONFIG.Plotting.default_show = False - - yield diff --git a/tests/test_config.py b/tests/test_config.py index 60ed80555..a78330eb4 100644 --- a/tests/test_config.py +++ b/tests/test_config.py @@ -31,6 +31,10 @@ def test_config_defaults(self): assert CONFIG.Modeling.big == 10_000_000 assert CONFIG.Modeling.epsilon == 1e-5 assert CONFIG.Modeling.big_binary_bound == 100_000 + assert CONFIG.Solving.mip_gap == 0.01 + assert CONFIG.Solving.time_limit_seconds == 300 + assert CONFIG.Solving.log_to_console is True + assert CONFIG.Solving.log_main_results is True assert CONFIG.config_name == 'flixopt' def test_module_initialization(self): @@ -106,6 +110,11 @@ def test_config_to_dict(self): assert config_dict['logging']['rich'] is False assert 'modeling' in config_dict assert config_dict['modeling']['big'] == 10_000_000 + assert 'solving' in config_dict + assert config_dict['solving']['mip_gap'] == 0.01 + assert config_dict['solving']['time_limit_seconds'] == 300 + assert config_dict['solving']['log_to_console'] is True + assert config_dict['solving']['log_main_results'] is True def test_config_load_from_file(self, tmp_path): """Test loading configuration from YAML file.""" @@ -119,6 +128,10 @@ def test_config_load_from_file(self, tmp_path): modeling: big: 20000000 epsilon: 1e-6 +solving: + mip_gap: 0.001 + time_limit_seconds: 600 + log_main_results: false """ config_file.write_text(config_content) @@ -130,6 +143,9 @@ def test_config_load_from_file(self, tmp_path): assert CONFIG.Modeling.big == 20000000 # YAML may load epsilon as string, so convert for comparison assert 
float(CONFIG.Modeling.epsilon) == 1e-6 + assert CONFIG.Solving.mip_gap == 0.001 + assert CONFIG.Solving.time_limit_seconds == 600 + assert CONFIG.Solving.log_main_results is False def test_config_load_from_file_not_found(self): """Test that loading from non-existent file raises error.""" @@ -264,6 +280,10 @@ def test_custom_config_yaml_complete(self, tmp_path): big: 50000000 epsilon: 1e-4 big_binary_bound: 200000 +solving: + mip_gap: 0.005 + time_limit_seconds: 900 + log_main_results: false """ config_file.write_text(config_content) @@ -278,6 +298,9 @@ def test_custom_config_yaml_complete(self, tmp_path): assert CONFIG.Modeling.big == 50000000 assert float(CONFIG.Modeling.epsilon) == 1e-4 assert CONFIG.Modeling.big_binary_bound == 200000 + assert CONFIG.Solving.mip_gap == 0.005 + assert CONFIG.Solving.time_limit_seconds == 900 + assert CONFIG.Solving.log_main_results is False # Verify logging was applied logger = logging.getLogger('flixopt') @@ -426,6 +449,10 @@ def test_config_reset(self): CONFIG.Modeling.big = 99999999 CONFIG.Modeling.epsilon = 1e-8 CONFIG.Modeling.big_binary_bound = 500000 + CONFIG.Solving.mip_gap = 0.0001 + CONFIG.Solving.time_limit_seconds = 1800 + CONFIG.Solving.log_to_console = False + CONFIG.Solving.log_main_results = False CONFIG.config_name = 'test_config' # Reset should restore all defaults @@ -439,6 +466,10 @@ def test_config_reset(self): assert CONFIG.Modeling.big == 10_000_000 assert CONFIG.Modeling.epsilon == 1e-5 assert CONFIG.Modeling.big_binary_bound == 100_000 + assert CONFIG.Solving.mip_gap == 0.01 + assert CONFIG.Solving.time_limit_seconds == 300 + assert CONFIG.Solving.log_to_console is True + assert CONFIG.Solving.log_main_results is True assert CONFIG.config_name == 'flixopt' # Verify logging was also reset @@ -460,11 +491,17 @@ def test_reset_matches_class_defaults(self): CONFIG.Modeling.big = 999999 CONFIG.Modeling.epsilon = 1e-10 CONFIG.Modeling.big_binary_bound = 999999 + CONFIG.Solving.mip_gap = 0.0001 + 
class TestSolvingConfigAdditions:
    """Tests for the new CONFIG.Solving section.

    NOTE(review): in the original file these methods belong to an existing
    test class whose header lies outside this chunk; they are grouped under
    this reconstruction class so they stay pytest-collectable.  Every test
    that mutates the process-global CONFIG now restores it in a ``finally``
    block, so test outcomes no longer depend on execution order.
    """

    def test_solving_config_defaults(self):
        """CONFIG.Solving ships with the documented default values."""
        assert CONFIG.Solving.mip_gap == 0.01
        assert CONFIG.Solving.time_limit_seconds == 300
        assert CONFIG.Solving.log_to_console is True
        assert CONFIG.Solving.log_main_results is True

    def test_solving_config_modification(self):
        """CONFIG.Solving attributes are writable and survive apply()."""
        try:
            CONFIG.Solving.mip_gap = 0.005
            CONFIG.Solving.time_limit_seconds = 600
            CONFIG.Solving.log_main_results = False
            CONFIG.apply()

            assert CONFIG.Solving.mip_gap == 0.005
            assert CONFIG.Solving.time_limit_seconds == 600
            assert CONFIG.Solving.log_main_results is False
        finally:
            # Restore global state so later tests see pristine defaults.
            CONFIG.reset()

    def test_solving_config_integration_with_solvers(self):
        """Solvers read CONFIG.Solving defaults; explicit arguments win."""
        from flixopt import solvers

        try:
            # Fresh defaults -> a new solver mirrors them.
            CONFIG.reset()
            solver1 = solvers.HighsSolver()
            assert solver1.mip_gap == CONFIG.Solving.mip_gap
            assert solver1.time_limit_seconds == CONFIG.Solving.time_limit_seconds

            # Changed config -> a solver created afterwards picks up the change.
            CONFIG.Solving.mip_gap = 0.002
            CONFIG.Solving.time_limit_seconds = 900
            CONFIG.apply()

            solver2 = solvers.GurobiSolver()
            assert solver2.mip_gap == 0.002
            assert solver2.time_limit_seconds == 900

            # Explicit constructor values override the config defaults.
            solver3 = solvers.HighsSolver(mip_gap=0.1, time_limit_seconds=60)
            assert solver3.mip_gap == 0.1
            assert solver3.time_limit_seconds == 60
        finally:
            CONFIG.reset()

    def test_solving_config_yaml_loading(self, tmp_path):
        """The ``solving:`` section of a YAML config file is applied."""
        config_file = tmp_path / 'solving_config.yaml'
        config_file.write_text(
            'solving:\n'
            '  mip_gap: 0.0001\n'
            '  time_limit_seconds: 1200\n'
            '  log_main_results: false\n'
        )

        try:
            CONFIG.load_from_file(config_file)

            assert CONFIG.Solving.mip_gap == 0.0001
            assert CONFIG.Solving.time_limit_seconds == 1200
            assert CONFIG.Solving.log_main_results is False
        finally:
            CONFIG.reset()

    def test_solving_config_in_to_dict(self):
        """to_dict() serialises the Solving section under the 'solving' key."""
        try:
            CONFIG.Solving.mip_gap = 0.003
            CONFIG.Solving.time_limit_seconds = 450
            CONFIG.Solving.log_main_results = False

            config_dict = CONFIG.to_dict()

            assert 'solving' in config_dict
            assert config_dict['solving']['mip_gap'] == 0.003
            assert config_dict['solving']['time_limit_seconds'] == 450
            assert config_dict['solving']['log_main_results'] is False
        finally:
            CONFIG.reset()

    def test_solving_config_persistence(self):
        """Solving values are untouched by changes to other CONFIG sections."""
        try:
            # Custom solving values.
            CONFIG.Solving.mip_gap = 0.007
            CONFIG.Solving.time_limit_seconds = 750

            # Changing + applying the logging config must not disturb them.
            CONFIG.Logging.console = True
            CONFIG.apply()
            assert CONFIG.Solving.mip_gap == 0.007
            assert CONFIG.Solving.time_limit_seconds == 750

            # Neither must a modeling-config change.
            CONFIG.Modeling.big = 99999999
            CONFIG.apply()
            assert CONFIG.Solving.mip_gap == 0.007
            assert CONFIG.Solving.time_limit_seconds == 750
        finally:
            CONFIG.reset()


def test_suppress_output_file_descriptors(tmp_path):
    """suppress_output() redirects FD 1 to /dev/null for the context's duration."""
    import os

    from flixopt.io import suppress_output

    test_file = tmp_path / 'test_output.txt'

    with open(test_file, 'w') as f:
        original_stdout_fd = os.dup(1)  # keep a handle on the real stdout
        try:
            os.dup2(f.fileno(), 1)  # point FD 1 at our capture file
            os.write(1, b'before suppression\n')

            with suppress_output():
                # Inside the context, FD-1 writes must land in /dev/null.
                os.write(1, b'during suppression\n')

            # After the context, FD-1 writes reach our file again.
            os.write(1, b'after suppression\n')
        finally:
            os.dup2(original_stdout_fd, 1)
            os.close(original_stdout_fd)

    content = test_file.read_text()
    assert 'before suppression' in content
    assert 'during suppression' not in content  # must NOT have reached the file
    assert 'after suppression' in content


def test_suppress_output_python_level():
    """Python-level stdout keeps working after suppress_output() exits."""
    import io
    import sys

    from flixopt.io import suppress_output

    with suppress_output():
        pass  # enter and exit the context only

    # After the context, a plain Python-level redirect must behave normally.
    captured_output = io.StringIO()
    old_stdout = sys.stdout
    try:
        sys.stdout = captured_output
        print('test message')
    finally:
        sys.stdout = old_stdout

    assert 'test message' in captured_output.getvalue()


def test_suppress_output_exception_handling():
    """suppress_output() restores the streams even when the body raises."""
    import io
    import sys

    from flixopt.io import suppress_output

    try:
        original_stdout_fd = sys.stdout.fileno()
        original_stderr_fd = sys.stderr.fileno()
    except (io.UnsupportedOperation, ValueError):
        # pytest's default capture replaces sys.stdout/stderr with objects
        # that have no real file descriptor; the FD assertions below would
        # fail on the environment, not on suppress_output().
        pytest.skip('sys.stdout/stderr provide no real file descriptors here')

    try:
        with suppress_output():
            raise ValueError('Test exception')
    except ValueError:
        pass

    # Streams must be back on their original descriptors ...
    assert sys.stdout.fileno() == original_stdout_fd
    assert sys.stderr.fileno() == original_stderr_fd

    # ... and still usable.
    sys.stdout.write('test after exception\n')
    sys.stdout.flush()


def test_suppress_output_c_level():
    """Writes straight to FDs 1/2 (C-level output) are swallowed in the context."""
    import os
    import sys

    from flixopt.io import suppress_output

    with suppress_output():
        os.write(1, b'C-level stdout write\n')  # suppressed
        os.write(2, b'C-level stderr write\n')  # suppressed

    # Streams are usable again afterwards.
    sys.stdout.write('After C-level test\n')
    sys.stdout.flush()


def test_tqdm_cleanup_on_exception():
    """tqdm progress bars are closed via try/finally even when iteration raises.

    Mirrors the pattern used in SegmentedCalculation, where a try/finally
    block guarantees progress_bar.close() runs even if an exception occurs.
    """
    from tqdm import tqdm

    # disable=True avoids progress output during the test run.
    progress_bar = tqdm(enumerate(range(5)), total=5, desc='Test progress', disable=True)

    cleanup_called = False
    exception_raised = False

    try:
        try:
            for idx, _ in progress_bar:
                if idx == 2:
                    raise ValueError('Test exception')
        finally:
            progress_bar.close()  # must run regardless of the exception
            cleanup_called = True
    except ValueError:
        exception_raised = True

    assert exception_raised, 'Test exception should have been raised'
    assert cleanup_called, 'Cleanup should have been called even with exception'

    # close() must be idempotent: a second call may not raise.
    progress_bar.close()


if __name__ == '__main__':
    pytest.main(['-v', '--disable-warnings'])